/* Passes for transactional memory support.
   Copyright (C) 2008-2013 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tree.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "calls.h"
#include "function.h"
#include "rtl.h"
#include "emit-rtl.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
#include "tree-inline.h"
#include "diagnostic-core.h"
#include "demangle.h"
#include "output.h"
#include "trans-mem.h"
#include "params.h"
#include "target.h"
#include "langhooks.h"
#include "gimple-pretty-print.h"
#include "cfgloop.h"
#include "tree-ssa-address.h"
#include "predict.h"

#define A_RUNINSTRUMENTEDCODE   0x0001
#define A_RUNUNINSTRUMENTEDCODE 0x0002
#define A_SAVELIVEVARIABLES     0x0004
#define A_RESTORELIVEVARIABLES  0x0008
#define A_ABORTTRANSACTION      0x0010

#define AR_USERABORT            0x0001
#define AR_USERRETRY            0x0002
#define AR_TMCONFLICT           0x0004
#define AR_EXCEPTIONBLOCKABORT  0x0008
#define AR_OUTERABORT           0x0010

#define MODE_SERIALIRREVOCABLE  0x0000

/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

	__transaction {
	  local++;
	  if (++global == 10)
	    __tm_abort;
	}

   During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
   trivially replaced with a GIMPLE_TRANSACTION node.

   During pass_lower_tm, we examine the body of transactions looking
   for aborts.  Transactions that do not contain an abort may be
   merged into an outer transaction.  We also add a TRY-FINALLY node
   to arrange for the transaction to be committed on any exit.

   [??? Think about how this arrangement affects throw-with-commit
   and throw-with-abort operations.  In this case we want the TRY to
   handle gotos, but not to catch any exceptions because the transaction
   will already be closed.]

	GIMPLE_TRANSACTION [label=NULL] {
	  try {
	    local = local + 1;
	    t0 = global;
	    t1 = t0 + 1;
	    global = t1;
	    if (t1 == 10)
	      __builtin___tm_abort ();
	  } finally {
	    __builtin___tm_commit ();
	  }
	}

   During pass_lower_eh, we create EH regions for the transactions,
   intermixed with the regular EH stuff.  This gives us a nice persistent
   mapping (all the way through rtl) from transactional memory operation
   back to the transaction, which allows us to get the abnormal edges
   correct to model transaction aborts and restarts:

	GIMPLE_TRANSACTION [label=over]
	local = local + 1;
	t0 = global;
	t1 = t0 + 1;
	global = t1;
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:

   This is the end of all_lowering_passes, and so is what is present
   during the IPA passes, and through all of the optimization passes.

   During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
   functions and mark functions for cloning.

   At the end of gimple optimization, before exiting SSA form,
   pass_tm_edges replaces statements that perform transactional
   memory operations with the appropriate TM builtins, and swaps
   out function calls with their transactional clones.  At this
   point we introduce the abnormal transaction restart edges and
   complete lowering of the GIMPLE_TRANSACTION node.

	x = __builtin___tm_start (MAY_ABORT);
	eh_label:
	if (x & abort_transaction)
	  goto over;
	local = local + 1;
	t0 = __builtin___tm_load (global);
	t1 = t0 + 1;
	__builtin___tm_store (&global, t1);
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:
*/

static void *expand_regions (struct tm_region *,
			     void *(*callback)(struct tm_region *, void *),
			     void *, bool);

/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));
      break;

    default:
      if (TYPE_P (x))
	return NULL;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return NULL;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return NULL;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}

/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      break;

    default:
      if (TYPE_P (x))
	return false;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}

/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is, by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}

/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  if (flag_tm)
    {
      tree attrs = get_attrs_for (x);
      if (attrs)
	{
	  if (lookup_attribute ("transaction_safe", attrs))
	    return true;
	  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	    return true;
	}
    }
  return false;
}

/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple call)
{
  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }
  else
    fn = TREE_TYPE (fn);

  return is_tm_pure (fn);
}

/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
	return true;
      if (lookup_attribute ("transaction_safe", attrs))
	return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	return true;
    }
  return false;
}

/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
}
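
/* For reference, a sketch of the user-level attribute syntax the
   predicates above test (per the GCC TM documentation; the function
   names below are made up for illustration):

     int  f1 (int)  __attribute__((transaction_pure));
     void f2 (void) __attribute__((transaction_safe));
     void f3 (void) __attribute__((transaction_callable));
     void f4 (void) __attribute__((transaction_unsafe));
     void f5 (void) __attribute__((transaction_may_cancel_outer));  */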

/* Return true for built in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
	return true;
      default:
	break;
      }

  return false;
}

/* Return true if STMT is a built in function call that "ends" a
   transaction.  */

bool
is_tm_ending (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl != NULL_TREE
	  && is_tm_ending_fndecl (fndecl));
}

/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
	      || fcode == BUILT_IN_TM_LOAD_2
	      || fcode == BUILT_IN_TM_LOAD_4
	      || fcode == BUILT_IN_TM_LOAD_8
	      || fcode == BUILT_IN_TM_LOAD_FLOAT
	      || fcode == BUILT_IN_TM_LOAD_DOUBLE
	      || fcode == BUILT_IN_TM_LOAD_LDOUBLE
	      || fcode == BUILT_IN_TM_LOAD_M64
	      || fcode == BUILT_IN_TM_LOAD_M128
	      || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}

/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
	      || fcode == BUILT_IN_TM_STORE_2
	      || fcode == BUILT_IN_TM_STORE_4
	      || fcode == BUILT_IN_TM_STORE_8
	      || fcode == BUILT_IN_TM_STORE_FLOAT
	      || fcode == BUILT_IN_TM_STORE_DOUBLE
	      || fcode == BUILT_IN_TM_STORE_LDOUBLE
	      || fcode == BUILT_IN_TM_STORE_M64
	      || fcode == BUILT_IN_TM_STORE_M128
	      || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}

/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}

/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */

tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
			      build_int_cst (integer_type_node,
					     AR_USERABORT
					     | (is_outer ? AR_OUTERABORT : 0)));
}
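
/* For illustration (using the builtin spelling from the overview
   comment at the top of this file): a plain user abort builds
   roughly

     __builtin___tm_abort (AR_USERABORT);

   while an abort of an outer transaction builds

     __builtin___tm_abort (AR_USERABORT | AR_OUTERABORT);

   using the abort-reason flags defined above.  */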

/* Common gating function for several of the TM passes.  */

static bool
gate_tm (void)
{
  return flag_tm;
}

/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */

static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
     htab_t tm_wrap_map;

void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = htab_create_ggc (32, tree_map_hash, tree_map_eq, 0);

  h = ggc_alloc_tree_map ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = (struct tree_map **)
    htab_find_slot_with_hash (tm_wrap_map, h, h->hash, INSERT);
  *slot = h;
}

/* Return a TM-aware replacement function for DECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = (struct tree_map *) htab_find_with_hash (tm_wrap_map, &in, in.hash);
      if (h)
	return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, we only have these few defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here:  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
	return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
	return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
	return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
	return NULL;
      }

  return NULL;
}
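
/* For illustration, a sketch of how user code populates tm_wrap_map
   via the transaction_wrap attribute (function names made up):

     extern void foo (void);
     extern void tmfoo (void) __attribute__((transaction_wrap (foo)));

   The front end then calls record_tm_replacement (foo, tmfoo), and
   calls to foo inside transactions are redirected to tmfoo.  */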

/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */
void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They better know what they're
     doing...  */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}

/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER		1
#define DIAG_TM_SAFE		2
#define DIAG_TM_RELAXED		4

struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple stmt;
};

/* Return true if T is a volatile variable of some kind.  */

static bool
volatile_var_p (tree t)
{
  return (SSA_VAR_P (t)
	  && TREE_THIS_VOLATILE (TREE_TYPE (t)));
}

/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  if (volatile_var_p (*tp)
      && d->block_flags & DIAG_TM_SAFE
      && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      error_at (gimple_location (d->stmt),
		"invalid volatile use of %qD inside transaction",
		*tp);
    }

  return NULL_TREE;
}
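
/* For example, the callback above rejects a volatile access inside an
   atomic transaction.  A sketch, using the __transaction_atomic
   syntax:

     volatile int v;
     __transaction_atomic { v = 1; }

   yields: error: invalid volatile use of 'v' inside transaction.  */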

static inline bool
is_tm_safe_or_pure (const_tree x)
{
  return is_tm_safe (x) || is_tm_pure (x);
}

static tree
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  /* Save stmt for use in leaf analysis.  */
  d->stmt = stmt;

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fn = gimple_call_fn (stmt);

	if ((d->summary_flags & DIAG_TM_OUTER) == 0
	    && is_tm_may_cancel_outer (fn))
	  error_at (gimple_location (stmt),
		    "%<transaction_may_cancel_outer%> function call not within"
		    " outer transaction or %<transaction_may_cancel_outer%>");

	if (d->summary_flags & DIAG_TM_SAFE)
	  {
	    bool is_safe, direct_call_p;
	    tree replacement;

	    if (TREE_CODE (fn) == ADDR_EXPR
		&& TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
	      {
		direct_call_p = true;
		replacement = TREE_OPERAND (fn, 0);
		replacement = find_tm_replacement_function (replacement);
		if (replacement)
		  fn = replacement;
	      }
	    else
	      {
		direct_call_p = false;
		replacement = NULL_TREE;
	      }

	    if (is_tm_safe_or_pure (fn))
	      is_safe = true;
	    else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
	      {
		/* A function explicitly marked transaction_callable as
		   opposed to transaction_safe is being defined to be
		   unsafe as part of its ABI, regardless of its contents.  */
		is_safe = false;
	      }
	    else if (direct_call_p)
	      {
		if (flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
		  is_safe = true;
		else if (replacement)
		  {
		    /* ??? At present we've been considering replacements
		       merely transaction_callable, and therefore might
		       enter irrevocable.  The tm_wrap attribute has not
		       yet made it into the new language spec.  */
		    is_safe = false;
		  }
		else
		  {
		    /* ??? Diagnostics for unmarked direct calls moved into
		       the IPA pass.  Section 3.2 of the spec details how
		       functions not marked should be considered "implicitly
		       safe" based on having examined the function body.  */
		    is_safe = true;
		  }
	      }
	    else
	      {
		/* An unmarked indirect call.  Consider it unsafe even
		   though optimization may yet figure out how to inline.  */
		is_safe = false;
	      }

	    if (!is_safe)
	      {
		if (TREE_CODE (fn) == ADDR_EXPR)
		  fn = TREE_OPERAND (fn, 0);
		if (d->block_flags & DIAG_TM_SAFE)
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"atomic transaction", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "atomic transaction", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "atomic transaction");
		      }
		  }
		else
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"%<transaction_safe%> function", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "%<transaction_safe%> function", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "%<transaction_safe%> function");
		      }
		  }
	      }
	  }
      }
      break;

    case GIMPLE_ASM:
      /* ??? We ought to come up with a way to add attributes to
	 asm statements, and then add "transaction_safe" to it.
	 Either that or get the language spec to resurrect __tm_waiver.  */
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in atomic transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in %<transaction_safe%> function");
      break;

    case GIMPLE_TRANSACTION:
      {
	unsigned char inner_flags = DIAG_TM_SAFE;

	if (gimple_transaction_subcode (stmt) & GTMA_IS_RELAXED)
	  {
	    if (d->block_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in atomic transaction");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in %<transaction_safe%> function");
	    inner_flags = DIAG_TM_RELAXED;
	  }
	else if (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)
	  {
	    if (d->block_flags)
	      error_at (gimple_location (stmt),
			"outer transaction in transaction");
	    else if (d->func_flags & DIAG_TM_OUTER)
	      error_at (gimple_location (stmt),
			"outer transaction in "
			"%<transaction_may_cancel_outer%> function");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"outer transaction in %<transaction_safe%> function");
	    inner_flags |= DIAG_TM_OUTER;
	  }

	*handled_ops_p = true;
	if (gimple_transaction_body (stmt))
	  {
	    struct walk_stmt_info wi_inner;
	    struct diagnose_tm d_inner;

	    memset (&d_inner, 0, sizeof (d_inner));
	    d_inner.func_flags = d->func_flags;
	    d_inner.block_flags = d->block_flags | inner_flags;
	    d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;

	    memset (&wi_inner, 0, sizeof (wi_inner));
	    wi_inner.info = &d_inner;

	    walk_gimple_seq (gimple_transaction_body (stmt),
			     diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
	  }
      }
      break;

    default:
      break;
    }

  return NULL_TREE;
}
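
/* For example (a sketch): inside an atomic transaction an unmarked
   indirect call and an asm are both rejected by the walk above:

     void (*fp) (void);
     __transaction_atomic { fp (); }		// unsafe indirect call
     __transaction_atomic { __asm__ (""); }	// asm not allowed

   whereas unmarked *direct* calls are not diagnosed here; per the
   comment above, those diagnostics live in the IPA pass.  */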

static unsigned int
diagnose_tm_blocks (void)
{
  struct walk_stmt_info wi;
  struct diagnose_tm d;

  memset (&d, 0, sizeof (d));
  if (is_tm_may_cancel_outer (current_function_decl))
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
  else if (is_tm_safe (current_function_decl))
    d.func_flags = DIAG_TM_SAFE;
  d.summary_flags = d.func_flags;

  memset (&wi, 0, sizeof (wi));
  wi.info = &d;

  walk_gimple_seq (gimple_body (current_function_decl),
		   diagnose_tm_1, diagnose_tm_1_op, &wi);

  return 0;
}

namespace {

const pass_data pass_data_diagnose_tm_blocks =
{
  GIMPLE_PASS, /* type */
  "*diagnose_tm_blocks", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_diagnose_tm_blocks : public gimple_opt_pass
{
public:
  pass_diagnose_tm_blocks (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_tm (); }
  unsigned int execute () { return diagnose_tm_blocks (); }

}; // class pass_diagnose_tm_blocks

} // anon namespace

gimple_opt_pass *
make_pass_diagnose_tm_blocks (gcc::context *ctxt)
{
  return new pass_diagnose_tm_blocks (ctxt);
}

/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

       struct large { int x[1000]; };
       struct large lala = { 0 };
       __transaction {
	 lala.x[i] = 123;
	 ...
       }

   We can either save/restore:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       if (trxn & a_saveLiveVariables)
	 tmp_lala1 = lala.x[i];
       else if (a & a_restoreLiveVariables)
	 lala.x[i] = tmp_lala1;

   or use the logging functions:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       _ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree to shadow all of the writes to a given
   location (thus reducing the total number of logging calls), but not
   so high as to be called on a path that does not perform a
   write.  */

/* One individual log entry.  We may have multiple statements for the
   same location if neither dominates the other (on different
   execution paths).  */
typedef struct tm_log_entry
{
  /* Address to save.  */
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  gimple_vec stmts;
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
  tree save_var;
} *tm_log_entry_t;

/* Log entry hashtable helpers.  */

struct log_entry_hasher
{
  typedef tm_log_entry value_type;
  typedef tm_log_entry compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};

/* Htab support.  Return hash value for a `tm_log_entry'.  */
inline hashval_t
log_entry_hasher::hash (const value_type *log)
{
  return iterative_hash_expr (log->addr, 0);
}

/* Htab support.  Return true if two log entries are the same.  */
inline bool
log_entry_hasher::equal (const value_type *log1, const compare_type *log2)
{
  /* FIXME:

     rth: I suggest that we get rid of the component refs etc.
     I.e. resolve the reference to base + offset.

     We may need to actually finish a merge with mainline for this,
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     often than not.  But in the meantime your tm_log_entry could save
     the results of get_inner_reference.

     See: g++.dg/tm/pr46653.C
  */

  /* Special case plain equality because operand_equal_p() below will
     return FALSE if the addresses are equal but they have
     side-effects (e.g. a volatile address).  */
  if (log1->addr == log2->addr)
    return true;

  return operand_equal_p (log1->addr, log2->addr, 0);
}

/* Htab support.  Free one tm_log_entry.  */
inline void
log_entry_hasher::remove (value_type *lp)
{
  lp->stmts.release ();
  free (lp);
}

/* The actual log.  */
static hash_table <log_entry_hasher> tm_log;

/* Addresses to log with a save/restore sequence.  These should be in
   dominator order.  */
static vec<tree> tm_log_save_addresses;

enum thread_memory_type
  {
    mem_non_local = 0,
    mem_thread_local,
    mem_transaction_local,
    mem_max
  };

typedef struct tm_new_mem_map
{
  /* SSA_NAME being dereferenced.  */
  tree val;
  enum thread_memory_type local_new_memory;
} tm_new_mem_map_t;

/* Hashtable helpers.  */

struct tm_mem_map_hasher : typed_free_remove <tm_new_mem_map_t>
{
  typedef tm_new_mem_map_t value_type;
  typedef tm_new_mem_map_t compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
tm_mem_map_hasher::hash (const value_type *v)
{
  return (intptr_t)v->val >> 4;
}

inline bool
tm_mem_map_hasher::equal (const value_type *v, const compare_type *c)
{
  return v->val == c->val;
}

/* Map for an SSA_NAME originally pointing to a non-aliased new piece
   of memory (malloc, alloca, etc).  */
static hash_table <tm_mem_map_hasher> tm_new_mem_hash;

/* Initialize logging data structures.  */
static void
tm_log_init (void)
{
  tm_log.create (10);
  tm_new_mem_hash.create (5);
  tm_log_save_addresses.create (5);
}

/* Free logging data structures.  */
static void
tm_log_delete (void)
{
  tm_log.dispose ();
  tm_new_mem_hash.dispose ();
  tm_log_save_addresses.release ();
}

/* Return true if MEM is a transaction invariant memory for the TM
   region starting at REGION_ENTRY_BLOCK.  */
static bool
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
{
  if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
      && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
    {
      basic_block def_bb;

      def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
      return def_bb != region_entry_block
	&& dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
    }

  mem = strip_invariant_refs (mem);
  return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
}

/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry_block for the transaction.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.

   If known, ENTRY_BLOCK is the entry block for the region, otherwise
   NULL.  */
static void
tm_log_add (basic_block entry_block, tree addr, gimple stmt)
{
  tm_log_entry **slot;
  struct tm_log_entry l, *lp;

  l.addr = addr;
  slot = tm_log.find_slot (&l, INSERT);
  if (!*slot)
    {
      tree type = TREE_TYPE (addr);

      lp = XNEW (struct tm_log_entry);
      lp->addr = addr;
      *slot = lp;

      /* Small invariant addresses can be handled as save/restores.  */
      if (entry_block
	  && transaction_invariant_address_p (lp->addr, entry_block)
	  && TYPE_SIZE_UNIT (type) != NULL
	  && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
	  && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
	      < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
	  /* We must be able to copy this type normally.  I.e., no
	     special constructors and the like.  */
	  && !TREE_ADDRESSABLE (type))
	{
	  lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
	  lp->stmts.create (0);
	  lp->entry_block = entry_block;
	  /* Save addresses separately in dominator order so we don't
	     get confused by overlapping addresses in the save/restore
	     sequence.  */
	  tm_log_save_addresses.safe_push (lp->addr);
	}
      else
	{
	  /* Use the logging functions.  */
	  lp->stmts.create (5);
	  lp->stmts.quick_push (stmt);
	  lp->save_var = NULL;
	}
    }
  else
    {
      size_t i;
      gimple oldstmt;

      lp = *slot;

      /* If we're generating a save/restore sequence, we don't care
	 about statements.  */
      if (lp->save_var)
	return;

      for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
	{
	  if (stmt == oldstmt)
	    return;
	  /* We already have a store to the same address, higher up the
	     dominator tree.  Nothing to do.  */
	  if (dominated_by_p (CDI_DOMINATORS,
			      gimple_bb (stmt), gimple_bb (oldstmt)))
	    return;
	  /* We should be processing blocks in dominator tree order.  */
	  gcc_assert (!dominated_by_p (CDI_DOMINATORS,
				       gimple_bb (oldstmt), gimple_bb (stmt)));
	}
      /* Store is on a different code path.  */
      lp->stmts.safe_push (stmt);
    }
}

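/* For example (a sketch): in

     __transaction_atomic {
       lala.x[i] = 1;	// store A: logged
       if (cond)
	 lala.x[i] = 2;	// dominated by A: nothing to do
     }

   only store A is kept, while two stores to the same address on
   mutually exclusive paths (neither dominating the other) would both
   be pushed onto the entry's statement vector.  */
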
/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
   result, insert the new statements before GSI.  */

static tree
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
{
  if (TREE_CODE (x) == TARGET_MEM_REF)
    x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
  else
    x = build_fold_addr_expr (x);
  return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
}

/* Instrument one address with the logging functions.
   ADDR is the address to save.
   STMT is the statement before which to place it.  */
static void
tm_log_emit_stmt (tree addr, gimple stmt)
{
  tree type = TREE_TYPE (addr);
  tree size = TYPE_SIZE_UNIT (type);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple log;
  enum built_in_function code = BUILT_IN_TM_LOG;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOG_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOG_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOG_LDOUBLE;
  else if (tree_fits_uhwi_p (size))
    {
      unsigned int n = tree_to_uhwi (size);
      switch (n)
	{
	case 1:
	  code = BUILT_IN_TM_LOG_1;
	  break;
	case 2:
	  code = BUILT_IN_TM_LOG_2;
	  break;
	case 4:
	  code = BUILT_IN_TM_LOG_4;
	  break;
	case 8:
	  code = BUILT_IN_TM_LOG_8;
	  break;
	default:
	  code = BUILT_IN_TM_LOG;
	  if (TREE_CODE (type) == VECTOR_TYPE)
	    {
	      if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
		code = BUILT_IN_TM_LOG_M64;
	      else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
		code = BUILT_IN_TM_LOG_M128;
	      else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
		code = BUILT_IN_TM_LOG_M256;
	    }
	  break;
	}
    }

  addr = gimplify_addr (&gsi, addr);
  if (code == BUILT_IN_TM_LOG)
    log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
  else
    log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
  gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}
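
/* E.g. a 4-byte scalar selects BUILT_IN_TM_LOG_4, which the overview
   comment earlier shows as a call like _ITM_LU4 (&lala.x[i]); a type
   with no sized variant falls back to the generic BUILT_IN_TM_LOG,
   which also passes the size.  */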

/* Go through the log and instrument addresses that must be instrumented
   with the logging functions.  Leave the save/restore addresses for
   later.  */
static void
tm_log_emit (void)
{
  hash_table <log_entry_hasher>::iterator hi;
  struct tm_log_entry *lp;

  FOR_EACH_HASH_TABLE_ELEMENT (tm_log, lp, tm_log_entry_t, hi)
    {
      size_t i;
      gimple stmt;

      if (dump_file)
	{
	  fprintf (dump_file, "TM thread private mem logging: ");
	  print_generic_expr (dump_file, lp->addr, 0);
	  fprintf (dump_file, "\n");
	}

      if (lp->save_var)
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING to variable\n");
	  continue;
	}
      else
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING with logging functions\n");
	  for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
	    tm_log_emit_stmt (lp->addr, stmt);
	}
    }
}

/* Emit the save sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_saves (basic_block entry_block, basic_block bb)
{
  size_t i;
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple stmt;
  struct tm_log_entry l, *lp;

  for (i = 0; i < tm_log_save_addresses.length (); ++i)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log.find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));

      /* Make sure we can create an SSA_NAME for this type.  For
	 instance, aggregates aren't allowed, in which case the system
	 will create a VOP for us and everything will just work.  */
      if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
	{
	  lp->save_var = make_ssa_name (lp->save_var, stmt);
	  gimple_assign_set_lhs (stmt, lp->save_var);
	}

      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
}

/* Emit the restore sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_restores (basic_block entry_block, basic_block bb)
{
  int i;
  struct tm_log_entry l, *lp;
  gimple_stmt_iterator gsi;
  gimple stmt;

  for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log.find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      /* Restores are in LIFO order from the saves in case we have
	 overlaps.  */
      gsi = gsi_start_bb (bb);

      stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
}
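
/* E.g. if the saves were emitted in dominator order as

     tm_save_1 = a;
     tm_save_2 = b;

   the restores run in the opposite (LIFO) order,

     b = tm_save_2;
     a = tm_save_1;

   so overlapping addresses are undone correctly (a sketch; the
   tm_save_* names stand for the temporaries created in tm_log_add).  */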

static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
			       struct walk_stmt_info *);
static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
				  struct walk_stmt_info *);

/* Evaluate an address X being dereferenced and determine if it
   originally points to a non-aliased new chunk of memory (malloc,
   alloca, etc).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
{
  gimple stmt = NULL;
  enum tree_code code;
  tm_new_mem_map_t **slot;
  tm_new_mem_map_t elt, *elt_p;
  tree val = x;
  enum thread_memory_type retval = mem_transaction_local;

  if (!entry_block
      || TREE_CODE (x) != SSA_NAME
      /* Possible uninitialized use, or a function argument.  In
	 either case, we don't care.  */
      || SSA_NAME_IS_DEFAULT_DEF (x))
    return mem_non_local;

  /* Look in cache first.  */
  elt.val = x;
  slot = tm_new_mem_hash.find_slot (&elt, INSERT);
  elt_p = *slot;
  if (elt_p)
    return elt_p->local_new_memory;

  /* Optimistically assume the memory is transaction local during
     processing.  This catches recursion into this variable.  */
  *slot = elt_p = XNEW (tm_new_mem_map_t);
  elt_p->val = val;
  elt_p->local_new_memory = mem_transaction_local;

  /* Search DEF chain to find the original definition of this address.  */
  do
    {
      if (ptr_deref_may_alias_global_p (x))
	{
	  /* Address escapes.  This is not thread-private.  */
	  retval = mem_non_local;
	  goto new_memory_ret;
	}

      stmt = SSA_NAME_DEF_STMT (x);

      /* If the malloc call is outside the transaction, this is
	 thread-local.  */
      if (retval != mem_thread_local
	  && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
	retval = mem_thread_local;

      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  /* x = foo ==> foo */
	  if (code == SSA_NAME)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = foo + n ==> foo */
	  else if (code == POINTER_PLUS_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = (cast*) foo ==> foo */
	  else if (code == VIEW_CONVERT_EXPR || code == NOP_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = c ? op1 : op2 ==> op1 or op2, just like a PHI */
	  else if (code == COND_EXPR)
	    {
	      tree op1 = gimple_assign_rhs2 (stmt);
	      tree op2 = gimple_assign_rhs3 (stmt);
	      enum thread_memory_type mem;
	      retval = thread_private_new_memory (entry_block, op1);
	      if (retval == mem_non_local)
		goto new_memory_ret;
	      mem = thread_private_new_memory (entry_block, op2);
	      retval = MIN (retval, mem);
	      goto new_memory_ret;
	    }
	  else
	    {
	      retval = mem_non_local;
	      goto new_memory_ret;
	    }
	}
      else
	{
	  if (gimple_code (stmt) == GIMPLE_PHI)
	    {
	      unsigned int i;
	      enum thread_memory_type mem;
	      tree phi_result = gimple_phi_result (stmt);

	      /* If any of the ancestors are non-local, we are sure to
		 be non-local.  Otherwise we can avoid doing anything
		 and inherit what has already been generated.  */
	      retval = mem_max;
	      for (i = 0; i < gimple_phi_num_args (stmt); ++i)
		{
		  tree op = PHI_ARG_DEF (stmt, i);

		  /* Exclude self-assignment.  */
		  if (phi_result == op)
		    continue;

		  mem = thread_private_new_memory (entry_block, op);
		  if (mem == mem_non_local)
		    {
		      retval = mem;
		      goto new_memory_ret;
		    }
		  retval = MIN (retval, mem);
		}
	      goto new_memory_ret;
	    }
	  break;
	}
    }
  while (TREE_CODE (x) == SSA_NAME);

  if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
    /* Thread-local or transaction-local.  */
    ;
  else
    retval = mem_non_local;

 new_memory_ret:
  elt_p->local_new_memory = retval;
  return retval;
}
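
/* For example (a sketch): in

     __transaction_atomic {
       p = malloc (n);
       *p = 1;
     }

   the dereference of P is transaction-local (a restart re-executes
   the malloc), while a malloc performed before the transaction yields
   thread-local memory, and anything that may alias globally is
   mem_non_local.  */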

/* Determine whether X has to be instrumented using a read
   or write barrier.

   ENTRY_BLOCK is the entry block for the region where STMT resides.
   NULL if unknown.

   STMT is the statement in which X occurs.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
static bool
requires_barrier (basic_block entry_block, tree x, gimple stmt)
{
  tree orig = x;
  while (handled_component_p (x))
    x = TREE_OPERAND (x, 0);

  switch (TREE_CODE (x))
    {
    case INDIRECT_REF:
    case MEM_REF:
      {
	enum thread_memory_type ret;

	ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
	if (ret == mem_non_local)
	  return true;
	if (stmt && ret == mem_thread_local)
	  /* ?? Should we pass `orig', or the INDIRECT_REF X.  ?? */
	  tm_log_add (entry_block, orig, stmt);

	/* Transaction-locals require nothing at all.  For malloc, a
	   transaction restart frees the memory and we reallocate.
	   For alloca, the stack pointer gets reset by the retry and
	   we reallocate.  */
	return false;
      }

    case TARGET_MEM_REF:
      if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
	return true;
      x = TREE_OPERAND (TMR_BASE (x), 0);
      if (TREE_CODE (x) == PARM_DECL)
	return false;
      gcc_assert (TREE_CODE (x) == VAR_DECL);
      /* FALLTHRU */

    case PARM_DECL:
    case RESULT_DECL:
    case VAR_DECL:
      if (DECL_BY_REFERENCE (x))
	{
	  /* ??? This value is a pointer, but aggregate_value_p has been
	     jigged to return true which confuses needs_to_live_in_memory.
	     This ought to be cleaned up generically.

	     FIXME: Verify this still happens after the next mainline
	     merge.  Testcase is g++.dg/tm/pr47554.C.  */
	  return false;
	}

      if (is_global_var (x))
	return !TREE_READONLY (x);
      if (/* FIXME: This condition should actually go below in the
	     tm_log_add() call, however is_call_clobbered() depends on
	     aliasing info which is not available during
	     gimplification.  Since requires_barrier() gets called
	     during lower_sequence_tm/gimplification, leave the call
	     to needs_to_live_in_memory until we eliminate
	     lower_sequence_tm altogether.  */
	  needs_to_live_in_memory (x))
	return true;
      else
	{
	  /* For local memory that doesn't escape (aka thread private
	     memory), we can either save the value at the beginning of
	     the transaction and restore on restart, or call a tm
	     function to dynamically save and restore on restart
	     (ITM_L*).  */
	  if (stmt)
	    tm_log_add (entry_block, orig, stmt);
	  return false;
	}

    default:
      return false;
    }
}
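
/* For example (a sketch): in

     int g;
     void f (void)
     {
       int l = 0;
       __transaction_atomic {
	 g = 1;		// requires_barrier: instrumented TM store
	 l = 1;		// no barrier; handled by tm_log save/restore
       }
     }

   the global store gets a write barrier, while the non-escaping local
   is only logged for restart.  */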

/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
   a transaction region.  */

static void
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
    *state |= GTMA_HAVE_LOAD;
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
    *state |= GTMA_HAVE_STORE;
}

/* Mark a GIMPLE_CALL as appropriate for being inside a transaction.  */

static void
examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree fn;

  if (is_tm_pure_call (stmt))
    return;

  /* Check if this call is a transaction abort.  */
  fn = gimple_call_fndecl (stmt);
  if (is_tm_abort (fn))
    *state |= GTMA_HAVE_ABORT;

  /* Note that something may happen.  */
  *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
}

1603 | /* Lower a GIMPLE_TRANSACTION statement. */ | |
1604 | ||
1605 | static void | |
1606 | lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi) | |
1607 | { | |
1608 | gimple g, stmt = gsi_stmt (*gsi); | |
1609 | unsigned int *outer_state = (unsigned int *) wi->info; | |
1610 | unsigned int this_state = 0; | |
1611 | struct walk_stmt_info this_wi; | |
1612 | ||
1613 | /* First, lower the body. The scanning that we do inside gives | |
1614 | us some idea of what we're dealing with. */ | |
1615 | memset (&this_wi, 0, sizeof (this_wi)); | |
1616 | this_wi.info = (void *) &this_state; | |
e3a19533 | 1617 | walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt), |
1618 | lower_sequence_tm, NULL, &this_wi); | |
4c0315d0 | 1619 | |
1620 | /* If there was absolutely nothing transaction-related inside the | |
1621 | transaction, we may elide it. Likewise if this is a nested | |
1622 | transaction and does not contain an abort. */ | |
1623 | if (this_state == 0 | |
1624 | || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL)) | |
1625 | { | |
1626 | if (outer_state) | |
1627 | *outer_state |= this_state; | |
1628 | ||
1629 | gsi_insert_seq_before (gsi, gimple_transaction_body (stmt), | |
1630 | GSI_SAME_STMT); | |
1631 | gimple_transaction_set_body (stmt, NULL); | |
1632 | ||
1633 | gsi_remove (gsi, true); | |
1634 | wi->removed_stmt = true; | |
1635 | return; | |
1636 | } | |
1637 | ||
1638 | /* Wrap the body of the transaction in a try-finally node so that | |
1639 | the commit call is always executed. */ | |
1640 | g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0); | |
1641 | if (flag_exceptions) | |
1642 | { | |
1643 | tree ptr; | |
1644 | gimple_seq n_seq, e_seq; | |
1645 | ||
1646 | n_seq = gimple_seq_alloc_with_stmt (g); | |
e3a19533 | 1647 | e_seq = NULL; |
4c0315d0 | 1648 | |
1649 | g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER), | |
1650 | 1, integer_zero_node); | |
1651 | ptr = create_tmp_var (ptr_type_node, NULL); | |
1652 | gimple_call_set_lhs (g, ptr); | |
1653 | gimple_seq_add_stmt (&e_seq, g); | |
1654 | ||
1655 | g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH), | |
1656 | 1, ptr); | |
1657 | gimple_seq_add_stmt (&e_seq, g); | |
1658 | ||
1659 | g = gimple_build_eh_else (n_seq, e_seq); | |
1660 | } | |
1661 | ||
1662 | g = gimple_build_try (gimple_transaction_body (stmt), | |
1663 | gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY); | |
1664 | gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING); | |
1665 | ||
1666 | gimple_transaction_set_body (stmt, NULL); | |
1667 | ||
1668 | /* If the transaction calls abort or if this is an outer transaction, | |
1669 | add an "over" label afterwards. */ | |
1670 | if ((this_state & (GTMA_HAVE_ABORT)) | |
9af5ce0c | 1671 | || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)) |
4c0315d0 | 1672 | { |
1673 | tree label = create_artificial_label (UNKNOWN_LOCATION); | |
1674 | gimple_transaction_set_label (stmt, label); | |
1675 | gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING); | |
1676 | } | |
1677 | ||
1678 | /* Record the set of operations found for use later. */ | |
1679 | this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK; | |
1680 | gimple_transaction_set_subcode (stmt, this_state); | |
1681 | } | |
1682 | ||
1683 | /* Iterate through the statements in the sequence, lowering them all | |
1684 | as appropriate for being in a transaction. */ | |
1685 | ||
1686 | static tree | |
1687 | lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p, | |
1688 | struct walk_stmt_info *wi) | |
1689 | { | |
1690 | unsigned int *state = (unsigned int *) wi->info; | |
1691 | gimple stmt = gsi_stmt (*gsi); | |
1692 | ||
1693 | *handled_ops_p = true; | |
1694 | switch (gimple_code (stmt)) | |
1695 | { | |
1696 | case GIMPLE_ASSIGN: | |
1697 | /* Only memory reads/writes need to be instrumented. */ | |
1698 | if (gimple_assign_single_p (stmt)) | |
1699 | examine_assign_tm (state, gsi); | |
1700 | break; | |
1701 | ||
1702 | case GIMPLE_CALL: | |
1703 | examine_call_tm (state, gsi); | |
1704 | break; | |
1705 | ||
1706 | case GIMPLE_ASM: | |
1707 | *state |= GTMA_MAY_ENTER_IRREVOCABLE; | |
1708 | break; | |
1709 | ||
1710 | case GIMPLE_TRANSACTION: | |
1711 | lower_transaction (gsi, wi); | |
1712 | break; | |
1713 | ||
1714 | default: | |
1715 | *handled_ops_p = !gimple_has_substatements (stmt); | |
1716 | break; | |
1717 | } | |
1718 | ||
1719 | return NULL_TREE; | |
1720 | } | |
1721 | ||
1722 | /* Iterate through the statements in the sequence, lowering them all | |
1723 | as appropriate for being outside of a transaction. */ | |
1724 | ||
1725 | static tree | |
1726 | lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p, | |
1727 | struct walk_stmt_info * wi) | |
1728 | { | |
1729 | gimple stmt = gsi_stmt (*gsi); | |
1730 | ||
1731 | if (gimple_code (stmt) == GIMPLE_TRANSACTION) | |
1732 | { | |
1733 | *handled_ops_p = true; | |
1734 | lower_transaction (gsi, wi); | |
1735 | } | |
1736 | else | |
1737 | *handled_ops_p = !gimple_has_substatements (stmt); | |
1738 | ||
1739 | return NULL_TREE; | |
1740 | } | |
1741 | ||
1742 | /* Main entry point for flattening GIMPLE_TRANSACTION constructs. After | |
1743 | this, GIMPLE_TRANSACTION nodes still exist, but the nested body has | |
1744 | been moved out, and all the data required for constructing a proper | |
1745 | CFG has been recorded. */ | |
1746 | ||
1747 | static unsigned int | |
1748 | execute_lower_tm (void) | |
1749 | { | |
1750 | struct walk_stmt_info wi; | |
e3a19533 | 1751 | gimple_seq body; |
4c0315d0 | 1752 | |
1753 | /* Transactional clones aren't created until a later pass. */ | |
1754 | gcc_assert (!decl_is_tm_clone (current_function_decl)); | |
1755 | ||
e3a19533 | 1756 | body = gimple_body (current_function_decl); |
4c0315d0 | 1757 | memset (&wi, 0, sizeof (wi)); |
e3a19533 | 1758 | walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi); |
1759 | gimple_set_body (current_function_decl, body); | |
4c0315d0 | 1760 | |
1761 | return 0; | |
1762 | } | |
1763 | ||
cbe8bda8 | 1764 | namespace { |
1765 | ||
1766 | const pass_data pass_data_lower_tm = | |
1767 | { | |
1768 | GIMPLE_PASS, /* type */ | |
1769 | "tmlower", /* name */ | |
1770 | OPTGROUP_NONE, /* optinfo_flags */ | |
1771 | true, /* has_gate */ | |
1772 | true, /* has_execute */ | |
1773 | TV_TRANS_MEM, /* tv_id */ | |
1774 | PROP_gimple_lcf, /* properties_required */ | |
1775 | 0, /* properties_provided */ | |
1776 | 0, /* properties_destroyed */ | |
1777 | 0, /* todo_flags_start */ | |
1778 | 0, /* todo_flags_finish */ | |
4c0315d0 | 1779 | }; |
cbe8bda8 | 1780 | |
1781 | class pass_lower_tm : public gimple_opt_pass | |
1782 | { | |
1783 | public: | |
9af5ce0c | 1784 | pass_lower_tm (gcc::context *ctxt) |
1785 | : gimple_opt_pass (pass_data_lower_tm, ctxt) | |
cbe8bda8 | 1786 | {} |
1787 | ||
1788 | /* opt_pass methods: */ | |
1789 | bool gate () { return gate_tm (); } | |
1790 | unsigned int execute () { return execute_lower_tm (); } | |
1791 | ||
1792 | }; // class pass_lower_tm | |
1793 | ||
1794 | } // anon namespace | |
1795 | ||
1796 | gimple_opt_pass * | |
1797 | make_pass_lower_tm (gcc::context *ctxt) | |
1798 | { | |
1799 | return new pass_lower_tm (ctxt); | |
1800 | } | |
4c0315d0 | 1801 | \f |
1802 | /* Collect region information for each transaction. */ | |
1803 | ||
1804 | struct tm_region | |
1805 | { | |
1806 | /* Link to the next unnested transaction. */ | |
1807 | struct tm_region *next; | |
1808 | ||
1809 | /* Link to the next inner transaction. */ | |
1810 | struct tm_region *inner; | |
1811 | ||
1812 | /* Link to the next outer transaction. */ | |
1813 | struct tm_region *outer; | |
1814 | ||
0cd02a19 | 1815 | /* The GIMPLE_TRANSACTION statement beginning this transaction. |
1816 | After TM_MARK, this gets replaced by a call to | |
1817 | BUILT_IN_TM_START. */ | |
4c0315d0 | 1818 | gimple transaction_stmt; |
1819 | ||
0cd02a19 | 1820 | /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to |
1821 | BUILT_IN_TM_START, this field is true if the transaction is an | |
1822 | outer transaction. */ | |
1823 | bool original_transaction_was_outer; | |
1824 | ||
1825 | /* Return value from BUILT_IN_TM_START. */ | |
1826 | tree tm_state; | |
1827 | ||
1828 | /* The entry block to this region. This will always be the first | |
1829 | block of the body of the transaction. */ | |
4c0315d0 | 1830 | basic_block entry_block; |
1831 | ||
0cd02a19 | 1832 | /* The first block after an expanded call to _ITM_beginTransaction. */ |
1833 | basic_block restart_block; | |
1834 | ||
4c0315d0 | 1835 | /* The set of all blocks that end the region; NULL if only EXIT_BLOCK. |
1836 | These blocks are still a part of the region (i.e., the border is | |
1837 | inclusive). Note that this set is only complete for paths in the CFG | |
1838 | starting at ENTRY_BLOCK, and that there is no exit block recorded for | |
1839 | the edge to the "over" label. */ | |
1840 | bitmap exit_blocks; | |
1841 | ||
1842 | /* The set of all blocks that have a TM_IRREVOCABLE call. */ | |
1843 | bitmap irr_blocks; | |
1844 | }; | |
1845 | ||
5ab54790 | 1846 | typedef struct tm_region *tm_region_p; |
5ab54790 | 1847 | |
4c0315d0 | 1848 | /* True if there are pending statement insertions on edges to be | |
1849 | committed for the current function being scanned in the tmmark pass. */ | |
1850 | bool pending_edge_inserts_p; | |
1851 | ||
1852 | static struct tm_region *all_tm_regions; | |
1853 | static bitmap_obstack tm_obstack; | |
1854 | ||
1855 | ||
9d75589a | 1856 | /* A subroutine of tm_region_init. Record the existence of the |
4c0315d0 | 1857 | GIMPLE_TRANSACTION statement in a tree of tm_region elements. */ |
1858 | ||
1859 | static struct tm_region * | |
1860 | tm_region_init_0 (struct tm_region *outer, basic_block bb, gimple stmt) | |
1861 | { | |
1862 | struct tm_region *region; | |
1863 | ||
1864 | region = (struct tm_region *) | |
1865 | obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region)); | |
1866 | ||
1867 | if (outer) | |
1868 | { | |
1869 | region->next = outer->inner; | |
1870 | outer->inner = region; | |
1871 | } | |
1872 | else | |
1873 | { | |
1874 | region->next = all_tm_regions; | |
1875 | all_tm_regions = region; | |
1876 | } | |
1877 | region->inner = NULL; | |
1878 | region->outer = outer; | |
1879 | ||
1880 | region->transaction_stmt = stmt; | |
0cd02a19 | 1881 | region->original_transaction_was_outer = false; |
1882 | region->tm_state = NULL; | |
4c0315d0 | 1883 | |
1884 | /* There are either one or two edges out of the block containing | |
1885 | the GIMPLE_TRANSACTION, one to the actual region and one to the | |
1886 | "over" label if the region contains an abort. The former will | |
1887 | always be the one marked FALLTHRU. */ | |
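| /* Schematically (a sketch): | |
| ||
| bb: ... GIMPLE_TRANSACTION | |
| / \ | |
| FALLTHRU / \ (only if the region contains an abort) | |
| v v | |
| entry_block "over" label */ | |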
1888 | region->entry_block = FALLTHRU_EDGE (bb)->dest; | |
1889 | ||
1890 | region->exit_blocks = BITMAP_ALLOC (&tm_obstack); | |
1891 | region->irr_blocks = BITMAP_ALLOC (&tm_obstack); | |
1892 | ||
1893 | return region; | |
1894 | } | |
1895 | ||
1896 | /* A subroutine of tm_region_init. Record all the exit and | |
1897 | irrevocable blocks in BB into the region's exit_blocks and | |
1898 | irr_blocks bitmaps. Returns the new region being scanned. */ | |
1899 | ||
1900 | static struct tm_region * | |
1901 | tm_region_init_1 (struct tm_region *region, basic_block bb) | |
1902 | { | |
1903 | gimple_stmt_iterator gsi; | |
1904 | gimple g; | |
1905 | ||
1906 | if (!region | |
1907 | || (!region->irr_blocks && !region->exit_blocks)) | |
1908 | return region; | |
1909 | ||
1910 | /* Check whether this is the end of a region by looking for a | |
1911 | call to __builtin_tm_commit{,_eh}. Note that the | |
1912 | outermost region for DECL_IS_TM_CLONE need not collect this. */ | |
1913 | for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi)) | |
1914 | { | |
1915 | g = gsi_stmt (gsi); | |
1916 | if (gimple_code (g) == GIMPLE_CALL) | |
1917 | { | |
1918 | tree fn = gimple_call_fndecl (g); | |
1919 | if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL) | |
1920 | { | |
1921 | if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT | |
1922 | || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH) | |
1923 | && region->exit_blocks) | |
1924 | { | |
1925 | bitmap_set_bit (region->exit_blocks, bb->index); | |
1926 | region = region->outer; | |
1927 | break; | |
1928 | } | |
1929 | if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE) | |
1930 | bitmap_set_bit (region->irr_blocks, bb->index); | |
1931 | } | |
1932 | } | |
1933 | } | |
1934 | return region; | |
1935 | } | |
1936 | ||
1937 | /* Collect all of the transaction regions within the current function | |
1938 | and record them in ALL_TM_REGIONS. The REGION parameter may specify | |
1939 | an "outermost" region for use by tm clones. */ | |
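| /* For example (a sketch), given | |
| ||
| __transaction_atomic { // R1 | |
| __transaction_atomic { ... } // R2 | |
| __transaction_atomic { ... } // R3 | |
| } | |
| ||
| R1 lands on ALL_TM_REGIONS, R2 and R3 are chained on R1->inner via | |
| their NEXT links (in reverse discovery order), and both have OUTER | |
| pointing back at R1. */ | |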
1940 | ||
1941 | static void | |
1942 | tm_region_init (struct tm_region *region) | |
1943 | { | |
1944 | gimple g; | |
1945 | edge_iterator ei; | |
1946 | edge e; | |
1947 | basic_block bb; | |
c2078b80 | 1948 | auto_vec<basic_block> queue; |
4c0315d0 | 1949 | bitmap visited_blocks = BITMAP_ALLOC (NULL); |
1950 | struct tm_region *old_region; | |
c2078b80 | 1951 | auto_vec<tm_region_p> bb_regions; |
4c0315d0 | 1952 | |
1953 | all_tm_regions = region; | |
34154e27 | 1954 | bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); |
4c0315d0 | 1955 | |
de60f90c | 1956 | /* We could store this information in bb->aux, but we may get called |
1957 | through get_all_tm_blocks() from another pass that may already be | |
1958 | using bb->aux. */ | |
fe672ac0 | 1959 | bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun)); |
de60f90c | 1960 | |
f1f41a6c | 1961 | queue.safe_push (bb); |
1962 | bb_regions[bb->index] = region; | |
4c0315d0 | 1963 | do |
1964 | { | |
f1f41a6c | 1965 | bb = queue.pop (); |
1966 | region = bb_regions[bb->index]; | |
1967 | bb_regions[bb->index] = NULL; | |
4c0315d0 | 1968 | |
1969 | /* Record exit and irrevocable blocks. */ | |
1970 | region = tm_region_init_1 (region, bb); | |
1971 | ||
1972 | /* Check for the last statement in the block beginning a new region. */ | |
1973 | g = last_stmt (bb); | |
1974 | old_region = region; | |
1975 | if (g && gimple_code (g) == GIMPLE_TRANSACTION) | |
1976 | region = tm_region_init_0 (region, bb, g); | |
1977 | ||
1978 | /* Process subsequent blocks. */ | |
1979 | FOR_EACH_EDGE (e, ei, bb->succs) | |
1980 | if (!bitmap_bit_p (visited_blocks, e->dest->index)) | |
1981 | { | |
1982 | bitmap_set_bit (visited_blocks, e->dest->index); | |
f1f41a6c | 1983 | queue.safe_push (e->dest); |
4c0315d0 | 1984 | |
1985 | /* If the current block started a new region, make sure that only | |
1986 | the entry block of the new region is associated with this region. | |
1987 | Other successors are still part of the old region. */ | |
1988 | if (old_region != region && e->dest != region->entry_block) | |
f1f41a6c | 1989 | bb_regions[e->dest->index] = old_region; |
4c0315d0 | 1990 | else |
f1f41a6c | 1991 | bb_regions[e->dest->index] = region; |
4c0315d0 | 1992 | } |
1993 | } | |
f1f41a6c | 1994 | while (!queue.is_empty ()); |
4c0315d0 | 1995 | BITMAP_FREE (visited_blocks); |
1996 | } | |
1997 | ||
1998 | /* The "gate" function for all transactional memory expansion and optimization | |
1999 | passes. We collect region information for each top-level transaction, and | |
2000 | if we don't find any, we skip all of the TM passes. Each region will have | |
2001 | all of the exit blocks recorded, and the originating statement. */ | |
2002 | ||
2003 | static bool | |
2004 | gate_tm_init (void) | |
2005 | { | |
2006 | if (!flag_tm) | |
2007 | return false; | |
2008 | ||
2009 | calculate_dominance_info (CDI_DOMINATORS); | |
2010 | bitmap_obstack_initialize (&tm_obstack); | |
2011 | ||
2012 | /* If the function is a TM_CLONE, then the entire function is the region. */ | |
2013 | if (decl_is_tm_clone (current_function_decl)) | |
2014 | { | |
2015 | struct tm_region *region = (struct tm_region *) | |
2016 | obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region)); | |
2017 | memset (region, 0, sizeof (*region)); | |
34154e27 | 2018 | region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); |
4c0315d0 | 2019 | /* For a clone, the entire function is the region. But even if |
2020 | we don't need to record any exit blocks, we may need to | |
2021 | record irrevocable blocks. */ | |
2022 | region->irr_blocks = BITMAP_ALLOC (&tm_obstack); | |
2023 | ||
2024 | tm_region_init (region); | |
2025 | } | |
2026 | else | |
2027 | { | |
2028 | tm_region_init (NULL); | |
2029 | ||
2030 | /* If we didn't find any regions, cleanup and skip the whole tree | |
2031 | of tm-related optimizations. */ | |
2032 | if (all_tm_regions == NULL) | |
2033 | { | |
2034 | bitmap_obstack_release (&tm_obstack); | |
2035 | return false; | |
2036 | } | |
2037 | } | |
2038 | ||
2039 | return true; | |
2040 | } | |
2041 | ||
cbe8bda8 | 2042 | namespace { |
2043 | ||
2044 | const pass_data pass_data_tm_init = | |
2045 | { | |
2046 | GIMPLE_PASS, /* type */ | |
2047 | "*tminit", /* name */ | |
2048 | OPTGROUP_NONE, /* optinfo_flags */ | |
2049 | true, /* has_gate */ | |
2050 | false, /* has_execute */ | |
2051 | TV_TRANS_MEM, /* tv_id */ | |
2052 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
2053 | 0, /* properties_provided */ | |
2054 | 0, /* properties_destroyed */ | |
2055 | 0, /* todo_flags_start */ | |
2056 | 0, /* todo_flags_finish */ | |
4c0315d0 | 2057 | }; |
cbe8bda8 | 2058 | |
2059 | class pass_tm_init : public gimple_opt_pass | |
2060 | { | |
2061 | public: | |
9af5ce0c | 2062 | pass_tm_init (gcc::context *ctxt) |
2063 | : gimple_opt_pass (pass_data_tm_init, ctxt) | |
cbe8bda8 | 2064 | {} |
2065 | ||
2066 | /* opt_pass methods: */ | |
2067 | bool gate () { return gate_tm_init (); } | |
2068 | ||
2069 | }; // class pass_tm_init | |
2070 | ||
2071 | } // anon namespace | |
2072 | ||
2073 | gimple_opt_pass * | |
2074 | make_pass_tm_init (gcc::context *ctxt) | |
2075 | { | |
2076 | return new pass_tm_init (ctxt); | |
2077 | } | |
4c0315d0 | 2078 | \f |
2079 | /* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region | |
2080 | represented by STATE. */ | |
2081 | ||
2082 | static inline void | |
2083 | transaction_subcode_ior (struct tm_region *region, unsigned flags) | |
2084 | { | |
2085 | if (region && region->transaction_stmt) | |
2086 | { | |
2087 | flags |= gimple_transaction_subcode (region->transaction_stmt); | |
2088 | gimple_transaction_set_subcode (region->transaction_stmt, flags); | |
2089 | } | |
2090 | } | |
2091 | ||
2092 | /* Construct a memory load in a transactional context. Return the | |
2093 | gimple statement performing the load, or NULL if there is no | |
2094 | TM_LOAD builtin of the appropriate size to do the load. | |
2095 | ||
2096 | LOC is the location to use for the new statement(s). */ | |
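| /* For instance (a sketch), inside a transaction the load | |
| ||
| x = *p; // int x, 4 bytes | |
| ||
| becomes a call to the sized load builtin | |
| ||
| x = __builtin__ITM_RU4 (p); | |
| ||
| going through a temporary and a VIEW_CONVERT_EXPR when the | |
| builtin's return type does not match the type of the LHS. */ | |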
2097 | ||
2098 | static gimple | |
2099 | build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi) | |
2100 | { | |
2101 | enum built_in_function code = END_BUILTINS; | |
2102 | tree t, type = TREE_TYPE (rhs), decl; | |
2103 | gimple gcall; | |
2104 | ||
2105 | if (type == float_type_node) | |
2106 | code = BUILT_IN_TM_LOAD_FLOAT; | |
2107 | else if (type == double_type_node) | |
2108 | code = BUILT_IN_TM_LOAD_DOUBLE; | |
2109 | else if (type == long_double_type_node) | |
2110 | code = BUILT_IN_TM_LOAD_LDOUBLE; | |
2111 | else if (TYPE_SIZE_UNIT (type) != NULL | |
cd4547bf | 2112 | && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) |
4c0315d0 | 2113 | { |
6a0712d4 | 2114 | switch (tree_to_uhwi (TYPE_SIZE_UNIT (type))) |
4c0315d0 | 2115 | { |
2116 | case 1: | |
2117 | code = BUILT_IN_TM_LOAD_1; | |
2118 | break; | |
2119 | case 2: | |
2120 | code = BUILT_IN_TM_LOAD_2; | |
2121 | break; | |
2122 | case 4: | |
2123 | code = BUILT_IN_TM_LOAD_4; | |
2124 | break; | |
2125 | case 8: | |
2126 | code = BUILT_IN_TM_LOAD_8; | |
2127 | break; | |
2128 | } | |
2129 | } | |
2130 | ||
2131 | if (code == END_BUILTINS) | |
2132 | { | |
2133 | decl = targetm.vectorize.builtin_tm_load (type); | |
2134 | if (!decl) | |
2135 | return NULL; | |
2136 | } | |
2137 | else | |
2138 | decl = builtin_decl_explicit (code); | |
2139 | ||
2140 | t = gimplify_addr (gsi, rhs); | |
2141 | gcall = gimple_build_call (decl, 1, t); | |
2142 | gimple_set_location (gcall, loc); | |
2143 | ||
2144 | t = TREE_TYPE (TREE_TYPE (decl)); | |
2145 | if (useless_type_conversion_p (type, t)) | |
2146 | { | |
2147 | gimple_call_set_lhs (gcall, lhs); | |
2148 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2149 | } | |
2150 | else | |
2151 | { | |
2152 | gimple g; | |
2153 | tree temp; | |
2154 | ||
072f7ab1 | 2155 | temp = create_tmp_reg (t, NULL); |
4c0315d0 | 2156 | gimple_call_set_lhs (gcall, temp); |
2157 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2158 | ||
2159 | t = fold_build1 (VIEW_CONVERT_EXPR, type, temp); | |
2160 | g = gimple_build_assign (lhs, t); | |
2161 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
2162 | } | |
2163 | ||
2164 | return gcall; | |
2165 | } | |
2166 | ||
2167 | ||
2168 | /* Similarly for storing TYPE in a transactional context. */ | |
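| /* E.g. (a sketch), inside a transaction the store | |
| ||
| *p = x; // int x, 4 bytes | |
| ||
| becomes | |
| ||
| __builtin__ITM_WU4 (p, x); | |
| ||
| An empty CONSTRUCTOR on the RHS is stored as zero; any other | |
| CONSTRUCTOR makes us return NULL so the caller falls back to | |
| BUILT_IN_TM_MEMMOVE. */ | |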
2169 | ||
2170 | static gimple | |
2171 | build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi) | |
2172 | { | |
2173 | enum built_in_function code = END_BUILTINS; | |
2174 | tree t, fn, type = TREE_TYPE (rhs), simple_type; | |
2175 | gimple gcall; | |
2176 | ||
2177 | if (type == float_type_node) | |
2178 | code = BUILT_IN_TM_STORE_FLOAT; | |
2179 | else if (type == double_type_node) | |
2180 | code = BUILT_IN_TM_STORE_DOUBLE; | |
2181 | else if (type == long_double_type_node) | |
2182 | code = BUILT_IN_TM_STORE_LDOUBLE; | |
2183 | else if (TYPE_SIZE_UNIT (type) != NULL | |
cd4547bf | 2184 | && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) |
4c0315d0 | 2185 | { |
6a0712d4 | 2186 | switch (tree_to_uhwi (TYPE_SIZE_UNIT (type))) |
4c0315d0 | 2187 | { |
2188 | case 1: | |
2189 | code = BUILT_IN_TM_STORE_1; | |
2190 | break; | |
2191 | case 2: | |
2192 | code = BUILT_IN_TM_STORE_2; | |
2193 | break; | |
2194 | case 4: | |
2195 | code = BUILT_IN_TM_STORE_4; | |
2196 | break; | |
2197 | case 8: | |
2198 | code = BUILT_IN_TM_STORE_8; | |
2199 | break; | |
2200 | } | |
2201 | } | |
2202 | ||
2203 | if (code == END_BUILTINS) | |
2204 | { | |
2205 | fn = targetm.vectorize.builtin_tm_store (type); | |
2206 | if (!fn) | |
2207 | return NULL; | |
2208 | } | |
2209 | else | |
2210 | fn = builtin_decl_explicit (code); | |
2211 | ||
2212 | simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn)))); | |
2213 | ||
2214 | if (TREE_CODE (rhs) == CONSTRUCTOR) | |
2215 | { | |
2216 | /* Handle the easy initialization to zero. */ | |
f1f41a6c | 2217 | if (!CONSTRUCTOR_ELTS (rhs)) |
4c0315d0 | 2218 | rhs = build_int_cst (simple_type, 0); |
2219 | else | |
2220 | { | |
2221 | /* ...otherwise punt to the caller and probably use | |
2222 | BUILT_IN_TM_MEMMOVE, because we can't wrap a | |
2223 | VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce | |
2224 | valid gimple. */ | |
2225 | return NULL; | |
2226 | } | |
2227 | } | |
2228 | else if (!useless_type_conversion_p (simple_type, type)) | |
2229 | { | |
2230 | gimple g; | |
2231 | tree temp; | |
2232 | ||
072f7ab1 | 2233 | temp = create_tmp_reg (simple_type, NULL); |
4c0315d0 | 2234 | t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs); |
2235 | g = gimple_build_assign (temp, t); | |
2236 | gimple_set_location (g, loc); | |
2237 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
2238 | ||
2239 | rhs = temp; | |
2240 | } | |
2241 | ||
2242 | t = gimplify_addr (gsi, lhs); | |
2243 | gcall = gimple_build_call (fn, 2, t, rhs); | |
2244 | gimple_set_location (gcall, loc); | |
2245 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2246 | ||
2247 | return gcall; | |
2248 | } | |
2249 | ||
2250 | ||
2251 | /* Expand an assignment statement into transactional builtins. */ | |
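| /* A sketch of the fallback case below, where both sides need a | |
| barrier and no sized load/store builtin applies (e.g. a large | |
| struct): | |
| ||
| lhs = rhs; | |
| => __builtin__ITM_memmoveRtWt (&lhs, &rhs, sizeof (lhs)); | |
| ||
| with an extra temporary when the LHS is a gimple register, since | |
| its address cannot be taken directly. */ | |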
2252 | ||
2253 | static void | |
2254 | expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi) | |
2255 | { | |
2256 | gimple stmt = gsi_stmt (*gsi); | |
2257 | location_t loc = gimple_location (stmt); | |
2258 | tree lhs = gimple_assign_lhs (stmt); | |
2259 | tree rhs = gimple_assign_rhs1 (stmt); | |
2260 | bool store_p = requires_barrier (region->entry_block, lhs, NULL); | |
2261 | bool load_p = requires_barrier (region->entry_block, rhs, NULL); | |
2262 | gimple gcall = NULL; | |
2263 | ||
2264 | if (!load_p && !store_p) | |
2265 | { | |
2266 | /* Add thread private addresses to log if applicable. */ | |
2267 | requires_barrier (region->entry_block, lhs, stmt); | |
2268 | gsi_next (gsi); | |
2269 | return; | |
2270 | } | |
2271 | ||
0cd02a19 | 2272 | // Remove original load/store statement. |
4c0315d0 | 2273 | gsi_remove (gsi, true); |
2274 | ||
2275 | if (load_p && !store_p) | |
2276 | { | |
2277 | transaction_subcode_ior (region, GTMA_HAVE_LOAD); | |
2278 | gcall = build_tm_load (loc, lhs, rhs, gsi); | |
2279 | } | |
2280 | else if (store_p && !load_p) | |
2281 | { | |
2282 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2283 | gcall = build_tm_store (loc, lhs, rhs, gsi); | |
2284 | } | |
2285 | if (!gcall) | |
2286 | { | |
ea580cf7 | 2287 | tree lhs_addr, rhs_addr, tmp; |
4c0315d0 | 2288 | |
2289 | if (load_p) | |
2290 | transaction_subcode_ior (region, GTMA_HAVE_LOAD); | |
2291 | if (store_p) | |
2292 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2293 | ||
2294 | /* ??? Figure out if there's any possible overlap between the LHS | |
2295 | and the RHS and if not, use MEMCPY. */ | |
ea580cf7 | 2296 | |
28098b5b | 2297 | if (load_p && is_gimple_reg (lhs)) |
ea580cf7 | 2298 | { |
2299 | tmp = create_tmp_var (TREE_TYPE (lhs), NULL); | |
2300 | lhs_addr = build_fold_addr_expr (tmp); | |
2301 | } | |
2302 | else | |
2303 | { | |
2304 | tmp = NULL_TREE; | |
2305 | lhs_addr = gimplify_addr (gsi, lhs); | |
2306 | } | |
4c0315d0 | 2307 | rhs_addr = gimplify_addr (gsi, rhs); |
2308 | gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE), | |
2309 | 3, lhs_addr, rhs_addr, | |
2310 | TYPE_SIZE_UNIT (TREE_TYPE (lhs))); | |
2311 | gimple_set_location (gcall, loc); | |
2312 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
ea580cf7 | 2313 | |
2314 | if (tmp) | |
2315 | { | |
2316 | gcall = gimple_build_assign (lhs, tmp); | |
2317 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2318 | } | |
4c0315d0 | 2319 | } |
2320 | ||
2321 | /* Now that we have the load/store in its instrumented form, add | |
2322 | thread private addresses to the log if applicable. */ | |
2323 | if (!store_p) | |
2324 | requires_barrier (region->entry_block, lhs, gcall); | |
2325 | ||
0cd02a19 | 2326 | // The calls to build_tm_{store,load} above inserted the instrumented |
2327 | // call into the stream. | |
2328 | // gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
4c0315d0 | 2329 | } |
2330 | ||
2331 | ||
2332 | /* Expand a call statement as appropriate for a transaction. That is, | |
2333 | either verify that the call does not affect the transaction, or | |
2334 | redirect the call to a clone that handles transactions, or change | |
2335 | the transaction state to IRREVOCABLE. Return true if the call is | |
2336 | one of the builtins that end a transaction. */ | |
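| /* For example (a sketch), a call whose LHS needs a store barrier | |
| ||
| x = foo (); | |
| ||
| is rewritten as | |
| ||
| tmp = foo (); | |
| x = tmp; // expanded via expand_assign_tm | |
| ||
| unless the return-slot optimization applies, in which case the | |
| callee has already performed the instrumented store. */ | |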
2337 | ||
2338 | static bool | |
2339 | expand_call_tm (struct tm_region *region, | |
2340 | gimple_stmt_iterator *gsi) | |
2341 | { | |
2342 | gimple stmt = gsi_stmt (*gsi); | |
2343 | tree lhs = gimple_call_lhs (stmt); | |
2344 | tree fn_decl; | |
2345 | struct cgraph_node *node; | |
2346 | bool retval = false; | |
2347 | ||
2348 | fn_decl = gimple_call_fndecl (stmt); | |
2349 | ||
2350 | if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY) | |
2351 | || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE)) | |
2352 | transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD); | |
2353 | if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET)) | |
2354 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2355 | ||
2356 | if (is_tm_pure_call (stmt)) | |
2357 | return false; | |
2358 | ||
2359 | if (fn_decl) | |
2360 | retval = is_tm_ending_fndecl (fn_decl); | |
2361 | if (!retval) | |
2362 | { | |
2363 | /* Assume all non-const/pure calls write to memory, except | |
2364 | transaction-ending builtins. */ | |
2365 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2366 | } | |
2367 | ||
2368 | /* For indirect calls, we already generated a call into the runtime. */ | |
2369 | if (!fn_decl) | |
2370 | { | |
2371 | tree fn = gimple_call_fn (stmt); | |
2372 | ||
2373 | /* We are guaranteed never to go irrevocable on a safe or pure | |
2374 | call, and the pure call was handled above. */ | |
2375 | if (is_tm_safe (fn)) | |
2376 | return false; | |
2377 | else | |
2378 | transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); | |
2379 | ||
2380 | return false; | |
2381 | } | |
2382 | ||
2383 | node = cgraph_get_node (fn_decl); | |
fce5ce8e | 2384 | /* All calls should have a cgraph node here. */
2385 | if (!node) | |
2386 | { | |
2387 | /* We can have a nodeless call here if some pass after IPA-tm | |
2388 | added uninstrumented calls. For example, loop distribution | |
2389 | can transform certain loop constructs into __builtin_mem* | |
2390 | calls. In this case, see if we have a suitable TM | |
2391 | replacement and fill in the gaps. */ | |
2392 | gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL); | |
2393 | enum built_in_function code = DECL_FUNCTION_CODE (fn_decl); | |
2394 | gcc_assert (code == BUILT_IN_MEMCPY | |
2395 | || code == BUILT_IN_MEMMOVE | |
2396 | || code == BUILT_IN_MEMSET); | |
2397 | ||
2398 | tree repl = find_tm_replacement_function (fn_decl); | |
2399 | if (repl) | |
2400 | { | |
2401 | gimple_call_set_fndecl (stmt, repl); | |
2402 | update_stmt (stmt); | |
2403 | node = cgraph_create_node (repl); | |
2404 | node->local.tm_may_enter_irr = false; | |
2405 | return expand_call_tm (region, gsi); | |
2406 | } | |
2407 | gcc_unreachable (); | |
2408 | } | |
4c0315d0 | 2409 | if (node->local.tm_may_enter_irr) |
2410 | transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); | |
2411 | ||
2412 | if (is_tm_abort (fn_decl)) | |
2413 | { | |
2414 | transaction_subcode_ior (region, GTMA_HAVE_ABORT); | |
2415 | return true; | |
2416 | } | |
2417 | ||
2418 | /* Instrument the store if needed. | |
2419 | ||
2420 | If the assignment happens inside the function call (return slot | |
2421 | optimization), there is no instrumentation to be done, since | |
2422 | the callee should have done the right thing. */ | |
2423 | if (lhs && requires_barrier (region->entry_block, lhs, stmt) | |
2424 | && !gimple_call_return_slot_opt_p (stmt)) | |
2425 | { | |
072f7ab1 | 2426 | tree tmp = create_tmp_reg (TREE_TYPE (lhs), NULL); |
4c0315d0 | 2427 | location_t loc = gimple_location (stmt); |
2428 | edge fallthru_edge = NULL; | |
2429 | ||
2430 | /* Remember if the call was going to throw. */ | |
2431 | if (stmt_can_throw_internal (stmt)) | |
2432 | { | |
2433 | edge_iterator ei; | |
2434 | edge e; | |
2435 | basic_block bb = gimple_bb (stmt); | |
2436 | ||
2437 | FOR_EACH_EDGE (e, ei, bb->succs) | |
2438 | if (e->flags & EDGE_FALLTHRU) | |
2439 | { | |
2440 | fallthru_edge = e; | |
2441 | break; | |
2442 | } | |
2443 | } | |
2444 | ||
2445 | gimple_call_set_lhs (stmt, tmp); | |
2446 | update_stmt (stmt); | |
2447 | stmt = gimple_build_assign (lhs, tmp); | |
2448 | gimple_set_location (stmt, loc); | |
2449 | ||
2450 | /* We cannot throw in the middle of a BB. If the call was going | |
2451 | to throw, place the instrumentation on the fallthru edge, so | |
2452 | the call remains the last statement in the block. */ | |
2453 | if (fallthru_edge) | |
2454 | { | |
2455 | gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (stmt); | |
2456 | gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq); | |
2457 | expand_assign_tm (region, &fallthru_gsi); | |
2458 | gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq); | |
2459 | pending_edge_inserts_p = true; | |
2460 | } | |
2461 | else | |
2462 | { | |
2463 | gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); | |
2464 | expand_assign_tm (region, gsi); | |
2465 | } | |
2466 | ||
2467 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2468 | } | |
2469 | ||
2470 | return retval; | |
2471 | } | |
2472 | ||
2473 | ||
2474 | /* Expand all statements in BB as appropriate for being inside | |
2475 | a transaction. */ | |
2476 | ||
2477 | static void | |
2478 | expand_block_tm (struct tm_region *region, basic_block bb) | |
2479 | { | |
2480 | gimple_stmt_iterator gsi; | |
2481 | ||
2482 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); ) | |
2483 | { | |
2484 | gimple stmt = gsi_stmt (gsi); | |
2485 | switch (gimple_code (stmt)) | |
2486 | { | |
2487 | case GIMPLE_ASSIGN: | |
2488 | /* Only memory reads/writes need to be instrumented. */ | |
73fd1e9a | 2489 | if (gimple_assign_single_p (stmt) |
2490 | && !gimple_clobber_p (stmt)) | |
4c0315d0 | 2491 | { |
2492 | expand_assign_tm (region, &gsi); | |
2493 | continue; | |
2494 | } | |
2495 | break; | |
2496 | ||
2497 | case GIMPLE_CALL: | |
2498 | if (expand_call_tm (region, &gsi)) | |
2499 | return; | |
2500 | break; | |
2501 | ||
2502 | case GIMPLE_ASM: | |
2503 | gcc_unreachable (); | |
2504 | ||
2505 | default: | |
2506 | break; | |
2507 | } | |
2508 | if (!gsi_end_p (gsi)) | |
2509 | gsi_next (&gsi); | |
2510 | } | |
2511 | } | |
2512 | ||
2513 | /* Return the list of basic-blocks in REGION. | |
2514 | ||
2515 | STOP_AT_IRREVOCABLE_P is true if caller is uninterested in blocks | |
79f4a793 | 2516 | following a TM_IRREVOCABLE call. |
2517 | ||
2518 | INCLUDE_UNINSTRUMENTED_P is TRUE if we should include the | |
2519 | uninstrumented code path blocks in the list of basic blocks | |
2520 | returned, false otherwise. */ | |
4c0315d0 | 2521 | |
f1f41a6c | 2522 | static vec<basic_block> |
4c0315d0 | 2523 | get_tm_region_blocks (basic_block entry_block, |
2524 | bitmap exit_blocks, | |
2525 | bitmap irr_blocks, | |
2526 | bitmap all_region_blocks, | |
79f4a793 | 2527 | bool stop_at_irrevocable_p, |
2528 | bool include_uninstrumented_p = true) | |
4c0315d0 | 2529 | { |
1e094109 | 2530 | vec<basic_block> bbs = vNULL; |
4c0315d0 | 2531 | unsigned i; |
2532 | edge e; | |
2533 | edge_iterator ei; | |
2534 | bitmap visited_blocks = BITMAP_ALLOC (NULL); | |
2535 | ||
2536 | i = 0; | |
f1f41a6c | 2537 | bbs.safe_push (entry_block); |
4c0315d0 | 2538 | bitmap_set_bit (visited_blocks, entry_block->index); |
2539 | ||
2540 | do | |
2541 | { | |
f1f41a6c | 2542 | basic_block bb = bbs[i++]; |
4c0315d0 | 2543 | |
2544 | if (exit_blocks && | |
2545 | bitmap_bit_p (exit_blocks, bb->index)) | |
2546 | continue; | |
2547 | ||
2548 | if (stop_at_irrevocable_p | |
2549 | && irr_blocks | |
2550 | && bitmap_bit_p (irr_blocks, bb->index)) | |
2551 | continue; | |
2552 | ||
2553 | FOR_EACH_EDGE (e, ei, bb->succs) | |
79f4a793 | 2554 | if ((include_uninstrumented_p |
2555 | || !(e->flags & EDGE_TM_UNINSTRUMENTED)) | |
2556 | && !bitmap_bit_p (visited_blocks, e->dest->index)) | |
4c0315d0 | 2557 | { |
2558 | bitmap_set_bit (visited_blocks, e->dest->index); | |
f1f41a6c | 2559 | bbs.safe_push (e->dest); |
4c0315d0 | 2560 | } |
2561 | } | |
f1f41a6c | 2562 | while (i < bbs.length ()); |
4c0315d0 | 2563 | |
2564 | if (all_region_blocks) | |
2565 | bitmap_ior_into (all_region_blocks, visited_blocks); | |
2566 | ||
2567 | BITMAP_FREE (visited_blocks); | |
2568 | return bbs; | |
2569 | } | |
2570 | ||
79f4a793 | 2571 | // Callback data for collect_bb2reg. |
2572 | struct bb2reg_stuff | |
2573 | { | |
2574 | vec<tm_region_p> *bb2reg; | |
2575 | bool include_uninstrumented_p; | |
2576 | }; | |
2577 | ||
0cd02a19 | 2578 | // Callback for expand_regions, collect innermost region data for each bb. |
2579 | static void * | |
2580 | collect_bb2reg (struct tm_region *region, void *data) | |
2581 | { | |
79f4a793 | 2582 | struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data; |
2583 | vec<tm_region_p> *bb2reg = stuff->bb2reg; | |
f1f41a6c | 2584 | vec<basic_block> queue; |
0cd02a19 | 2585 | unsigned int i; |
2586 | basic_block bb; | |
2587 | ||
2588 | queue = get_tm_region_blocks (region->entry_block, | |
2589 | region->exit_blocks, | |
2590 | region->irr_blocks, | |
2591 | NULL, | |
79f4a793 | 2592 | /*stop_at_irr_p=*/true, |
2593 | stuff->include_uninstrumented_p); | |
0cd02a19 | 2594 | |
2595 | // We expect expand_region to perform a post-order traversal of the region | |
2596 | // tree. Therefore the last region seen for any bb is the innermost. | |
f1f41a6c | 2597 | FOR_EACH_VEC_ELT (queue, i, bb) |
2598 | (*bb2reg)[bb->index] = region; | |
0cd02a19 | 2599 | |
f1f41a6c | 2600 | queue.release (); |
0cd02a19 | 2601 | return NULL; |
2602 | } | |
2603 | ||
2604 | // Returns a vector, indexed by BB->INDEX, of the innermost tm_region to | |
2605 | // which a basic block belongs. Note that the uninstrumented code paths | |
79f4a793 | 2606 | // of a region are only considered when INCLUDE_UNINSTRUMENTED_P is true; | |
2607 | // otherwise they are ignored. | |
0cd02a19 | 2608 | // |
2609 | // ??? This data is very similar to the bb_regions array that is collected | |
2610 | // during tm_region_init. Or, rather, this data is similar to what could | |
2611 | // be used within tm_region_init. The actual computation in tm_region_init | |
2612 | // begins and ends with bb_regions entirely full of NULL pointers, due to | |
2613 | // the way in which pointers are swapped in and out of the array. | |
2614 | // | |
2615 | // ??? Our callers expect that blocks are not shared between transactions. | |
2616 | // When the optimizers get too smart, and blocks are shared, then during | |
2617 | // the tm_mark phase we'll add log entries to only one of the two transactions, | |
2618 | // and in the tm_edge phase we'll add edges to the CFG that create invalid | |
2619 | // cycles. The symptom being SSA defs that do not dominate their uses. | |
2620 | // Note that the optimizers were locally correct with their transformation, | |
2621 | // as we have no info within the program that suggests that the blocks cannot | |
2622 | // be shared. | |
2623 | // | |
2624 | // ??? There is currently a hack inside tree-ssa-pre.c to work around the | |
2625 | // only known instance of this block sharing. | |
2626 | ||
f1f41a6c | 2627 | static vec<tm_region_p> |
79f4a793 | 2628 | get_bb_regions_instrumented (bool traverse_clones, |
2629 | bool include_uninstrumented_p) | |
0cd02a19 | 2630 | { |
fe672ac0 | 2631 | unsigned n = last_basic_block_for_fn (cfun); |
79f4a793 | 2632 | struct bb2reg_stuff stuff; |
f1f41a6c | 2633 | vec<tm_region_p> ret; |
0cd02a19 | 2634 | |
f1f41a6c | 2635 | ret.create (n); |
2636 | ret.safe_grow_cleared (n); | |
79f4a793 | 2637 | stuff.bb2reg = &ret; |
2638 | stuff.include_uninstrumented_p = include_uninstrumented_p; | |
2639 | expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones); | |
0cd02a19 | 2640 | |
2641 | return ret; | |
2642 | } | |
2643 | ||
de60f90c | 2644 | /* Set the BB_IN_TRANSACTION flag on all basic blocks that appear | |
2645 | within a transaction. */ | |
2646 | ||
2647 | void | |
2648 | compute_transaction_bits (void) | |
2649 | { | |
2650 | struct tm_region *region; | |
f1f41a6c | 2651 | vec<basic_block> queue; |
de60f90c | 2652 | unsigned int i; |
de60f90c | 2653 | basic_block bb; |
2654 | ||
2655 | /* ??? Perhaps we need to abstract gate_tm_init further, because we | |
2656 | certainly don't need it to calculate CDI_DOMINATORS info. */ | |
2657 | gate_tm_init (); | |
2658 | ||
fc00614f | 2659 | FOR_EACH_BB_FN (bb, cfun) |
6ad451f8 | 2660 | bb->flags &= ~BB_IN_TRANSACTION; |
2661 | ||
de60f90c | 2662 | for (region = all_tm_regions; region; region = region->next) |
2663 | { | |
2664 | queue = get_tm_region_blocks (region->entry_block, | |
2665 | region->exit_blocks, | |
2666 | region->irr_blocks, | |
2667 | NULL, | |
2668 | /*stop_at_irr_p=*/true); | |
f1f41a6c | 2669 | for (i = 0; queue.iterate (i, &bb); ++i) |
6ad451f8 | 2670 | bb->flags |= BB_IN_TRANSACTION; |
f1f41a6c | 2671 | queue.release (); |
de60f90c | 2672 | } |
2673 | ||
2674 | if (all_tm_regions) | |
2675 | bitmap_obstack_release (&tm_obstack); | |
2676 | } | |
2677 | ||
0cd02a19 | 2678 | /* Replace the GIMPLE_TRANSACTION in this region with the corresponding |
2679 | call to BUILT_IN_TM_START. */ | |
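| /* After this function the transaction entry looks roughly like the | |
| following (a sketch; the set of flags depends on the subcode and | |
| on which edges exist): | |
| ||
| tm_state = __builtin__ITM_beginTransaction (flags); | |
| // flags: PR_INSTRUMENTEDCODE, PR_UNINSTRUMENTEDCODE, ... | |
| if (tm_state & A_RESTORELIVEVARIABLES) | |
| ... restore logged values ... | |
| if (tm_state & A_ABORTTRANSACTION) | |
| goto over; | |
| if (tm_state & A_RUNUNINSTRUMENTEDCODE) | |
| goto uninstrumented_path; // hypothetical label | |
| */ | |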
2680 | ||
2681 | static void * | |
2682 | expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED) | |
2683 | { | |
2684 | tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START); | |
2685 | basic_block transaction_bb = gimple_bb (region->transaction_stmt); | |
2686 | tree tm_state = region->tm_state; | |
2687 | tree tm_state_type = TREE_TYPE (tm_state); | |
2688 | edge abort_edge = NULL; | |
2689 | edge inst_edge = NULL; | |
2690 | edge uninst_edge = NULL; | |
2691 | edge fallthru_edge = NULL; | |
2692 | ||
2693 | // Identify the various successors of the transaction start. | |
2694 | { | |
2695 | edge_iterator i; | |
2696 | edge e; | |
2697 | FOR_EACH_EDGE (e, i, transaction_bb->succs) | |
2698 | { | |
2699 | if (e->flags & EDGE_TM_ABORT) | |
2700 | abort_edge = e; | |
2701 | else if (e->flags & EDGE_TM_UNINSTRUMENTED) | |
2702 | uninst_edge = e; | |
2703 | else | |
2704 | inst_edge = e; | |
2705 | if (e->flags & EDGE_FALLTHRU) | |
2706 | fallthru_edge = e; | |
2707 | } | |
2708 | } | |
2709 | ||
2710 | /* ??? There are plenty of bits here we're not computing. */ | |
2711 | { | |
2712 | int subcode = gimple_transaction_subcode (region->transaction_stmt); | |
2713 | int flags = 0; | |
2714 | if (subcode & GTMA_DOES_GO_IRREVOCABLE) | |
2715 | flags |= PR_DOESGOIRREVOCABLE; | |
2716 | if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0) | |
2717 | flags |= PR_HASNOIRREVOCABLE; | |
2718 | /* If the transaction does not have an abort in lexical scope and is not | |
2719 | marked as an outer transaction, then it will never abort. */ | |
2720 | if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0) | |
2721 | flags |= PR_HASNOABORT; | |
2722 | if ((subcode & GTMA_HAVE_STORE) == 0) | |
2723 | flags |= PR_READONLY; | |
1910089e | 2724 | if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION)) |
0cd02a19 | 2725 | flags |= PR_INSTRUMENTEDCODE; |
2726 | if (uninst_edge) | |
2727 | flags |= PR_UNINSTRUMENTEDCODE; | |
2728 | if (subcode & GTMA_IS_OUTER) | |
2729 | region->original_transaction_was_outer = true; | |
2730 | tree t = build_int_cst (tm_state_type, flags); | |
2731 | gimple call = gimple_build_call (tm_start, 1, t); | |
2732 | gimple_call_set_lhs (call, tm_state); | |
2733 | gimple_set_location (call, gimple_location (region->transaction_stmt)); | |
2734 | ||
2735 | // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START. | |
2736 | gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb); | |
2737 | gcc_assert (gsi_stmt (gsi) == region->transaction_stmt); | |
2738 | gsi_insert_before (&gsi, call, GSI_SAME_STMT); | |
2739 | gsi_remove (&gsi, true); | |
2740 | region->transaction_stmt = call; | |
2741 | } | |
2742 | ||
2743 | // Generate log saves. | |
f1f41a6c | 2744 | if (!tm_log_save_addresses.is_empty ()) |
0cd02a19 | 2745 | tm_log_emit_saves (region->entry_block, transaction_bb); |
2746 | ||
2747 | // In the beginning, we have no tests to perform on transaction restart. | |
2748 | // Note that after this point, transaction_bb becomes the "most recent | |
2749 | // block containing tests for the transaction". | |
2750 | region->restart_block = region->entry_block; | |
2751 | ||
2752 | // Generate log restores. | |
f1f41a6c | 2753 | if (!tm_log_save_addresses.is_empty ()) |
0cd02a19 | 2754 | { |
2755 | basic_block test_bb = create_empty_bb (transaction_bb); | |
2756 | basic_block code_bb = create_empty_bb (test_bb); | |
2757 | basic_block join_bb = create_empty_bb (code_bb); | |
2758 | if (current_loops && transaction_bb->loop_father) | |
2759 | { | |
2760 | add_bb_to_loop (test_bb, transaction_bb->loop_father); | |
2761 | add_bb_to_loop (code_bb, transaction_bb->loop_father); | |
2762 | add_bb_to_loop (join_bb, transaction_bb->loop_father); | |
2763 | } | |
2764 | if (region->restart_block == region->entry_block) | |
2765 | region->restart_block = test_bb; | |
2766 | ||
2767 | tree t1 = create_tmp_reg (tm_state_type, NULL); | |
2768 | tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES); | |
2769 | gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, | |
2770 | tm_state, t2); | |
2771 | gimple_stmt_iterator gsi = gsi_last_bb (test_bb); | |
2772 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2773 | ||
2774 | t2 = build_int_cst (tm_state_type, 0); | |
2775 | stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); | |
2776 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2777 | ||
2778 | tm_log_emit_restores (region->entry_block, code_bb); | |
2779 | ||
2780 | edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU); | |
2781 | edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE); | |
2782 | edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE); | |
2783 | redirect_edge_pred (fallthru_edge, join_bb); | |
2784 | ||
2785 | join_bb->frequency = test_bb->frequency = transaction_bb->frequency; | |
2786 | join_bb->count = test_bb->count = transaction_bb->count; | |
2787 | ||
2788 | ei->probability = PROB_ALWAYS; | |
2789 | et->probability = PROB_LIKELY; | |
2790 | ef->probability = PROB_UNLIKELY; | |
9af5ce0c | 2791 | et->count = apply_probability (test_bb->count, et->probability); |
2792 | ef->count = apply_probability (test_bb->count, ef->probability); | |
0cd02a19 | 2793 | |
2794 | code_bb->count = et->count; | |
2795 | code_bb->frequency = EDGE_FREQUENCY (et); | |
2796 | ||
2797 | transaction_bb = join_bb; | |
2798 | } | |
2799 | ||
2800 | // If we have an ABORT edge, create a test to perform the abort. | |
2801 | if (abort_edge) | |
2802 | { | |
2803 | basic_block test_bb = create_empty_bb (transaction_bb); | |
2804 | if (current_loops && transaction_bb->loop_father) | |
2805 | add_bb_to_loop (test_bb, transaction_bb->loop_father); | |
2806 | if (region->restart_block == region->entry_block) | |
2807 | region->restart_block = test_bb; | |
2808 | ||
2809 | tree t1 = create_tmp_reg (tm_state_type, NULL); | |
2810 | tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION); | |
2811 | gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, | |
2812 | tm_state, t2); | |
2813 | gimple_stmt_iterator gsi = gsi_last_bb (test_bb); | |
2814 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2815 | ||
2816 | t2 = build_int_cst (tm_state_type, 0); | |
2817 | stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); | |
2818 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2819 | ||
2820 | edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU); | |
2821 | test_bb->frequency = transaction_bb->frequency; | |
2822 | test_bb->count = transaction_bb->count; | |
2823 | ei->probability = PROB_ALWAYS; | |
2824 | ||
2825 | // Non-abort edge. If both are live, choose one at random, as | |
2826 | // we'll be fixing that up below. | |
2827 | redirect_edge_pred (fallthru_edge, test_bb); | |
2828 | fallthru_edge->flags = EDGE_FALSE_VALUE; | |
2829 | fallthru_edge->probability = PROB_VERY_LIKELY; | |
2830 | fallthru_edge->count | |
9af5ce0c | 2831 | = apply_probability (test_bb->count, fallthru_edge->probability); |
0cd02a19 | 2832 | |
2833 | // Abort/over edge. | |
2834 | redirect_edge_pred (abort_edge, test_bb); | |
2835 | abort_edge->flags = EDGE_TRUE_VALUE; | |
2836 | abort_edge->probability = PROB_VERY_UNLIKELY; | |
2837 | abort_edge->count | |
9af5ce0c | 2838 | = apply_probability (test_bb->count, abort_edge->probability); |
0cd02a19 | 2839 | |
2840 | transaction_bb = test_bb; | |
2841 | } | |
2842 | ||
2843 | // If we have both instrumented and uninstrumented code paths, select one. | |
2844 | if (inst_edge && uninst_edge) | |
2845 | { | |
2846 | basic_block test_bb = create_empty_bb (transaction_bb); | |
2847 | if (current_loops && transaction_bb->loop_father) | |
2848 | add_bb_to_loop (test_bb, transaction_bb->loop_father); | |
2849 | if (region->restart_block == region->entry_block) | |
2850 | region->restart_block = test_bb; | |
2851 | ||
2852 | tree t1 = create_tmp_reg (tm_state_type, NULL); | |
2853 | tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE); | |
2854 | ||
2855 | gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, | |
2856 | tm_state, t2); | |
2857 | gimple_stmt_iterator gsi = gsi_last_bb (test_bb); | |
2858 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2859 | ||
2860 | t2 = build_int_cst (tm_state_type, 0); | |
2861 | stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); | |
2862 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2863 | ||
2864 | // Create the edge into test_bb first, as we want to copy values | |
2865 | // out of the fallthru edge. | |
2866 | edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags); | |
2867 | e->probability = fallthru_edge->probability; | |
2868 | test_bb->count = e->count = fallthru_edge->count; | |
2869 | test_bb->frequency = EDGE_FREQUENCY (e); | |
2870 | ||
2871 | // Now update the edges to the inst/uninst implementations. | |
2872 | // For now assume that the paths are equally likely. When using HTM, | |
2873 | // we'll try the uninst path first and fall back to the inst path if HTM | |
2874 | // buffers are exceeded. Without HTM we start with the inst path and | |
2875 | // use the uninst path when falling back to serial mode. | |
2876 | redirect_edge_pred (inst_edge, test_bb); | |
2877 | inst_edge->flags = EDGE_FALSE_VALUE; | |
2878 | inst_edge->probability = REG_BR_PROB_BASE / 2; | |
2879 | inst_edge->count | |
9af5ce0c | 2880 | = apply_probability (test_bb->count, inst_edge->probability); |
0cd02a19 | 2881 | |
2882 | redirect_edge_pred (uninst_edge, test_bb); | |
2883 | uninst_edge->flags = EDGE_TRUE_VALUE; | |
2884 | uninst_edge->probability = REG_BR_PROB_BASE / 2; | |
2885 | uninst_edge->count | |
9af5ce0c | 2886 | = apply_probability (test_bb->count, uninst_edge->probability); |
0cd02a19 | 2887 | } |
2888 | ||
2889 | // If we have no previous special cases, and we have PHIs at the beginning | |
2890 | // of the atomic region, this means we have a loop at the beginning of the | |
2891 | // atomic region that shares the first block. This can cause problems when | |
2892 | // the transaction restart abnormal edges are added in the tm_edges pass. | |
2893 | // Solve this by adding a new empty block to receive the abnormal edges. | |
2894 | if (region->restart_block == region->entry_block | |
2895 | && phi_nodes (region->entry_block)) | |
2896 | { | |
2897 | basic_block empty_bb = create_empty_bb (transaction_bb); | |
2898 | region->restart_block = empty_bb; | |
2899 | if (current_loops && transaction_bb->loop_father) | |
2900 | add_bb_to_loop (empty_bb, transaction_bb->loop_father); | |
2901 | ||
2902 | redirect_edge_pred (fallthru_edge, empty_bb); | |
2903 | make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU); | |
2904 | } | |
2905 | ||
2906 | return NULL; | |
2907 | } | |
2908 | ||
2909 | /* Generate the temporary to be used for the return value of | |
2910 | BUILT_IN_TM_START. */ | |
2911 | ||
2912 | static void * | |
2913 | generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED) | |
2914 | { | |
2915 | tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START); | |
2916 | region->tm_state = | |
2917 | create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state"); | |
2918 | ||
2919 | // Reset the subcode, post optimizations. We'll fill this in | |
2920 | // again as we process blocks. | |
2921 | if (region->exit_blocks) | |
2922 | { | |
2923 | unsigned int subcode | |
2924 | = gimple_transaction_subcode (region->transaction_stmt); | |
2925 | ||
2926 | if (subcode & GTMA_DOES_GO_IRREVOCABLE) | |
2927 | subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE | |
1910089e | 2928 | | GTMA_MAY_ENTER_IRREVOCABLE |
2929 | | GTMA_HAS_NO_INSTRUMENTATION); | |
0cd02a19 | 2930 | else |
2931 | subcode &= GTMA_DECLARATION_MASK; | |
2932 | gimple_transaction_set_subcode (region->transaction_stmt, subcode); | |
2933 | } | |
2934 | ||
2935 | return NULL; | |
2936 | } | |
2937 | ||
2938 | // Propagate flags from inner transactions outwards. | |
2939 | static void | |
2940 | propagate_tm_flags_out (struct tm_region *region) | |
2941 | { | |
2942 | if (region == NULL) | |
2943 | return; | |
2944 | propagate_tm_flags_out (region->inner); | |
2945 | ||
2946 | if (region->outer && region->outer->transaction_stmt) | |
2947 | { | |
2948 | unsigned s = gimple_transaction_subcode (region->transaction_stmt); | |
2949 | s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE | |
2950 | | GTMA_MAY_ENTER_IRREVOCABLE); | |
2951 | s |= gimple_transaction_subcode (region->outer->transaction_stmt); | |
2952 | gimple_transaction_set_subcode (region->outer->transaction_stmt, s); | |
2953 | } | |
2954 | ||
2955 | propagate_tm_flags_out (region->next); | |
2956 | } | |
2957 | ||
4c0315d0 | 2958 | /* Entry point to the MARK phase of TM expansion. Here we replace |
2959 | transactional memory statements with calls to builtins, and function | |
2960 | calls with their transactional clones (if available). But we don't | |
2961 | yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */ | |
2962 | ||
2963 | static unsigned int | |
2964 | execute_tm_mark (void) | |
2965 | { | |
4c0315d0 | 2966 | pending_edge_inserts_p = false; |
2967 | ||
00d83cc8 | 2968 | expand_regions (all_tm_regions, generate_tm_state, NULL, |
2969 | /*traverse_clones=*/true); | |
4c0315d0 | 2970 | |
0cd02a19 | 2971 | tm_log_init (); |
4c0315d0 | 2972 | |
00d83cc8 | 2973 | vec<tm_region_p> bb_regions |
79f4a793 | 2974 | = get_bb_regions_instrumented (/*traverse_clones=*/true, |
2975 | /*include_uninstrumented_p=*/false); | |
0cd02a19 | 2976 | struct tm_region *r; |
2977 | unsigned i; | |
4c0315d0 | 2978 | |
0cd02a19 | 2979 | // Expand memory operations into calls into the runtime. |
2980 | // This collects log entries as well. | |
f1f41a6c | 2981 | FOR_EACH_VEC_ELT (bb_regions, i, r) |
ded1a556 | 2982 | { |
2983 | if (r != NULL) | |
2984 | { | |
2985 | if (r->transaction_stmt) | |
2986 | { | |
2987 | unsigned sub = gimple_transaction_subcode (r->transaction_stmt); | |
2988 | ||
2989 | /* If we're sure to go irrevocable, there won't be | |
2990 | anything to expand, since the run-time will go | |
2991 | irrevocable right away. */ | |
2992 | if (sub & GTMA_DOES_GO_IRREVOCABLE | |
2993 | && sub & GTMA_MAY_ENTER_IRREVOCABLE) | |
2994 | continue; | |
2995 | } | |
f5a6b05f | 2996 | expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i)); |
ded1a556 | 2997 | } |
2998 | } | |
0cd02a19 | 2999 | |
4aac6cf8 | 3000 | bb_regions.release (); |
3001 | ||
0cd02a19 | 3002 | // Propagate flags from inner transactions outwards. |
3003 | propagate_tm_flags_out (all_tm_regions); | |
3004 | ||
3005 | // Expand GIMPLE_TRANSACTIONs into calls into the runtime. | |
00d83cc8 | 3006 | expand_regions (all_tm_regions, expand_transaction, NULL, |
3007 | /*traverse_clones=*/false); | |
0cd02a19 | 3008 | |
3009 | tm_log_emit (); | |
3010 | tm_log_delete (); | |
4c0315d0 | 3011 | |
3012 | if (pending_edge_inserts_p) | |
3013 | gsi_commit_edge_inserts (); | |
0cd02a19 | 3014 | free_dominance_info (CDI_DOMINATORS); |
4c0315d0 | 3015 | return 0; |
3016 | } | |
3017 | ||
cbe8bda8 | 3018 | namespace { |
3019 | ||
3020 | const pass_data pass_data_tm_mark = | |
3021 | { | |
3022 | GIMPLE_PASS, /* type */ | |
3023 | "tmmark", /* name */ | |
3024 | OPTGROUP_NONE, /* optinfo_flags */ | |
3025 | false, /* has_gate */ | |
3026 | true, /* has_execute */ | |
3027 | TV_TRANS_MEM, /* tv_id */ | |
3028 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
3029 | 0, /* properties_provided */ | |
3030 | 0, /* properties_destroyed */ | |
3031 | 0, /* todo_flags_start */ | |
3032 | ( TODO_update_ssa | TODO_verify_ssa ), /* todo_flags_finish */ | |
4c0315d0 | 3033 | }; |
cbe8bda8 | 3034 | |
3035 | class pass_tm_mark : public gimple_opt_pass | |
3036 | { | |
3037 | public: | |
9af5ce0c | 3038 | pass_tm_mark (gcc::context *ctxt) |
3039 | : gimple_opt_pass (pass_data_tm_mark, ctxt) | |
cbe8bda8 | 3040 | {} |
3041 | ||
3042 | /* opt_pass methods: */ | |
3043 | unsigned int execute () { return execute_tm_mark (); } | |
3044 | ||
3045 | }; // class pass_tm_mark | |
3046 | ||
3047 | } // anon namespace | |
3048 | ||
3049 | gimple_opt_pass * | |
3050 | make_pass_tm_mark (gcc::context *ctxt) | |
3051 | { | |
3052 | return new pass_tm_mark (ctxt); | |
3053 | } | |
4c0315d0 | 3054 | \f |
0cd02a19 | 3055 | |
3056 | /* Create an abnormal edge from STMT at ITER, splitting the block | |
3057 | as necessary. Adjust *PNEXT as needed for the split block. */ | |
4c0315d0 | 3058 | |
3059 | static inline void | |
0cd02a19 | 3060 | split_bb_make_tm_edge (gimple stmt, basic_block dest_bb, |
3061 | gimple_stmt_iterator iter, gimple_stmt_iterator *pnext) | |
4c0315d0 | 3062 | { |
0cd02a19 | 3063 | basic_block bb = gimple_bb (stmt); |
3064 | if (!gsi_one_before_end_p (iter)) | |
3065 | { | |
3066 | edge e = split_block (bb, stmt); | |
3067 | *pnext = gsi_start_bb (e->dest); | |
3068 | } | |
3069 | make_edge (bb, dest_bb, EDGE_ABNORMAL); | |
4c0315d0 | 3070 | |
0cd02a19 | 3071 | // Record the need for the edge for the benefit of the rtl passes. |
4c0315d0 | 3072 | if (cfun->gimple_df->tm_restart == NULL) |
3073 | cfun->gimple_df->tm_restart = htab_create_ggc (31, struct_ptr_hash, | |
3074 | struct_ptr_eq, ggc_free); | |
3075 | ||
0cd02a19 | 3076 | struct tm_restart_node dummy; |
4c0315d0 | 3077 | dummy.stmt = stmt; |
0cd02a19 | 3078 | dummy.label_or_list = gimple_block_label (dest_bb); |
3079 | ||
3080 | void **slot = htab_find_slot (cfun->gimple_df->tm_restart, &dummy, INSERT); | |
3081 | struct tm_restart_node *n = (struct tm_restart_node *) *slot; | |
4c0315d0 | 3082 | if (n == NULL) |
3083 | { | |
3084 | n = ggc_alloc_tm_restart_node (); | |
3085 | *n = dummy; | |
3086 | } | |
3087 | else | |
3088 | { | |
3089 | tree old = n->label_or_list; | |
3090 | if (TREE_CODE (old) == LABEL_DECL) | |
0cd02a19 | 3091 | old = tree_cons (NULL, old, NULL); |
4c0315d0 | 3092 | n->label_or_list = tree_cons (NULL, dummy.label_or_list, old); |
3093 | } | |
4c0315d0 | 3094 | } |
3095 | ||
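/* A hedged illustration of the label_or_list bookkeeping above: the
   first restart target recorded for STMT is stored as a bare
   LABEL_DECL; recording a second target for the same statement
   converts the entry into a TREE_LIST of labels, e.g.

	target L1 only:     n->label_or_list = L1
	then also L2:       n->label_or_list = (L2, (L1))

   The rtl passes that consume tm_restart accept either form.  */
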
4c0315d0 | 3096 | /* Split block BB as necessary for every builtin function we added, and |
3097 | wire up the abnormal back edges implied by the transaction restart. */ | |
3098 | ||
3099 | static void | |
0cd02a19 | 3100 | expand_block_edges (struct tm_region *const region, basic_block bb) |
4c0315d0 | 3101 | { |
0cd02a19 | 3102 | gimple_stmt_iterator gsi, next_gsi; |
4c0315d0 | 3103 | |
0cd02a19 | 3104 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi) |
4c0315d0 | 3105 | { |
3106 | gimple stmt = gsi_stmt (gsi); | |
3107 | ||
0cd02a19 | 3108 | next_gsi = gsi; |
3109 | gsi_next (&next_gsi); | |
3110 | ||
3111 | // ??? Shouldn't we split for any non-pure, non-irrevocable function? | |
3112 | if (gimple_code (stmt) != GIMPLE_CALL | |
3113 | || (gimple_call_flags (stmt) & ECF_TM_BUILTIN) == 0) | |
3114 | continue; | |
3115 | ||
3116 | if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt)) == BUILT_IN_TM_ABORT) | |
4c0315d0 | 3117 | { |
0cd02a19 | 3118 | // If we have a ``__transaction_cancel [[outer]]'', there is only |
3119 | // one abnormal edge: to the transaction marked OUTER. | |
3120 | // All compiler-generated instances of BUILT_IN_TM_ABORT have a | |
3121 | // constant argument, which we can examine here. Users invoking | |
3122 | // TM_ABORT directly get what they deserve. | |
3123 | tree arg = gimple_call_arg (stmt, 0); | |
3124 | if (TREE_CODE (arg) == INTEGER_CST | |
3125 | && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0 | |
3126 | && !decl_is_tm_clone (current_function_decl)) | |
4c0315d0 | 3127 | { |
0cd02a19 | 3128 | // Find the GTMA_IS_OUTER transaction. |
3129 | for (struct tm_region *o = region; o; o = o->outer) | |
3130 | if (o->original_transaction_was_outer) | |
3131 | { | |
3132 | split_bb_make_tm_edge (stmt, o->restart_block, | |
3133 | gsi, &next_gsi); | |
3134 | break; | |
3135 | } | |
3136 | ||
3137 | // Otherwise, the front-end should have semantically checked | |
3138 | // outer aborts, but in either case the target region is not | |
3139 | // within this function. | |
3140 | continue; | |
4c0315d0 | 3141 | } |
3142 | ||
0cd02a19 | 3143 | // Non-outer TM aborts have an abnormal edge to the inner-most |
3144 | // transaction, the one being aborted. | |
3145 | split_bb_make_tm_edge (stmt, region->restart_block, gsi, &next_gsi); | |
4c0315d0 | 3146 | } |
3147 | ||
0cd02a19 | 3148 | // All TM builtins have an abnormal edge to the outer-most transaction. |
3149 | // We never restart inner transactions. For tm clones, we know a priori | |
3150 | // that the outer-most transaction is outside the function. | |
3151 | if (decl_is_tm_clone (current_function_decl)) | |
3152 | continue; | |
4c0315d0 | 3153 | |
0cd02a19 | 3154 | if (cfun->gimple_df->tm_restart == NULL) |
3155 | cfun->gimple_df->tm_restart | |
3156 | = htab_create_ggc (31, struct_ptr_hash, struct_ptr_eq, ggc_free); | |
4c0315d0 | 3157 | |
0cd02a19 | 3158 | // Find the outer-most region and wire the restart edge there. |
3160 | for (struct tm_region *o = region; o; o = o->outer) | |
3161 | if (!o->outer) | |
3162 | { | |
3163 | split_bb_make_tm_edge (stmt, o->restart_block, gsi, &next_gsi); | |
3164 | break; | |
3165 | } | |
4c0315d0 | 3166 | |
0cd02a19 | 3167 | // Delete any tail-call annotation that may have been added. |
3168 | // The tail-call pass may have mis-identified the commit as being | |
3169 | // a candidate because we had not yet added this restart edge. | |
3170 | gimple_call_set_tail (stmt, false); | |
4c0315d0 | 3171 | } |
3172 | } | |
3173 | ||
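/* To illustrate the effect (builtin name illustrative): a block

	BB:  ...; x = _ITM_RU4 (&a); ...

   is split after the call, and the half containing the call gains an
   abnormal edge to the enclosing transaction's restart block, so the
   runtime has somewhere to land when it restarts the transaction.  */
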
3174 | /* Entry point to the final expansion of transactional nodes. */ | |
3175 | ||
3176 | static unsigned int | |
3177 | execute_tm_edges (void) | |
3178 | { | |
00d83cc8 | 3179 | vec<tm_region_p> bb_regions |
79f4a793 | 3180 | = get_bb_regions_instrumented (/*traverse_clones=*/false, |
3181 | /*include_uninstrumented_p=*/true); | |
0cd02a19 | 3182 | struct tm_region *r; |
3183 | unsigned i; | |
3184 | ||
f1f41a6c | 3185 | FOR_EACH_VEC_ELT (bb_regions, i, r) |
0cd02a19 | 3186 | if (r != NULL) |
f5a6b05f | 3187 | expand_block_edges (r, BASIC_BLOCK_FOR_FN (cfun, i)); |
0cd02a19 | 3188 | |
f1f41a6c | 3189 | bb_regions.release (); |
4c0315d0 | 3190 | |
3191 | /* We've got to release the dominance info now, to indicate that it | |
3192 | must be rebuilt completely. Otherwise we'll crash trying to update | |
3193 | the SSA web in the TODO section following this pass. */ | |
3194 | free_dominance_info (CDI_DOMINATORS); | |
3195 | bitmap_obstack_release (&tm_obstack); | |
3196 | all_tm_regions = NULL; | |
3197 | ||
3198 | return 0; | |
3199 | } | |
3200 | ||
cbe8bda8 | 3201 | namespace { |
3202 | ||
3203 | const pass_data pass_data_tm_edges = | |
3204 | { | |
3205 | GIMPLE_PASS, /* type */ | |
3206 | "tmedge", /* name */ | |
3207 | OPTGROUP_NONE, /* optinfo_flags */ | |
3208 | false, /* has_gate */ | |
3209 | true, /* has_execute */ | |
3210 | TV_TRANS_MEM, /* tv_id */ | |
3211 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
3212 | 0, /* properties_provided */ | |
3213 | 0, /* properties_destroyed */ | |
3214 | 0, /* todo_flags_start */ | |
3215 | ( TODO_update_ssa | TODO_verify_ssa ), /* todo_flags_finish */ | |
4c0315d0 | 3216 | }; |
cbe8bda8 | 3217 | |
3218 | class pass_tm_edges : public gimple_opt_pass | |
3219 | { | |
3220 | public: | |
9af5ce0c | 3221 | pass_tm_edges (gcc::context *ctxt) |
3222 | : gimple_opt_pass (pass_data_tm_edges, ctxt) | |
cbe8bda8 | 3223 | {} |
3224 | ||
3225 | /* opt_pass methods: */ | |
3226 | unsigned int execute () { return execute_tm_edges (); } | |
3227 | ||
3228 | }; // class pass_tm_edges | |
3229 | ||
3230 | } // anon namespace | |
3231 | ||
3232 | gimple_opt_pass * | |
3233 | make_pass_tm_edges (gcc::context *ctxt) | |
3234 | { | |
3235 | return new pass_tm_edges (ctxt); | |
3236 | } | |
0cd02a19 | 3237 | \f |
3238 | /* Helper function for expand_regions. Expand REGION and recurse into | |
3239 | its inner regions. Call CALLBACK on each region. CALLBACK returns | |
3240 | NULL to continue the traversal; otherwise the traversal stops and | |
00d83cc8 | 3241 | this function returns that non-null value. TRAVERSE_CLONES is true |
3242 | if we should traverse transactional clones. */ | |
0cd02a19 | 3243 | |
3244 | static void * | |
3245 | expand_regions_1 (struct tm_region *region, | |
3246 | void *(*callback)(struct tm_region *, void *), | |
00d83cc8 | 3247 | void *data, |
3248 | bool traverse_clones) | |
0cd02a19 | 3249 | { |
3250 | void *retval = NULL; | |
00d83cc8 | 3251 | if (region->exit_blocks |
3252 | || (traverse_clones && decl_is_tm_clone (current_function_decl))) | |
0cd02a19 | 3253 | { |
3254 | retval = callback (region, data); | |
3255 | if (retval) | |
3256 | return retval; | |
3257 | } | |
3258 | if (region->inner) | |
3259 | { | |
00d83cc8 | 3260 | retval = expand_regions (region->inner, callback, data, traverse_clones); |
0cd02a19 | 3261 | if (retval) |
3262 | return retval; | |
3263 | } | |
3264 | return retval; | |
3265 | } | |
3266 | ||
3267 | /* Traverse the regions enclosed by and including REGION. Execute | |
3268 | CALLBACK for each region, passing DATA. CALLBACK returns NULL to | |
3269 | continue the traversal; otherwise the traversal stops and this | |
00d83cc8 | 3270 | function returns that non-null value. TRAVERSE_CLONES is true if we |
3271 | should traverse transactional clones. */ | |
0cd02a19 | 3272 | |
3273 | static void * | |
3274 | expand_regions (struct tm_region *region, | |
3275 | void *(*callback)(struct tm_region *, void *), | |
00d83cc8 | 3276 | void *data, |
3277 | bool traverse_clones) | |
0cd02a19 | 3278 | { |
3279 | void *retval = NULL; | |
3280 | while (region) | |
3281 | { | |
00d83cc8 | 3282 | retval = expand_regions_1 (region, callback, data, traverse_clones); |
0cd02a19 | 3283 | if (retval) |
3284 | return retval; | |
3285 | region = region->next; | |
3286 | } | |
3287 | return retval; | |
3288 | } | |
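
/* A minimal usage sketch (hypothetical callback): stop the traversal
   at the first region that carries a GIMPLE_TRANSACTION statement.

	static void *
	find_stmt_region (struct tm_region *r, void *data ATTRIBUTE_UNUSED)
	{
	  return r->transaction_stmt ? r : NULL;
	}

	struct tm_region *r
	  = (struct tm_region *) expand_regions (all_tm_regions,
						 find_stmt_region, NULL,
						 /&#42;traverse_clones=&#42;/false);

   A non-null return from the callback short-circuits the walk and is
   returned to the caller.  */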
3289 | ||
4c0315d0 | 3290 | \f |
3291 | /* A unique TM memory operation. */ | |
3292 | typedef struct tm_memop | |
3293 | { | |
3294 | /* Unique ID that all memory operations to the same location have. */ | |
3295 | unsigned int value_id; | |
3296 | /* Address of load/store. */ | |
3297 | tree addr; | |
3298 | } *tm_memop_t; | |
3299 | ||
d9dd21a8 | 3300 | /* TM memory operation hashtable helpers. */ |
3301 | ||
3302 | struct tm_memop_hasher : typed_free_remove <tm_memop> | |
3303 | { | |
3304 | typedef tm_memop value_type; | |
3305 | typedef tm_memop compare_type; | |
3306 | static inline hashval_t hash (const value_type *); | |
3307 | static inline bool equal (const value_type *, const compare_type *); | |
3308 | }; | |
3309 | ||
3310 | /* Htab support. Return a hash value for a `tm_memop'. */ | |
3311 | inline hashval_t | |
3312 | tm_memop_hasher::hash (const value_type *mem) | |
3313 | { | |
3314 | tree addr = mem->addr; | |
3315 | /* We drill down to the SSA_NAME/DECL for the hash, but equality is | |
3316 | actually done with operand_equal_p (see tm_memop_hasher::equal). */ | |
3317 | if (TREE_CODE (addr) == ADDR_EXPR) | |
3318 | addr = TREE_OPERAND (addr, 0); | |
3319 | return iterative_hash_expr (addr, 0); | |
3320 | } | |
3321 | ||
3322 | /* Htab support. Return true if two tm_memop's are the same. */ | |
3323 | inline bool | |
3324 | tm_memop_hasher::equal (const value_type *mem1, const compare_type *mem2) | |
3325 | { | |
3326 | return operand_equal_p (mem1->addr, mem2->addr, 0); | |
3327 | } | |
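
/* For example, two ADDR_EXPRs `&x' built by different statements are
   distinct trees, but both hash on the underlying DECL `x' and compare
   equal via operand_equal_p, so memory operations through them share
   one value number.  */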
3328 | ||
4c0315d0 | 3329 | /* Sets for solving data flow equations in the memory optimization pass. */ |
3330 | struct tm_memopt_bitmaps | |
3331 | { | |
3332 | /* Stores available to this BB upon entry. Basically, stores that | |
3333 | dominate this BB. */ | |
3334 | bitmap store_avail_in; | |
3335 | /* Stores available at the end of this BB. */ | |
3336 | bitmap store_avail_out; | |
3337 | bitmap store_antic_in; /* Intersection of successors' STORE_ANTIC_OUT. */ | |
3338 | bitmap store_antic_out; /* STORE_ANTIC_IN plus this BB's local stores. */ | |
3339 | /* Reads available to this BB upon entry. Basically, reads that | |
3340 | dominate this BB. */ | |
3341 | bitmap read_avail_in; | |
3342 | /* Reads available at the end of this BB. */ | |
3343 | bitmap read_avail_out; | |
3344 | /* Reads performed in this BB. */ | |
3345 | bitmap read_local; | |
3346 | /* Writes performed in this BB. */ | |
3347 | bitmap store_local; | |
3348 | ||
3349 | /* Temporary storage for pass. */ | |
3350 | /* Is the current BB in the worklist? */ | |
3351 | bool avail_in_worklist_p; | |
3352 | /* Have we visited this BB? */ | |
3353 | bool visited_p; | |
3354 | }; | |
3355 | ||
3356 | static bitmap_obstack tm_memopt_obstack; | |
3357 | ||
3358 | /* Unique counter for TM loads and stores. Loads and stores of the | |
3359 | same address get the same ID. */ | |
3360 | static unsigned int tm_memopt_value_id; | |
d9dd21a8 | 3361 | static hash_table <tm_memop_hasher> tm_memopt_value_numbers; |
4c0315d0 | 3362 | |
3363 | #define STORE_AVAIL_IN(BB) \ | |
3364 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in | |
3365 | #define STORE_AVAIL_OUT(BB) \ | |
3366 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out | |
3367 | #define STORE_ANTIC_IN(BB) \ | |
3368 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in | |
3369 | #define STORE_ANTIC_OUT(BB) \ | |
3370 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out | |
3371 | #define READ_AVAIL_IN(BB) \ | |
3372 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in | |
3373 | #define READ_AVAIL_OUT(BB) \ | |
3374 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out | |
3375 | #define READ_LOCAL(BB) \ | |
3376 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local | |
3377 | #define STORE_LOCAL(BB) \ | |
3378 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local | |
3379 | #define AVAIL_IN_WORKLIST_P(BB) \ | |
3380 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p | |
3381 | #define BB_VISITED_P(BB) \ | |
3382 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p | |
3383 | ||
4c0315d0 | 3384 | /* Given a TM load/store in STMT, return the value number for the address |
3385 | it accesses. */ | |
3386 | ||
3387 | static unsigned int | |
3388 | tm_memopt_value_number (gimple stmt, enum insert_option op) | |
3389 | { | |
3390 | struct tm_memop tmpmem, *mem; | |
d9dd21a8 | 3391 | tm_memop **slot; |
4c0315d0 | 3392 | |
3393 | gcc_assert (is_tm_load (stmt) || is_tm_store (stmt)); | |
3394 | tmpmem.addr = gimple_call_arg (stmt, 0); | |
d9dd21a8 | 3395 | slot = tm_memopt_value_numbers.find_slot (&tmpmem, op); |
4c0315d0 | 3396 | if (*slot) |
d9dd21a8 | 3397 | mem = *slot; |
4c0315d0 | 3398 | else if (op == INSERT) |
3399 | { | |
3400 | mem = XNEW (struct tm_memop); | |
3401 | *slot = mem; | |
3402 | mem->value_id = tm_memopt_value_id++; | |
3403 | mem->addr = tmpmem.addr; | |
3404 | } | |
3405 | else | |
3406 | gcc_unreachable (); | |
3407 | return mem->value_id; | |
3408 | } | |
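
/* For instance (runtime entry points illustrative), in

	__builtin__ITM_WU4 (&x, 1);
	... = __builtin__ITM_RU4 (&x);

   both calls carry `&x' as their first argument and therefore map to
   the same value number, which is what lets the dataflow below relate
   the read to the earlier write.  */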
3409 | ||
3410 | /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */ | |
3411 | ||
3412 | static void | |
3413 | tm_memopt_accumulate_memops (basic_block bb) | |
3414 | { | |
3415 | gimple_stmt_iterator gsi; | |
3416 | ||
3417 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
3418 | { | |
3419 | gimple stmt = gsi_stmt (gsi); | |
3420 | bitmap bits; | |
3421 | unsigned int loc; | |
3422 | ||
3423 | if (is_tm_store (stmt)) | |
3424 | bits = STORE_LOCAL (bb); | |
3425 | else if (is_tm_load (stmt)) | |
3426 | bits = READ_LOCAL (bb); | |
3427 | else | |
3428 | continue; | |
3429 | ||
3430 | loc = tm_memopt_value_number (stmt, INSERT); | |
3431 | bitmap_set_bit (bits, loc); | |
3432 | if (dump_file) | |
3433 | { | |
3434 | fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=", | |
3435 | is_tm_load (stmt) ? "LOAD" : "STORE", loc, | |
3436 | gimple_bb (stmt)->index); | |
3437 | print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0); | |
3438 | fprintf (dump_file, "\n"); | |
3439 | } | |
3440 | } | |
3441 | } | |
3442 | ||
3443 | /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */ | |
3444 | ||
3445 | static void | |
3446 | dump_tm_memopt_set (const char *set_name, bitmap bits) | |
3447 | { | |
3448 | unsigned i; | |
3449 | bitmap_iterator bi; | |
3450 | const char *comma = ""; | |
3451 | ||
3452 | fprintf (dump_file, "TM memopt: %s: [", set_name); | |
3453 | EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi) | |
3454 | { | |
d9dd21a8 | 3455 | hash_table <tm_memop_hasher>::iterator hi; |
3456 | struct tm_memop *mem = NULL; | |
4c0315d0 | 3457 | |
3458 | /* Yeah, yeah, yeah. Whatever. This is just for debugging. */ | |
d9dd21a8 | 3459 | FOR_EACH_HASH_TABLE_ELEMENT (tm_memopt_value_numbers, mem, tm_memop_t, hi) |
4c0315d0 | 3460 | if (mem->value_id == i) |
3461 | break; | |
3462 | gcc_assert (mem->value_id == i); | |
3463 | fprintf (dump_file, "%s", comma); | |
3464 | comma = ", "; | |
3465 | print_generic_expr (dump_file, mem->addr, 0); | |
3466 | } | |
3467 | fprintf (dump_file, "]\n"); | |
3468 | } | |
3469 | ||
3470 | /* Prettily dump all of the memopt sets in BLOCKS. */ | |
3471 | ||
3472 | static void | |
f1f41a6c | 3473 | dump_tm_memopt_sets (vec<basic_block> blocks) |
4c0315d0 | 3474 | { |
3475 | size_t i; | |
3476 | basic_block bb; | |
3477 | ||
f1f41a6c | 3478 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3479 | { |
3480 | fprintf (dump_file, "------------BB %d---------\n", bb->index); | |
3481 | dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb)); | |
3482 | dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb)); | |
3483 | dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb)); | |
3484 | dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb)); | |
3485 | dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb)); | |
3486 | dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb)); | |
3487 | } | |
3488 | } | |
3489 | ||
3490 | /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */ | |
3491 | ||
3492 | static void | |
3493 | tm_memopt_compute_avin (basic_block bb) | |
3494 | { | |
3495 | edge e; | |
3496 | unsigned ix; | |
3497 | ||
3498 | /* Seed with the AVOUT of any predecessor. */ | |
3499 | for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++) | |
3500 | { | |
3501 | e = EDGE_PRED (bb, ix); | |
3502 | /* Make sure we have already visited this BB, and thus its sets | |
3503 | are initialized. | |
3504 | ||
3505 | If e->src->aux is NULL, this predecessor is actually on an | |
3506 | enclosing transaction. We only care about the current | |
3507 | transaction, so ignore it. */ | |
3508 | if (e->src->aux && BB_VISITED_P (e->src)) | |
3509 | { | |
3510 | bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src)); | |
3511 | bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src)); | |
3512 | break; | |
3513 | } | |
3514 | } | |
3515 | ||
3516 | for (; ix < EDGE_COUNT (bb->preds); ix++) | |
3517 | { | |
3518 | e = EDGE_PRED (bb, ix); | |
3519 | if (e->src->aux && BB_VISITED_P (e->src)) | |
3520 | { | |
3521 | bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src)); | |
3522 | bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src)); | |
3523 | } | |
3524 | } | |
3525 | ||
3526 | BB_VISITED_P (bb) = true; | |
3527 | } | |
3528 | ||
3529 | /* Compute the STORE_ANTIC_IN for the basic block BB. */ | |
3530 | ||
3531 | static void | |
3532 | tm_memopt_compute_antin (basic_block bb) | |
3533 | { | |
3534 | edge e; | |
3535 | unsigned ix; | |
3536 | ||
3537 | /* Seed with the ANTIC_OUT of any successor. */ | |
3538 | for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++) | |
3539 | { | |
3540 | e = EDGE_SUCC (bb, ix); | |
3541 | /* Make sure we have already visited this BB, and thus its sets | |
3542 | are initialized. */ | |
3543 | if (BB_VISITED_P (e->dest)) | |
3544 | { | |
3545 | bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest)); | |
3546 | break; | |
3547 | } | |
3548 | } | |
3549 | ||
3550 | for (; ix < EDGE_COUNT (bb->succs); ix++) | |
3551 | { | |
3552 | e = EDGE_SUCC (bb, ix); | |
3553 | if (BB_VISITED_P (e->dest)) | |
3554 | bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest)); | |
3555 | } | |
3556 | ||
3557 | BB_VISITED_P (bb) = true; | |
3558 | } | |
3559 | ||
3560 | /* Compute the AVAIL sets for every basic block in BLOCKS. | |
3561 | ||
3562 | We compute {STORE,READ}_AVAIL_{OUT,IN} as follows: | |
3563 | ||
3564 | AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb]) | |
3565 | AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors]) | |
3566 | ||
3567 | This is basically what we do in lcm.c's compute_available (), but here | |
3568 | we calculate two sets of sets (one for STOREs and one for READs), | |
3569 | and we work on a region instead of the entire CFG. | |
3570 | ||
3571 | REGION is the TM region. | |
3572 | BLOCKS are the basic blocks in the region. */ | |
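
/* As a worked example (a sketch, not from the sources): in a diamond
   region where BB1 branches to BB2 and BB3, which rejoin at BB4, and
   BB1 stores to `x', we get STORE_AVAIL_OUT[BB1] = {x}; both BB2 and
   BB3 inherit {x} in their AVAIL_IN, and the intersection at BB4
   still contains x.  A store to x is therefore available at BB4
   regardless of which arm executed.  */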
3573 | ||
3574 | static void | |
3575 | tm_memopt_compute_available (struct tm_region *region, | |
f1f41a6c | 3576 | vec<basic_block> blocks) |
4c0315d0 | 3577 | { |
3578 | edge e; | |
3579 | basic_block *worklist, *qin, *qout, *qend, bb; | |
3580 | unsigned int qlen, i; | |
3581 | edge_iterator ei; | |
3582 | bool changed; | |
3583 | ||
3584 | /* Allocate a worklist array/queue. Entries are only added to the | |
3585 | list if they were not already on the list. So the size is | |
3586 | bounded by the number of basic blocks in the region. */ | |
f1f41a6c | 3587 | qlen = blocks.length () - 1; |
4c0315d0 | 3588 | qin = qout = worklist = |
3589 | XNEWVEC (basic_block, qlen); | |
3590 | ||
3591 | /* Put every block in the region on the worklist. */ | |
f1f41a6c | 3592 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3593 | { |
3594 | /* Seed AVAIL_OUT with the LOCAL set. */ | |
3595 | bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb)); | |
3596 | bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb)); | |
3597 | ||
3598 | AVAIL_IN_WORKLIST_P (bb) = true; | |
3599 | /* No need to insert the entry block, since it has an AVIN of | |
3600 | null, and an AVOUT that has already been seeded in. */ | |
3601 | if (bb != region->entry_block) | |
3602 | *qin++ = bb; | |
3603 | } | |
3604 | ||
3605 | /* The entry block has been initialized with the local sets. */ | |
3606 | BB_VISITED_P (region->entry_block) = true; | |
3607 | ||
3608 | qin = worklist; | |
3609 | qend = &worklist[qlen]; | |
3610 | ||
3611 | /* Iterate until the worklist is empty. */ | |
3612 | while (qlen) | |
3613 | { | |
3614 | /* Take the first entry off the worklist. */ | |
3615 | bb = *qout++; | |
3616 | qlen--; | |
3617 | ||
3618 | if (qout >= qend) | |
3619 | qout = worklist; | |
3620 | ||
3621 | /* This block can be added to the worklist again if necessary. */ | |
3622 | AVAIL_IN_WORKLIST_P (bb) = false; | |
3623 | tm_memopt_compute_avin (bb); | |
3624 | ||
3625 | /* Note: We do not add the LOCAL sets here because we already | |
3626 | seeded the AVAIL_OUT sets with them. */ | |
3627 | changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb)); | |
3628 | changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb)); | |
3629 | if (changed | |
3630 | && (region->exit_blocks == NULL | |
3631 | || !bitmap_bit_p (region->exit_blocks, bb->index))) | |
3632 | /* If the out state of this block changed, then we need to add | |
3633 | its successors to the worklist if they are not already in. */ | |
3634 | FOR_EACH_EDGE (e, ei, bb->succs) | |
34154e27 | 3635 | if (!AVAIL_IN_WORKLIST_P (e->dest) |
3636 | && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) | |
4c0315d0 | 3637 | { |
3638 | *qin++ = e->dest; | |
3639 | AVAIL_IN_WORKLIST_P (e->dest) = true; | |
3640 | qlen++; | |
3641 | ||
3642 | if (qin >= qend) | |
3643 | qin = worklist; | |
3644 | } | |
3645 | } | |
3646 | ||
3647 | free (worklist); | |
3648 | ||
3649 | if (dump_file) | |
3650 | dump_tm_memopt_sets (blocks); | |
3651 | } | |
3652 | ||
3653 | /* Compute ANTIC sets for every basic block in BLOCKS. | |
3654 | ||
3655 | We compute STORE_ANTIC_OUT as follows: | |
3656 | ||
3657 | STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb]) | |
3658 | STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors]) | |
3659 | ||
3660 | REGION is the TM region. | |
3661 | BLOCKS are the basic blocks in the region. */ | |
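
/* Continuing the diamond sketch from the AVAIL example above: if both
   BB2 and BB3 store to `x', then STORE_ANTIC_OUT of each arm contains
   x, so the intersection STORE_ANTIC_IN[BB1] contains x as well: a
   store to x will happen on every path out of BB1.  This is what lets
   a load of x in BB1 be turned into a read-for-write below.  */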
3662 | ||
3663 | static void | |
3664 | tm_memopt_compute_antic (struct tm_region *region, | |
f1f41a6c | 3665 | vec<basic_block> blocks) |
4c0315d0 | 3666 | { |
3667 | edge e; | |
3668 | basic_block *worklist, *qin, *qout, *qend, bb; | |
3669 | unsigned int qlen; | |
3670 | int i; | |
3671 | edge_iterator ei; | |
3672 | ||
3673 | /* Allocate a worklist array/queue. Entries are only added to the | |
3674 | list if they were not already on the list. So the size is | |
3675 | bounded by the number of basic blocks in the region. */ | |
f1f41a6c | 3676 | qin = qout = worklist = XNEWVEC (basic_block, blocks.length ()); |
4c0315d0 | 3677 | |
f1f41a6c | 3678 | for (qlen = 0, i = blocks.length () - 1; i >= 0; --i) |
4c0315d0 | 3679 | { |
f1f41a6c | 3680 | bb = blocks[i]; |
4c0315d0 | 3681 | |
3682 | /* Seed ANTIC_OUT with the LOCAL set. */ | |
3683 | bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb)); | |
3684 | ||
3685 | /* Put every block in the region on the worklist. */ | |
3686 | AVAIL_IN_WORKLIST_P (bb) = true; | |
3687 | /* No need to insert exit blocks, since their ANTIC_IN is NULL, | |
3688 | and their ANTIC_OUT has already been seeded in. */ | |
3689 | if (region->exit_blocks | |
3690 | && !bitmap_bit_p (region->exit_blocks, bb->index)) | |
3691 | { | |
3692 | qlen++; | |
3693 | *qin++ = bb; | |
3694 | } | |
3695 | } | |
3696 | ||
3697 | /* The exit blocks have been initialized with the local sets. */ | |
3698 | if (region->exit_blocks) | |
3699 | { | |
3700 | unsigned int i; | |
3701 | bitmap_iterator bi; | |
3702 | EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi) | |
f5a6b05f | 3703 | BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true; |
4c0315d0 | 3704 | } |
3705 | ||
3706 | qin = worklist; | |
3707 | qend = &worklist[qlen]; | |
3708 | ||
3709 | /* Iterate until the worklist is empty. */ | |
3710 | while (qlen) | |
3711 | { | |
3712 | /* Take the first entry off the worklist. */ | |
3713 | bb = *qout++; | |
3714 | qlen--; | |
3715 | ||
3716 | if (qout >= qend) | |
3717 | qout = worklist; | |
3718 | ||
3719 | /* This block can be added to the worklist again if necessary. */ | |
3720 | AVAIL_IN_WORKLIST_P (bb) = false; | |
3721 | tm_memopt_compute_antin (bb); | |
3722 | ||
3723 | /* Note: We do not add the LOCAL sets here because we already | |
3724 | seeded the ANTIC_OUT sets with them. */ | |
3725 | if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb)) | |
3726 | && bb != region->entry_block) | |
3727 | /* If the out state of this block changed, then we need to add | |
3728 | its predecessors to the worklist if they are not already in. */ | |
3729 | FOR_EACH_EDGE (e, ei, bb->preds) | |
3730 | if (!AVAIL_IN_WORKLIST_P (e->src)) | |
3731 | { | |
3732 | *qin++ = e->src; | |
3733 | AVAIL_IN_WORKLIST_P (e->src) = true; | |
3734 | qlen++; | |
3735 | ||
3736 | if (qin >= qend) | |
3737 | qin = worklist; | |
3738 | } | |
3739 | } | |
3740 | ||
3741 | free (worklist); | |
3742 | ||
3743 | if (dump_file) | |
3744 | dump_tm_memopt_sets (blocks); | |
3745 | } | |
3746 | ||
3747 | /* Offsets of load variants from TM_LOAD. For example, | |
3748 | BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*. | |
3749 | See gtm-builtins.def. */ | |
3750 | #define TRANSFORM_RAR 1 | |
3751 | #define TRANSFORM_RAW 2 | |
3752 | #define TRANSFORM_RFW 3 | |
3753 | /* Offsets of store variants from TM_STORE. */ | |
3754 | #define TRANSFORM_WAR 1 | |
3755 | #define TRANSFORM_WAW 2 | |
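
/* So, for example, rewriting a plain TM load into its read-after-write
   form means picking the builtin TRANSFORM_RAW == 2 entries below it
   in the builtins table, per the layout in gtm-builtins.def.  */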
3756 | ||
3757 | /* Inform about a load/store optimization. */ | |
3758 | ||
3759 | static void | |
3760 | dump_tm_memopt_transform (gimple stmt) | |
3761 | { | |
3762 | if (dump_file) | |
3763 | { | |
3764 | fprintf (dump_file, "TM memopt: transforming: "); | |
3765 | print_gimple_stmt (dump_file, stmt, 0, 0); | |
3766 | fprintf (dump_file, "\n"); | |
3767 | } | |
3768 | } | |
3769 | ||
3770 | /* Perform a read/write optimization. Replaces the TM builtin in STMT | |
3771 | by a builtin that is OFFSET entries down in the builtins table in | |
3772 | gtm-builtins.def. */ | |
3773 | ||
3774 | static void | |
3775 | tm_memopt_transform_stmt (unsigned int offset, | |
3776 | gimple stmt, | |
3777 | gimple_stmt_iterator *gsi) | |
3778 | { | |
3779 | tree fn = gimple_call_fn (stmt); | |
3780 | gcc_assert (TREE_CODE (fn) == ADDR_EXPR); | |
3781 | TREE_OPERAND (fn, 0) | |
3782 | = builtin_decl_explicit ((enum built_in_function) | |
3783 | (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0)) | |
3784 | + offset)); | |
3785 | gimple_call_set_fn (stmt, fn); | |
3786 | gsi_replace (gsi, stmt, true); | |
3787 | dump_tm_memopt_transform (stmt); | |
3788 | } | |
3789 | ||
3790 | /* Perform the actual TM memory optimization transformations in the | |
3791 | basic blocks in BLOCKS. */ | |
3792 | ||
3793 | static void | |
f1f41a6c | 3794 | tm_memopt_transform_blocks (vec<basic_block> blocks) |
4c0315d0 | 3795 | { |
3796 | size_t i; | |
3797 | basic_block bb; | |
3798 | gimple_stmt_iterator gsi; | |
3799 | ||
f1f41a6c | 3800 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3801 | { |
3802 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
3803 | { | |
3804 | gimple stmt = gsi_stmt (gsi); | |
3805 | bitmap read_avail = READ_AVAIL_IN (bb); | |
3806 | bitmap store_avail = STORE_AVAIL_IN (bb); | |
3807 | bitmap store_antic = STORE_ANTIC_OUT (bb); | |
3808 | unsigned int loc; | |
3809 | ||
3810 | if (is_tm_simple_load (stmt)) | |
3811 | { | |
3812 | loc = tm_memopt_value_number (stmt, NO_INSERT); | |
3813 | if (store_avail && bitmap_bit_p (store_avail, loc)) | |
3814 | tm_memopt_transform_stmt (TRANSFORM_RAW, stmt, &gsi); | |
3815 | else if (store_antic && bitmap_bit_p (store_antic, loc)) | |
3816 | { | |
3817 | tm_memopt_transform_stmt (TRANSFORM_RFW, stmt, &gsi); | |
3818 | bitmap_set_bit (store_avail, loc); | |
3819 | } | |
3820 | else if (read_avail && bitmap_bit_p (read_avail, loc)) | |
3821 | tm_memopt_transform_stmt (TRANSFORM_RAR, stmt, &gsi); | |
3822 | else | |
3823 | bitmap_set_bit (read_avail, loc); | |
3824 | } | |
3825 | else if (is_tm_simple_store (stmt)) | |
3826 | { | |
3827 | loc = tm_memopt_value_number (stmt, NO_INSERT); | |
3828 | if (store_avail && bitmap_bit_p (store_avail, loc)) | |
3829 | tm_memopt_transform_stmt (TRANSFORM_WAW, stmt, &gsi); | |
3830 | else | |
3831 | { | |
3832 | if (read_avail && bitmap_bit_p (read_avail, loc)) | |
3833 | tm_memopt_transform_stmt (TRANSFORM_WAR, stmt, &gsi); | |
3834 | bitmap_set_bit (store_avail, loc); | |
3835 | } | |
3836 | } | |
3837 | } | |
3838 | } | |
3839 | } | |
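
/* A hedged before/after sketch of the whole transformation (runtime
   entry points illustrative):

	_ITM_WU4 (&a, y);	// store; `&a' enters STORE_AVAIL
	x = _ITM_RU4 (&a);	// load sees the store: becomes RAW variant

   The second statement is rewritten via TRANSFORM_RAW because the
   value number of `&a' is in STORE_AVAIL at that point.  */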
3840 | ||
3841 | /* Return a new set of bitmaps for a BB. */ | |
3842 | ||
3843 | static struct tm_memopt_bitmaps * | |
3844 | tm_memopt_init_sets (void) | |
3845 | { | |
3846 | struct tm_memopt_bitmaps *b | |
3847 | = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps); | |
3848 | b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3849 | b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3850 | b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3851 | b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3853 | b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3854 | b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3855 | b->read_local = BITMAP_ALLOC (&tm_memopt_obstack); | |
3856 | b->store_local = BITMAP_ALLOC (&tm_memopt_obstack); | |
3857 | return b; | |
3858 | } | |
3859 | ||
3860 | /* Detach the per-BB sets; the bitmaps are freed with the obstack. */ | |
3861 | ||
3862 | static void | |
f1f41a6c | 3863 | tm_memopt_free_sets (vec<basic_block> blocks) |
4c0315d0 | 3864 | { |
3865 | size_t i; | |
3866 | basic_block bb; | |
3867 | ||
f1f41a6c | 3868 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3869 | bb->aux = NULL; |
3870 | } | |
3871 | ||
3872 | /* Clear the visited bit for every basic block in BLOCKS. */ | |
3873 | ||
3874 | static void | |
f1f41a6c | 3875 | tm_memopt_clear_visited (vec<basic_block> blocks) |
4c0315d0 | 3876 | { |
3877 | size_t i; | |
3878 | basic_block bb; | |
3879 | ||
f1f41a6c | 3880 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3881 | BB_VISITED_P (bb) = false; |
3882 | } | |
3883 | ||
3884 | /* Replace TM load/stores with hints for the runtime. We handle | |
3885 | things like read-after-write, write-after-read, read-after-read, | |
3886 | read-for-write, etc. */ | |
3887 | ||
3888 | static unsigned int | |
3889 | execute_tm_memopt (void) | |
3890 | { | |
3891 | struct tm_region *region; | |
f1f41a6c | 3892 | vec<basic_block> bbs; |
4c0315d0 | 3893 | |
3894 | tm_memopt_value_id = 0; | |
d9dd21a8 | 3895 | tm_memopt_value_numbers.create (10); |
4c0315d0 | 3896 | |
3897 | for (region = all_tm_regions; region; region = region->next) | |
3898 | { | |
3899 | /* All the TM stores/loads in the current region. */ | |
3900 | size_t i; | |
3901 | basic_block bb; | |
3902 | ||
3903 | bitmap_obstack_initialize (&tm_memopt_obstack); | |
3904 | ||
3905 | /* Save all BBs for the current region. */ | |
3906 | bbs = get_tm_region_blocks (region->entry_block, | |
3907 | region->exit_blocks, | |
3908 | region->irr_blocks, | |
3909 | NULL, | |
3910 | false); | |
3911 | ||
3912 | /* Collect all the memory operations. */ | |
f1f41a6c | 3913 | for (i = 0; bbs.iterate (i, &bb); ++i) |
4c0315d0 | 3914 | { |
3915 | bb->aux = tm_memopt_init_sets (); | |
3916 | tm_memopt_accumulate_memops (bb); | |
3917 | } | |
3918 | ||
3919 | /* Solve data flow equations and transform each block accordingly. */ | |
3920 | tm_memopt_clear_visited (bbs); | |
3921 | tm_memopt_compute_available (region, bbs); | |
3922 | tm_memopt_clear_visited (bbs); | |
3923 | tm_memopt_compute_antic (region, bbs); | |
3924 | tm_memopt_transform_blocks (bbs); | |
3925 | ||
3926 | tm_memopt_free_sets (bbs); | |
f1f41a6c | 3927 | bbs.release (); |
4c0315d0 | 3928 | bitmap_obstack_release (&tm_memopt_obstack); |
d9dd21a8 | 3929 | tm_memopt_value_numbers.empty (); |
4c0315d0 | 3930 | } |
3931 | ||
d9dd21a8 | 3932 | tm_memopt_value_numbers.dispose (); |
4c0315d0 | 3933 | return 0; |
3934 | } | |
3935 | ||
3936 | static bool | |
3937 | gate_tm_memopt (void) | |
3938 | { | |
3939 | return flag_tm && optimize > 0; | |
3940 | } | |
3941 | ||
cbe8bda8 | 3942 | namespace { |
3943 | ||
3944 | const pass_data pass_data_tm_memopt = | |
3945 | { | |
3946 | GIMPLE_PASS, /* type */ | |
3947 | "tmmemopt", /* name */ | |
3948 | OPTGROUP_NONE, /* optinfo_flags */ | |
3949 | true, /* has_gate */ | |
3950 | true, /* has_execute */ | |
3951 | TV_TRANS_MEM, /* tv_id */ | |
3952 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
3953 | 0, /* properties_provided */ | |
3954 | 0, /* properties_destroyed */ | |
3955 | 0, /* todo_flags_start */ | |
3956 | 0, /* todo_flags_finish */ | |
4c0315d0 | 3957 | }; |
3958 | ||
cbe8bda8 | 3959 | class pass_tm_memopt : public gimple_opt_pass |
3960 | { | |
3961 | public: | |
9af5ce0c | 3962 | pass_tm_memopt (gcc::context *ctxt) |
3963 | : gimple_opt_pass (pass_data_tm_memopt, ctxt) | |
cbe8bda8 | 3964 | {} |
3965 | ||
3966 | /* opt_pass methods: */ | |
3967 | bool gate () { return gate_tm_memopt (); } | |
3968 | unsigned int execute () { return execute_tm_memopt (); } | |
3969 | ||
3970 | }; // class pass_tm_memopt | |
3971 | ||
3972 | } // anon namespace | |
3973 | ||
3974 | gimple_opt_pass * | |
3975 | make_pass_tm_memopt (gcc::context *ctxt) | |
3976 | { | |
3977 | return new pass_tm_memopt (ctxt); | |
3978 | } | |
3979 | ||
4c0315d0 | 3980 | \f |
3981 | /* Interprocedural analysis for the creation of transactional clones. | |
3982 | The aim of this pass is to find which functions are referenced in | |
3983 | a non-irrevocable transaction context, and for those over which | |
3984 | we have control (or user directive), create a version of the | |
3985 | function which uses only the transactional interface to reference | |
3986 | protected memories. This analysis proceeds in several steps: | |
3987 | ||
3988 | (1) Collect the set of all possible transactional clones: | |
3989 | ||
3990 | (a) For all local public functions marked tm_callable, push | |
3991 | them onto the tm_callee queue. | |
3992 | ||
3993 | (b) For all local functions, scan for calls in transaction blocks. | |
3994 | Push the caller and callee onto the tm_caller and tm_callee | |
3995 | queues. Count the number of callers for each callee. | |
3996 | ||
3997 | (c) For each local function on the callee list, assume we will | |
3998 | create a transactional clone. Push *all* calls onto the | |
3999 | callee queues; count the number of clone callers separately | |
4000 | from the number of original callers. | |
4001 | ||
4002 | (2) Propagate irrevocable status up the dominator tree: | |
4003 | ||
4004 | (a) Any external function on the callee list that is not marked | |
4005 | tm_callable is irrevocable. Push all callers of such onto | |
4006 | a worklist. | |
4007 | ||
4008 | (b) For each function on the worklist, mark each block that | |
4009 | contains an irrevocable call. Use the AND operator to | |
4010 | propagate that mark up the dominator tree. | |
4011 | ||
4012 | (c) If we reach the entry block for a possible transactional | |
4013 | clone, then the transactional clone is irrevocable, and | |
4014 | we should not create the clone after all. Push all | |
4015 | callers onto the worklist. | |
4016 | ||
4017 | (d) Place tm_irrevocable calls at the beginning of the relevant | |
4018 | blocks. Special case here is the entry block for the entire | |
4019 | transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for | |
4020 | the library to begin the region in serial mode. Decrement | |
4021 | the call count for all callees in the irrevocable region. | |
4022 | ||
4023 | (3) Create the transactional clones: | |
4024 | ||
4025 | Any tm_callee that still has a non-zero call count is cloned. | |
4026 | */ | |
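
/* As an illustrative example of the whole scheme: given

	__attribute__((transaction_callable)) void f (void);
	__transaction_atomic { f (); }

   step (1a) queues `f' as a possible clone, step (1b) counts one
   caller, and, provided no irrevocable region swallows the call in
   step (2), step (3) emits the transactional clone of `f' (such
   clones carry the `_ZGTt' symbol prefix).  */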
4027 | ||
4028 | /* This structure is stored in the AUX field of each cgraph_node. */ | |
4029 | struct tm_ipa_cg_data | |
4030 | { | |
4031 | /* The clone of the function that got created. */ | |
4032 | struct cgraph_node *clone; | |
4033 | ||
4034 | /* The tm regions in the normal function. */ | |
4035 | struct tm_region *all_tm_regions; | |
4036 | ||
4037 | /* The blocks of the normal/clone functions that contain irrevocable | |
4038 | calls, or blocks that are post-dominated by irrevocable calls. */ | |
4039 | bitmap irrevocable_blocks_normal; | |
4040 | bitmap irrevocable_blocks_clone; | |
4041 | ||
4042 | /* The blocks of the normal function that are involved in transactions. */ | |
4043 | bitmap transaction_blocks_normal; | |
4044 | ||
4045 | /* The number of callers to the transactional clone of this function | |
4046 | from normal and transactional clones respectively. */ | |
4047 | unsigned tm_callers_normal; | |
4048 | unsigned tm_callers_clone; | |
4049 | ||
4050 | /* True if all calls to this function's transactional clone | |
4051 | are irrevocable. Also automatically true if the function | |
4052 | has no transactional clone. */ | |
4053 | bool is_irrevocable; | |
4054 | ||
4055 | /* Flags indicating the presence of this function in various queues. */ | |
4056 | bool in_callee_queue; | |
4057 | bool in_worklist; | |
4058 | ||
4059 | /* Flags indicating the kind of scan desired while in the worklist. */ | |
4060 | bool want_irr_scan_normal; | |
4061 | }; | |
4062 | ||
f1f41a6c | 4063 | typedef vec<cgraph_node_ptr> cgraph_node_queue; |
4c0315d0 | 4064 | |
4065 | /* Return the ipa data associated with NODE, allocating zeroed memory | |
3e426b86 | 4066 | if necessary. TRAVERSE_ALIASES is true if we must traverse aliases |
4067 | and set *NODE accordingly. */ | |
4c0315d0 | 4068 | |
4069 | static struct tm_ipa_cg_data * | |
3e426b86 | 4070 | get_cg_data (struct cgraph_node **node, bool traverse_aliases) |
4c0315d0 | 4071 | { |
3e426b86 | 4072 | struct tm_ipa_cg_data *d; |
4073 | ||
02774f2d | 4074 | if (traverse_aliases && (*node)->alias) |
48669653 | 4075 | *node = cgraph_alias_target (*node); |
3e426b86 | 4076 | |
02774f2d | 4077 | d = (struct tm_ipa_cg_data *) (*node)->aux; |
4c0315d0 | 4078 | |
4079 | if (d == NULL) | |
4080 | { | |
4081 | d = (struct tm_ipa_cg_data *) | |
4082 | obstack_alloc (&tm_obstack.obstack, sizeof (*d)); | |
02774f2d | 4083 | (*node)->aux = (void *) d; |
4c0315d0 | 4084 | memset (d, 0, sizeof (*d)); |
4085 | } | |
4086 | ||
4087 | return d; | |
4088 | } | |
4089 | ||
4090 | /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that | |
4091 | it is already present. */ | |
4092 | ||
4093 | static void | |
4094 | maybe_push_queue (struct cgraph_node *node, | |
4095 | cgraph_node_queue *queue_p, bool *in_queue_p) | |
4096 | { | |
4097 | if (!*in_queue_p) | |
4098 | { | |
4099 | *in_queue_p = true; | |
f1f41a6c | 4100 | queue_p->safe_push (node); |
4c0315d0 | 4101 | } |
4102 | } | |
4103 | ||
0cd02a19 | 4104 | /* Duplicate the basic blocks in QUEUE for use in the uninstrumented |
4105 | code path. QUEUE are the basic blocks inside the transaction | |
4106 | represented in REGION. | |
4107 | ||
4108 | Later in split_code_paths() we will add the conditional to choose | |
4109 | between the two alternatives. */ | |
4110 | ||
4111 | static void | |
4112 | ipa_uninstrument_transaction (struct tm_region *region, | |
f1f41a6c | 4113 | vec<basic_block> queue) |
0cd02a19 | 4114 | { |
4115 | gimple transaction = region->transaction_stmt; | |
4116 | basic_block transaction_bb = gimple_bb (transaction); | |
f1f41a6c | 4117 | int n = queue.length (); |
0cd02a19 | 4118 | basic_block *new_bbs = XNEWVEC (basic_block, n); |
4119 | ||
d99f53b2 | 4120 | copy_bbs (queue.address (), n, new_bbs, NULL, 0, NULL, NULL, transaction_bb, |
4121 | true); | |
0cd02a19 | 4122 | edge e = make_edge (transaction_bb, new_bbs[0], EDGE_TM_UNINSTRUMENTED); |
4123 | add_phi_args_after_copy (new_bbs, n, e); | |
4124 | ||
4125 | // Now we will have a GIMPLE_TRANSACTION with 3 possible edges out of it. | |
4126 | // a) EDGE_FALLTHRU into the transaction | |
4127 | // b) EDGE_TM_ABORT out of the transaction | |
4128 | // c) EDGE_TM_UNINSTRUMENTED into the uninstrumented blocks. | |
4129 | ||
4130 | free (new_bbs); | |
4131 | } | |
4132 | ||
4c0315d0 | 4133 | /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone. |
4134 | Queue all callees within block BB. */ | |
4135 | ||
4136 | static void | |
4137 | ipa_tm_scan_calls_block (cgraph_node_queue *callees_p, | |
4138 | basic_block bb, bool for_clone) | |
4139 | { | |
4140 | gimple_stmt_iterator gsi; | |
4141 | ||
4142 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4143 | { | |
4144 | gimple stmt = gsi_stmt (gsi); | |
4145 | if (is_gimple_call (stmt) && !is_tm_pure_call (stmt)) | |
4146 | { | |
4147 | tree fndecl = gimple_call_fndecl (stmt); | |
4148 | if (fndecl) | |
4149 | { | |
4150 | struct tm_ipa_cg_data *d; | |
4151 | unsigned *pcallers; | |
4152 | struct cgraph_node *node; | |
4153 | ||
4154 | if (is_tm_ending_fndecl (fndecl)) | |
4155 | continue; | |
4156 | if (find_tm_replacement_function (fndecl)) | |
4157 | continue; | |
4158 | ||
4159 | node = cgraph_get_node (fndecl); | |
4160 | gcc_assert (node != NULL); | |
3e426b86 | 4161 | d = get_cg_data (&node, true); |
4c0315d0 | 4162 | |
4163 | pcallers = (for_clone ? &d->tm_callers_clone | |
4164 | : &d->tm_callers_normal); | |
4165 | *pcallers += 1; | |
4166 | ||
4167 | maybe_push_queue (node, callees_p, &d->in_callee_queue); | |
4168 | } | |
4169 | } | |
4170 | } | |
4171 | } | |
4172 | ||
4173 | /* Scan all calls in NODE that are within a transaction region, | |
4174 | and push the resulting nodes into the callee queue. */ | |
4175 | ||
4176 | static void | |
4177 | ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d, | |
4178 | cgraph_node_queue *callees_p) | |
4179 | { | |
4180 | struct tm_region *r; | |
4181 | ||
4182 | d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack); | |
4183 | d->all_tm_regions = all_tm_regions; | |
4184 | ||
4185 | for (r = all_tm_regions; r; r = r->next) | |
4186 | { | |
f1f41a6c | 4187 | vec<basic_block> bbs; |
4c0315d0 | 4188 | basic_block bb; |
4189 | unsigned i; | |
4190 | ||
4191 | bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL, | |
4192 | d->transaction_blocks_normal, false); | |
4193 | ||
0cd02a19 | 4194 | // Generate the uninstrumented code path for this transaction. |
4195 | ipa_uninstrument_transaction (r, bbs); | |
4196 | ||
f1f41a6c | 4197 | FOR_EACH_VEC_ELT (bbs, i, bb) |
4c0315d0 | 4198 | ipa_tm_scan_calls_block (callees_p, bb, false); |
4199 | ||
f1f41a6c | 4200 | bbs.release (); |
4c0315d0 | 4201 | } |
0cd02a19 | 4202 | |
4203 | // ??? copy_bbs should maintain cgraph edges for the blocks as it is | |
4204 | // copying them, rather than forcing us to do this externally. | |
4205 | rebuild_cgraph_edges (); | |
4206 | ||
4207 | // ??? In ipa_uninstrument_transaction we don't try to update dominators | |
4208 | // because copy_bbs doesn't return a VEC like iterate_fix_dominators expects. | |
4209 | // Instead, just release dominators here so update_ssa recomputes them. | |
4210 | free_dominance_info (CDI_DOMINATORS); | |
4211 | ||
4212 | // When building the uninstrumented code path, copy_bbs will have invoked | |
4213 | // create_new_def_for starting an "ssa update context". There is only one | |
4214 | // instance of this context, so resolve ssa updates before moving on to | |
4215 | // the next function. | |
4216 | update_ssa (TODO_update_ssa); | |
4c0315d0 | 4217 | } |
4218 | ||
4219 | /* Scan all calls in NODE as if this is the transactional clone, | |
4220 | and push the destinations into the callee queue. */ | |
4221 | ||
4222 | static void | |
4223 | ipa_tm_scan_calls_clone (struct cgraph_node *node, | |
4224 | cgraph_node_queue *callees_p) | |
4225 | { | |
02774f2d | 4226 | struct function *fn = DECL_STRUCT_FUNCTION (node->decl); |
4c0315d0 | 4227 | basic_block bb; |
4228 | ||
4229 | FOR_EACH_BB_FN (bb, fn) | |
4230 | ipa_tm_scan_calls_block (callees_p, bb, true); | |
4231 | } | |
4232 | ||
4233 | /* The function NODE has been detected to be irrevocable. Push all | |
4234 | of its callers onto WORKLIST for the purpose of re-scanning them. */ | |
4235 | ||
4236 | static void | |
4237 | ipa_tm_note_irrevocable (struct cgraph_node *node, | |
4238 | cgraph_node_queue *worklist_p) | |
4239 | { | |
3e426b86 | 4240 | struct tm_ipa_cg_data *d = get_cg_data (&node, true); |
4c0315d0 | 4241 | struct cgraph_edge *e; |
4242 | ||
4243 | d->is_irrevocable = true; | |
4244 | ||
4245 | for (e = node->callers; e ; e = e->next_caller) | |
4246 | { | |
4247 | basic_block bb; | |
3e426b86 | 4248 | struct cgraph_node *caller; |
4c0315d0 | 4249 | |
4250 | /* Don't examine recursive calls. */ | |
4251 | if (e->caller == node) | |
4252 | continue; | |
4253 | /* Even if we think we can go irrevocable, believe the user | |
4254 | above all. */ | |
02774f2d | 4255 | if (is_tm_safe_or_pure (e->caller->decl)) |
4c0315d0 | 4256 | continue; |
4257 | ||
3e426b86 | 4258 | caller = e->caller; |
4259 | d = get_cg_data (&caller, true); | |
4c0315d0 | 4260 | |
4261 | /* Check if the call is within a transactional region of the | |
4262 | caller. If so, schedule the caller for normal re-scan as well. */ | |
4263 | bb = gimple_bb (e->call_stmt); | |
4264 | gcc_assert (bb != NULL); | |
4265 | if (d->transaction_blocks_normal | |
4266 | && bitmap_bit_p (d->transaction_blocks_normal, bb->index)) | |
4267 | d->want_irr_scan_normal = true; | |
4268 | ||
3e426b86 | 4269 | maybe_push_queue (caller, worklist_p, &d->in_worklist); |
4c0315d0 | 4270 | } |
4271 | } | |
4272 | ||
4273 | /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement | |
4274 | within the block is irrevocable. */ | |
4275 | ||
4276 | static bool | |
4277 | ipa_tm_scan_irr_block (basic_block bb) | |
4278 | { | |
4279 | gimple_stmt_iterator gsi; | |
4280 | tree fn; | |
4281 | ||
4282 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4283 | { | |
4284 | gimple stmt = gsi_stmt (gsi); | |
4285 | switch (gimple_code (stmt)) | |
4286 | { | |
e153bd50 | 4287 | case GIMPLE_ASSIGN: |
4288 | if (gimple_assign_single_p (stmt)) | |
4289 | { | |
4290 | tree lhs = gimple_assign_lhs (stmt); | |
4291 | tree rhs = gimple_assign_rhs1 (stmt); | |
4292 | if (volatile_var_p (lhs) || volatile_var_p (rhs)) | |
4293 | return true; | |
4294 | } | |
4295 | break; | |
4296 | ||
4c0315d0 | 4297 | case GIMPLE_CALL: |
e153bd50 | 4298 | { |
4299 | tree lhs = gimple_call_lhs (stmt); | |
4300 | if (lhs && volatile_var_p (lhs)) | |
4301 | return true; | |
4c0315d0 | 4302 | |
e153bd50 | 4303 | if (is_tm_pure_call (stmt)) |
4304 | break; | |
4c0315d0 | 4305 | |
e153bd50 | 4306 | fn = gimple_call_fn (stmt); |
4c0315d0 | 4307 | |
e153bd50 | 4308 | /* Functions with the attribute are by definition irrevocable. */ |
4309 | if (is_tm_irrevocable (fn)) | |
4310 | return true; | |
4c0315d0 | 4311 | |
e153bd50 | 4312 | /* For direct function calls, go ahead and check for replacement |
4313 | functions, or transitive irrevocable functions. For indirect | |
4314 | functions, we'll ask the runtime. */ | |
4315 | if (TREE_CODE (fn) == ADDR_EXPR) | |
4316 | { | |
4317 | struct tm_ipa_cg_data *d; | |
4318 | struct cgraph_node *node; | |
4c0315d0 | 4319 | |
e153bd50 | 4320 | fn = TREE_OPERAND (fn, 0); |
4321 | if (is_tm_ending_fndecl (fn)) | |
4322 | break; | |
4323 | if (find_tm_replacement_function (fn)) | |
4324 | break; | |
40879ac6 | 4325 | |
9af5ce0c | 4326 | node = cgraph_get_node (fn); |
e153bd50 | 4327 | d = get_cg_data (&node, true); |
4328 | ||
4329 | /* Return true if irrevocable, but above all, believe | |
4330 | the user. */ | |
4331 | if (d->is_irrevocable | |
4332 | && !is_tm_safe_or_pure (fn)) | |
4333 | return true; | |
4334 | } | |
4335 | break; | |
4336 | } | |
4c0315d0 | 4337 | |
4338 | case GIMPLE_ASM: | |
4339 | /* ??? The Approved Method of indicating that an inline | |
4340 | assembly statement is not relevant to the transaction | |
4341 | is to wrap it in a __tm_waiver block. This is not | |
4342 | yet implemented, so we can't check for it. */ | |
43156aa3 | 4343 | if (is_tm_safe (current_function_decl)) |
4344 | { | |
4345 | tree t = build1 (NOP_EXPR, void_type_node, size_zero_node); | |
4346 | SET_EXPR_LOCATION (t, gimple_location (stmt)); | |
43156aa3 | 4347 | error ("%Kasm not allowed in %<transaction_safe%> function", t); |
4348 | } | |
4c0315d0 | 4349 | return true; |
4350 | ||
4351 | default: | |
4352 | break; | |
4353 | } | |
4354 | } | |
4355 | ||
4356 | return false; | |
4357 | } | |
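
/* Concretely (a sketch), any of the following make the scan return true:

	x = *vol_ptr;		// volatile access
	asm ("");		// inline assembly, sans __tm_waiver
	f_unsafe ();		// call forced irrevocable

   where `f_unsafe' is a hypothetical function whose decl satisfies
   is_tm_irrevocable.  */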
4358 | ||
4359 | /* For each of the blocks seeded within PQUEUE, walk the CFG looking | |
4360 | for new irrevocable blocks, marking them in NEW_IRR. Don't bother | |
4361 | scanning past OLD_IRR or EXIT_BLOCKS. */ | |
4362 | ||
4363 | static bool | |
f1f41a6c | 4364 | ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr, |
4c0315d0 | 4365 | bitmap old_irr, bitmap exit_blocks) |
4366 | { | |
4367 | bool any_new_irr = false; | |
4368 | edge e; | |
4369 | edge_iterator ei; | |
4370 | bitmap visited_blocks = BITMAP_ALLOC (NULL); | |
4371 | ||
4372 | do | |
4373 | { | |
f1f41a6c | 4374 | basic_block bb = pqueue->pop (); |
4c0315d0 | 4375 | |
4376 | /* Don't re-scan blocks we know already are irrevocable. */ | |
4377 | if (old_irr && bitmap_bit_p (old_irr, bb->index)) | |
4378 | continue; | |
4379 | ||
4380 | if (ipa_tm_scan_irr_block (bb)) | |
4381 | { | |
4382 | bitmap_set_bit (new_irr, bb->index); | |
4383 | any_new_irr = true; | |
4384 | } | |
4385 | else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index)) | |
4386 | { | |
4387 | FOR_EACH_EDGE (e, ei, bb->succs) | |
4388 | if (!bitmap_bit_p (visited_blocks, e->dest->index)) | |
4389 | { | |
4390 | bitmap_set_bit (visited_blocks, e->dest->index); | |
f1f41a6c | 4391 | pqueue->safe_push (e->dest); |
4c0315d0 | 4392 | } |
4393 | } | |
4394 | } | |
f1f41a6c | 4395 | while (!pqueue->is_empty ()); |
4c0315d0 | 4396 | |
4397 | BITMAP_FREE (visited_blocks); | |
4398 | ||
4399 | return any_new_irr; | |
4400 | } | |
4401 | ||
4402 | /* Propagate the irrevocable property both up and down the dominator tree. | |
4403 | ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS | |
4404 | are the exit blocks of the TM region; OLD_IRR is the fully-propagated | |
4405 | result of a previous scan of the dominator tree; NEW_IRR is the set of | |
4406 | new blocks gaining the irrevocable property during the current scan. */ | |
4407 | ||
4408 | static void | |
4409 | ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr, | |
4410 | bitmap old_irr, bitmap exit_blocks) | |
4411 | { | |
f1f41a6c | 4412 | vec<basic_block> bbs; |
4c0315d0 | 4413 | bitmap all_region_blocks; |
4414 | ||
4415 | /* If this block is in the old set, no need to rescan. */ | |
4416 | if (old_irr && bitmap_bit_p (old_irr, entry_block->index)) | |
4417 | return; | |
4418 | ||
4419 | all_region_blocks = BITMAP_ALLOC (&tm_obstack); | |
4420 | bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL, | |
4421 | all_region_blocks, false); | |
4422 | do | |
4423 | { | |
f1f41a6c | 4424 | basic_block bb = bbs.pop (); |
4c0315d0 | 4425 | bool this_irr = bitmap_bit_p (new_irr, bb->index); |
4426 | bool all_son_irr = false; | |
4427 | edge_iterator ei; | |
4428 | edge e; | |
4429 | ||
4430 | /* Propagate up. If all of my successors are irrevocable, so am I, | |
4431 | provided there is at least one of them. */ | |
4432 | if (!this_irr) | |
4433 | { | |
4434 | FOR_EACH_EDGE (e, ei, bb->succs) | |
4435 | { | |
4436 | if (!bitmap_bit_p (new_irr, e->dest->index)) | |
4437 | { | |
4438 | all_son_irr = false; | |
4439 | break; | |
4440 | } | |
4441 | else | |
4442 | all_son_irr = true; | |
4443 | } | |
4444 | if (all_son_irr) | |
4445 | { | |
4446 | /* Add block to new_irr if it hasn't already been processed. */ | |
4447 | if (!old_irr || !bitmap_bit_p (old_irr, bb->index)) | |
4448 | { | |
4449 | bitmap_set_bit (new_irr, bb->index); | |
4450 | this_irr = true; | |
4451 | } | |
4452 | } | |
4453 | } | |
4454 | ||
4455 | /* Propagate down to everyone we immediately dominate. */ | |
4456 | if (this_irr) | |
4457 | { | |
4458 | basic_block son; | |
4459 | for (son = first_dom_son (CDI_DOMINATORS, bb); | |
4460 | son; | |
4461 | son = next_dom_son (CDI_DOMINATORS, son)) | |
4462 | { | |
4463 | /* Make sure block is actually in a TM region, and it | |
4464 | isn't already in old_irr. */ | |
4465 | if ((!old_irr || !bitmap_bit_p (old_irr, son->index)) | |
4466 | && bitmap_bit_p (all_region_blocks, son->index)) | |
4467 | bitmap_set_bit (new_irr, son->index); | |
4468 | } | |
4469 | } | |
4470 | } | |
f1f41a6c | 4471 | while (!bbs.is_empty ()); |
4c0315d0 | 4472 | |
4473 | BITMAP_FREE (all_region_blocks); | |
f1f41a6c | 4474 | bbs.release (); |
4c0315d0 | 4475 | } |
4476 | ||
4477 | static void | |
4478 | ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone) | |
4479 | { | |
4480 | gimple_stmt_iterator gsi; | |
4481 | ||
4482 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4483 | { | |
4484 | gimple stmt = gsi_stmt (gsi); | |
4485 | if (is_gimple_call (stmt) && !is_tm_pure_call (stmt)) | |
4486 | { | |
4487 | tree fndecl = gimple_call_fndecl (stmt); | |
4488 | if (fndecl) | |
4489 | { | |
4490 | struct tm_ipa_cg_data *d; | |
4491 | unsigned *pcallers; | |
3e426b86 | 4492 | struct cgraph_node *tnode; |
4c0315d0 | 4493 | |
4494 | if (is_tm_ending_fndecl (fndecl)) | |
4495 | continue; | |
4496 | if (find_tm_replacement_function (fndecl)) | |
4497 | continue; | |
4498 | ||
3e426b86 | 4499 | tnode = cgraph_get_node (fndecl); |
4500 | d = get_cg_data (&tnode, true); | |
4501 | ||
4c0315d0 | 4502 | pcallers = (for_clone ? &d->tm_callers_clone |
4503 | : &d->tm_callers_normal); | |
4504 | ||
4505 | gcc_assert (*pcallers > 0); | |
4506 | *pcallers -= 1; | |
4507 | } | |
4508 | } | |
4509 | } | |
4510 | } | |
4511 | ||
4512 | /* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions, | |
4513 | as well as other irrevocable actions such as inline assembly. Mark all | |
4514 | such blocks as irrevocable and decrement the number of calls to | |
4515 | transactional clones. Return true if, for the transactional clone, the | |
4516 | entire function is irrevocable. */ | |
4517 | ||
4518 | static bool | |
4519 | ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone) | |
4520 | { | |
4521 | struct tm_ipa_cg_data *d; | |
4522 | bitmap new_irr, old_irr; | |
4c0315d0 | 4523 | bool ret = false; |
4524 | ||
40879ac6 | 4525 | /* Builtin operators (operator new, and such). */ |
02774f2d | 4526 | if (DECL_STRUCT_FUNCTION (node->decl) == NULL |
4527 | || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL) | |
40879ac6 | 4528 | return false; |
4529 | ||
02774f2d | 4530 | push_cfun (DECL_STRUCT_FUNCTION (node->decl)); |
4c0315d0 | 4531 | calculate_dominance_info (CDI_DOMINATORS); |
4532 | ||
3e426b86 | 4533 | d = get_cg_data (&node, true); |
c2078b80 | 4534 | stack_vec<basic_block, 10> queue; |
4c0315d0 | 4535 | new_irr = BITMAP_ALLOC (&tm_obstack); |
4536 | ||
4537 | /* Scan each tm region, propagating irrevocable status through the tree. */ | |
4538 | if (for_clone) | |
4539 | { | |
4540 | old_irr = d->irrevocable_blocks_clone; | |
34154e27 | 4541 | queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))); |
4c0315d0 | 4542 | if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL)) |
4543 | { | |
34154e27 | 4544 | ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)), |
4545 | new_irr, | |
4c0315d0 | 4546 | old_irr, NULL); |
34154e27 | 4547 | ret = bitmap_bit_p (new_irr, |
4548 | single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index); | |
4c0315d0 | 4549 | } |
4550 | } | |
4551 | else | |
4552 | { | |
4553 | struct tm_region *region; | |
4554 | ||
4555 | old_irr = d->irrevocable_blocks_normal; | |
4556 | for (region = d->all_tm_regions; region; region = region->next) | |
4557 | { | |
f1f41a6c | 4558 | queue.quick_push (region->entry_block); |
4c0315d0 | 4559 | if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, |
4560 | region->exit_blocks)) | |
4561 | ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr, | |
4562 | region->exit_blocks); | |
4563 | } | |
4564 | } | |
4565 | ||
4566 | /* If we found any new irrevocable blocks, reduce the call count for | |
4567 | transactional clones within the irrevocable blocks. Save the new | |
4568 | set of irrevocable blocks for next time. */ | |
4569 | if (!bitmap_empty_p (new_irr)) | |
4570 | { | |
4571 | bitmap_iterator bmi; | |
4572 | unsigned i; | |
4573 | ||
4574 | EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi) | |
f5a6b05f | 4575 | ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i), |
4576 | for_clone); | |
4c0315d0 | 4577 | |
4578 | if (old_irr) | |
4579 | { | |
4580 | bitmap_ior_into (old_irr, new_irr); | |
4581 | BITMAP_FREE (new_irr); | |
4582 | } | |
4583 | else if (for_clone) | |
4584 | d->irrevocable_blocks_clone = new_irr; | |
4585 | else | |
4586 | d->irrevocable_blocks_normal = new_irr; | |
4587 | ||
4588 | if (dump_file && new_irr) | |
4589 | { | |
4590 | const char *dname; | |
4591 | bitmap_iterator bmi; | |
4592 | unsigned i; | |
4593 | ||
4594 | dname = lang_hooks.decl_printable_name (current_function_decl, 2); | |
4595 | EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi) | |
4596 | fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i); | |
4597 | } | |
4598 | } | |
4599 | else | |
4600 | BITMAP_FREE (new_irr); | |
4601 | ||
4c0315d0 | 4602 | pop_cfun (); |
4c0315d0 | 4603 | |
4604 | return ret; | |
4605 | } | |
4606 | ||
4607 | /* Return true if, for the transactional clone of NODE, any call | |
4608 | may enter irrevocable mode. */ | |
4609 | ||
4610 | static bool | |
4611 | ipa_tm_mayenterirr_function (struct cgraph_node *node) | |
4612 | { | |
3e426b86 | 4613 | struct tm_ipa_cg_data *d; |
4614 | tree decl; | |
4615 | unsigned flags; | |
4616 | ||
4617 | d = get_cg_data (&node, true); | |
02774f2d | 4618 | decl = node->decl; |
3e426b86 | 4619 | flags = flags_from_decl_or_type (decl); |
4c0315d0 | 4620 | |
4621 | /* Handle some TM builtins. Ordinarily these aren't actually generated | |
4622 | at this point, but handling these functions when written in by the | |
4623 | user makes it easier to build unit tests. */ | |
4624 | if (flags & ECF_TM_BUILTIN) | |
4625 | return false; | |
4626 | ||
4627 | /* Filter out all functions that are marked. */ | |
4628 | if (flags & ECF_TM_PURE) | |
4629 | return false; | |
4630 | if (is_tm_safe (decl)) | |
4631 | return false; | |
4632 | if (is_tm_irrevocable (decl)) | |
4633 | return true; | |
4634 | if (is_tm_callable (decl)) | |
4635 | return true; | |
4636 | if (find_tm_replacement_function (decl)) | |
4637 | return true; | |
4638 | ||
4639 | /* If we aren't seeing the final version of the function we don't | |
4640 | know what it will contain at runtime. */ | |
4641 | if (cgraph_function_body_availability (node) < AVAIL_AVAILABLE) | |
4642 | return true; | |
4643 | ||
4644 | /* If the function must go irrevocable, then of course true. */ | |
4645 | if (d->is_irrevocable) | |
4646 | return true; | |
4647 | ||
4648 | /* If there are any blocks marked irrevocable, then the function | |
4649 | as a whole may enter irrevocable mode. */ | |
4650 | if (d->irrevocable_blocks_clone) | |
4651 | return true; | |
4652 | ||
4653 | /* We may have previously marked this function as tm_may_enter_irr; | |
4654 | see pass_diagnose_tm_blocks. */ | |
4655 | if (node->local.tm_may_enter_irr) | |
4656 | return true; | |
4657 | ||
4658 | /* Recurse on the main body for aliases. In general, this will | |
4659 | result in one of the bits above being set so that we will not | |
4660 | have to recurse next time. */ | |
02774f2d | 4661 | if (node->alias) |
4c0315d0 | 4662 | return ipa_tm_mayenterirr_function (cgraph_get_node (node->thunk.alias)); |
4663 | ||
4664 | /* What remains are unmarked local functions without anything that | |
4665 | forces the function to go irrevocable. */ | |
4666 | return false; | |
4667 | } | |
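
/* To make the tests above concrete, a hedged sketch (these
   declarations are assumed for illustration, not taken from this
   file):

     void f_safe (void) __attribute__((transaction_safe));      // no
     void f_pure (void) __attribute__((transaction_pure));      // no
     void f_call (void) __attribute__((transaction_callable));  // yes
     extern void f_ext (void);   // unmarked, body unavailable: yes

   where "yes" means a call to the function's transactional clone may
   end up entering irrevocable mode.  */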
4668 | ||
4669 | /* Diagnose calls from transaction_safe functions to unmarked | |
4670 | functions that are determined to not be safe. */ | |
4671 | ||
4672 | static void | |
4673 | ipa_tm_diagnose_tm_safe (struct cgraph_node *node) | |
4674 | { | |
4675 | struct cgraph_edge *e; | |
4676 | ||
4677 | for (e = node->callees; e ; e = e->next_callee) | |
02774f2d | 4678 | if (!is_tm_callable (e->callee->decl) |
4c0315d0 | 4679 | && e->callee->local.tm_may_enter_irr) |
4680 | error_at (gimple_location (e->call_stmt), | |
4681 | "unsafe function call %qD within " | |
02774f2d | 4682 | "%<transaction_safe%> function", e->callee->decl); |
4c0315d0 | 4683 | } |
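
/* For example, a hedged illustration of the diagnostic above (the
   declarations are assumed, not from this file):

     extern void unmarked (void);   // may enter irrevocable mode
     void f (void) __attribute__((transaction_safe));
     void f (void) { unmarked (); }
       // error: unsafe function call 'unmarked' within
       // 'transaction_safe' function
*/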
4684 | ||
4685 | /* Diagnose calls from atomic transactions to unmarked functions | |
4686 | that are determined to not be safe. */ | |
4687 | ||
4688 | static void | |
4689 | ipa_tm_diagnose_transaction (struct cgraph_node *node, | |
4690 | struct tm_region *all_tm_regions) | |
4691 | { | |
4692 | struct tm_region *r; | |
4693 | ||
4694 | for (r = all_tm_regions; r ; r = r->next) | |
4695 | if (gimple_transaction_subcode (r->transaction_stmt) & GTMA_IS_RELAXED) | |
4696 | { | |
4697 | /* Atomic transactions can be nested inside relaxed. */ | |
4698 | if (r->inner) | |
4699 | ipa_tm_diagnose_transaction (node, r->inner); | |
4700 | } | |
4701 | else | |
4702 | { | |
f1f41a6c | 4703 | vec<basic_block> bbs; |
4c0315d0 | 4704 | gimple_stmt_iterator gsi; |
4705 | basic_block bb; | |
4706 | size_t i; | |
4707 | ||
4708 | bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, | |
4709 | r->irr_blocks, NULL, false); | |
4710 | ||
f1f41a6c | 4711 | for (i = 0; bbs.iterate (i, &bb); ++i) |
4c0315d0 | 4712 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
4713 | { | |
4714 | gimple stmt = gsi_stmt (gsi); | |
4715 | tree fndecl; | |
4716 | ||
4717 | if (gimple_code (stmt) == GIMPLE_ASM) | |
4718 | { | |
4719 | error_at (gimple_location (stmt), | |
4720 | "asm not allowed in atomic transaction"); | |
4721 | continue; | |
4722 | } | |
4723 | ||
4724 | if (!is_gimple_call (stmt)) | |
4725 | continue; | |
4726 | fndecl = gimple_call_fndecl (stmt); | |
4727 | ||
4728 | /* Indirect function calls have been diagnosed already. */ | |
4729 | if (!fndecl) | |
4730 | continue; | |
4731 | ||
4732 | /* Stop at the end of the transaction. */ | |
4733 | if (is_tm_ending_fndecl (fndecl)) | |
4734 | { | |
4735 | if (bitmap_bit_p (r->exit_blocks, bb->index)) | |
4736 | break; | |
4737 | continue; | |
4738 | } | |
4739 | ||
4740 | /* Marked functions have been diagnosed already. */ | |
4741 | if (is_tm_pure_call (stmt)) | |
4742 | continue; | |
4743 | if (is_tm_callable (fndecl)) | |
4744 | continue; | |
4745 | ||
4746 | if (cgraph_local_info (fndecl)->tm_may_enter_irr) | |
4747 | error_at (gimple_location (stmt), | |
4748 | "unsafe function call %qD within " | |
4749 | "atomic transaction", fndecl); | |
4750 | } | |
4751 | ||
f1f41a6c | 4752 | bbs.release (); |
4c0315d0 | 4753 | } |
4754 | } | |
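
/* For example, a hedged illustration using GCC's TM syntax:

     __transaction_atomic { __asm__ ("nop"); }
       // error: asm not allowed in atomic transaction

   The same asm inside a __transaction_relaxed block is not diagnosed
   here, since relaxed transactions are allowed to go irrevocable.  */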
4755 | ||
4756 | /* Return a transactional mangled name for the assembler name | |
4757 | OLD_ASM_ID (a function's DECL_ASSEMBLER_NAME).  The returned | |
4758 | value is an IDENTIFIER_NODE; the caller need not free it. */ | |
4759 | ||
4760 | static tree | |
4761 | tm_mangle (tree old_asm_id) | |
4762 | { | |
4763 | const char *old_asm_name; | |
4764 | char *tm_name; | |
4765 | void *alloc = NULL; | |
4766 | struct demangle_component *dc; | |
4767 | tree new_asm_id; | |
4768 | ||
4769 | /* Determine if the symbol is already a valid C++ mangled name. Do this | |
4770 | even for C, which might be interfacing with C++ code via appropriately | |
4771 | ugly identifiers. */ | |
4772 | /* ??? We could probably do just as well checking for "_Z" and be done. */ | |
4773 | old_asm_name = IDENTIFIER_POINTER (old_asm_id); | |
4774 | dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc); | |
4775 | ||
4776 | if (dc == NULL) | |
4777 | { | |
4778 | char length[8]; | |
4779 | ||
4780 | do_unencoded: | |
4781 | sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id)); | |
4782 | tm_name = concat ("_ZGTt", length, old_asm_name, NULL); | |
4783 | } | |
4784 | else | |
4785 | { | |
4786 | old_asm_name += 2; /* Skip _Z */ | |
4787 | ||
4788 | switch (dc->type) | |
4789 | { | |
4790 | case DEMANGLE_COMPONENT_TRANSACTION_CLONE: | |
4791 | case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE: | |
4792 | /* Don't play silly games, you! */ | |
4793 | goto do_unencoded; | |
4794 | ||
4795 | case DEMANGLE_COMPONENT_HIDDEN_ALIAS: | |
4796 | /* I'd really like to know if we can ever be passed one of | |
4797 | these from the C++ front end. The Logical Thing would | |
4798 | seem to be that hidden-alias should be outermost, so that we | |
4799 | get hidden-alias of a transaction-clone and not vice-versa. */ | |
4800 | old_asm_name += 2; | |
4801 | break; | |
4802 | ||
4803 | default: | |
4804 | break; | |
4805 | } | |
4806 | ||
4807 | tm_name = concat ("_ZGTt", old_asm_name, NULL); | |
4808 | } | |
4809 | free (alloc); | |
4810 | ||
4811 | new_asm_id = get_identifier (tm_name); | |
4812 | free (tm_name); | |
4813 | ||
4814 | return new_asm_id; | |
4815 | } | |
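
/* Two examples of the mapping implemented above, shown for
   illustration (derived from the code, not from a testsuite):

     "foo"     -> "_ZGTt3foo"     // plain identifier, length-prefixed
     "_Z3foov" -> "_ZGTt3foov"    // already-mangled C++ name

   i.e. a transactional clone keeps its original mangling behind the
   transaction-clone marker "GTt".  */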
4816 | ||
4817 | static inline void | |
8efa224a | 4818 | ipa_tm_mark_force_output_node (struct cgraph_node *node) |
4c0315d0 | 4819 | { |
8efa224a | 4820 | cgraph_mark_force_output_node (node); |
02774f2d | 4821 | node->analyzed = true; |
4c0315d0 | 4822 | } |
4823 | ||
6a1c0403 | 4824 | static inline void |
4825 | ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node) | |
4826 | { | |
02774f2d | 4827 | node->forced_by_abi = true; |
4828 | node->analyzed = true; | |
6a1c0403 | 4829 | } |
4830 | ||
4c0315d0 | 4831 | /* Callback data for ipa_tm_create_version_alias. */ |
4832 | struct create_version_alias_info | |
4833 | { | |
4834 | struct cgraph_node *old_node; | |
4835 | tree new_decl; | |
4836 | }; | |
4837 | ||
caf29404 | 4838 | /* A subroutine of ipa_tm_create_version, called via |
4c0315d0 | 4839 | cgraph_for_node_and_aliases. Create new tm clones for each of |
4840 | the existing aliases. */ | |
4841 | static bool | |
4842 | ipa_tm_create_version_alias (struct cgraph_node *node, void *data) | |
4843 | { | |
4844 | struct create_version_alias_info *info | |
4845 | = (struct create_version_alias_info *)data; | |
4846 | tree old_decl, new_decl, tm_name; | |
4847 | struct cgraph_node *new_node; | |
4848 | ||
02774f2d | 4849 | if (!node->cpp_implicit_alias) |
4c0315d0 | 4850 | return false; |
4851 | ||
02774f2d | 4852 | old_decl = node->decl; |
4c0315d0 | 4853 | tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl)); |
4854 | new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl), | |
4855 | TREE_CODE (old_decl), tm_name, | |
4856 | TREE_TYPE (old_decl)); | |
4857 | ||
4858 | SET_DECL_ASSEMBLER_NAME (new_decl, tm_name); | |
4859 | SET_DECL_RTL (new_decl, NULL); | |
4860 | ||
4861 | /* Based loosely on C++'s make_alias_for(). */ | |
4862 | TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl); | |
f7c22b34 | 4863 | DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl); |
4864 | DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl); | |
4c0315d0 | 4865 | TREE_READONLY (new_decl) = TREE_READONLY (old_decl); |
4866 | DECL_EXTERNAL (new_decl) = 0; | |
4867 | DECL_ARTIFICIAL (new_decl) = 1; | |
4868 | TREE_ADDRESSABLE (new_decl) = 1; | |
4869 | TREE_USED (new_decl) = 1; | |
4870 | TREE_SYMBOL_REFERENCED (tm_name) = 1; | |
4871 | ||
4872 | /* Perform the same remapping to the comdat group. */ | |
260d0713 | 4873 | if (DECL_ONE_ONLY (new_decl)) |
4c0315d0 | 4874 | DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl)); |
4875 | ||
4876 | new_node = cgraph_same_body_alias (NULL, new_decl, info->new_decl); | |
4877 | new_node->tm_clone = true; | |
02774f2d | 4878 | new_node->externally_visible = info->old_node->externally_visible; |
3e426b86 | 4879 | /* ?? Do not traverse aliases here. */ |
4880 | get_cg_data (&node, false)->clone = new_node; | |
4c0315d0 | 4881 | |
4882 | record_tm_clone_pair (old_decl, new_decl); | |
4883 | ||
02774f2d | 4884 | if (info->old_node->force_output |
4885 | || ipa_ref_list_first_referring (&info->old_node->ref_list)) | |
8efa224a | 4886 | ipa_tm_mark_force_output_node (new_node); |
02774f2d | 4887 | if (info->old_node->forced_by_abi) |
6a1c0403 | 4888 | ipa_tm_mark_forced_by_abi_node (new_node); |
4c0315d0 | 4889 | return false; |
4890 | } | |
4891 | ||
4892 | /* Create a copy of the function (possibly declaration only) of OLD_NODE, | |
4893 | appropriate for the transactional clone. */ | |
4894 | ||
4895 | static void | |
4896 | ipa_tm_create_version (struct cgraph_node *old_node) | |
4897 | { | |
4898 | tree new_decl, old_decl, tm_name; | |
4899 | struct cgraph_node *new_node; | |
4900 | ||
02774f2d | 4901 | old_decl = old_node->decl; |
4c0315d0 | 4902 | new_decl = copy_node (old_decl); |
4903 | ||
4904 | /* DECL_ASSEMBLER_NAME needs to be set before we call | |
4905 | cgraph_copy_node_for_versioning below, because cgraph_node will | |
4906 | fill the assembler_name_hash. */ | |
4907 | tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl)); | |
4908 | SET_DECL_ASSEMBLER_NAME (new_decl, tm_name); | |
4909 | SET_DECL_RTL (new_decl, NULL); | |
4910 | TREE_SYMBOL_REFERENCED (tm_name) = 1; | |
4911 | ||
4912 | /* Perform the same remapping to the comdat group. */ | |
260d0713 | 4913 | if (DECL_ONE_ONLY (new_decl)) |
4c0315d0 | 4914 | DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl)); |
4915 | ||
1e094109 | 4916 | new_node = cgraph_copy_node_for_versioning (old_node, new_decl, vNULL, NULL); |
281dea26 | 4917 | new_node->local.local = false; |
02774f2d | 4918 | new_node->externally_visible = old_node->externally_visible; |
4c0315d0 | 4919 | new_node->lowered = true; |
4920 | new_node->tm_clone = 1; | |
3e426b86 | 4921 | get_cg_data (&old_node, true)->clone = new_node; |
4c0315d0 | 4922 | |
4923 | if (cgraph_function_body_availability (old_node) >= AVAIL_OVERWRITABLE) | |
4924 | { | |
4925 | /* Remap extern inline to static inline. */ | |
4926 | /* ??? Is it worth trying to use make_decl_one_only? */ | |
4927 | if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl)) | |
4928 | { | |
4929 | DECL_EXTERNAL (new_decl) = 0; | |
4930 | TREE_PUBLIC (new_decl) = 0; | |
7a571443 | 4931 | DECL_WEAK (new_decl) = 0; |
4c0315d0 | 4932 | } |
4933 | ||
f1f41a6c | 4934 | tree_function_versioning (old_decl, new_decl, |
4935 | NULL, false, NULL, | |
4936 | false, NULL, NULL); | |
4c0315d0 | 4937 | } |
4938 | ||
4939 | record_tm_clone_pair (old_decl, new_decl); | |
4940 | ||
4941 | cgraph_call_function_insertion_hooks (new_node); | |
02774f2d | 4942 | if (old_node->force_output |
4943 | || ipa_ref_list_first_referring (&old_node->ref_list)) | |
8efa224a | 4944 | ipa_tm_mark_force_output_node (new_node); |
02774f2d | 4945 | if (old_node->forced_by_abi) |
6a1c0403 | 4946 | ipa_tm_mark_forced_by_abi_node (new_node); |
4c0315d0 | 4947 | |
4948 | /* Do the same thing, but for any aliases of the original node. */ | |
4949 | { | |
4950 | struct create_version_alias_info data; | |
4951 | data.old_node = old_node; | |
4952 | data.new_decl = new_decl; | |
4953 | cgraph_for_node_and_aliases (old_node, ipa_tm_create_version_alias, | |
4954 | &data, true); | |
4955 | } | |
4956 | } | |
4957 | ||
4958 | /* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */ | |
4959 | ||
4960 | static void | |
4961 | ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region, | |
4962 | basic_block bb) | |
4963 | { | |
4964 | gimple_stmt_iterator gsi; | |
4965 | gimple g; | |
4966 | ||
4967 | transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); | |
4968 | ||
4969 | g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE), | |
4970 | 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE)); | |
4971 | ||
4972 | split_block_after_labels (bb); | |
4973 | gsi = gsi_after_labels (bb); | |
4974 | gsi_insert_before (&gsi, g, GSI_SAME_STMT); | |
4975 | ||
4976 | cgraph_create_edge (node, | |
4977 | cgraph_get_create_node | |
4978 | (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)), | |
4979 | g, 0, | |
02774f2d | 4980 | compute_call_stmt_bb_frequency (node->decl, |
4c0315d0 | 4981 | gimple_bb (g))); |
4982 | } | |
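
/* Roughly, the transformation above (a sketch, not pass dump output):

     bb:  stmt1; stmt2; ...
   =>
     bb:  TM_IRREVOCABLE (MODE_SERIALIRREVOCABLE); stmt1; stmt2; ...

   i.e. the runtime is asked to switch to serial-irrevocable mode
   before anything in BB executes.  */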
4983 | ||
4984 | /* Construct a call to TM_GETTMCLONE and insert it before GSI. */ | |
4985 | ||
4986 | static bool | |
4987 | ipa_tm_insert_gettmclone_call (struct cgraph_node *node, | |
4988 | struct tm_region *region, | |
4989 | gimple_stmt_iterator *gsi, gimple stmt) | |
4990 | { | |
4991 | tree gettm_fn, ret, old_fn, callfn; | |
4992 | gimple g, g2; | |
4993 | bool safe; | |
4994 | ||
4995 | old_fn = gimple_call_fn (stmt); | |
4996 | ||
4997 | if (TREE_CODE (old_fn) == ADDR_EXPR) | |
4998 | { | |
4999 | tree fndecl = TREE_OPERAND (old_fn, 0); | |
5000 | tree clone = get_tm_clone_pair (fndecl); | |
5001 | ||
5002 | /* By transforming the call into a TM_GETTMCLONE, we are | |
5003 | technically taking the address of the original function and | |
5004 | its clone.  Record this so the inliner knows these functions | |
5005 | are needed. */ | |
5006 | cgraph_mark_address_taken_node (cgraph_get_node (fndecl)); | |
5007 | if (clone) | |
5008 | cgraph_mark_address_taken_node (cgraph_get_node (clone)); | |
5009 | } | |
5010 | ||
5011 | safe = is_tm_safe (TREE_TYPE (old_fn)); | |
5012 | gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE | |
5013 | : BUILT_IN_TM_GETTMCLONE_IRR); | |
5014 | ret = create_tmp_var (ptr_type_node, NULL); | |
4c0315d0 | 5015 | |
5016 | if (!safe) | |
5017 | transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); | |
5018 | ||
5019 | /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */ | |
5020 | if (TREE_CODE (old_fn) == OBJ_TYPE_REF) | |
5021 | old_fn = OBJ_TYPE_REF_EXPR (old_fn); | |
5022 | ||
5023 | g = gimple_build_call (gettm_fn, 1, old_fn); | |
5024 | ret = make_ssa_name (ret, g); | |
5025 | gimple_call_set_lhs (g, ret); | |
5026 | ||
5027 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
5028 | ||
5029 | cgraph_create_edge (node, cgraph_get_create_node (gettm_fn), g, 0, | |
02774f2d | 5030 | compute_call_stmt_bb_frequency (node->decl, |
9af5ce0c | 5031 | gimple_bb (g))); |
4c0315d0 | 5032 | |
5033 | /* Cast return value from tm_gettmclone* into appropriate function | |
5034 | pointer. */ | |
5035 | callfn = create_tmp_var (TREE_TYPE (old_fn), NULL); | |
4c0315d0 | 5036 | g2 = gimple_build_assign (callfn, |
5037 | fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret)); | |
5038 | callfn = make_ssa_name (callfn, g2); | |
5039 | gimple_assign_set_lhs (g2, callfn); | |
5040 | gsi_insert_before (gsi, g2, GSI_SAME_STMT); | |
5041 | ||
5042 | /* ??? This is a hack to preserve the NOTHROW bit on the call, | |
5043 | which we would have derived from the decl. Failure to save | |
5044 | this bit means we might have to split the basic block. */ | |
5045 | if (gimple_call_nothrow_p (stmt)) | |
5046 | gimple_call_set_nothrow (stmt, true); | |
5047 | ||
5048 | gimple_call_set_fn (stmt, callfn); | |
5049 | ||
5050 | /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS | |
5051 | for a call statement. Fix it. */ | |
5052 | { | |
5053 | tree lhs = gimple_call_lhs (stmt); | |
5054 | tree rettype = TREE_TYPE (gimple_call_fntype (stmt)); | |
5055 | if (lhs | |
5056 | && !useless_type_conversion_p (TREE_TYPE (lhs), rettype)) | |
5057 | { | |
5058 | tree temp; | |
5059 | ||
072f7ab1 | 5060 | temp = create_tmp_reg (rettype, 0); |
4c0315d0 | 5061 | gimple_call_set_lhs (stmt, temp); |
5062 | ||
5063 | g2 = gimple_build_assign (lhs, | |
5064 | fold_build1 (VIEW_CONVERT_EXPR, | |
5065 | TREE_TYPE (lhs), temp)); | |
5066 | gsi_insert_after (gsi, g2, GSI_SAME_STMT); | |
5067 | } | |
5068 | } | |
5069 | ||
5070 | update_stmt (stmt); | |
5071 | ||
5072 | return true; | |
5073 | } | |
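
/* So an indirect call within a transaction,

     fn (args);                       // fn is a function pointer

   becomes, roughly (a sketch of the statements built above):

     ret = TM_GETTMCLONE_* (fn);      // SAFE or IRR, per fn's type
     callfn = (fntype) ret;
     callfn (args);
*/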
5074 | ||
5075 | /* Helper function for ipa_tm_transform_calls*. Given a call | |
5076 | statement in GSI which resides inside transaction REGION, redirect | |
5077 | the call to either its wrapper function, or its clone. */ | |
5078 | ||
5079 | static void | |
5080 | ipa_tm_transform_calls_redirect (struct cgraph_node *node, | |
5081 | struct tm_region *region, | |
5082 | gimple_stmt_iterator *gsi, | |
5083 | bool *need_ssa_rename_p) | |
5084 | { | |
5085 | gimple stmt = gsi_stmt (*gsi); | |
5086 | struct cgraph_node *new_node; | |
5087 | struct cgraph_edge *e = cgraph_edge (node, stmt); | |
5088 | tree fndecl = gimple_call_fndecl (stmt); | |
5089 | ||
5090 | /* For indirect calls, pass the address through the runtime. */ | |
5091 | if (fndecl == NULL) | |
5092 | { | |
5093 | *need_ssa_rename_p |= | |
5094 | ipa_tm_insert_gettmclone_call (node, region, gsi, stmt); | |
5095 | return; | |
5096 | } | |
5097 | ||
5098 | /* Handle some TM builtins. Ordinarily these aren't actually generated | |
5099 | at this point, but handling these functions when written in by the | |
5100 | user makes it easier to build unit tests. */ | |
5101 | if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN) | |
5102 | return; | |
5103 | ||
5104 | /* Fixup recursive calls inside clones. */ | |
5105 | /* ??? Why did cgraph_copy_node_for_versioning update the call edges | |
5106 | for recursion but not update the call statements themselves? */ | |
5107 | if (e->caller == e->callee && decl_is_tm_clone (current_function_decl)) | |
5108 | { | |
5109 | gimple_call_set_fndecl (stmt, current_function_decl); | |
5110 | return; | |
5111 | } | |
5112 | ||
5113 | /* If there is a replacement, use it. */ | |
5114 | fndecl = find_tm_replacement_function (fndecl); | |
5115 | if (fndecl) | |
5116 | { | |
5117 | new_node = cgraph_get_create_node (fndecl); | |
5118 | ||
5119 | /* ??? Mark all transaction_wrap functions tm_may_enter_irr. | |
5120 | ||
5121 | We can't do this earlier in record_tm_replacement because | |
5122 | cgraph_remove_unreachable_nodes is called before we inject | |
5123 | references to the node. Further, we can't do this in some | |
5124 | nice central place in ipa_tm_execute because we don't have | |
5125 | the exact list of wrapper functions that would be used. | |
5126 | Marking more wrappers than necessary results in the creation | |
5127 | of unnecessary cgraph_nodes, which can cause some of the | |
5128 | other IPA passes to crash. | |
5129 | ||
5130 | We do need to mark these nodes so that we get the proper | |
5131 | result in expand_call_tm. */ | |
5132 | /* ??? This seems broken. How is it that we're marking the | |
5133 | CALLEE as may_enter_irr? Surely we should be marking the | |
5134 | CALLER. Also note that find_tm_replacement_function also | |
5135 | contains mappings into the TM runtime, e.g. memcpy. These | |
5136 | we know won't go irrevocable. */ | |
5137 | new_node->local.tm_may_enter_irr = 1; | |
5138 | } | |
5139 | else | |
5140 | { | |
3e426b86 | 5141 | struct tm_ipa_cg_data *d; |
5142 | struct cgraph_node *tnode = e->callee; | |
5143 | ||
5144 | d = get_cg_data (&tnode, true); | |
4c0315d0 | 5145 | new_node = d->clone; |
5146 | ||
5147 | /* As we've already skipped pure calls and appropriate builtins, | |
5148 | and we've already marked irrevocable blocks, if we can't come | |
5149 | up with a static replacement, then ask the runtime. */ | |
5150 | if (new_node == NULL) | |
5151 | { | |
5152 | *need_ssa_rename_p |= | |
5153 | ipa_tm_insert_gettmclone_call (node, region, gsi, stmt); | |
4c0315d0 | 5154 | return; |
5155 | } | |
5156 | ||
02774f2d | 5157 | fndecl = new_node->decl; |
4c0315d0 | 5158 | } |
5159 | ||
5160 | cgraph_redirect_edge_callee (e, new_node); | |
5161 | gimple_call_set_fndecl (stmt, fndecl); | |
5162 | } | |
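
/* A hedged example of the two redirections above (attribute spelling
   per GCC's TM extension; the names are illustrative):

     void twrap (void) __attribute__((transaction_wrap (tfree)));
       // within transactions, calls to tfree become calls to twrap

   while a call to an ordinary versionable function foo is instead
   redirected to its transactional clone (e.g. _ZGTt3foo).  */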
5163 | ||
5164 | /* Helper function for ipa_tm_transform_calls. For a given BB, | |
5165 | install calls to tm_irrevocable when IRR_BLOCKS are reached, | |
5166 | redirect other calls to the generated transactional clone. */ | |
5167 | ||
5168 | static bool | |
5169 | ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region, | |
5170 | basic_block bb, bitmap irr_blocks) | |
5171 | { | |
5172 | gimple_stmt_iterator gsi; | |
5173 | bool need_ssa_rename = false; | |
5174 | ||
5175 | if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index)) | |
5176 | { | |
5177 | ipa_tm_insert_irr_call (node, region, bb); | |
5178 | return true; | |
5179 | } | |
5180 | ||
5181 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
5182 | { | |
5183 | gimple stmt = gsi_stmt (gsi); | |
5184 | ||
5185 | if (!is_gimple_call (stmt)) | |
5186 | continue; | |
5187 | if (is_tm_pure_call (stmt)) | |
5188 | continue; | |
5189 | ||
5190 | /* Redirect edges to the appropriate replacement or clone. */ | |
5191 | ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename); | |
5192 | } | |
5193 | ||
5194 | return need_ssa_rename; | |
5195 | } | |
5196 | ||
5197 | /* Walk the CFG for REGION, beginning at BB. Install calls to | |
5198 | tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to | |
5199 | the generated transactional clone. */ | |
5200 | ||
5201 | static bool | |
5202 | ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region, | |
5203 | basic_block bb, bitmap irr_blocks) | |
5204 | { | |
5205 | bool need_ssa_rename = false; | |
5206 | edge e; | |
5207 | edge_iterator ei; | |
c2078b80 | 5208 | auto_vec<basic_block> queue; |
4c0315d0 | 5209 | bitmap visited_blocks = BITMAP_ALLOC (NULL); |
5210 | ||
f1f41a6c | 5211 | queue.safe_push (bb); |
4c0315d0 | 5212 | do |
5213 | { | |
f1f41a6c | 5214 | bb = queue.pop (); |
4c0315d0 | 5215 | |
5216 | need_ssa_rename |= | |
5217 | ipa_tm_transform_calls_1 (node, region, bb, irr_blocks); | |
5218 | ||
5219 | if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index)) | |
5220 | continue; | |
5221 | ||
5222 | if (region && bitmap_bit_p (region->exit_blocks, bb->index)) | |
5223 | continue; | |
5224 | ||
5225 | FOR_EACH_EDGE (e, ei, bb->succs) | |
5226 | if (!bitmap_bit_p (visited_blocks, e->dest->index)) | |
5227 | { | |
5228 | bitmap_set_bit (visited_blocks, e->dest->index); | |
f1f41a6c | 5229 | queue.safe_push (e->dest); |
4c0315d0 | 5230 | } |
5231 | } | |
f1f41a6c | 5232 | while (!queue.is_empty ()); |
4c0315d0 | 5233 | |
4c0315d0 | 5234 | BITMAP_FREE (visited_blocks); |
5235 | ||
5236 | return need_ssa_rename; | |
5237 | } | |
5238 | ||
5239 | /* Transform the calls within the TM regions within NODE. */ | |
5240 | ||
5241 | static void | |
5242 | ipa_tm_transform_transaction (struct cgraph_node *node) | |
5243 | { | |
3e426b86 | 5244 | struct tm_ipa_cg_data *d; |
4c0315d0 | 5245 | struct tm_region *region; |
5246 | bool need_ssa_rename = false; | |
5247 | ||
3e426b86 | 5248 | d = get_cg_data (&node, true); |
5249 | ||
02774f2d | 5250 | push_cfun (DECL_STRUCT_FUNCTION (node->decl)); |
4c0315d0 | 5251 | calculate_dominance_info (CDI_DOMINATORS); |
5252 | ||
5253 | for (region = d->all_tm_regions; region; region = region->next) | |
5254 | { | |
5255 | /* If we're sure to go irrevocable, don't transform anything. */ | |
5256 | if (d->irrevocable_blocks_normal | |
5257 | && bitmap_bit_p (d->irrevocable_blocks_normal, | |
5258 | region->entry_block->index)) | |
5259 | { | |
1910089e | 5260 | transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE |
5261 | | GTMA_MAY_ENTER_IRREVOCABLE | |
5262 | | GTMA_HAS_NO_INSTRUMENTATION); | |
4c0315d0 | 5263 | continue; |
5264 | } | |
5265 | ||
5266 | need_ssa_rename |= | |
5267 | ipa_tm_transform_calls (node, region, region->entry_block, | |
5268 | d->irrevocable_blocks_normal); | |
5269 | } | |
5270 | ||
5271 | if (need_ssa_rename) | |
5272 | update_ssa (TODO_update_ssa_only_virtuals); | |
5273 | ||
5274 | pop_cfun (); | |
4c0315d0 | 5275 | } |
5276 | ||
5277 | /* Transform the calls within the transactional clone of NODE. */ | |
5278 | ||
5279 | static void | |
5280 | ipa_tm_transform_clone (struct cgraph_node *node) | |
5281 | { | |
3e426b86 | 5282 | struct tm_ipa_cg_data *d; |
4c0315d0 | 5283 | bool need_ssa_rename; |
5284 | ||
3e426b86 | 5285 | d = get_cg_data (&node, true); |
5286 | ||
4c0315d0 | 5287 | /* If this function makes no calls and has no irrevocable blocks, |
5288 | then there's nothing to do. */ | |
5289 | /* ??? Remove non-aborting top-level transactions. */ | |
2670559d | 5290 | if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone) |
4c0315d0 | 5291 | return; |
5292 | ||
02774f2d | 5293 | push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl)); |
4c0315d0 | 5294 | calculate_dominance_info (CDI_DOMINATORS); |
5295 | ||
5296 | need_ssa_rename = | |
34154e27 | 5297 | ipa_tm_transform_calls (d->clone, NULL, |
5298 | single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)), | |
4c0315d0 | 5299 | d->irrevocable_blocks_clone); |
5300 | ||
5301 | if (need_ssa_rename) | |
5302 | update_ssa (TODO_update_ssa_only_virtuals); | |
5303 | ||
5304 | pop_cfun (); | |
4c0315d0 | 5305 | } |
5306 | ||
5307 | /* Main entry point for the transactional memory IPA pass. */ | |
5308 | ||
5309 | static unsigned int | |
5310 | ipa_tm_execute (void) | |
5311 | { | |
9af5ce0c | 5312 | cgraph_node_queue tm_callees = cgraph_node_queue (); |
4c0315d0 | 5313 | /* List of functions that will go irrevocable. */ |
9af5ce0c | 5314 | cgraph_node_queue irr_worklist = cgraph_node_queue (); |
4c0315d0 | 5315 | |
5316 | struct cgraph_node *node; | |
5317 | struct tm_ipa_cg_data *d; | |
5318 | enum availability a; | |
5319 | unsigned int i; | |
5320 | ||
5321 | #ifdef ENABLE_CHECKING | |
5322 | verify_cgraph (); | |
5323 | #endif | |
5324 | ||
5325 | bitmap_obstack_initialize (&tm_obstack); | |
0cd02a19 | 5326 | initialize_original_copy_tables (); |
4c0315d0 | 5327 | |
5328 | /* For all local functions marked tm_callable, queue them. */ | |
7c455d87 | 5329 | FOR_EACH_DEFINED_FUNCTION (node) |
02774f2d | 5330 | if (is_tm_callable (node->decl) |
4c0315d0 | 5331 | && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE) |
5332 | { | |
3e426b86 | 5333 | d = get_cg_data (&node, true); |
4c0315d0 | 5334 | maybe_push_queue (node, &tm_callees, &d->in_callee_queue); |
5335 | } | |
5336 | ||
5337 | /* For all local reachable functions... */ | |
7c455d87 | 5338 | FOR_EACH_DEFINED_FUNCTION (node) |
da751785 | 5339 | if (node->lowered |
4c0315d0 | 5340 | && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE) |
5341 | { | |
5342 | /* ... marked tm_pure, record that fact for the runtime by | |
5343 | indicating that the pure function is its own tm_callable. | |
5344 | No need to do this if the function's address can't be taken. */ | |
02774f2d | 5345 | if (is_tm_pure (node->decl)) |
4c0315d0 | 5346 | { |
5347 | if (!node->local.local) | |
02774f2d | 5348 | record_tm_clone_pair (node->decl, node->decl); |
4c0315d0 | 5349 | continue; |
5350 | } | |
5351 | ||
02774f2d | 5352 | push_cfun (DECL_STRUCT_FUNCTION (node->decl)); |
4c0315d0 | 5353 | calculate_dominance_info (CDI_DOMINATORS); |
5354 | ||
5355 | tm_region_init (NULL); | |
5356 | if (all_tm_regions) | |
5357 | { | |
3e426b86 | 5358 | d = get_cg_data (&node, true); |
4c0315d0 | 5359 | |
0cd02a19 | 5360 | /* Scan for calls that are in each transaction, and |
5361 | generate the uninstrumented code path. */ | |
4c0315d0 | 5362 | ipa_tm_scan_calls_transaction (d, &tm_callees); |
5363 | ||
40879ac6 | 5364 | /* Put it in the worklist so we can scan the function |
5365 | later (ipa_tm_scan_irr_function) and mark the | |
5366 | irrevocable blocks. */ | |
5367 | maybe_push_queue (node, &irr_worklist, &d->in_worklist); | |
5368 | d->want_irr_scan_normal = true; | |
4c0315d0 | 5369 | } |
5370 | ||
5371 | pop_cfun (); | |
4c0315d0 | 5372 | } |
5373 | ||
5374 | /* For every local function on the callee list, scan as if we will be | |
5375 | creating a transactional clone, queueing all new functions we find | |
5376 | along the way. */ | |
f1f41a6c | 5377 | for (i = 0; i < tm_callees.length (); ++i) |
4c0315d0 | 5378 | { |
f1f41a6c | 5379 | node = tm_callees[i]; |
4c0315d0 | 5380 | a = cgraph_function_body_availability (node); |
3e426b86 | 5381 | d = get_cg_data (&node, true); |
4c0315d0 | 5382 | |
40879ac6 | 5383 | /* Put it in the worklist so we can scan the function later |
5384 | (ipa_tm_scan_irr_function) and mark the irrevocable | |
5385 | blocks. */ | |
5386 | maybe_push_queue (node, &irr_worklist, &d->in_worklist); | |
4c0315d0 | 5387 | |
5388 | /* Some callees cannot be arbitrarily cloned. These will always be | |
5389 | irrevocable. Mark these now, so that we need not scan them. */ | |
02774f2d | 5390 | if (is_tm_irrevocable (node->decl)) |
4c0315d0 | 5391 | ipa_tm_note_irrevocable (node, &irr_worklist); |
5392 | else if (a <= AVAIL_NOT_AVAILABLE | |
02774f2d | 5393 | && !is_tm_safe_or_pure (node->decl)) |
4c0315d0 | 5394 | ipa_tm_note_irrevocable (node, &irr_worklist); |
5395 | else if (a >= AVAIL_OVERWRITABLE) | |
5396 | { | |
02774f2d | 5397 | if (!tree_versionable_function_p (node->decl)) |
4c0315d0 | 5398 | ipa_tm_note_irrevocable (node, &irr_worklist); |
5399 | else if (!d->is_irrevocable) | |
5400 | { | |
5401 | /* If this is an alias, make sure its base is queued as well. | |
5402 | We need not scan the callees now, as the base will do. */ | |
02774f2d | 5403 | if (node->alias) |
4c0315d0 | 5404 | { |
5405 | node = cgraph_get_node (node->thunk.alias); | |
3e426b86 | 5406 | d = get_cg_data (&node, true); |
4c0315d0 | 5407 | maybe_push_queue (node, &tm_callees, &d->in_callee_queue); |
5408 | continue; | |
5409 | } | |
5410 | ||
5411 | /* Add all nodes called by this function into | |
5412 | tm_callees as well. */ | |
5413 | ipa_tm_scan_calls_clone (node, &tm_callees); | |
5414 | } | |
5415 | } | |
5416 | } | |
5417 | ||
5418 | /* Iterate scans until no more work to be done. Prefer not to use | |
f1f41a6c | 5419 | vec::pop because the worklist tends to follow a breadth-first |
4c0315d0 | 5420 | search of the callgraph, which should allow convergence with a |
5421 | minimum number of scans. But we also don't want the worklist | |
5422 | array to grow without bound, so we shift the array up periodically. */ | |
f1f41a6c | 5423 | for (i = 0; i < irr_worklist.length (); ++i) |
4c0315d0 | 5424 | { |
f1f41a6c | 5425 | if (i > 256 && i == irr_worklist.length () / 8) |
4c0315d0 | 5426 | { |
f1f41a6c | 5427 | irr_worklist.block_remove (0, i); |
4c0315d0 | 5428 | i = 0; |
5429 | } | |
5430 | ||
f1f41a6c | 5431 | node = irr_worklist[i]; |
3e426b86 | 5432 | d = get_cg_data (&node, true); |
4c0315d0 | 5433 | d->in_worklist = false; |
5434 | ||
5435 | if (d->want_irr_scan_normal) | |
5436 | { | |
5437 | d->want_irr_scan_normal = false; | |
5438 | ipa_tm_scan_irr_function (node, false); | |
5439 | } | |
5440 | if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true)) | |
5441 | ipa_tm_note_irrevocable (node, &irr_worklist); | |
5442 | } | |
5443 | ||
5444 | /* For every function on the callee list, collect the tm_may_enter_irr | |
5445 | bit on the node. */ | |
f1f41a6c | 5446 | irr_worklist.truncate (0); |
5447 | for (i = 0; i < tm_callees.length (); ++i) | |
4c0315d0 | 5448 | { |
f1f41a6c | 5449 | node = tm_callees[i]; |
4c0315d0 | 5450 | if (ipa_tm_mayenterirr_function (node)) |
5451 | { | |
3e426b86 | 5452 | d = get_cg_data (&node, true); |
4c0315d0 | 5453 | gcc_assert (d->in_worklist == false); |
5454 | maybe_push_queue (node, &irr_worklist, &d->in_worklist); | |
5455 | } | |
5456 | } | |
5457 | ||
5458 | /* Propagate the tm_may_enter_irr bit to callers until stable. */ | |
f1f41a6c | 5459 | for (i = 0; i < irr_worklist.length (); ++i) |
4c0315d0 | 5460 | { |
5461 | struct cgraph_node *caller; | |
5462 | struct cgraph_edge *e; | |
5463 | struct ipa_ref *ref; | |
5464 | unsigned j; | |
5465 | ||
f1f41a6c | 5466 | if (i > 256 && i == irr_worklist.length () / 8) |
4c0315d0 | 5467 | { |
f1f41a6c | 5468 | irr_worklist.block_remove (0, i); |
4c0315d0 | 5469 | i = 0; |
5470 | } | |
5471 | ||
f1f41a6c | 5472 | node = irr_worklist[i]; |
3e426b86 | 5473 | d = get_cg_data (&node, true); |
4c0315d0 | 5474 | d->in_worklist = false; |
5475 | node->local.tm_may_enter_irr = true; | |
5476 | ||
5477 | /* Propagate back to normal callers. */ | |
5478 | for (e = node->callers; e ; e = e->next_caller) | |
5479 | { | |
5480 | caller = e->caller; | |
02774f2d | 5481 | if (!is_tm_safe_or_pure (caller->decl) |
4c0315d0 | 5482 | && !caller->local.tm_may_enter_irr) |
5483 | { | |
3e426b86 | 5484 | d = get_cg_data (&caller, true); |
4c0315d0 | 5485 | maybe_push_queue (caller, &irr_worklist, &d->in_worklist); |
5486 | } | |
5487 | } | |
5488 | ||
5489 | /* Propagate back to referring aliases as well. */ | |
02774f2d | 5490 | for (j = 0; ipa_ref_list_referring_iterate (&node->ref_list, j, ref); j++) |
4c0315d0 | 5491 | { |
04ec15fa | 5492 | caller = cgraph (ref->referring); |
4c0315d0 | 5493 | if (ref->use == IPA_REF_ALIAS |
5494 | && !caller->local.tm_may_enter_irr) | |
5495 | { | |
3e426b86 | 5496 | /* ?? Do not traverse aliases here. */ |
5497 | d = get_cg_data (&caller, false); | |
4c0315d0 | 5498 | maybe_push_queue (caller, &irr_worklist, &d->in_worklist); |
5499 | } | |
5500 | } | |
5501 | } | |
5502 | ||
5503 | /* Now validate all tm_safe functions, and all atomic regions in | |
5504 | other functions. */ | |
7c455d87 | 5505 | FOR_EACH_DEFINED_FUNCTION (node) |
da751785 | 5506 | if (node->lowered |
4c0315d0 | 5507 | && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE) |
5508 | { | |
3e426b86 | 5509 | d = get_cg_data (&node, true); |
02774f2d | 5510 | if (is_tm_safe (node->decl)) |
4c0315d0 | 5511 | ipa_tm_diagnose_tm_safe (node); |
5512 | else if (d->all_tm_regions) | |
5513 | ipa_tm_diagnose_transaction (node, d->all_tm_regions); | |
5514 | } | |
5515 | ||
5516 | /* Create clones. Do those that are not irrevocable and have a | |
5517 | positive call count. Do those publicly visible functions that | |
5518 | the user directed us to clone. */ | |
f1f41a6c | 5519 | for (i = 0; i < tm_callees.length (); ++i) |
4c0315d0 | 5520 | { |
5521 | bool doit = false; | |
5522 | ||
f1f41a6c | 5523 | node = tm_callees[i]; |
02774f2d | 5524 | if (node->cpp_implicit_alias) |
4c0315d0 | 5525 | continue; |
5526 | ||
5527 | a = cgraph_function_body_availability (node); | |
3e426b86 | 5528 | d = get_cg_data (&node, true); |
4c0315d0 | 5529 | |
5530 | if (a <= AVAIL_NOT_AVAILABLE) | |
02774f2d | 5531 | doit = is_tm_callable (node->decl); |
5532 | else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl)) | |
4c0315d0 | 5533 | doit = true; |
5534 | else if (!d->is_irrevocable | |
5535 | && d->tm_callers_normal + d->tm_callers_clone > 0) | |
5536 | doit = true; | |
5537 | ||
5538 | if (doit) | |
5539 | ipa_tm_create_version (node); | |
5540 | } | |
5541 | ||
5542 | /* Redirect calls to the new clones, and insert irrevocable marks. */ | |
f1f41a6c | 5543 | for (i = 0; i < tm_callees.length (); ++i) |
4c0315d0 | 5544 | { |
f1f41a6c | 5545 | node = tm_callees[i]; |
02774f2d | 5546 | if (node->analyzed) |
4c0315d0 | 5547 | { |
3e426b86 | 5548 | d = get_cg_data (&node, true); |
4c0315d0 | 5549 | if (d->clone) |
5550 | ipa_tm_transform_clone (node); | |
5551 | } | |
5552 | } | |
7c455d87 | 5553 | FOR_EACH_DEFINED_FUNCTION (node) |
da751785 | 5554 | if (node->lowered |
4c0315d0 | 5555 | && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE) |
5556 | { | |
3e426b86 | 5557 | d = get_cg_data (&node, true); |
4c0315d0 | 5558 | if (d->all_tm_regions) |
5559 | ipa_tm_transform_transaction (node); | |
5560 | } | |
5561 | ||
5562 | /* Free and clear all data structures. */ | |
f1f41a6c | 5563 | tm_callees.release (); |
5564 | irr_worklist.release (); | |
4c0315d0 | 5565 | bitmap_obstack_release (&tm_obstack); |
0cd02a19 | 5566 | free_original_copy_tables (); |
4c0315d0 | 5567 | |
7c455d87 | 5568 | FOR_EACH_FUNCTION (node) |
02774f2d | 5569 | node->aux = NULL; |
4c0315d0 | 5570 | |
5571 | #ifdef ENABLE_CHECKING | |
5572 | verify_cgraph (); | |
5573 | #endif | |
5574 | ||
5575 | return 0; | |
5576 | } | |
5577 | ||
cbe8bda8 | 5578 | namespace { |
5579 | ||
5580 | const pass_data pass_data_ipa_tm = | |
5581 | { | |
5582 | SIMPLE_IPA_PASS, /* type */ | |
5583 | "tmipa", /* name */ | |
5584 | OPTGROUP_NONE, /* optinfo_flags */ | |
5585 | true, /* has_gate */ | |
5586 | true, /* has_execute */ | |
5587 | TV_TRANS_MEM, /* tv_id */ | |
5588 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
5589 | 0, /* properties_provided */ | |
5590 | 0, /* properties_destroyed */ | |
5591 | 0, /* todo_flags_start */ | |
5592 | 0, /* todo_flags_finish */ | |
4c0315d0 | 5593 | }; |
5594 | ||
cbe8bda8 | 5595 | class pass_ipa_tm : public simple_ipa_opt_pass |
5596 | { | |
5597 | public: | |
9af5ce0c | 5598 | pass_ipa_tm (gcc::context *ctxt) |
5599 | : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt) | |
cbe8bda8 | 5600 | {} |
5601 | ||
5602 | /* opt_pass methods: */ | |
5603 | bool gate () { return gate_tm (); } | |
5604 | unsigned int execute () { return ipa_tm_execute (); } | |
5605 | ||
5606 | }; // class pass_ipa_tm | |
5607 | ||
5608 | } // anon namespace | |
5609 | ||
5610 | simple_ipa_opt_pass * | |
5611 | make_pass_ipa_tm (gcc::context *ctxt) | |
5612 | { | |
5613 | return new pass_ipa_tm (ctxt); | |
5614 | } | |
5615 | ||
4c0315d0 | 5616 | #include "gt-trans-mem.h" |