/* Passes for transactional memory support.
   Copyright (C) 2008-2014 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tree.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "calls.h"
#include "function.h"
#include "rtl.h"
#include "emit-rtl.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
#include "tree-inline.h"
#include "diagnostic-core.h"
#include "demangle.h"
#include "output.h"
#include "trans-mem.h"
#include "params.h"
#include "target.h"
#include "langhooks.h"
#include "gimple-pretty-print.h"
#include "cfgloop.h"
#include "tree-ssa-address.h"
#include "predict.h"


#define A_RUNINSTRUMENTEDCODE	0x0001
#define A_RUNUNINSTRUMENTEDCODE	0x0002
#define A_SAVELIVEVARIABLES	0x0004
#define A_RESTORELIVEVARIABLES	0x0008
#define A_ABORTTRANSACTION	0x0010

#define AR_USERABORT		0x0001
#define AR_USERRETRY		0x0002
#define AR_TMCONFLICT		0x0004
#define AR_EXCEPTIONBLOCKABORT	0x0008
#define AR_OUTERABORT		0x0010

#define MODE_SERIALIRREVOCABLE	0x0000
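
/* A sketch of how these line up with the TM runtime: the A_* bits are
   meant to mirror the action bits returned by the libitm ABI's
   _ITM_beginTransaction (a_runInstrumentedCode, a_saveLiveVariables,
   etc.), and the AR_* bits mirror the _ITM_abortReason codes; the
   authoritative definitions live on the libitm side.  */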


/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

	__transaction {
	  local++;
	  if (++global == 10)
	    __tm_abort;
	}

   During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
   trivially replaced with a GIMPLE_TRANSACTION node.

   During pass_lower_tm, we examine the body of transactions looking
   for aborts.  Transactions that do not contain an abort may be
   merged into an outer transaction.  We also add a TRY-FINALLY node
   to arrange for the transaction to be committed on any exit.

   [??? Think about how this arrangement affects throw-with-commit
   and throw-with-abort operations.  In this case we want the TRY to
   handle gotos, but not to catch any exceptions because the transaction
   will already be closed.]

	GIMPLE_TRANSACTION [label=NULL] {
	  try {
	    local = local + 1;
	    t0 = global;
	    t1 = t0 + 1;
	    global = t1;
	    if (t1 == 10)
	      __builtin___tm_abort ();
	  } finally {
	    __builtin___tm_commit ();
	  }
	}

   During pass_lower_eh, we create EH regions for the transactions,
   intermixed with the regular EH stuff.  This gives us a nice persistent
   mapping (all the way through rtl) from transactional memory operation
   back to the transaction, which allows us to get the abnormal edges
   correct to model transaction aborts and restarts:

	GIMPLE_TRANSACTION [label=over]
	  local = local + 1;
	  t0 = global;
	  t1 = t0 + 1;
	  global = t1;
	  if (t1 == 10)
	    __builtin___tm_abort ();
	  __builtin___tm_commit ();
	over:

   This is the end of all_lowering_passes, and so is what is present
   during the IPA passes, and through all of the optimization passes.

   During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
   functions and mark functions for cloning.

   At the end of gimple optimization, before exiting SSA form,
   pass_tm_edges replaces statements that perform transactional
   memory operations with the appropriate TM builtins, and swaps
   out function calls with their transactional clones.  At this
   point we introduce the abnormal transaction restart edges and
   complete lowering of the GIMPLE_TRANSACTION node.

	x = __builtin___tm_start (MAY_ABORT);
	eh_label:
	  if (x & abort_transaction)
	    goto over;
	  local = local + 1;
	  t0 = __builtin___tm_load (global);
	  t1 = t0 + 1;
	  __builtin___tm_store (&global, t1);
	  if (t1 == 10)
	    __builtin___tm_abort ();
	  __builtin___tm_commit ();
	over:
 */

static void *expand_regions (struct tm_region *,
			     void *(*callback)(struct tm_region *, void *),
			     void *, bool);

\f
/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));

    default:
      if (TYPE_P (x))
	return NULL;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return NULL;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return NULL;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}
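
/* For instance (a sketch, not code from this file), given

     void f (void) __attribute__ ((transaction_safe));
     typedef void tsfn (void) __attribute__ ((transaction_safe));
     tsfn *fp;

   get_attrs_for reaches the same "transaction_safe" attribute list
   whether handed F's decl, F's function type, or FP's pointer type.  */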

/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      break;

    default:
      if (TYPE_P (x))
	return false;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}

/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is, by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}

/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  if (flag_tm)
    {
      tree attrs = get_attrs_for (x);
      if (attrs)
	{
	  if (lookup_attribute ("transaction_safe", attrs))
	    return true;
	  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	    return true;
	}
    }
  return false;
}

/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple call)
{
  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }
  else
    fn = TREE_TYPE (fn);

  return is_tm_pure (fn);
}

/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
	return true;
      if (lookup_attribute ("transaction_safe", attrs))
	return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	return true;
    }
  return false;
}

/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
}

/* Return true for built-in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
	return true;
      default:
	break;
      }

  return false;
}

/* Return true if STMT is a built-in function call that "ends" a
   transaction.  */

bool
is_tm_ending (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl != NULL_TREE
	  && is_tm_ending_fndecl (fndecl));
}

/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc. optimized variants.  */

static bool
is_tm_simple_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
	      || fcode == BUILT_IN_TM_LOAD_2
	      || fcode == BUILT_IN_TM_LOAD_4
	      || fcode == BUILT_IN_TM_LOAD_8
	      || fcode == BUILT_IN_TM_LOAD_FLOAT
	      || fcode == BUILT_IN_TM_LOAD_DOUBLE
	      || fcode == BUILT_IN_TM_LOAD_LDOUBLE
	      || fcode == BUILT_IN_TM_LOAD_M64
	      || fcode == BUILT_IN_TM_LOAD_M128
	      || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}

/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc. optimized variants.  */

static bool
is_tm_simple_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
	      || fcode == BUILT_IN_TM_STORE_2
	      || fcode == BUILT_IN_TM_STORE_4
	      || fcode == BUILT_IN_TM_STORE_8
	      || fcode == BUILT_IN_TM_STORE_FLOAT
	      || fcode == BUILT_IN_TM_STORE_DOUBLE
	      || fcode == BUILT_IN_TM_STORE_LDOUBLE
	      || fcode == BUILT_IN_TM_STORE_M64
	      || fcode == BUILT_IN_TM_STORE_M128
	      || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}

/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}

/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */

tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
			      build_int_cst (integer_type_node,
					     AR_USERABORT
					     | (is_outer ? AR_OUTERABORT : 0)));
}
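
/* For example (a sketch): for source like

     __transaction_atomic { if (err) __transaction_cancel; }

   the C/C++ front ends lower __transaction_cancel via
   build_tm_abort_call (loc, false), producing a call to the
   BUILT_IN_TM_ABORT builtin with reason AR_USERABORT; a cancel that
   targets an outer transaction passes is_outer = true, which ORs in
   AR_OUTERABORT.  */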
\f
/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */

static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
     htab_t tm_wrap_map;

void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = htab_create_ggc (32, tree_map_hash, tree_map_eq, 0);

  h = ggc_alloc<tree_map> ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = (struct tree_map **)
    htab_find_slot_with_hash (tm_wrap_map, h, h->hash, INSERT);
  *slot = h;
}
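
/* For reference, a sketch of how user code requests such a replacement
   (via the transaction_wrap attribute; foo and tm_foo are hypothetical):

     extern void foo (void);
     extern void tm_foo (void) __attribute__ ((transaction_wrap (foo)));

   after which calls to foo() inside transactions are redirected to
   tm_foo() by the TM passes.  */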

/* Return a TM-aware replacement function for DECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = (struct tree_map *) htab_find_with_hash (tm_wrap_map, &in, in.hash);
      if (h)
	return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, only the few handled below are defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here: */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
	return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
	return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
	return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
	return NULL;
      }

  return NULL;
}

/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */
void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They better know what they're
     doing...  */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}
\f
/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER		1
#define DIAG_TM_SAFE		2
#define DIAG_TM_RELAXED		4

struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple stmt;
};

/* Return true if T is a volatile variable of some kind.  */

static bool
volatile_var_p (tree t)
{
  return (SSA_VAR_P (t)
	  && TREE_THIS_VOLATILE (TREE_TYPE (t)));
}

/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  if (volatile_var_p (*tp)
      && d->block_flags & DIAG_TM_SAFE
      && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      error_at (gimple_location (d->stmt),
		"invalid volatile use of %qD inside transaction",
		*tp);
    }

  return NULL_TREE;
}

static inline bool
is_tm_safe_or_pure (const_tree x)
{
  return is_tm_safe (x) || is_tm_pure (x);
}

static tree
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  /* Save stmt for use in leaf analysis.  */
  d->stmt = stmt;

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fn = gimple_call_fn (stmt);

	if ((d->summary_flags & DIAG_TM_OUTER) == 0
	    && is_tm_may_cancel_outer (fn))
	  error_at (gimple_location (stmt),
		    "%<transaction_may_cancel_outer%> function call not within"
		    " outer transaction or %<transaction_may_cancel_outer%>");

	if (d->summary_flags & DIAG_TM_SAFE)
	  {
	    bool is_safe, direct_call_p;
	    tree replacement;

	    if (TREE_CODE (fn) == ADDR_EXPR
		&& TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
	      {
		direct_call_p = true;
		replacement = TREE_OPERAND (fn, 0);
		replacement = find_tm_replacement_function (replacement);
		if (replacement)
		  fn = replacement;
	      }
	    else
	      {
		direct_call_p = false;
		replacement = NULL_TREE;
	      }

	    if (is_tm_safe_or_pure (fn))
	      is_safe = true;
	    else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
	      {
		/* A function explicitly marked transaction_callable as
		   opposed to transaction_safe is being defined to be
		   unsafe as part of its ABI, regardless of its contents.  */
		is_safe = false;
	      }
	    else if (direct_call_p)
	      {
		if (IS_TYPE_OR_DECL_P (fn)
		    && flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
		  is_safe = true;
		else if (replacement)
		  {
		    /* ??? At present we've been considering replacements
		       merely transaction_callable, and therefore might
		       enter irrevocable.  The tm_wrap attribute has not
		       yet made it into the new language spec.  */
		    is_safe = false;
		  }
		else
		  {
		    /* ??? Diagnostics for unmarked direct calls moved into
		       the IPA pass.  Section 3.2 of the spec details how
		       functions not marked should be considered "implicitly
		       safe" based on having examined the function body.  */
		    is_safe = true;
		  }
	      }
	    else
	      {
		/* An unmarked indirect call.  Consider it unsafe even
		   though optimization may yet figure out how to inline.  */
		is_safe = false;
	      }

	    if (!is_safe)
	      {
		if (TREE_CODE (fn) == ADDR_EXPR)
		  fn = TREE_OPERAND (fn, 0);
		if (d->block_flags & DIAG_TM_SAFE)
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"atomic transaction", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "atomic transaction", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "atomic transaction");
		      }
		  }
		else
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"%<transaction_safe%> function", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "%<transaction_safe%> function", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "%<transaction_safe%> function");
		      }
		  }
	      }
	  }
      }
      break;

    case GIMPLE_ASM:
      /* ??? We ought to come up with a way to add attributes to
	 asm statements, and then add "transaction_safe" to it.
	 Either that or get the language spec to resurrect __tm_waiver.  */
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in atomic transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in %<transaction_safe%> function");
      break;

    case GIMPLE_TRANSACTION:
      {
	unsigned char inner_flags = DIAG_TM_SAFE;

	if (gimple_transaction_subcode (stmt) & GTMA_IS_RELAXED)
	  {
	    if (d->block_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in atomic transaction");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in %<transaction_safe%> function");
	    inner_flags = DIAG_TM_RELAXED;
	  }
	else if (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)
	  {
	    if (d->block_flags)
	      error_at (gimple_location (stmt),
			"outer transaction in transaction");
	    else if (d->func_flags & DIAG_TM_OUTER)
	      error_at (gimple_location (stmt),
			"outer transaction in "
			"%<transaction_may_cancel_outer%> function");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"outer transaction in %<transaction_safe%> function");
	    inner_flags |= DIAG_TM_OUTER;
	  }

	*handled_ops_p = true;
	if (gimple_transaction_body (stmt))
	  {
	    struct walk_stmt_info wi_inner;
	    struct diagnose_tm d_inner;

	    memset (&d_inner, 0, sizeof (d_inner));
	    d_inner.func_flags = d->func_flags;
	    d_inner.block_flags = d->block_flags | inner_flags;
	    d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;

	    memset (&wi_inner, 0, sizeof (wi_inner));
	    wi_inner.info = &d_inner;

	    walk_gimple_seq (gimple_transaction_body (stmt),
			     diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
	  }
      }
      break;

    default:
      break;
    }

  return NULL_TREE;
}
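
/* For example (a sketch): given

     void (*fp) (void);   // no transaction_* attribute anywhere
     __transaction_atomic { fp (); }

   the unmarked indirect call takes the is_safe = false path above and
   is diagnosed as an unsafe call within an atomic transaction.  */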

static unsigned int
diagnose_tm_blocks (void)
{
  struct walk_stmt_info wi;
  struct diagnose_tm d;

  memset (&d, 0, sizeof (d));
  if (is_tm_may_cancel_outer (current_function_decl))
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
  else if (is_tm_safe (current_function_decl))
    d.func_flags = DIAG_TM_SAFE;
  d.summary_flags = d.func_flags;

  memset (&wi, 0, sizeof (wi));
  wi.info = &d;

  walk_gimple_seq (gimple_body (current_function_decl),
		   diagnose_tm_1, diagnose_tm_1_op, &wi);

  return 0;
}

namespace {

const pass_data pass_data_diagnose_tm_blocks =
{
  GIMPLE_PASS, /* type */
  "*diagnose_tm_blocks", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_execute */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_diagnose_tm_blocks : public gimple_opt_pass
{
public:
  pass_diagnose_tm_blocks (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return diagnose_tm_blocks (); }

}; // class pass_diagnose_tm_blocks

} // anon namespace

gimple_opt_pass *
make_pass_diagnose_tm_blocks (gcc::context *ctxt)
{
  return new pass_diagnose_tm_blocks (ctxt);
}
\f
/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

       struct large { int x[1000]; };
       struct large lala = { 0 };
       __transaction {
	 lala.x[i] = 123;
	 ...
       }

   We can either save/restore:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       if (trxn & a_saveLiveVariables)
	 tmp_lala1 = lala.x[i];
       else if (a & a_restoreLiveVariables)
	 lala.x[i] = tmp_lala1;

   or use the logging functions:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       _ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree as possible, so that one call shadows all
   of the writes to a given location (thus reducing the total number of
   logging calls), but not so high as to be called on a path that does
   not perform a write.  */

/* One individual log entry.  We may have multiple statements for the
   same location if neither dominates the other (on different
   execution paths).  */
typedef struct tm_log_entry
{
  /* Address to save.  */
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  gimple_vec stmts;
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
  tree save_var;
} *tm_log_entry_t;


/* Log entry hashtable helpers.  */

struct log_entry_hasher
{
  typedef tm_log_entry value_type;
  typedef tm_log_entry compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};

/* Htab support.  Return hash value for a `tm_log_entry'.  */
inline hashval_t
log_entry_hasher::hash (const value_type *log)
{
  return iterative_hash_expr (log->addr, 0);
}

/* Htab support.  Return true if two log entries are the same.  */
inline bool
log_entry_hasher::equal (const value_type *log1, const compare_type *log2)
{
  /* FIXME:

     rth: I suggest that we get rid of the component refs etc.
     I.e. resolve the reference to base + offset.

     We may need to actually finish a merge with mainline for this,
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     often than not.  But in the meantime your tm_log_entry could save
     the results of get_inner_reference.

     See: g++.dg/tm/pr46653.C
  */

  /* Special case plain equality because operand_equal_p() below will
     return FALSE if the addresses are equal but they have
     side-effects (e.g. a volatile address).  */
  if (log1->addr == log2->addr)
    return true;

  return operand_equal_p (log1->addr, log2->addr, 0);
}

/* Htab support.  Free one tm_log_entry.  */
inline void
log_entry_hasher::remove (value_type *lp)
{
  lp->stmts.release ();
  free (lp);
}


/* The actual log.  */
static hash_table<log_entry_hasher> *tm_log;

/* Addresses to log with a save/restore sequence.  These should be in
   dominator order.  */
static vec<tree> tm_log_save_addresses;

enum thread_memory_type
{
  mem_non_local = 0,
  mem_thread_local,
  mem_transaction_local,
  mem_max
};

typedef struct tm_new_mem_map
{
  /* SSA_NAME being dereferenced.  */
  tree val;
  enum thread_memory_type local_new_memory;
} tm_new_mem_map_t;

/* Hashtable helpers.  */

struct tm_mem_map_hasher : typed_free_remove <tm_new_mem_map_t>
{
  typedef tm_new_mem_map_t value_type;
  typedef tm_new_mem_map_t compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
tm_mem_map_hasher::hash (const value_type *v)
{
  return (intptr_t)v->val >> 4;
}

inline bool
tm_mem_map_hasher::equal (const value_type *v, const compare_type *c)
{
  return v->val == c->val;
}

/* Map for an SSA_NAME originally pointing to a non-aliased new piece
   of memory (malloc, alloca, etc).  */
static hash_table<tm_mem_map_hasher> *tm_new_mem_hash;

/* Initialize logging data structures.  */
static void
tm_log_init (void)
{
  tm_log = new hash_table<log_entry_hasher> (10);
  tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5);
  tm_log_save_addresses.create (5);
}

/* Free logging data structures.  */
static void
tm_log_delete (void)
{
  delete tm_log;
  tm_log = NULL;
  delete tm_new_mem_hash;
  tm_new_mem_hash = NULL;
  tm_log_save_addresses.release ();
}

/* Return true if MEM is a transaction invariant memory for the TM
   region starting at REGION_ENTRY_BLOCK.  */
static bool
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
{
  if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
      && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
    {
      basic_block def_bb;

      def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
      return def_bb != region_entry_block
	     && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
    }

  mem = strip_invariant_refs (mem);
  return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
}

/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry_block for the transaction.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.

   If known, ENTRY_BLOCK is the entry block for the region, otherwise
   NULL.  */
static void
tm_log_add (basic_block entry_block, tree addr, gimple stmt)
{
  tm_log_entry **slot;
  struct tm_log_entry l, *lp;

  l.addr = addr;
  slot = tm_log->find_slot (&l, INSERT);
  if (!*slot)
    {
      tree type = TREE_TYPE (addr);

      lp = XNEW (struct tm_log_entry);
      lp->addr = addr;
      *slot = lp;

      /* Small invariant addresses can be handled as save/restores.  */
      if (entry_block
	  && transaction_invariant_address_p (lp->addr, entry_block)
	  && TYPE_SIZE_UNIT (type) != NULL
	  && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
	  && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
	      < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
	  /* We must be able to copy this type normally.  I.e., no
	     special constructors and the like.  */
	  && !TREE_ADDRESSABLE (type))
	{
	  lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
	  lp->stmts.create (0);
	  lp->entry_block = entry_block;
	  /* Save addresses separately in dominator order so we don't
	     get confused by overlapping addresses in the save/restore
	     sequence.  */
	  tm_log_save_addresses.safe_push (lp->addr);
	}
      else
	{
	  /* Use the logging functions.  */
	  lp->stmts.create (5);
	  lp->stmts.quick_push (stmt);
	  lp->save_var = NULL;
	}
    }
  else
    {
      size_t i;
      gimple oldstmt;

      lp = *slot;

      /* If we're generating a save/restore sequence, we don't care
	 about statements.  */
      if (lp->save_var)
	return;

      for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
	{
	  if (stmt == oldstmt)
	    return;
	  /* We already have a store to the same address, higher up the
	     dominator tree.  Nothing to do.  */
	  if (dominated_by_p (CDI_DOMINATORS,
			      gimple_bb (stmt), gimple_bb (oldstmt)))
	    return;
	  /* We should be processing blocks in dominator tree order.  */
	  gcc_assert (!dominated_by_p (CDI_DOMINATORS,
				       gimple_bb (oldstmt), gimple_bb (stmt)));
	}
      /* Store is on a different code path.  */
      lp->stmts.safe_push (stmt);
    }
}

/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
   result, insert the new statements before GSI.  */

static tree
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
{
  if (TREE_CODE (x) == TARGET_MEM_REF)
    x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
  else
    x = build_fold_addr_expr (x);
  return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
}

/* Instrument one address with the logging functions.
   ADDR is the address to save.
   STMT is the statement before which to place it.  */
static void
tm_log_emit_stmt (tree addr, gimple stmt)
{
  tree type = TREE_TYPE (addr);
  tree size = TYPE_SIZE_UNIT (type);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple log;
  enum built_in_function code = BUILT_IN_TM_LOG;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOG_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOG_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOG_LDOUBLE;
  else if (tree_fits_uhwi_p (size))
    {
      unsigned int n = tree_to_uhwi (size);
      switch (n)
	{
	case 1:
	  code = BUILT_IN_TM_LOG_1;
	  break;
	case 2:
	  code = BUILT_IN_TM_LOG_2;
	  break;
	case 4:
	  code = BUILT_IN_TM_LOG_4;
	  break;
	case 8:
	  code = BUILT_IN_TM_LOG_8;
	  break;
	default:
	  code = BUILT_IN_TM_LOG;
	  if (TREE_CODE (type) == VECTOR_TYPE)
	    {
	      if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
		code = BUILT_IN_TM_LOG_M64;
	      else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
		code = BUILT_IN_TM_LOG_M128;
	      else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
		code = BUILT_IN_TM_LOG_M256;
	    }
	  break;
	}
    }

  addr = gimplify_addr (&gsi, addr);
  if (code == BUILT_IN_TM_LOG)
    log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
  else
    log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
  gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}

/* Go through the log and instrument addresses that must be instrumented
   with the logging functions.  Leave the save/restore addresses for
   later.  */
static void
tm_log_emit (void)
{
  hash_table<log_entry_hasher>::iterator hi;
  struct tm_log_entry *lp;

  FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi)
    {
      size_t i;
      gimple stmt;

      if (dump_file)
	{
	  fprintf (dump_file, "TM thread private mem logging: ");
	  print_generic_expr (dump_file, lp->addr, 0);
	  fprintf (dump_file, "\n");
	}

      if (lp->save_var)
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING to variable\n");
	  continue;
	}
      else
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING with logging functions\n");
	  for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
	    tm_log_emit_stmt (lp->addr, stmt);
	}
    }
}

/* Emit the save sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_saves (basic_block entry_block, basic_block bb)
{
  size_t i;
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple stmt;
  struct tm_log_entry l, *lp;

  for (i = 0; i < tm_log_save_addresses.length (); ++i)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));

      /* Make sure we can create an SSA_NAME for this type.  For
	 instance, aggregates aren't allowed, in which case the system
	 will create a VOP for us and everything will just work.  */
      if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
	{
	  lp->save_var = make_ssa_name (lp->save_var, stmt);
	  gimple_assign_set_lhs (stmt, lp->save_var);
	}

      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
}

/* Emit the restore sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_restores (basic_block entry_block, basic_block bb)
{
  int i;
  struct tm_log_entry l, *lp;
  gimple_stmt_iterator gsi;
  gimple stmt;

  for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      /* Restores are in LIFO order from the saves in case we have
	 overlaps.  */
      gsi = gsi_start_bb (bb);

      stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
}

\f
static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
			       struct walk_stmt_info *);
static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
				  struct walk_stmt_info *);

/* Evaluate an address X being dereferenced and determine if it
   originally points to a non-aliased new chunk of memory (malloc,
   alloca, etc).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
{
  gimple stmt = NULL;
  enum tree_code code;
  tm_new_mem_map_t **slot;
  tm_new_mem_map_t elt, *elt_p;
  tree val = x;
  enum thread_memory_type retval = mem_transaction_local;

  if (!entry_block
      || TREE_CODE (x) != SSA_NAME
      /* Possible uninitialized use, or a function argument.  In
	 either case, we don't care.  */
      || SSA_NAME_IS_DEFAULT_DEF (x))
    return mem_non_local;

  /* Look in cache first.  */
  elt.val = x;
  slot = tm_new_mem_hash->find_slot (&elt, INSERT);
  elt_p = *slot;
  if (elt_p)
    return elt_p->local_new_memory;

  /* Optimistically assume the memory is transaction local during
     processing.  This catches recursion into this variable.  */
  *slot = elt_p = XNEW (tm_new_mem_map_t);
  elt_p->val = val;
  elt_p->local_new_memory = mem_transaction_local;

  /* Search DEF chain to find the original definition of this address.  */
  do
    {
      if (ptr_deref_may_alias_global_p (x))
	{
	  /* Address escapes.  This is not thread-private.  */
	  retval = mem_non_local;
	  goto new_memory_ret;
	}

      stmt = SSA_NAME_DEF_STMT (x);

      /* If the malloc call is outside the transaction, this is
	 thread-local.  */
      if (retval != mem_thread_local
	  && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
	retval = mem_thread_local;

      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  /* x = foo ==> foo */
	  if (code == SSA_NAME)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = foo + n ==> foo */
	  else if (code == POINTER_PLUS_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = (cast*) foo ==> foo */
	  else if (code == VIEW_CONVERT_EXPR || code == NOP_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = c ? op1 : op2 ==> op1 or op2, just like a PHI */
	  else if (code == COND_EXPR)
	    {
	      tree op1 = gimple_assign_rhs2 (stmt);
	      tree op2 = gimple_assign_rhs3 (stmt);
	      enum thread_memory_type mem;
	      retval = thread_private_new_memory (entry_block, op1);
	      if (retval == mem_non_local)
		goto new_memory_ret;
	      mem = thread_private_new_memory (entry_block, op2);
	      retval = MIN (retval, mem);
	      goto new_memory_ret;
	    }
	  else
	    {
	      retval = mem_non_local;
	      goto new_memory_ret;
	    }
	}
      else
	{
	  if (gimple_code (stmt) == GIMPLE_PHI)
	    {
	      unsigned int i;
	      enum thread_memory_type mem;
	      tree phi_result = gimple_phi_result (stmt);

	      /* If any of the ancestors are non-local, we are sure to
		 be non-local.  Otherwise we can avoid doing anything
		 and inherit what has already been generated.  */
	      retval = mem_max;
	      for (i = 0; i < gimple_phi_num_args (stmt); ++i)
		{
		  tree op = PHI_ARG_DEF (stmt, i);

		  /* Exclude self-assignment.  */
		  if (phi_result == op)
		    continue;

		  mem = thread_private_new_memory (entry_block, op);
		  if (mem == mem_non_local)
		    {
		      retval = mem;
		      goto new_memory_ret;
		    }
		  retval = MIN (retval, mem);
		}
	      goto new_memory_ret;
	    }
	  break;
	}
    }
  while (TREE_CODE (x) == SSA_NAME);

  if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
    /* Thread-local or transaction-local.  */
    ;
  else
    retval = mem_non_local;

 new_memory_ret:
  elt_p->local_new_memory = retval;
  return retval;
}
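
/* For example (a sketch): in

     __transaction_atomic {
       int *p = malloc (sizeof *p);   // allocated inside the transaction
       *p = 1;                        // mem_transaction_local; no barrier
     }

   the store through P needs no instrumentation at all, whereas a
   malloc result flowing in from before the transaction is classified
   mem_thread_local and gets logged instead (see requires_barrier
   below).  */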

/* Determine whether X has to be instrumented using a read
   or write barrier.

   ENTRY_BLOCK is the entry block for the region in which STMT
   resides.  NULL if unknown.

   STMT is the statement in which X occurs.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
static bool
requires_barrier (basic_block entry_block, tree x, gimple stmt)
{
  tree orig = x;
  while (handled_component_p (x))
    x = TREE_OPERAND (x, 0);

  switch (TREE_CODE (x))
    {
    case INDIRECT_REF:
    case MEM_REF:
      {
	enum thread_memory_type ret;

	ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
	if (ret == mem_non_local)
	  return true;
	if (stmt && ret == mem_thread_local)
	  /* ?? Should we pass `orig', or the INDIRECT_REF X.  ?? */
	  tm_log_add (entry_block, orig, stmt);

	/* Transaction-locals require nothing at all.  For malloc, a
	   transaction restart frees the memory and we reallocate.
	   For alloca, the stack pointer gets reset by the retry and
	   we reallocate.  */
	return false;
      }

    case TARGET_MEM_REF:
      if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
	return true;
      x = TREE_OPERAND (TMR_BASE (x), 0);
      if (TREE_CODE (x) == PARM_DECL)
	return false;
      gcc_assert (TREE_CODE (x) == VAR_DECL);
      /* FALLTHRU */

    case PARM_DECL:
    case RESULT_DECL:
    case VAR_DECL:
      if (DECL_BY_REFERENCE (x))
	{
	  /* ??? This value is a pointer, but aggregate_value_p has been
	     jigged to return true, which confuses needs_to_live_in_memory.
	     This ought to be cleaned up generically.

	     FIXME: Verify this still happens after the next mainline
	     merge.  Testcase is g++.dg/tm/pr47554.C.  */
	  return false;
	}

      if (is_global_var (x))
	return !TREE_READONLY (x);
      if (/* FIXME: This condition should actually go below in the
	     tm_log_add() call, however is_call_clobbered() depends on
	     aliasing info which is not available during
	     gimplification.  Since requires_barrier() gets called
	     during lower_sequence_tm/gimplification, leave the call
	     to needs_to_live_in_memory until we eliminate
	     lower_sequence_tm altogether.  */
	  needs_to_live_in_memory (x))
	return true;
      else
	{
	  /* For local memory that doesn't escape (aka thread private
	     memory), we can either save the value at the beginning of
	     the transaction and restore on restart, or call a tm
	     function to dynamically save and restore on restart
	     (ITM_L*).  */
	  if (stmt)
	    tm_log_add (entry_block, orig, stmt);
	  return false;
	}

    default:
      return false;
    }
}

/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
   a transaction region.  */

static void
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
    *state |= GTMA_HAVE_LOAD;
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
    *state |= GTMA_HAVE_STORE;
}

/* Mark a GIMPLE_CALL as appropriate for being inside a transaction.  */

static void
examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree fn;

  if (is_tm_pure_call (stmt))
    return;

  /* Check if this call is a transaction abort.  */
  fn = gimple_call_fndecl (stmt);
  if (is_tm_abort (fn))
    *state |= GTMA_HAVE_ABORT;

  /* Note that something may happen.  */
  *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
}
1596 | ||
/* Lower a GIMPLE_TRANSACTION statement.  */

static void
lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
{
  gimple g, stmt = gsi_stmt (*gsi);
  unsigned int *outer_state = (unsigned int *) wi->info;
  unsigned int this_state = 0;
  struct walk_stmt_info this_wi;

  /* First, lower the body.  The scanning that we do inside gives
     us some idea of what we're dealing with.  */
  memset (&this_wi, 0, sizeof (this_wi));
  this_wi.info = (void *) &this_state;
  walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
		       lower_sequence_tm, NULL, &this_wi);

  /* If there was absolutely nothing transaction related inside the
     transaction, we may elide it.  Likewise if this is a nested
     transaction and does not contain an abort.  */
  if (this_state == 0
      || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
    {
      if (outer_state)
	*outer_state |= this_state;

      gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
			     GSI_SAME_STMT);
      gimple_transaction_set_body (stmt, NULL);

      gsi_remove (gsi, true);
      wi->removed_stmt = true;
      return;
    }

  /* Wrap the body of the transaction in a try-finally node so that
     the commit call is always properly called.  */
  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
  if (flag_exceptions)
    {
      tree ptr;
      gimple_seq n_seq, e_seq;

      n_seq = gimple_seq_alloc_with_stmt (g);
      e_seq = NULL;

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
			     1, integer_zero_node);
      ptr = create_tmp_var (ptr_type_node, NULL);
      gimple_call_set_lhs (g, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
			     1, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_eh_else (n_seq, e_seq);
    }

  g = gimple_build_try (gimple_transaction_body (stmt),
			gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
  gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);

  gimple_transaction_set_body (stmt, NULL);

  /* If the transaction calls abort or if this is an outer transaction,
     add an "over" label afterwards.  */
  if ((this_state & (GTMA_HAVE_ABORT))
      || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
    {
      tree label = create_artificial_label (UNKNOWN_LOCATION);
      gimple_transaction_set_label (stmt, label);
      gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
    }

  /* Record the set of operations found for use later.  */
  this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
  gimple_transaction_set_subcode (stmt, this_state);
}

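/* For example, with -fexceptions the body of

     __transaction_atomic { body; }

   is lowered to roughly the following (a sketch; the real form is
   GIMPLE, not C):

     try
       {
	 body;
       }
     finally
       {
	 eh_else
	   normal:  __builtin__ITM_commitTransaction ();
	   except:  e = __builtin_eh_pointer (0);
		    __builtin__ITM_commitTransactionEH (e);
       }

   so the commit call runs on every path out of the transaction.  */
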
/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being in a transaction.  */

static tree
lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		   struct walk_stmt_info *wi)
{
  unsigned int *state = (unsigned int *) wi->info;
  gimple stmt = gsi_stmt (*gsi);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      /* Only memory reads/writes need to be instrumented.  */
      if (gimple_assign_single_p (stmt))
	examine_assign_tm (state, gsi);
      break;

    case GIMPLE_CALL:
      examine_call_tm (state, gsi);
      break;

    case GIMPLE_ASM:
      *state |= GTMA_MAY_ENTER_IRREVOCABLE;
      break;

    case GIMPLE_TRANSACTION:
      lower_transaction (gsi, wi);
      break;

    default:
      *handled_ops_p = !gimple_has_substatements (stmt);
      break;
    }

  return NULL_TREE;
}

/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being outside of a transaction.  */

static tree
lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		      struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_TRANSACTION)
    {
      *handled_ops_p = true;
      lower_transaction (gsi, wi);
    }
  else
    *handled_ops_p = !gimple_has_substatements (stmt);

  return NULL_TREE;
}

/* Main entry point for flattening GIMPLE_TRANSACTION constructs.  After
   this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
   been moved out, and all the data required for constructing a proper
   CFG has been recorded.  */

static unsigned int
execute_lower_tm (void)
{
  struct walk_stmt_info wi;
  gimple_seq body;

  /* Transactional clones aren't created until a later pass.  */
  gcc_assert (!decl_is_tm_clone (current_function_decl));

  body = gimple_body (current_function_decl);
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
  gimple_set_body (current_function_decl, body);

  return 0;
}

namespace {

const pass_data pass_data_lower_tm =
{
  GIMPLE_PASS, /* type */
  "tmlower", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_execute */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_tm : public gimple_opt_pass
{
public:
  pass_lower_tm (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_tm, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return execute_lower_tm (); }

}; // class pass_lower_tm

} // anon namespace

gimple_opt_pass *
make_pass_lower_tm (gcc::context *ctxt)
{
  return new pass_lower_tm (ctxt);
}

\f
/* Collect region information for each transaction.  */

struct tm_region
{
  /* Link to the next unnested transaction.  */
  struct tm_region *next;

  /* Link to the next inner transaction.  */
  struct tm_region *inner;

  /* Link to the next outer transaction.  */
  struct tm_region *outer;

  /* The GIMPLE_TRANSACTION statement beginning this transaction.
     After TM_MARK, this gets replaced by a call to
     BUILT_IN_TM_START.  */
  gimple transaction_stmt;

  /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
     BUILT_IN_TM_START, this field is true if the transaction is an
     outer transaction.  */
  bool original_transaction_was_outer;

  /* Return value from BUILT_IN_TM_START.  */
  tree tm_state;

  /* The entry block to this region.  This will always be the first
     block of the body of the transaction.  */
  basic_block entry_block;

  /* The first block after an expanded call to _ITM_beginTransaction.  */
  basic_block restart_block;

  /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
     These blocks are still a part of the region (i.e., the border is
     inclusive).  Note that this set is only complete for paths in the CFG
     starting at ENTRY_BLOCK, and that there is no exit block recorded for
     the edge to the "over" label.  */
  bitmap exit_blocks;

  /* The set of all blocks that have a TM_IRREVOCABLE call.  */
  bitmap irr_blocks;
};

typedef struct tm_region *tm_region_p;

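/* As a sketch of the linkage, nested transactions

     __transaction_atomic   // A
       {
	 __transaction_atomic { ... }   // B
	 __transaction_atomic { ... }   // C
       }

   yield A->inner == C with C->next == B (inner regions are pushed on
   the front of the list), and B->outer == C->outer == A.  Top-level
   regions are chained through NEXT from all_tm_regions.  */
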
/* True if there are pending edge statements to be committed for the
   current function being scanned in the tmmark pass.  */
bool pending_edge_inserts_p;

static struct tm_region *all_tm_regions;
static bitmap_obstack tm_obstack;


/* A subroutine of tm_region_init.  Record the existence of the
   GIMPLE_TRANSACTION statement in a tree of tm_region elements.  */

static struct tm_region *
tm_region_init_0 (struct tm_region *outer, basic_block bb, gimple stmt)
{
  struct tm_region *region;

  region = (struct tm_region *)
    obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));

  if (outer)
    {
      region->next = outer->inner;
      outer->inner = region;
    }
  else
    {
      region->next = all_tm_regions;
      all_tm_regions = region;
    }
  region->inner = NULL;
  region->outer = outer;

  region->transaction_stmt = stmt;
  region->original_transaction_was_outer = false;
  region->tm_state = NULL;

  /* There are either one or two edges out of the block containing
     the GIMPLE_TRANSACTION, one to the actual region and one to the
     "over" label if the region contains an abort.  The former will
     always be the one marked FALLTHRU.  */
  region->entry_block = FALLTHRU_EDGE (bb)->dest;

  region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
  region->irr_blocks = BITMAP_ALLOC (&tm_obstack);

  return region;
}

/* A subroutine of tm_region_init.  Record all the exit and
   irrevocable blocks in BB into the region's exit_blocks and
   irr_blocks bitmaps.  Returns the new region being scanned.  */

static struct tm_region *
tm_region_init_1 (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple g;

  if (!region
      || (!region->irr_blocks && !region->exit_blocks))
    return region;

  /* Check to see if this is the end of a region by seeing if it
     contains a call to __builtin_tm_commit{,_eh}.  Note that the
     outermost region for DECL_IS_TM_CLONE need not collect this.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_CALL)
	{
	  tree fn = gimple_call_fndecl (g);
	  if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
	    {
	      if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
		   || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
		  && region->exit_blocks)
		{
		  bitmap_set_bit (region->exit_blocks, bb->index);
		  region = region->outer;
		  break;
		}
	      if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
		bitmap_set_bit (region->irr_blocks, bb->index);
	    }
	}
    }
  return region;
}

/* Collect all of the transaction regions within the current function
   and record them in ALL_TM_REGIONS.  The REGION parameter may specify
   an "outermost" region for use by tm clones.  */

static void
tm_region_init (struct tm_region *region)
{
  gimple g;
  edge_iterator ei;
  edge e;
  basic_block bb;
  auto_vec<basic_block> queue;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
  struct tm_region *old_region;
  auto_vec<tm_region_p> bb_regions;

  all_tm_regions = region;
  bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  /* We could store this information in bb->aux, but we may get called
     through get_all_tm_blocks() from another pass that may be already
     using bb->aux.  */
  bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun));

  queue.safe_push (bb);
  bb_regions[bb->index] = region;
  do
    {
      bb = queue.pop ();
      region = bb_regions[bb->index];
      bb_regions[bb->index] = NULL;

      /* Record exit and irrevocable blocks.  */
      region = tm_region_init_1 (region, bb);

      /* Check for the last statement in the block beginning a new region.  */
      g = last_stmt (bb);
      old_region = region;
      if (g && gimple_code (g) == GIMPLE_TRANSACTION)
	region = tm_region_init_0 (region, bb, g);

      /* Process subsequent blocks.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (!bitmap_bit_p (visited_blocks, e->dest->index))
	  {
	    bitmap_set_bit (visited_blocks, e->dest->index);
	    queue.safe_push (e->dest);

	    /* If the current block started a new region, make sure that only
	       the entry block of the new region is associated with this
	       region.  Other successors are still part of the old region.  */
	    if (old_region != region && e->dest != region->entry_block)
	      bb_regions[e->dest->index] = old_region;
	    else
	      bb_regions[e->dest->index] = region;
	  }
    }
  while (!queue.is_empty ());
  BITMAP_FREE (visited_blocks);
}
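
/* For example (a sketch; block numbers are arbitrary):

     bb2: GIMPLE_TRANSACTION      // starts region R
     bb3:   x = ...               // R->entry_block, scanned as part of R
     bb4:   __builtin__ITM_commitTransaction ();  // added to R->exit_blocks
     bb5: ...                     // scanned against R->outer again

   The "over" label successor of bb2, if present, stays associated with
   the old (outer) region rather than with R.  */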

/* The "gate" function for all transactional memory expansion and optimization
   passes.  We collect region information for each top-level transaction, and
   if we don't find any, we skip all of the TM passes.  Each region will have
   all of the exit blocks recorded, and the originating statement.  */

static bool
gate_tm_init (void)
{
  if (!flag_tm)
    return false;

  calculate_dominance_info (CDI_DOMINATORS);
  bitmap_obstack_initialize (&tm_obstack);

  /* If the function is a TM_CLONE, then the entire function is the region.  */
  if (decl_is_tm_clone (current_function_decl))
    {
      struct tm_region *region = (struct tm_region *)
	obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
      memset (region, 0, sizeof (*region));
      region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
      /* For a clone, the entire function is the region.  But even if
	 we don't need to record any exit blocks, we may need to
	 record irrevocable blocks.  */
      region->irr_blocks = BITMAP_ALLOC (&tm_obstack);

      tm_region_init (region);
    }
  else
    {
      tm_region_init (NULL);

      /* If we didn't find any regions, clean up and skip the whole tree
	 of tm-related optimizations.  */
      if (all_tm_regions == NULL)
	{
	  bitmap_obstack_release (&tm_obstack);
	  return false;
	}
    }

  return true;
}

namespace {

const pass_data pass_data_tm_init =
{
  GIMPLE_PASS, /* type */
  "*tminit", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  false, /* has_execute */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_tm_init : public gimple_opt_pass
{
public:
  pass_tm_init (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tm_init, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return gate_tm_init (); }

}; // class pass_tm_init

} // anon namespace

gimple_opt_pass *
make_pass_tm_init (gcc::context *ctxt)
{
  return new pass_tm_init (ctxt);
}

\f
/* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
   represented by REGION.  */

static inline void
transaction_subcode_ior (struct tm_region *region, unsigned flags)
{
  if (region && region->transaction_stmt)
    {
      flags |= gimple_transaction_subcode (region->transaction_stmt);
      gimple_transaction_set_subcode (region->transaction_stmt, flags);
    }
}

/* Construct a memory load in a transactional context.  Return the
   gimple statement performing the load, or NULL if there is no
   TM_LOAD builtin of the appropriate size to do the load.

   LOC is the location to use for the new statement(s).  */

static gimple
build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
{
  enum built_in_function code = END_BUILTINS;
  tree t, type = TREE_TYPE (rhs), decl;
  gimple gcall;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOAD_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOAD_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOAD_LDOUBLE;
  else if (TYPE_SIZE_UNIT (type) != NULL
	   && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
    {
      switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
	{
	case 1:
	  code = BUILT_IN_TM_LOAD_1;
	  break;
	case 2:
	  code = BUILT_IN_TM_LOAD_2;
	  break;
	case 4:
	  code = BUILT_IN_TM_LOAD_4;
	  break;
	case 8:
	  code = BUILT_IN_TM_LOAD_8;
	  break;
	}
    }

  if (code == END_BUILTINS)
    {
      decl = targetm.vectorize.builtin_tm_load (type);
      if (!decl)
	return NULL;
    }
  else
    decl = builtin_decl_explicit (code);

  t = gimplify_addr (gsi, rhs);
  gcall = gimple_build_call (decl, 1, t);
  gimple_set_location (gcall, loc);

  t = TREE_TYPE (TREE_TYPE (decl));
  if (useless_type_conversion_p (type, t))
    {
      gimple_call_set_lhs (gcall, lhs);
      gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
    }
  else
    {
      gimple g;
      tree temp;

      temp = create_tmp_reg (t, NULL);
      gimple_call_set_lhs (gcall, temp);
      gsi_insert_before (gsi, gcall, GSI_SAME_STMT);

      t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
      g = gimple_build_assign (lhs, t);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  return gcall;
}

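/* For example (roughly; the actual statements reference the builtin
   decls):  a 4-byte load  x = *p  inside a transaction becomes

     x = __builtin__ITM_RU4 (p);

   and a load whose type does not match the builtin's return type goes
   through a temporary:

     tmp = __builtin__ITM_RU4 (p);
     x = VIEW_CONVERT_EXPR<T> (tmp);  */
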
/* Similarly for storing TYPE in a transactional context.  */

static gimple
build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
{
  enum built_in_function code = END_BUILTINS;
  tree t, fn, type = TREE_TYPE (rhs), simple_type;
  gimple gcall;

  if (type == float_type_node)
    code = BUILT_IN_TM_STORE_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_STORE_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_STORE_LDOUBLE;
  else if (TYPE_SIZE_UNIT (type) != NULL
	   && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
    {
      switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
	{
	case 1:
	  code = BUILT_IN_TM_STORE_1;
	  break;
	case 2:
	  code = BUILT_IN_TM_STORE_2;
	  break;
	case 4:
	  code = BUILT_IN_TM_STORE_4;
	  break;
	case 8:
	  code = BUILT_IN_TM_STORE_8;
	  break;
	}
    }

  if (code == END_BUILTINS)
    {
      fn = targetm.vectorize.builtin_tm_store (type);
      if (!fn)
	return NULL;
    }
  else
    fn = builtin_decl_explicit (code);

  simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));

  if (TREE_CODE (rhs) == CONSTRUCTOR)
    {
      /* Handle the easy initialization to zero.  */
      if (!CONSTRUCTOR_ELTS (rhs))
	rhs = build_int_cst (simple_type, 0);
      else
	{
	  /* ...otherwise punt to the caller and probably use
	     BUILT_IN_TM_MEMMOVE, because we can't wrap a
	     VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
	     valid gimple.  */
	  return NULL;
	}
    }
  else if (!useless_type_conversion_p (simple_type, type))
    {
      gimple g;
      tree temp;

      temp = create_tmp_reg (simple_type, NULL);
      t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
      g = gimple_build_assign (temp, t);
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);

      rhs = temp;
    }

  t = gimplify_addr (gsi, lhs);
  gcall = gimple_build_call (fn, 2, t, rhs);
  gimple_set_location (gcall, loc);
  gsi_insert_before (gsi, gcall, GSI_SAME_STMT);

  return gcall;
}

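/* For example (roughly):  a 4-byte store  *p = x  inside a transaction
   becomes

     __builtin__ITM_WU4 (p, x);

   with a VIEW_CONVERT_EXPR through a temporary when X's type does not
   match the builtin's argument type.  */
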

/* Expand an assignment statement into transactional builtins.  */

static void
expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  bool store_p = requires_barrier (region->entry_block, lhs, NULL);
  bool load_p = requires_barrier (region->entry_block, rhs, NULL);
  gimple gcall = NULL;

  if (!load_p && !store_p)
    {
      /* Add thread private addresses to log if applicable.  */
      requires_barrier (region->entry_block, lhs, stmt);
      gsi_next (gsi);
      return;
    }

  // Remove original load/store statement.
  gsi_remove (gsi, true);

  if (load_p && !store_p)
    {
      transaction_subcode_ior (region, GTMA_HAVE_LOAD);
      gcall = build_tm_load (loc, lhs, rhs, gsi);
    }
  else if (store_p && !load_p)
    {
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
      gcall = build_tm_store (loc, lhs, rhs, gsi);
    }
  if (!gcall)
    {
      tree lhs_addr, rhs_addr, tmp;

      if (load_p)
	transaction_subcode_ior (region, GTMA_HAVE_LOAD);
      if (store_p)
	transaction_subcode_ior (region, GTMA_HAVE_STORE);

      /* ??? Figure out if there's any possible overlap between the LHS
	 and the RHS and if not, use MEMCPY.  */

      if (load_p && is_gimple_reg (lhs))
	{
	  tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
	  lhs_addr = build_fold_addr_expr (tmp);
	}
      else
	{
	  tmp = NULL_TREE;
	  lhs_addr = gimplify_addr (gsi, lhs);
	}
      rhs_addr = gimplify_addr (gsi, rhs);
      gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
				 3, lhs_addr, rhs_addr,
				 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
      gimple_set_location (gcall, loc);
      gsi_insert_before (gsi, gcall, GSI_SAME_STMT);

      if (tmp)
	{
	  gcall = gimple_build_assign (lhs, tmp);
	  gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
	}
    }

  /* Now that we have the load/store in its instrumented form, add
     thread private addresses to the log if applicable.  */
  if (!store_p)
    requires_barrier (region->entry_block, lhs, gcall);

  // The calls to build_tm_{store,load} above inserted the instrumented
  // call into the stream.
  // gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
}

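/* For example (a sketch):  an aggregate copy  a = b  that needs both a
   load and a store barrier falls through to the memmove case and
   becomes roughly

     __builtin__ITM_memmoveRtWt (&a, &b, sizeof (a));

   while a scalar load of a global,  x = g,  becomes a TM_LOAD call as
   illustrated after build_tm_load above.  */
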

/* Expand a call statement as appropriate for a transaction.  That is,
   either verify that the call does not affect the transaction, or
   redirect the call to a clone that handles transactions, or change
   the transaction state to IRREVOCABLE.  Return true if the call is
   one of the builtins that end a transaction.  */

static bool
expand_call_tm (struct tm_region *region,
		gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree lhs = gimple_call_lhs (stmt);
  tree fn_decl;
  struct cgraph_node *node;
  bool retval = false;

  fn_decl = gimple_call_fndecl (stmt);

  if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
      || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
    transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
  if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
    transaction_subcode_ior (region, GTMA_HAVE_STORE);

  if (is_tm_pure_call (stmt))
    return false;

  if (fn_decl)
    retval = is_tm_ending_fndecl (fn_decl);
  if (!retval)
    {
      /* Assume all non-const/pure calls write to memory, except
	 transaction ending builtins.  */
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
    }

  /* For indirect calls, we already generated a call into the runtime.  */
  if (!fn_decl)
    {
      tree fn = gimple_call_fn (stmt);

      /* We are guaranteed never to go irrevocable on a safe or pure
	 call, and the pure call was handled above.  */
      if (is_tm_safe (fn))
	return false;
      else
	transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

      return false;
    }

  node = cgraph_get_node (fn_decl);
  /* All calls should have cgraph here.  */
  if (!node)
    {
      /* We can have a nodeless call here if some pass after IPA-tm
	 added uninstrumented calls.  For example, loop distribution
	 can transform certain loop constructs into __builtin_mem*
	 calls.  In this case, see if we have a suitable TM
	 replacement and fill in the gaps.  */
      gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL);
      enum built_in_function code = DECL_FUNCTION_CODE (fn_decl);
      gcc_assert (code == BUILT_IN_MEMCPY
		  || code == BUILT_IN_MEMMOVE
		  || code == BUILT_IN_MEMSET);

      tree repl = find_tm_replacement_function (fn_decl);
      if (repl)
	{
	  gimple_call_set_fndecl (stmt, repl);
	  update_stmt (stmt);
	  node = cgraph_create_node (repl);
	  node->local.tm_may_enter_irr = false;
	  return expand_call_tm (region, gsi);
	}
      gcc_unreachable ();
    }
  if (node->local.tm_may_enter_irr)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  if (is_tm_abort (fn_decl))
    {
      transaction_subcode_ior (region, GTMA_HAVE_ABORT);
      return true;
    }

  /* Instrument the store if needed.

     If the assignment happens inside the function call (return slot
     optimization), there is no instrumentation to be done, since
     the callee should have done the right thing.  */
  if (lhs && requires_barrier (region->entry_block, lhs, stmt)
      && !gimple_call_return_slot_opt_p (stmt))
    {
      tree tmp = create_tmp_reg (TREE_TYPE (lhs), NULL);
      location_t loc = gimple_location (stmt);
      edge fallthru_edge = NULL;

      /* Remember if the call was going to throw.  */
      if (stmt_can_throw_internal (stmt))
	{
	  edge_iterator ei;
	  edge e;
	  basic_block bb = gimple_bb (stmt);

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (e->flags & EDGE_FALLTHRU)
	      {
		fallthru_edge = e;
		break;
	      }
	}

      gimple_call_set_lhs (stmt, tmp);
      update_stmt (stmt);
      stmt = gimple_build_assign (lhs, tmp);
      gimple_set_location (stmt, loc);

      /* We cannot throw in the middle of a BB.  If the call was going
	 to throw, place the instrumentation on the fallthru edge, so
	 the call remains the last statement in the block.  */
      if (fallthru_edge)
	{
	  gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (stmt);
	  gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
	  expand_assign_tm (region, &fallthru_gsi);
	  gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
	  pending_edge_inserts_p = true;
	}
      else
	{
	  gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
	  expand_assign_tm (region, gsi);
	}

      transaction_subcode_ior (region, GTMA_HAVE_STORE);
    }

  return retval;
}

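/* For example (a sketch):  g = foo ()  where G needs a store barrier is
   rewritten as

     tmp = foo ();
     g = tmp;   // then instrumented by expand_assign_tm into a TM store

   with the instrumented store placed on the fallthru edge instead when
   foo can throw internally, so the call stays last in its block.  */
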

/* Expand all statements in BB as appropriate for being inside
   a transaction.  */

static void
expand_block_tm (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple stmt = gsi_stmt (gsi);
      switch (gimple_code (stmt))
	{
	case GIMPLE_ASSIGN:
	  /* Only memory reads/writes need to be instrumented.  */
	  if (gimple_assign_single_p (stmt)
	      && !gimple_clobber_p (stmt))
	    {
	      expand_assign_tm (region, &gsi);
	      continue;
	    }
	  break;

	case GIMPLE_CALL:
	  if (expand_call_tm (region, &gsi))
	    return;
	  break;

	case GIMPLE_ASM:
	  gcc_unreachable ();

	default:
	  break;
	}
      if (!gsi_end_p (gsi))
	gsi_next (&gsi);
    }
}

/* Return the list of basic-blocks in REGION.

   STOP_AT_IRREVOCABLE_P is true if the caller is not interested in
   blocks following a TM_IRREVOCABLE call.

   INCLUDE_UNINSTRUMENTED_P is TRUE if we should include the
   uninstrumented code path blocks in the list of basic blocks
   returned, false otherwise.  */

static vec<basic_block>
get_tm_region_blocks (basic_block entry_block,
		      bitmap exit_blocks,
		      bitmap irr_blocks,
		      bitmap all_region_blocks,
		      bool stop_at_irrevocable_p,
		      bool include_uninstrumented_p = true)
{
  vec<basic_block> bbs = vNULL;
  unsigned i;
  edge e;
  edge_iterator ei;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  i = 0;
  bbs.safe_push (entry_block);
  bitmap_set_bit (visited_blocks, entry_block->index);

  do
    {
      basic_block bb = bbs[i++];

      if (exit_blocks
	  && bitmap_bit_p (exit_blocks, bb->index))
	continue;

      if (stop_at_irrevocable_p
	  && irr_blocks
	  && bitmap_bit_p (irr_blocks, bb->index))
	continue;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if ((include_uninstrumented_p
	     || !(e->flags & EDGE_TM_UNINSTRUMENTED))
	    && !bitmap_bit_p (visited_blocks, e->dest->index))
	  {
	    bitmap_set_bit (visited_blocks, e->dest->index);
	    bbs.safe_push (e->dest);
	  }
    }
  while (i < bbs.length ());

  if (all_region_blocks)
    bitmap_ior_into (all_region_blocks, visited_blocks);

  BITMAP_FREE (visited_blocks);
  return bbs;
}

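/* This is a simple worklist walk: exit blocks and (optionally)
   irrevocable blocks are pushed into the result, but their successors
   are not followed, so the returned vector covers the region inclusive
   of its border.  For example, if bb4 is in EXIT_BLOCKS, a region
   entry->bb3->bb4->bb5 yields [entry, bb3, bb4] and bb5 is omitted.  */
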
// Callback data for collect_bb2reg.
struct bb2reg_stuff
{
  vec<tm_region_p> *bb2reg;
  bool include_uninstrumented_p;
};

// Callback for expand_regions, collect innermost region data for each bb.
static void *
collect_bb2reg (struct tm_region *region, void *data)
{
  struct bb2reg_stuff *stuff = (struct bb2reg_stuff *) data;
  vec<tm_region_p> *bb2reg = stuff->bb2reg;
  vec<basic_block> queue;
  unsigned int i;
  basic_block bb;

  queue = get_tm_region_blocks (region->entry_block,
				region->exit_blocks,
				region->irr_blocks,
				NULL,
				/*stop_at_irr_p=*/true,
				stuff->include_uninstrumented_p);

  // We expect expand_region to perform a post-order traversal of the region
  // tree.  Therefore the last region seen for any bb is the innermost.
  FOR_EACH_VEC_ELT (queue, i, bb)
    (*bb2reg)[bb->index] = region;

  queue.release ();
  return NULL;
}

// Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
// which a basic block belongs.  Note that we only consider the instrumented
// code paths for the region; the uninstrumented code paths are ignored if
// INCLUDE_UNINSTRUMENTED_P is false.
//
// ??? This data is very similar to the bb_regions array that is collected
// during tm_region_init.  Or, rather, this data is similar to what could
// be used within tm_region_init.  The actual computation in tm_region_init
// begins and ends with bb_regions entirely full of NULL pointers, due to
// the way in which pointers are swapped in and out of the array.
//
// ??? Our callers expect that blocks are not shared between transactions.
// When the optimizers get too smart, and blocks are shared, then during
// the tm_mark phase we'll add log entries to only one of the two transactions,
// and in the tm_edge phase we'll add edges to the CFG that create invalid
// cycles.  The symptom is SSA defs that do not dominate their uses.
// Note that the optimizers were locally correct with their transformation,
// as we have no info within the program that suggests that the blocks cannot
// be shared.
//
// ??? There is currently a hack inside tree-ssa-pre.c to work around the
// only known instance of this block sharing.

static vec<tm_region_p>
get_bb_regions_instrumented (bool traverse_clones,
			     bool include_uninstrumented_p)
{
  unsigned n = last_basic_block_for_fn (cfun);
  struct bb2reg_stuff stuff;
  vec<tm_region_p> ret;

  ret.create (n);
  ret.safe_grow_cleared (n);
  stuff.bb2reg = &ret;
  stuff.include_uninstrumented_p = include_uninstrumented_p;
  expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);

  return ret;
}

/* Set the BB_IN_TRANSACTION flag on every basic block that appears
   within a transaction.  */

void
compute_transaction_bits (void)
{
  struct tm_region *region;
  vec<basic_block> queue;
  unsigned int i;
  basic_block bb;

  /* ?? Perhaps we need to abstract gate_tm_init further, because we
     certainly don't need it to calculate CDI_DOMINATOR info.  */
  gate_tm_init ();

  FOR_EACH_BB_FN (bb, cfun)
    bb->flags &= ~BB_IN_TRANSACTION;

  for (region = all_tm_regions; region; region = region->next)
    {
      queue = get_tm_region_blocks (region->entry_block,
				    region->exit_blocks,
				    region->irr_blocks,
				    NULL,
				    /*stop_at_irr_p=*/true);
      for (i = 0; queue.iterate (i, &bb); ++i)
	bb->flags |= BB_IN_TRANSACTION;
      queue.release ();
    }

  if (all_tm_regions)
    bitmap_obstack_release (&tm_obstack);
}

/* Replace the GIMPLE_TRANSACTION in this region with the corresponding
   call to BUILT_IN_TM_START.  */

static void *
expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
{
  tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
  basic_block transaction_bb = gimple_bb (region->transaction_stmt);
  tree tm_state = region->tm_state;
  tree tm_state_type = TREE_TYPE (tm_state);
  edge abort_edge = NULL;
  edge inst_edge = NULL;
  edge uninst_edge = NULL;
  edge fallthru_edge = NULL;

  // Identify the various successors of the transaction start.
  {
    edge_iterator i;
    edge e;
    FOR_EACH_EDGE (e, i, transaction_bb->succs)
      {
	if (e->flags & EDGE_TM_ABORT)
	  abort_edge = e;
	else if (e->flags & EDGE_TM_UNINSTRUMENTED)
	  uninst_edge = e;
	else
	  inst_edge = e;
	if (e->flags & EDGE_FALLTHRU)
	  fallthru_edge = e;
      }
  }

  /* ??? There are plenty of bits here we're not computing.  */
  {
    int subcode = gimple_transaction_subcode (region->transaction_stmt);
    int flags = 0;
    if (subcode & GTMA_DOES_GO_IRREVOCABLE)
      flags |= PR_DOESGOIRREVOCABLE;
    if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
      flags |= PR_HASNOIRREVOCABLE;
    /* If the transaction does not have an abort in lexical scope and is not
       marked as an outer transaction, then it will never abort.  */
    if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0)
      flags |= PR_HASNOABORT;
    if ((subcode & GTMA_HAVE_STORE) == 0)
      flags |= PR_READONLY;
    if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION))
      flags |= PR_INSTRUMENTEDCODE;
    if (uninst_edge)
      flags |= PR_UNINSTRUMENTEDCODE;
    if (subcode & GTMA_IS_OUTER)
      region->original_transaction_was_outer = true;
    tree t = build_int_cst (tm_state_type, flags);
    gimple call = gimple_build_call (tm_start, 1, t);
    gimple_call_set_lhs (call, tm_state);
    gimple_set_location (call, gimple_location (region->transaction_stmt));

    // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
    gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb);
    gcc_assert (gsi_stmt (gsi) == region->transaction_stmt);
    gsi_insert_before (&gsi, call, GSI_SAME_STMT);
    gsi_remove (&gsi, true);
    region->transaction_stmt = call;
  }

  // Generate log saves.
  if (!tm_log_save_addresses.is_empty ())
    tm_log_emit_saves (region->entry_block, transaction_bb);

  // In the beginning, we've no tests to perform on transaction restart.
  // Note that after this point, transaction_bb becomes the "most recent
  // block containing tests for the transaction".
  region->restart_block = region->entry_block;

  // Generate log restores.
  if (!tm_log_save_addresses.is_empty ())
    {
      basic_block test_bb = create_empty_bb (transaction_bb);
      basic_block code_bb = create_empty_bb (test_bb);
      basic_block join_bb = create_empty_bb (code_bb);
      add_bb_to_loop (test_bb, transaction_bb->loop_father);
      add_bb_to_loop (code_bb, transaction_bb->loop_father);
      add_bb_to_loop (join_bb, transaction_bb->loop_father);
      if (region->restart_block == region->entry_block)
	region->restart_block = test_bb;

      tree t1 = create_tmp_reg (tm_state_type, NULL);
      tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
      gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
						  tm_state, t2);
      gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      t2 = build_int_cst (tm_state_type, 0);
      stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      tm_log_emit_restores (region->entry_block, code_bb);

      edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
      edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE);
      edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
      redirect_edge_pred (fallthru_edge, join_bb);

      join_bb->frequency = test_bb->frequency = transaction_bb->frequency;
      join_bb->count = test_bb->count = transaction_bb->count;

      ei->probability = PROB_ALWAYS;
      et->probability = PROB_LIKELY;
      ef->probability = PROB_UNLIKELY;
      et->count = apply_probability (test_bb->count, et->probability);
      ef->count = apply_probability (test_bb->count, ef->probability);

      code_bb->count = et->count;
      code_bb->frequency = EDGE_FREQUENCY (et);

      transaction_bb = join_bb;
    }

  // If we have an ABORT edge, create a test to perform the abort.
  if (abort_edge)
    {
      basic_block test_bb = create_empty_bb (transaction_bb);
      add_bb_to_loop (test_bb, transaction_bb->loop_father);
      if (region->restart_block == region->entry_block)
	region->restart_block = test_bb;

      tree t1 = create_tmp_reg (tm_state_type, NULL);
      tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
      gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
						  tm_state, t2);
      gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      t2 = build_int_cst (tm_state_type, 0);
      stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
      test_bb->frequency = transaction_bb->frequency;
      test_bb->count = transaction_bb->count;
      ei->probability = PROB_ALWAYS;

      // Not the abort edge.  If both are live, choose one at random,
      // as we'll be fixing that up below.
      redirect_edge_pred (fallthru_edge, test_bb);
      fallthru_edge->flags = EDGE_FALSE_VALUE;
      fallthru_edge->probability = PROB_VERY_LIKELY;
      fallthru_edge->count
	= apply_probability (test_bb->count, fallthru_edge->probability);

      // Abort/over edge.
      redirect_edge_pred (abort_edge, test_bb);
      abort_edge->flags = EDGE_TRUE_VALUE;
      abort_edge->probability = PROB_VERY_UNLIKELY;
      abort_edge->count
	= apply_probability (test_bb->count, abort_edge->probability);

      transaction_bb = test_bb;
    }

  // If we have both instrumented and uninstrumented code paths, select one.
  if (inst_edge && uninst_edge)
    {
      basic_block test_bb = create_empty_bb (transaction_bb);
      add_bb_to_loop (test_bb, transaction_bb->loop_father);
      if (region->restart_block == region->entry_block)
	region->restart_block = test_bb;

      tree t1 = create_tmp_reg (tm_state_type, NULL);
      tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);

      gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
						  tm_state, t2);
      gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      t2 = build_int_cst (tm_state_type, 0);
      stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      // Create the edge into test_bb first, as we want to copy values
      // out of the fallthru edge.
      edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
      e->probability = fallthru_edge->probability;
      test_bb->count = e->count = fallthru_edge->count;
      test_bb->frequency = EDGE_FREQUENCY (e);

      // Now update the edges to the inst/uninst implementations.
      // For now assume that the paths are equally likely.  When using HTM,
      // we'll try the uninst path first and fall back to the inst path if
      // the HTM buffers are exceeded.  Without HTM we start with the inst
      // path and use the uninst path when falling back to serial mode.
      redirect_edge_pred (inst_edge, test_bb);
      inst_edge->flags = EDGE_FALSE_VALUE;
      inst_edge->probability = REG_BR_PROB_BASE / 2;
      inst_edge->count
	= apply_probability (test_bb->count, inst_edge->probability);

      redirect_edge_pred (uninst_edge, test_bb);
      uninst_edge->flags = EDGE_TRUE_VALUE;
      uninst_edge->probability = REG_BR_PROB_BASE / 2;
      uninst_edge->count
	= apply_probability (test_bb->count, uninst_edge->probability);
    }

  // If we have no previous special cases, and we have PHIs at the beginning
  // of the atomic region, this means we have a loop at the beginning of the
  // atomic region that shares the first block.  This can cause problems with
  // the transaction restart abnormal edges to be added in the tm_edges pass.
  // Solve this by adding a new empty block to receive the abnormal edges.
  if (region->restart_block == region->entry_block
      && phi_nodes (region->entry_block))
    {
      basic_block empty_bb = create_empty_bb (transaction_bb);
      region->restart_block = empty_bb;
      add_bb_to_loop (empty_bb, transaction_bb->loop_father);

      redirect_edge_pred (fallthru_edge, empty_bb);
      make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU);
    }

  return NULL;
}

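/* After expansion, the transaction entry looks roughly like this
   (a sketch; each test lives in its own basic block, and only the
   tests this region actually needs are generated):

     tm_state = __builtin__ITM_beginTransaction (PR_INSTRUMENTEDCODE | ...);
     if (tm_state & A_RESTORELIVEVARIABLES)
       goto restore_live_variables;
     if (tm_state & A_ABORTTRANSACTION)
       goto over_label;
     if (tm_state & A_RUNUNINSTRUMENTEDCODE)
       goto uninstrumented_path;
     // fall through to the instrumented path  */
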
/* Generate the temporary to be used for the return value of
   BUILT_IN_TM_START.  */

static void *
generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
{
  tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
  region->tm_state =
    create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");

  // Reset the subcode, post optimizations.  We'll fill this in
  // again as we process blocks.
  if (region->exit_blocks)
    {
      unsigned int subcode
	= gimple_transaction_subcode (region->transaction_stmt);

      if (subcode & GTMA_DOES_GO_IRREVOCABLE)
	subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
		    | GTMA_MAY_ENTER_IRREVOCABLE
		    | GTMA_HAS_NO_INSTRUMENTATION);
      else
	subcode &= GTMA_DECLARATION_MASK;
      gimple_transaction_set_subcode (region->transaction_stmt, subcode);
    }

  return NULL;
}

// Propagate flags from inner transactions outwards.
static void
propagate_tm_flags_out (struct tm_region *region)
{
  if (region == NULL)
    return;
  propagate_tm_flags_out (region->inner);

  if (region->outer && region->outer->transaction_stmt)
    {
      unsigned s = gimple_transaction_subcode (region->transaction_stmt);
      s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
	    | GTMA_MAY_ENTER_IRREVOCABLE);
      s |= gimple_transaction_subcode (region->outer->transaction_stmt);
      gimple_transaction_set_subcode (region->outer->transaction_stmt, s);
    }

  propagate_tm_flags_out (region->next);
}

0a35513e AH |
2944 | /* Entry point to the MARK phase of TM expansion. Here we replace |
2945 | transactional memory statements with calls to builtins, and function | |
2946 | calls with their transactional clones (if available). But we don't | |
2947 | yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */ | |
2948 | ||
2949 | static unsigned int | |
2950 | execute_tm_mark (void) | |
2951 | { | |
0a35513e AH |
2952 | pending_edge_inserts_p = false; |
2953 | ||
b5e10eac AH |
2954 | expand_regions (all_tm_regions, generate_tm_state, NULL, |
2955 | /*traverse_clones=*/true); | |
0a35513e | 2956 | |
398b1daa | 2957 | tm_log_init (); |
0a35513e | 2958 | |
b5e10eac | 2959 | vec<tm_region_p> bb_regions |
0ed3d24e AH |
2960 | = get_bb_regions_instrumented (/*traverse_clones=*/true, |
2961 | /*include_uninstrumented_p=*/false); | |
398b1daa AH |
2962 | struct tm_region *r; |
2963 | unsigned i; | |
0a35513e | 2964 | |
398b1daa AH |
2965 | // Expand memory operations into calls into the runtime. |
2966 | // This collects log entries as well. | |
9771b263 | 2967 | FOR_EACH_VEC_ELT (bb_regions, i, r) |
6aad4455 AH |
2968 | { |
2969 | if (r != NULL) | |
2970 | { | |
2971 | if (r->transaction_stmt) | |
2972 | { | |
2973 | unsigned sub = gimple_transaction_subcode (r->transaction_stmt); | |
2974 | ||
2975 | /* If we're sure to go irrevocable, there won't be | |
2976 | anything to expand, since the run-time will go | |
2977 | irrevocable right away. */ | |
2978 | if (sub & GTMA_DOES_GO_IRREVOCABLE | |
2979 | && sub & GTMA_MAY_ENTER_IRREVOCABLE) | |
2980 | continue; | |
2981 | } | |
06e28de2 | 2982 | expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i)); |
6aad4455 AH |
2983 | } |
2984 | } | |
398b1daa | 2985 | |
639498a8 AH |
2986 | bb_regions.release (); |
2987 | ||
398b1daa AH |
2988 | // Propagate flags from inner transactions outwards. |
2989 | propagate_tm_flags_out (all_tm_regions); | |
2990 | ||
2991 | // Expand GIMPLE_TRANSACTIONs into calls into the runtime. | |
b5e10eac AH |
2992 | expand_regions (all_tm_regions, expand_transaction, NULL, |
2993 | /*traverse_clones=*/false); | |
398b1daa AH |
2994 | |
2995 | tm_log_emit (); | |
2996 | tm_log_delete (); | |
0a35513e AH |
2997 | |
2998 | if (pending_edge_inserts_p) | |
2999 | gsi_commit_edge_inserts (); | |
398b1daa | 3000 | free_dominance_info (CDI_DOMINATORS); |
0a35513e AH |
3001 | return 0; |
3002 | } | |
3003 | ||
namespace {

const pass_data pass_data_tm_mark =
{
  GIMPLE_PASS, /* type */
  "tmmark", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_execute */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_tm_mark : public gimple_opt_pass
{
public:
  pass_tm_mark (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tm_mark, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return execute_tm_mark (); }

}; // class pass_tm_mark

} // anon namespace

gimple_opt_pass *
make_pass_tm_mark (gcc::context *ctxt)
{
  return new pass_tm_mark (ctxt);
}

\f
398b1daa AH |
3040 | |
3041 | /* Create an abnormal edge from STMT at ITER to DEST_BB, splitting the | |
3042 | block as necessary. Adjust *PNEXT as needed for the split block. */ | |
0a35513e AH |
3043 | |
3044 | static inline void | |
398b1daa AH |
3045 | split_bb_make_tm_edge (gimple stmt, basic_block dest_bb, |
3046 | gimple_stmt_iterator iter, gimple_stmt_iterator *pnext) | |
0a35513e | 3047 | { |
398b1daa AH |
3048 | basic_block bb = gimple_bb (stmt); |
3049 | if (!gsi_one_before_end_p (iter)) | |
3050 | { | |
3051 | edge e = split_block (bb, stmt); | |
3052 | *pnext = gsi_start_bb (e->dest); | |
3053 | } | |
3054 | make_edge (bb, dest_bb, EDGE_ABNORMAL); | |
0a35513e | 3055 | |
398b1daa | 3056 | // Record the need for the edge for the benefit of the rtl passes. |
0a35513e AH |
3057 | if (cfun->gimple_df->tm_restart == NULL) |
3058 | cfun->gimple_df->tm_restart = htab_create_ggc (31, struct_ptr_hash, | |
3059 | struct_ptr_eq, ggc_free); | |
3060 | ||
398b1daa | 3061 | struct tm_restart_node dummy; |
0a35513e | 3062 | dummy.stmt = stmt; |
398b1daa AH |
3063 | dummy.label_or_list = gimple_block_label (dest_bb); |
3064 | ||
3065 | void **slot = htab_find_slot (cfun->gimple_df->tm_restart, &dummy, INSERT); | |
3066 | struct tm_restart_node *n = (struct tm_restart_node *) *slot; | |
0a35513e AH |
3067 | if (n == NULL) |
3068 | { | |
766090c2 | 3069 | n = ggc_alloc<tm_restart_node> (); |
0a35513e AH |
3070 | *n = dummy; |
3071 | } | |
3072 | else | |
3073 | { | |
3074 | tree old = n->label_or_list; | |
3075 | if (TREE_CODE (old) == LABEL_DECL) | |
398b1daa | 3076 | old = tree_cons (NULL, old, NULL); |
0a35513e AH |
3077 | n->label_or_list = tree_cons (NULL, dummy.label_or_list, old); |
3078 | } | |
0a35513e AH |
3079 | } |
3080 | ||
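/* A sketch of the surgery performed above, assuming a TM builtin call
   sits in the middle of BB5 and DEST_BB is the transaction's restart
   block:

     before:   BB5:  a_1 = _ITM_RU4 (&x);
                     b_2 = a_1 + 1;

     after:    BB5:  a_1 = _ITM_RU4 (&x);  --fallthru-->  BB7: b_2 = a_1 + 1;
                 \
                  `--abnormal--> DEST_BB

   The stmt -> label entry recorded in cfun->gimple_df->tm_restart tells
   the RTL expanders to treat the call as a potential nonlocal jump; a
   statement that can restart several transactions accumulates a
   TREE_LIST of labels instead of a single LABEL_DECL.  */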
0a35513e AH |
3081 | /* Split block BB as necessary for every builtin function we added, and |
3082 | wire up the abnormal back edges implied by the transaction restart. */ | |
3083 | ||
3084 | static void | |
398b1daa | 3085 | expand_block_edges (struct tm_region *const region, basic_block bb) |
0a35513e | 3086 | { |
398b1daa | 3087 | gimple_stmt_iterator gsi, next_gsi; |
0a35513e | 3088 | |
398b1daa | 3089 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi) |
0a35513e AH |
3090 | { |
3091 | gimple stmt = gsi_stmt (gsi); | |
3092 | ||
398b1daa AH |
3093 | next_gsi = gsi; |
3094 | gsi_next (&next_gsi); | |
3095 | ||
3096 | // ??? Shouldn't we split for any non-pure, non-irrevocable function? | |
3097 | if (gimple_code (stmt) != GIMPLE_CALL | |
3098 | || (gimple_call_flags (stmt) & ECF_TM_BUILTIN) == 0) | |
3099 | continue; | |
3100 | ||
3101 | if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt)) == BUILT_IN_TM_ABORT) | |
0a35513e | 3102 | { |
398b1daa AH |
3103 | // If we have a ``__transaction_cancel [[outer]]'', there is only | |
3104 | // one abnormal edge: to the transaction marked OUTER. | |
3105 | // All compiler-generated instances of BUILT_IN_TM_ABORT have a | |
3106 | // constant argument, which we can examine here. Users invoking | |
3107 | // TM_ABORT directly get what they deserve. | |
3108 | tree arg = gimple_call_arg (stmt, 0); | |
3109 | if (TREE_CODE (arg) == INTEGER_CST | |
3110 | && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0 | |
3111 | && !decl_is_tm_clone (current_function_decl)) | |
0a35513e | 3112 | { |
398b1daa AH |
3113 | // Find the GTMA_IS_OUTER transaction. |
3114 | for (struct tm_region *o = region; o; o = o->outer) | |
3115 | if (o->original_transaction_was_outer) | |
3116 | { | |
3117 | split_bb_make_tm_edge (stmt, o->restart_block, | |
3118 | gsi, &next_gsi); | |
3119 | break; | |
3120 | } | |
3121 | ||
3122 | // Otherwise, the front-end should have semantically checked | |
3123 | // outer aborts, but in either case the target region is not | |
3124 | // within this function. | |
3125 | continue; | |
0a35513e AH |
3126 | } |
3127 | ||
398b1daa AH |
3128 | // Non-outer TM aborts have an abnormal edge to the innermost | |
3129 | // transaction, the one being aborted. | |
3130 | split_bb_make_tm_edge (stmt, region->restart_block, gsi, &next_gsi); | |
0a35513e AH |
3131 | } |
3132 | ||
398b1daa AH |
3133 | // All TM builtins have an abnormal edge to the outermost transaction. | |
3134 | // We never restart inner transactions. For tm clones, we know a priori | |
3135 | // that the outermost transaction is outside the function. | |
3136 | if (decl_is_tm_clone (current_function_decl)) | |
3137 | continue; | |
0a35513e | 3138 | |
398b1daa AH |
3139 | if (cfun->gimple_df->tm_restart == NULL) |
3140 | cfun->gimple_df->tm_restart | |
3141 | = htab_create_ggc (31, struct_ptr_hash, struct_ptr_eq, ggc_free); | |
0a35513e | 3142 | |
398b1daa AH |
3143 | // As described above, wire the abnormal edge to the restart block | |
3144 | // of the outermost transaction. | |
3145 | for (struct tm_region *o = region; o; o = o->outer) | |
3146 | if (!o->outer) | |
3147 | { | |
3148 | split_bb_make_tm_edge (stmt, o->restart_block, gsi, &next_gsi); | |
3149 | break; | |
3150 | } | |
0a35513e | 3151 | |
398b1daa AH |
3152 | // Delete any tail-call annotation that may have been added. |
3153 | // The tail-call pass may have mis-identified the commit as being | |
3154 | // a candidate because we had not yet added this restart edge. | |
3155 | gimple_call_set_tail (stmt, false); | |
0a35513e AH |
3156 | } |
3157 | } | |
3158 | ||
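/* As an example of the wiring above (a sketch, not a dump):

     __transaction_atomic [[outer]] {      // region O
       __transaction_atomic {              // region I
         __transaction_cancel [[outer]];
       }
     }

   The BUILT_IN_TM_ABORT generated for the cancel carries AR_OUTERABORT
   in its constant argument, so the first loop walks region I's ->outer
   chain and wires the abnormal edge to O's restart block.  A plain
   __transaction_cancel would instead target region I itself, and every
   other TM builtin inside I gets its edge to the outermost region
   only.  */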
3159 | /* Entry point to the final expansion of transactional nodes. */ | |
3160 | ||
27a4cd48 DM |
3161 | namespace { |
3162 | ||
3163 | const pass_data pass_data_tm_edges = | |
3164 | { | |
3165 | GIMPLE_PASS, /* type */ | |
3166 | "tmedge", /* name */ | |
3167 | OPTGROUP_NONE, /* optinfo_flags */ | |
27a4cd48 DM |
3168 | true, /* has_execute */ |
3169 | TV_TRANS_MEM, /* tv_id */ | |
3170 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
3171 | 0, /* properties_provided */ | |
3172 | 0, /* properties_destroyed */ | |
3173 | 0, /* todo_flags_start */ | |
3bea341f | 3174 | TODO_update_ssa, /* todo_flags_finish */ |
0a35513e | 3175 | }; |
27a4cd48 DM |
3176 | |
3177 | class pass_tm_edges : public gimple_opt_pass | |
3178 | { | |
3179 | public: | |
c3284718 RS |
3180 | pass_tm_edges (gcc::context *ctxt) |
3181 | : gimple_opt_pass (pass_data_tm_edges, ctxt) | |
27a4cd48 DM |
3182 | {} |
3183 | ||
3184 | /* opt_pass methods: */ | |
be55bfe6 | 3185 | virtual unsigned int execute (function *); |
27a4cd48 DM |
3186 | |
3187 | }; // class pass_tm_edges | |
3188 | ||
be55bfe6 TS |
3189 | unsigned int |
3190 | pass_tm_edges::execute (function *fun) | |
3191 | { | |
3192 | vec<tm_region_p> bb_regions | |
3193 | = get_bb_regions_instrumented (/*traverse_clones=*/false, | |
3194 | /*include_uninstrumented_p=*/true); | |
3195 | struct tm_region *r; | |
3196 | unsigned i; | |
3197 | ||
3198 | FOR_EACH_VEC_ELT (bb_regions, i, r) | |
3199 | if (r != NULL) | |
3200 | expand_block_edges (r, BASIC_BLOCK_FOR_FN (fun, i)); | |
3201 | ||
3202 | bb_regions.release (); | |
3203 | ||
3204 | /* We've got to release the dominance info now, to indicate that it | |
3205 | must be rebuilt completely. Otherwise we'll crash trying to update | |
3206 | the SSA web in the TODO section following this pass. */ | |
3207 | free_dominance_info (CDI_DOMINATORS); | |
3208 | bitmap_obstack_release (&tm_obstack); | |
3209 | all_tm_regions = NULL; | |
3210 | ||
3211 | return 0; | |
3212 | } | |
3213 | ||
27a4cd48 DM |
3214 | } // anon namespace |
3215 | ||
3216 | gimple_opt_pass * | |
3217 | make_pass_tm_edges (gcc::context *ctxt) | |
3218 | { | |
3219 | return new pass_tm_edges (ctxt); | |
3220 | } | |
398b1daa AH |
3221 | \f |
3222 | /* Helper function for expand_regions. Expand REGION and recurse into | |
3223 | its inner regions. Call CALLBACK on each region. CALLBACK returns | |
3224 | NULL to continue the traversal, otherwise a non-null value which | |
b5e10eac AH |
3225 | this function will return as well. TRAVERSE_CLONES is true if we |
3226 | should traverse transactional clones. */ | |
398b1daa AH |
3227 | |
3228 | static void * | |
3229 | expand_regions_1 (struct tm_region *region, | |
3230 | void *(*callback)(struct tm_region *, void *), | |
b5e10eac AH |
3231 | void *data, |
3232 | bool traverse_clones) | |
398b1daa AH |
3233 | { |
3234 | void *retval = NULL; | |
b5e10eac AH |
3235 | if (region->exit_blocks |
3236 | || (traverse_clones && decl_is_tm_clone (current_function_decl))) | |
398b1daa AH |
3237 | { |
3238 | retval = callback (region, data); | |
3239 | if (retval) | |
3240 | return retval; | |
3241 | } | |
3242 | if (region->inner) | |
3243 | { | |
b5e10eac | 3244 | retval = expand_regions (region->inner, callback, data, traverse_clones); |
398b1daa AH |
3245 | if (retval) |
3246 | return retval; | |
3247 | } | |
3248 | return retval; | |
3249 | } | |
3250 | ||
3251 | /* Traverse REGION, its siblings, and the regions they enclose. Execute | |
3252 | CALLBACK for each region, passing DATA. CALLBACK returns NULL to | |
3253 | continue the traversal, otherwise a non-null value which this | |
b5e10eac AH |
3254 | function will return as well. TRAVERSE_CLONES is true if we should |
3255 | traverse transactional clones. */ | |
398b1daa AH |
3256 | |
3257 | static void * | |
3258 | expand_regions (struct tm_region *region, | |
3259 | void *(*callback)(struct tm_region *, void *), | |
b5e10eac AH |
3260 | void *data, |
3261 | bool traverse_clones) | |
398b1daa AH |
3262 | { |
3263 | void *retval = NULL; | |
3264 | while (region) | |
3265 | { | |
b5e10eac | 3266 | retval = expand_regions_1 (region, callback, data, traverse_clones); |
398b1daa AH |
3267 | if (retval) |
3268 | return retval; | |
3269 | region = region->next; | |
3270 | } | |
3271 | return retval; | |
3272 | } | |
3273 | ||
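/* A hypothetical use of this traversal, to illustrate the callback
   contract; find_region_with_stmt is made up for the example and is
   not defined elsewhere in this file:

     static void *
     find_region_with_stmt (struct tm_region *r, void *data)
     {
       return r->transaction_stmt == (gimple) data ? r : NULL;
     }

     struct tm_region *owner
       = (struct tm_region *) expand_regions (all_tm_regions,
                                              find_region_with_stmt,
                                              stmt, false);  // no clones

   The walk visits REGION, its siblings via ->next and their inner
   regions, and stops at the first non-null callback result.  */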
0a35513e AH |
3274 | \f |
3275 | /* A unique TM memory operation. */ | |
3276 | typedef struct tm_memop | |
3277 | { | |
3278 | /* Unique ID that all memory operations to the same location have. */ | |
3279 | unsigned int value_id; | |
3280 | /* Address of load/store. */ | |
3281 | tree addr; | |
3282 | } *tm_memop_t; | |
3283 | ||
4a8fb1a1 LC |
3284 | /* TM memory operation hashtable helpers. */ |
3285 | ||
3286 | struct tm_memop_hasher : typed_free_remove <tm_memop> | |
3287 | { | |
3288 | typedef tm_memop value_type; | |
3289 | typedef tm_memop compare_type; | |
3290 | static inline hashval_t hash (const value_type *); | |
3291 | static inline bool equal (const value_type *, const compare_type *); | |
3292 | }; | |
3293 | ||
3294 | /* Htab support. Return a hash value for a `tm_memop'. */ | |
3295 | inline hashval_t | |
3296 | tm_memop_hasher::hash (const value_type *mem) | |
3297 | { | |
3298 | tree addr = mem->addr; | |
3299 | /* We drill down to the SSA_NAME/DECL for the hash, but equality is | |
3300 | actually done with operand_equal_p (see tm_memop_hasher::equal). */ | |
3301 | if (TREE_CODE (addr) == ADDR_EXPR) | |
3302 | addr = TREE_OPERAND (addr, 0); | |
3303 | return iterative_hash_expr (addr, 0); | |
3304 | } | |
3305 | ||
3306 | /* Htab support. Return true if two tm_memop's are the same. */ | |
3307 | inline bool | |
3308 | tm_memop_hasher::equal (const value_type *mem1, const compare_type *mem2) | |
3309 | { | |
3310 | return operand_equal_p (mem1->addr, mem2->addr, 0); | |
3311 | } | |
3312 | ||
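/* A self-contained sketch of the value-numbering scheme (plain C++
   keyed on strings, not GCC trees): hashing drills through ADDR_EXPR
   so &x from different statements lands in one bucket, and equal
   addresses always map to one id:

     #include <map>
     #include <string>

     static unsigned
     value_number (std::map<std::string, unsigned> &ids,
                   const std::string &addr)
     {
       std::map<std::string, unsigned>::iterator it = ids.find (addr);
       if (it != ids.end ())
         return it->second;            // seen before: same id
       unsigned id = (unsigned) ids.size ();
       ids[addr] = id;                 // first sight: fresh id
       return id;
     }

   In this pass the ids become bit positions in the dataflow sets
   below, so a load and a store of the same address share one bit.  */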
0a35513e AH |
3313 | /* Sets for solving data flow equations in the memory optimization pass. */ |
3314 | struct tm_memopt_bitmaps | |
3315 | { | |
3316 | /* Stores available to this BB upon entry. Basically, stores that | |
3317 | dominate this BB. */ | |
3318 | bitmap store_avail_in; | |
3319 | /* Stores available at the end of this BB. */ | |
3320 | bitmap store_avail_out; | |
3321 | bitmap store_antic_in; | |
3322 | bitmap store_antic_out; | |
3323 | /* Reads available to this BB upon entry. Basically, reads that | |
3324 | dominate this BB. */ | |
3325 | bitmap read_avail_in; | |
3326 | /* Reads available at the end of this BB. */ | |
3327 | bitmap read_avail_out; | |
3328 | /* Reads performed in this BB. */ | |
3329 | bitmap read_local; | |
3330 | /* Writes performed in this BB. */ | |
3331 | bitmap store_local; | |
3332 | ||
3333 | /* Temporary storage for pass. */ | |
3334 | /* Is the current BB in the worklist? */ | |
3335 | bool avail_in_worklist_p; | |
3336 | /* Have we visited this BB? */ | |
3337 | bool visited_p; | |
3338 | }; | |
3339 | ||
3340 | static bitmap_obstack tm_memopt_obstack; | |
3341 | ||
3342 | /* Unique counter for TM loads and stores. Loads and stores of the | |
3343 | same address get the same ID. */ | |
3344 | static unsigned int tm_memopt_value_id; | |
c203e8a7 | 3345 | static hash_table<tm_memop_hasher> *tm_memopt_value_numbers; |
0a35513e AH |
3346 | |
3347 | #define STORE_AVAIL_IN(BB) \ | |
3348 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in | |
3349 | #define STORE_AVAIL_OUT(BB) \ | |
3350 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out | |
3351 | #define STORE_ANTIC_IN(BB) \ | |
3352 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in | |
3353 | #define STORE_ANTIC_OUT(BB) \ | |
3354 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out | |
3355 | #define READ_AVAIL_IN(BB) \ | |
3356 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in | |
3357 | #define READ_AVAIL_OUT(BB) \ | |
3358 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out | |
3359 | #define READ_LOCAL(BB) \ | |
3360 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local | |
3361 | #define STORE_LOCAL(BB) \ | |
3362 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local | |
3363 | #define AVAIL_IN_WORKLIST_P(BB) \ | |
3364 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p | |
3365 | #define BB_VISITED_P(BB) \ | |
3366 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p | |
3367 | ||
0a35513e AH |
3368 | /* Given a TM load/store in STMT, return the value number for the address |
3369 | it accesses. */ | |
3370 | ||
3371 | static unsigned int | |
3372 | tm_memopt_value_number (gimple stmt, enum insert_option op) | |
3373 | { | |
3374 | struct tm_memop tmpmem, *mem; | |
4a8fb1a1 | 3375 | tm_memop **slot; |
0a35513e AH |
3376 | |
3377 | gcc_assert (is_tm_load (stmt) || is_tm_store (stmt)); | |
3378 | tmpmem.addr = gimple_call_arg (stmt, 0); | |
c203e8a7 | 3379 | slot = tm_memopt_value_numbers->find_slot (&tmpmem, op); |
0a35513e | 3380 | if (*slot) |
4a8fb1a1 | 3381 | mem = *slot; |
0a35513e AH |
3382 | else if (op == INSERT) |
3383 | { | |
3384 | mem = XNEW (struct tm_memop); | |
3385 | *slot = mem; | |
3386 | mem->value_id = tm_memopt_value_id++; | |
3387 | mem->addr = tmpmem.addr; | |
3388 | } | |
3389 | else | |
3390 | gcc_unreachable (); | |
3391 | return mem->value_id; | |
3392 | } | |
3393 | ||
3394 | /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */ | |
3395 | ||
3396 | static void | |
3397 | tm_memopt_accumulate_memops (basic_block bb) | |
3398 | { | |
3399 | gimple_stmt_iterator gsi; | |
3400 | ||
3401 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
3402 | { | |
3403 | gimple stmt = gsi_stmt (gsi); | |
3404 | bitmap bits; | |
3405 | unsigned int loc; | |
3406 | ||
3407 | if (is_tm_store (stmt)) | |
3408 | bits = STORE_LOCAL (bb); | |
3409 | else if (is_tm_load (stmt)) | |
3410 | bits = READ_LOCAL (bb); | |
3411 | else | |
3412 | continue; | |
3413 | ||
3414 | loc = tm_memopt_value_number (stmt, INSERT); | |
3415 | bitmap_set_bit (bits, loc); | |
3416 | if (dump_file) | |
3417 | { | |
3418 | fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=", | |
3419 | is_tm_load (stmt) ? "LOAD" : "STORE", loc, | |
3420 | gimple_bb (stmt)->index); | |
3421 | print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0); | |
3422 | fprintf (dump_file, "\n"); | |
3423 | } | |
3424 | } | |
3425 | } | |
3426 | ||
3427 | /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */ | |
3428 | ||
3429 | static void | |
3430 | dump_tm_memopt_set (const char *set_name, bitmap bits) | |
3431 | { | |
3432 | unsigned i; | |
3433 | bitmap_iterator bi; | |
3434 | const char *comma = ""; | |
3435 | ||
3436 | fprintf (dump_file, "TM memopt: %s: [", set_name); | |
3437 | EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi) | |
3438 | { | |
c203e8a7 | 3439 | hash_table<tm_memop_hasher>::iterator hi; |
4a8fb1a1 | 3440 | struct tm_memop *mem = NULL; |
0a35513e AH |
3441 | |
3442 | /* Yeah, yeah, yeah. Whatever. This is just for debugging. */ | |
c203e8a7 | 3443 | FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi) |
0a35513e AH |
3444 | if (mem->value_id == i) |
3445 | break; | |
3446 | gcc_assert (mem->value_id == i); | |
3447 | fprintf (dump_file, "%s", comma); | |
3448 | comma = ", "; | |
3449 | print_generic_expr (dump_file, mem->addr, 0); | |
3450 | } | |
3451 | fprintf (dump_file, "]\n"); | |
3452 | } | |
3453 | ||
3454 | /* Prettily dump all of the memopt sets in BLOCKS. */ | |
3455 | ||
3456 | static void | |
9771b263 | 3457 | dump_tm_memopt_sets (vec<basic_block> blocks) |
0a35513e AH |
3458 | { |
3459 | size_t i; | |
3460 | basic_block bb; | |
3461 | ||
9771b263 | 3462 | for (i = 0; blocks.iterate (i, &bb); ++i) |
0a35513e AH |
3463 | { |
3464 | fprintf (dump_file, "------------BB %d---------\n", bb->index); | |
3465 | dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb)); | |
3466 | dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb)); | |
3467 | dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb)); | |
3468 | dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb)); | |
3469 | dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb)); | |
3470 | dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb)); | |
3471 | } | |
3472 | } | |
3473 | ||
3474 | /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */ | |
3475 | ||
3476 | static void | |
3477 | tm_memopt_compute_avin (basic_block bb) | |
3478 | { | |
3479 | edge e; | |
3480 | unsigned ix; | |
3481 | ||
3482 | /* Seed with the AVOUT of any predecessor. */ | |
3483 | for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++) | |
3484 | { | |
3485 | e = EDGE_PRED (bb, ix); | |
3486 | /* Make sure we have already visited this BB, so that it is | |
3487 | initialized. | |
3488 | ||
3489 | If e->src->aux is NULL, this predecessor is actually on an | |
3490 | enclosing transaction. We only care about the current | |
3491 | transaction, so ignore it. */ | |
3492 | if (e->src->aux && BB_VISITED_P (e->src)) | |
3493 | { | |
3494 | bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src)); | |
3495 | bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src)); | |
3496 | break; | |
3497 | } | |
3498 | } | |
3499 | ||
3500 | for (; ix < EDGE_COUNT (bb->preds); ix++) | |
3501 | { | |
3502 | e = EDGE_PRED (bb, ix); | |
3503 | if (e->src->aux && BB_VISITED_P (e->src)) | |
3504 | { | |
3505 | bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src)); | |
3506 | bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src)); | |
3507 | } | |
3508 | } | |
3509 | ||
3510 | BB_VISITED_P (bb) = true; | |
3511 | } | |
3512 | ||
3513 | /* Compute the STORE_ANTIC_IN for the basic block BB. */ | |
3514 | ||
3515 | static void | |
3516 | tm_memopt_compute_antin (basic_block bb) | |
3517 | { | |
3518 | edge e; | |
3519 | unsigned ix; | |
3520 | ||
3521 | /* Seed with the ANTIC_OUT of any successor. */ | |
3522 | for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++) | |
3523 | { | |
3524 | e = EDGE_SUCC (bb, ix); | |
3525 | /* Make sure we have already visited this BB, so that it is | |
3526 | initialized. */ | |
3527 | if (BB_VISITED_P (e->dest)) | |
3528 | { | |
3529 | bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest)); | |
3530 | break; | |
3531 | } | |
3532 | } | |
3533 | ||
3534 | for (; ix < EDGE_COUNT (bb->succs); ix++) | |
3535 | { | |
3536 | e = EDGE_SUCC (bb, ix); | |
3537 | if (BB_VISITED_P (e->dest)) | |
3538 | bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest)); | |
3539 | } | |
3540 | ||
3541 | BB_VISITED_P (bb) = true; | |
3542 | } | |
3543 | ||
3544 | /* Compute the AVAIL sets for every basic block in BLOCKS. | |
3545 | ||
3546 | We compute {STORE,READ}_AVAIL_{OUT,IN} as follows: | |
3547 | ||
3548 | AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb]) | |
3549 | AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors]) | |
3550 | ||
3551 | This is basically what we do in lcm's compute_available(), but here | |
3552 | we calculate two sets of sets (one for STOREs and one for READs), | |
3553 | and we work on a region instead of the entire CFG. | |
3554 | ||
3555 | REGION is the TM region. | |
3556 | BLOCKS are the basic blocks in the region. */ | |
3557 | ||
3558 | static void | |
3559 | tm_memopt_compute_available (struct tm_region *region, | |
9771b263 | 3560 | vec<basic_block> blocks) |
0a35513e AH |
3561 | { |
3562 | edge e; | |
3563 | basic_block *worklist, *qin, *qout, *qend, bb; | |
3564 | unsigned int qlen, i; | |
3565 | edge_iterator ei; | |
3566 | bool changed; | |
3567 | ||
3568 | /* Allocate a worklist array/queue. Entries are only added to the | |
3569 | list if they were not already on the list. So the size is | |
3570 | bounded by the number of basic blocks in the region. */ | |
9771b263 | 3571 | qlen = blocks.length () - 1; |
0a35513e AH |
3572 | qin = qout = worklist = |
3573 | XNEWVEC (basic_block, qlen); | |
3574 | ||
3575 | /* Put every block in the region on the worklist. */ | |
9771b263 | 3576 | for (i = 0; blocks.iterate (i, &bb); ++i) |
0a35513e AH |
3577 | { |
3578 | /* Seed AVAIL_OUT with the LOCAL set. */ | |
3579 | bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb)); | |
3580 | bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb)); | |
3581 | ||
3582 | AVAIL_IN_WORKLIST_P (bb) = true; | |
3583 | /* No need to insert the entry block, since it has an AVIN of | |
3584 | null, and an AVOUT that has already been seeded in. */ | |
3585 | if (bb != region->entry_block) | |
3586 | *qin++ = bb; | |
3587 | } | |
3588 | ||
3589 | /* The entry block has been initialized with the local sets. */ | |
3590 | BB_VISITED_P (region->entry_block) = true; | |
3591 | ||
3592 | qin = worklist; | |
3593 | qend = &worklist[qlen]; | |
3594 | ||
3595 | /* Iterate until the worklist is empty. */ | |
3596 | while (qlen) | |
3597 | { | |
3598 | /* Take the first entry off the worklist. */ | |
3599 | bb = *qout++; | |
3600 | qlen--; | |
3601 | ||
3602 | if (qout >= qend) | |
3603 | qout = worklist; | |
3604 | ||
3605 | /* This block can be added to the worklist again if necessary. */ | |
3606 | AVAIL_IN_WORKLIST_P (bb) = false; | |
3607 | tm_memopt_compute_avin (bb); | |
3608 | ||
3609 | /* Note: We do not add the LOCAL sets here because we already | |
3610 | seeded the AVAIL_OUT sets with them. */ | |
3611 | changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb)); | |
3612 | changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb)); | |
3613 | if (changed | |
3614 | && (region->exit_blocks == NULL | |
3615 | || !bitmap_bit_p (region->exit_blocks, bb->index))) | |
3616 | /* If the out state of this block changed, then we need to add | |
3617 | its successors to the worklist if they are not already in. */ | |
3618 | FOR_EACH_EDGE (e, ei, bb->succs) | |
fefa31b5 DM |
3619 | if (!AVAIL_IN_WORKLIST_P (e->dest) |
3620 | && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) | |
0a35513e AH |
3621 | { |
3622 | *qin++ = e->dest; | |
3623 | AVAIL_IN_WORKLIST_P (e->dest) = true; | |
3624 | qlen++; | |
3625 | ||
3626 | if (qin >= qend) | |
3627 | qin = worklist; | |
3628 | } | |
3629 | } | |
3630 | ||
3631 | free (worklist); | |
3632 | ||
3633 | if (dump_file) | |
3634 | dump_tm_memopt_sets (blocks); | |
3635 | } | |
3636 | ||
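/* The equations above can be exercised outside of GCC.  A minimal
   sketch in plain C++ (fixed universe of 32 value ids, blocks
   addressed by index), not the pass itself; the function above
   additionally restricts the walk to the region's blocks and drives
   it with a ring-buffer worklist instead of a naive fixed point:

     #include <bitset>
     #include <vector>

     typedef std::bitset<32> valset;

     // AVAIL_IN[b]  = intersect (AVAIL_OUT[predecessors of b])
     // AVAIL_OUT[b] = union (AVAIL_IN[b], LOCAL[b])
     static void
     solve_avail (const std::vector<std::vector<int> > &preds,
                  const std::vector<valset> &local,
                  std::vector<valset> &avail_out)
     {
       avail_out = local;               // seed AVAIL_OUT with LOCAL
       for (bool changed = true; changed; )
         {
           changed = false;
           for (size_t b = 0; b < preds.size (); ++b)
             {
               valset in;               // stays empty for the entry block
               for (size_t i = 0; i < preds[b].size (); ++i)
                 in = i ? in & avail_out[preds[b][i]]
                        : avail_out[preds[b][i]];
               valset out = in | local[b];
               if (out != avail_out[b])
                 {
                   avail_out[b] = out;  // grew: rerun to a fixed point
                   changed = true;
                 }
             }
         }
     }
  */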
3637 | /* Compute ANTIC sets for every basic block in BLOCKS. | |
3638 | ||
3639 | We compute STORE_ANTIC_OUT as follows: | |
3640 | ||
3641 | STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb]) | |
3642 | STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors]) | |
3643 | ||
3644 | REGION is the TM region. | |
3645 | BLOCKS are the basic blocks in the region. */ | |
3646 | ||
3647 | static void | |
3648 | tm_memopt_compute_antic (struct tm_region *region, | |
9771b263 | 3649 | vec<basic_block> blocks) |
0a35513e AH |
3650 | { |
3651 | edge e; | |
3652 | basic_block *worklist, *qin, *qout, *qend, bb; | |
3653 | unsigned int qlen; | |
3654 | int i; | |
3655 | edge_iterator ei; | |
3656 | ||
3657 | /* Allocate a worklist array/queue. Entries are only added to the | |
3658 | list if they were not already on the list. So the size is | |
3659 | bounded by the number of basic blocks in the region. */ | |
9771b263 | 3660 | qin = qout = worklist = XNEWVEC (basic_block, blocks.length ()); |
0a35513e | 3661 | |
9771b263 | 3662 | for (qlen = 0, i = blocks.length () - 1; i >= 0; --i) |
0a35513e | 3663 | { |
9771b263 | 3664 | bb = blocks[i]; |
0a35513e AH |
3665 | |
3666 | /* Seed ANTIC_OUT with the LOCAL set. */ | |
3667 | bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb)); | |
3668 | ||
3669 | /* Put every block in the region on the worklist. */ | |
3670 | AVAIL_IN_WORKLIST_P (bb) = true; | |
3671 | /* No need to insert exit blocks, since their ANTIC_IN is NULL, | |
3672 | and their ANTIC_OUT has already been seeded in. */ | |
3673 | if (region->exit_blocks | |
3674 | && !bitmap_bit_p (region->exit_blocks, bb->index)) | |
3675 | { | |
3676 | qlen++; | |
3677 | *qin++ = bb; | |
3678 | } | |
3679 | } | |
3680 | ||
3681 | /* The exit blocks have been initialized with the local sets. */ | |
3682 | if (region->exit_blocks) | |
3683 | { | |
3684 | unsigned int i; | |
3685 | bitmap_iterator bi; | |
3686 | EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi) | |
06e28de2 | 3687 | BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true; |
0a35513e AH |
3688 | } |
3689 | ||
3690 | qin = worklist; | |
3691 | qend = &worklist[qlen]; | |
3692 | ||
3693 | /* Iterate until the worklist is empty. */ | |
3694 | while (qlen) | |
3695 | { | |
3696 | /* Take the first entry off the worklist. */ | |
3697 | bb = *qout++; | |
3698 | qlen--; | |
3699 | ||
3700 | if (qout >= qend) | |
3701 | qout = worklist; | |
3702 | ||
3703 | /* This block can be added to the worklist again if necessary. */ | |
3704 | AVAIL_IN_WORKLIST_P (bb) = false; | |
3705 | tm_memopt_compute_antin (bb); | |
3706 | ||
3707 | /* Note: We do not add the LOCAL sets here because we already | |
3708 | seeded the ANTIC_OUT sets with them. */ | |
3709 | if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb)) | |
3710 | && bb != region->entry_block) | |
3711 | /* If the out state of this block changed, then we need to add | |
3712 | its predecessors to the worklist if they are not already in. */ | |
3713 | FOR_EACH_EDGE (e, ei, bb->preds) | |
3714 | if (!AVAIL_IN_WORKLIST_P (e->src)) | |
3715 | { | |
3716 | *qin++ = e->src; | |
3717 | AVAIL_IN_WORKLIST_P (e->src) = true; | |
3718 | qlen++; | |
3719 | ||
3720 | if (qin >= qend) | |
3721 | qin = worklist; | |
3722 | } | |
3723 | } | |
3724 | ||
3725 | free (worklist); | |
3726 | ||
3727 | if (dump_file) | |
3728 | dump_tm_memopt_sets (blocks); | |
3729 | } | |
3730 | ||
3731 | /* Offsets of load variants from TM_LOAD. For example, | |
3732 | BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*. | |
3733 | See gtm-builtins.def. */ | |
3734 | #define TRANSFORM_RAR 1 | |
3735 | #define TRANSFORM_RAW 2 | |
3736 | #define TRANSFORM_RFW 3 | |
3737 | /* Offsets of store variants from TM_STORE. */ | |
3738 | #define TRANSFORM_WAR 1 | |
3739 | #define TRANSFORM_WAW 2 | |
3740 | ||
3741 | /* Inform about a load/store optimization. */ | |
3742 | ||
3743 | static void | |
3744 | dump_tm_memopt_transform (gimple stmt) | |
3745 | { | |
3746 | if (dump_file) | |
3747 | { | |
3748 | fprintf (dump_file, "TM memopt: transforming: "); | |
3749 | print_gimple_stmt (dump_file, stmt, 0, 0); | |
3750 | fprintf (dump_file, "\n"); | |
3751 | } | |
3752 | } | |
3753 | ||
3754 | /* Perform a read/write optimization. Replaces the TM builtin in STMT | |
3755 | by a builtin that is OFFSET entries down in the builtins table in | |
3756 | gtm-builtins.def. */ | |
3757 | ||
3758 | static void | |
3759 | tm_memopt_transform_stmt (unsigned int offset, | |
3760 | gimple stmt, | |
3761 | gimple_stmt_iterator *gsi) | |
3762 | { | |
3763 | tree fn = gimple_call_fn (stmt); | |
3764 | gcc_assert (TREE_CODE (fn) == ADDR_EXPR); | |
3765 | TREE_OPERAND (fn, 0) | |
3766 | = builtin_decl_explicit ((enum built_in_function) | |
3767 | (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0)) | |
3768 | + offset)); | |
3769 | gimple_call_set_fn (stmt, fn); | |
3770 | gsi_replace (gsi, stmt, true); | |
3771 | dump_tm_memopt_transform (stmt); | |
3772 | } | |
3773 | ||
3774 | /* Perform the actual TM memory optimization transformations in the | |
3775 | basic blocks in BLOCKS. */ | |
3776 | ||
3777 | static void | |
9771b263 | 3778 | tm_memopt_transform_blocks (vec<basic_block> blocks) |
0a35513e AH |
3779 | { |
3780 | size_t i; | |
3781 | basic_block bb; | |
3782 | gimple_stmt_iterator gsi; | |
3783 | ||
9771b263 | 3784 | for (i = 0; blocks.iterate (i, &bb); ++i) |
0a35513e AH |
3785 | { |
3786 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
3787 | { | |
3788 | gimple stmt = gsi_stmt (gsi); | |
3789 | bitmap read_avail = READ_AVAIL_IN (bb); | |
3790 | bitmap store_avail = STORE_AVAIL_IN (bb); | |
3791 | bitmap store_antic = STORE_ANTIC_OUT (bb); | |
3792 | unsigned int loc; | |
3793 | ||
3794 | if (is_tm_simple_load (stmt)) | |
3795 | { | |
3796 | loc = tm_memopt_value_number (stmt, NO_INSERT); | |
3797 | if (store_avail && bitmap_bit_p (store_avail, loc)) | |
3798 | tm_memopt_transform_stmt (TRANSFORM_RAW, stmt, &gsi); | |
3799 | else if (store_antic && bitmap_bit_p (store_antic, loc)) | |
3800 | { | |
3801 | tm_memopt_transform_stmt (TRANSFORM_RFW, stmt, &gsi); | |
3802 | bitmap_set_bit (store_avail, loc); | |
3803 | } | |
3804 | else if (read_avail && bitmap_bit_p (read_avail, loc)) | |
3805 | tm_memopt_transform_stmt (TRANSFORM_RAR, stmt, &gsi); | |
3806 | else | |
3807 | bitmap_set_bit (read_avail, loc); | |
3808 | } | |
3809 | else if (is_tm_simple_store (stmt)) | |
3810 | { | |
3811 | loc = tm_memopt_value_number (stmt, NO_INSERT); | |
3812 | if (store_avail && bitmap_bit_p (store_avail, loc)) | |
3813 | tm_memopt_transform_stmt (TRANSFORM_WAW, stmt, &gsi); | |
3814 | else | |
3815 | { | |
3816 | if (read_avail && bitmap_bit_p (read_avail, loc)) | |
3817 | tm_memopt_transform_stmt (TRANSFORM_WAR, stmt, &gsi); | |
3818 | bitmap_set_bit (store_avail, loc); | |
3819 | } | |
3820 | } | |
3821 | } | |
3822 | } | |
3823 | } | |
3824 | ||
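/* A worked example of the decision tree above, with schematic GIMPLE
   and 4-byte accesses so the *U4 names of the TM runtime ABI apply.
   Within a single block of one transaction:

     _ITM_WU4 (&x, 1);       // store: x becomes store-available
     t_1 = _ITM_RU4 (&x);    // load, store of x available
                             //   -> _ITM_RaWU4 (read-after-write)
     t_2 = _ITM_RU4 (&y);    // load, store of y anticipated below
                             //   -> _ITM_RfWU4 (read-for-write); y is
                             //      then treated as store-available
     _ITM_WU4 (&y, t_2);     // store, store available -> _ITM_WaWU4
     t_3 = _ITM_RU4 (&z);    // first access to z: left alone,
                             //      z becomes read-available
     t_4 = _ITM_RU4 (&z);    // load, read available -> _ITM_RaRU4
     _ITM_WU4 (&z, t_4);     // store, only a read available
                             //   -> _ITM_WaRU4 (write-after-read)

   Each hint lets the runtime skip bookkeeping it has already done for
   that location; a read-after-write, for example, can be satisfied
   from the transaction's own write log.  */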
3825 | /* Return a new set of bitmaps for a BB. */ | |
3826 | ||
3827 | static struct tm_memopt_bitmaps * | |
3828 | tm_memopt_init_sets (void) | |
3829 | { | |
3830 | struct tm_memopt_bitmaps *b | |
3831 | = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps); | |
3832 | b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3833 | b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3834 | b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3835 | b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3837 | b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3838 | b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3839 | b->read_local = BITMAP_ALLOC (&tm_memopt_obstack); | |
3840 | b->store_local = BITMAP_ALLOC (&tm_memopt_obstack); | |
3841 | return b; | |
3842 | } | |
3843 | ||
3844 | /* Free sets computed for each BB. */ | |
3845 | ||
3846 | static void | |
9771b263 | 3847 | tm_memopt_free_sets (vec<basic_block> blocks) |
0a35513e AH |
3848 | { |
3849 | size_t i; | |
3850 | basic_block bb; | |
3851 | ||
9771b263 | 3852 | for (i = 0; blocks.iterate (i, &bb); ++i) |
0a35513e AH |
3853 | bb->aux = NULL; |
3854 | } | |
3855 | ||
3856 | /* Clear the visited bit for every basic block in BLOCKS. */ | |
3857 | ||
3858 | static void | |
9771b263 | 3859 | tm_memopt_clear_visited (vec<basic_block> blocks) |
0a35513e AH |
3860 | { |
3861 | size_t i; | |
3862 | basic_block bb; | |
3863 | ||
9771b263 | 3864 | for (i = 0; blocks.iterate (i, &bb); ++i) |
0a35513e AH |
3865 | BB_VISITED_P (bb) = false; |
3866 | } | |
3867 | ||
3868 | /* Replace TM load/stores with hints for the runtime. We handle | |
3869 | things like read-after-write, write-after-read, read-after-read, | |
3870 | read-for-write, etc. */ | |
3871 | ||
3872 | static unsigned int | |
3873 | execute_tm_memopt (void) | |
3874 | { | |
3875 | struct tm_region *region; | |
9771b263 | 3876 | vec<basic_block> bbs; |
0a35513e AH |
3877 | |
3878 | tm_memopt_value_id = 0; | |
c203e8a7 | 3879 | tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10); |
0a35513e AH |
3880 | |
3881 | for (region = all_tm_regions; region; region = region->next) | |
3882 | { | |
3883 | /* All the TM stores/loads in the current region. */ | |
3884 | size_t i; | |
3885 | basic_block bb; | |
3886 | ||
3887 | bitmap_obstack_initialize (&tm_memopt_obstack); | |
3888 | ||
3889 | /* Save all BBs for the current region. */ | |
3890 | bbs = get_tm_region_blocks (region->entry_block, | |
3891 | region->exit_blocks, | |
3892 | region->irr_blocks, | |
3893 | NULL, | |
3894 | false); | |
3895 | ||
3896 | /* Collect all the memory operations. */ | |
9771b263 | 3897 | for (i = 0; bbs.iterate (i, &bb); ++i) |
0a35513e AH |
3898 | { |
3899 | bb->aux = tm_memopt_init_sets (); | |
3900 | tm_memopt_accumulate_memops (bb); | |
3901 | } | |
3902 | ||
3903 | /* Solve data flow equations and transform each block accordingly. */ | |
3904 | tm_memopt_clear_visited (bbs); | |
3905 | tm_memopt_compute_available (region, bbs); | |
3906 | tm_memopt_clear_visited (bbs); | |
3907 | tm_memopt_compute_antic (region, bbs); | |
3908 | tm_memopt_transform_blocks (bbs); | |
3909 | ||
3910 | tm_memopt_free_sets (bbs); | |
9771b263 | 3911 | bbs.release (); |
0a35513e | 3912 | bitmap_obstack_release (&tm_memopt_obstack); |
c203e8a7 | 3913 | tm_memopt_value_numbers->empty (); |
0a35513e AH |
3914 | } |
3915 | ||
c203e8a7 TS |
3916 | delete tm_memopt_value_numbers; |
3917 | tm_memopt_value_numbers = NULL; | |
0a35513e AH |
3918 | return 0; |
3919 | } | |
3920 | ||
27a4cd48 DM |
3921 | namespace { |
3922 | ||
3923 | const pass_data pass_data_tm_memopt = | |
3924 | { | |
3925 | GIMPLE_PASS, /* type */ | |
3926 | "tmmemopt", /* name */ | |
3927 | OPTGROUP_NONE, /* optinfo_flags */ | |
27a4cd48 DM |
3928 | true, /* has_execute */ |
3929 | TV_TRANS_MEM, /* tv_id */ | |
3930 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
3931 | 0, /* properties_provided */ | |
3932 | 0, /* properties_destroyed */ | |
3933 | 0, /* todo_flags_start */ | |
3934 | 0, /* todo_flags_finish */ | |
0a35513e AH |
3935 | }; |
3936 | ||
27a4cd48 DM |
3937 | class pass_tm_memopt : public gimple_opt_pass |
3938 | { | |
3939 | public: | |
c3284718 RS |
3940 | pass_tm_memopt (gcc::context *ctxt) |
3941 | : gimple_opt_pass (pass_data_tm_memopt, ctxt) | |
27a4cd48 DM |
3942 | {} |
3943 | ||
3944 | /* opt_pass methods: */ | |
1a3d085c | 3945 | virtual bool gate (function *) { return flag_tm && optimize > 0; } |
be55bfe6 | 3946 | virtual unsigned int execute (function *) { return execute_tm_memopt (); } |
27a4cd48 DM |
3947 | |
3948 | }; // class pass_tm_memopt | |
3949 | ||
3950 | } // anon namespace | |
3951 | ||
3952 | gimple_opt_pass * | |
3953 | make_pass_tm_memopt (gcc::context *ctxt) | |
3954 | { | |
3955 | return new pass_tm_memopt (ctxt); | |
3956 | } | |
3957 | ||
0a35513e AH |
3958 | \f |
3959 | /* Interprocedural analysis for the creation of transactional clones. | |
3960 | The aim of this pass is to find which functions are referenced in | |
3961 | a non-irrevocable transaction context, and for those over which | |
3962 | we have control (or user directive), create a version of the | |
3963 | function which uses only the transactional interface to reference | |
3964 | protected memories. This analysis proceeds in several steps: | |
3965 | ||
3966 | (1) Collect the set of all possible transactional clones: | |
3967 | ||
3968 | (a) For all local public functions marked tm_callable, push | |
3969 | it onto the tm_callee queue. | |
3970 | ||
3971 | (b) For all local functions, scan for calls in transaction blocks. | |
3972 | Push the caller and callee onto the tm_caller and tm_callee | |
3973 | queues. Count the number of callers for each callee. | |
3974 | ||
3975 | (c) For each local function on the callee list, assume we will | |
3976 | create a transactional clone. Push *all* calls onto the | |
3977 | callee queues; count the number of clone callers separately | |
3978 | from the number of original callers. | |
3979 | ||
3980 | (2) Propagate irrevocable status up the dominator tree: | |
3981 | ||
3982 | (a) Any external function on the callee list that is not marked | |
3983 | tm_callable is irrevocable. Push all callers of such onto | |
3984 | a worklist. | |
3985 | ||
3986 | (b) For each function on the worklist, mark each block that | |
3987 | contains an irrevocable call. Use the AND operator to | |
3988 | propagate that mark up the dominator tree. | |
3989 | ||
3990 | (c) If we reach the entry block for a possible transactional | |
3991 | clone, then the transactional clone is irrevocable, and | |
3992 | we should not create the clone after all. Push all | |
3993 | callers onto the worklist. | |
3994 | ||
3995 | (d) Place tm_irrevocable calls at the beginning of the relevant | |
3996 | blocks. Special case here is the entry block for the entire | |
3997 | transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for | |
3998 | the library to begin the region in serial mode. Decrement | |
3999 | the call count for all callees in the irrevocable region. | |
4000 | ||
4001 | (3) Create the transactional clones: | |
4002 | ||
4003 | Any tm_callee that still has a non-zero call count is cloned. | |
4004 | */ | |
4005 | ||
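/* A small scenario for the steps above (a sketch; the functions are
   made up):

     __attribute__((transaction_callable)) void f (void) { g (); }
     static void g (void) { asm (""); }

   Step 1 queues f, which is tm_callable, and then g as a callee
   reached from f's would-be clone.  Step 2 finds the asm, so g's
   clone would be irrevocable from its entry block; g's callers are
   re-scanned, the block in f's clone containing the call goes
   irrevocable, and g's clone-caller count drops back to zero.  Step 3
   therefore emits a transactional clone only for f, and inside it the
   call to g executes under the serial-irrevocable mode requested at
   the start of the block.  */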
4006 | /* This structure is stored in the AUX field of each cgraph_node. */ | |
4007 | struct tm_ipa_cg_data | |
4008 | { | |
4009 | /* The clone of the function that got created. */ | |
4010 | struct cgraph_node *clone; | |
4011 | ||
4012 | /* The tm regions in the normal function. */ | |
4013 | struct tm_region *all_tm_regions; | |
4014 | ||
4015 | /* The blocks of the normal/clone functions that contain irrevocable | |
4016 | calls, or blocks that are post-dominated by irrevocable calls. */ | |
4017 | bitmap irrevocable_blocks_normal; | |
4018 | bitmap irrevocable_blocks_clone; | |
4019 | ||
4020 | /* The blocks of the normal function that are involved in transactions. */ | |
4021 | bitmap transaction_blocks_normal; | |
4022 | ||
4023 | /* The number of callers to the transactional clone of this function | |
4024 | from normal and transactional clones respectively. */ | |
4025 | unsigned tm_callers_normal; | |
4026 | unsigned tm_callers_clone; | |
4027 | ||
4028 | /* True if all calls to this function's transactional clone | |
4029 | are irrevocable. Also automatically true if the function | |
4030 | has no transactional clone. */ | |
4031 | bool is_irrevocable; | |
4032 | ||
4033 | /* Flags indicating the presence of this function in various queues. */ | |
4034 | bool in_callee_queue; | |
4035 | bool in_worklist; | |
4036 | ||
4037 | /* Flags indicating the kind of scan desired while in the worklist. */ | |
4038 | bool want_irr_scan_normal; | |
4039 | }; | |
4040 | ||
9771b263 | 4041 | typedef vec<cgraph_node_ptr> cgraph_node_queue; |
0a35513e AH |
4042 | |
4043 | /* Return the ipa data associated with NODE, allocating zeroed memory | |
594ec92f AH |
4044 | if necessary. TRAVERSE_ALIASES is true if we must traverse aliases |
4045 | and set *NODE accordingly. */ | |
0a35513e AH |
4046 | |
4047 | static struct tm_ipa_cg_data * | |
594ec92f | 4048 | get_cg_data (struct cgraph_node **node, bool traverse_aliases) |
0a35513e | 4049 | { |
594ec92f AH |
4050 | struct tm_ipa_cg_data *d; |
4051 | ||
67348ccc | 4052 | if (traverse_aliases && (*node)->alias) |
40a7fe1e | 4053 | *node = cgraph_alias_target (*node); |
594ec92f | 4054 | |
67348ccc | 4055 | d = (struct tm_ipa_cg_data *) (*node)->aux; |
0a35513e AH |
4056 | |
4057 | if (d == NULL) | |
4058 | { | |
4059 | d = (struct tm_ipa_cg_data *) | |
4060 | obstack_alloc (&tm_obstack.obstack, sizeof (*d)); | |
67348ccc | 4061 | (*node)->aux = (void *) d; |
0a35513e AH |
4062 | memset (d, 0, sizeof (*d)); |
4063 | } | |
4064 | ||
4065 | return d; | |
4066 | } | |
4067 | ||
4068 | /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that | |
4069 | it is already present. */ | |
4070 | ||
4071 | static void | |
4072 | maybe_push_queue (struct cgraph_node *node, | |
4073 | cgraph_node_queue *queue_p, bool *in_queue_p) | |
4074 | { | |
4075 | if (!*in_queue_p) | |
4076 | { | |
4077 | *in_queue_p = true; | |
9771b263 | 4078 | queue_p->safe_push (node); |
0a35513e AH |
4079 | } |
4080 | } | |
4081 | ||
398b1daa AH |
4082 | /* Duplicate the basic blocks in QUEUE for use in the uninstrumented |
4083 | code path. QUEUE holds the basic blocks inside the transaction | |
4084 | represented in REGION. | |
4085 | ||
4086 | Later in split_code_paths() we will add the conditional to choose | |
4087 | between the two alternatives. */ | |
4088 | ||
4089 | static void | |
4090 | ipa_uninstrument_transaction (struct tm_region *region, | |
9771b263 | 4091 | vec<basic_block> queue) |
398b1daa AH |
4092 | { |
4093 | gimple transaction = region->transaction_stmt; | |
4094 | basic_block transaction_bb = gimple_bb (transaction); | |
9771b263 | 4095 | int n = queue.length (); |
398b1daa AH |
4096 | basic_block *new_bbs = XNEWVEC (basic_block, n); |
4097 | ||
f14540b6 SE |
4098 | copy_bbs (queue.address (), n, new_bbs, NULL, 0, NULL, NULL, transaction_bb, |
4099 | true); | |
398b1daa AH |
4100 | edge e = make_edge (transaction_bb, new_bbs[0], EDGE_TM_UNINSTRUMENTED); |
4101 | add_phi_args_after_copy (new_bbs, n, e); | |
4102 | ||
4103 | // Now we will have a GIMPLE_TRANSACTION with 3 possible edges out of it. | |
4104 | // a) EDGE_FALLTHRU into the transaction | |
4105 | // b) EDGE_TM_ABORT out of the transaction | |
4106 | // c) EDGE_TM_UNINSTRUMENTED into the uninstrumented blocks. | |
4107 | ||
4108 | free (new_bbs); | |
4109 | } | |
4110 | ||
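/* Schematically, for a transaction whose body is the single block BB6,
   the copy made above leaves:

              GIMPLE_TRANSACTION (transaction_bb)
               /                        \
        EDGE_FALLTHRU           EDGE_TM_UNINSTRUMENTED
              |                          |
      BB6 (to be instrumented)    BB6' (copy, left uninstrumented)

   The original blocks are the ones the tmmark pass later rewrites
   into _ITM_* calls; the copies keep the plain accesses, for use when
   no instrumentation is needed (e.g. serial-irrevocable execution).  */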
0a35513e AH |
4111 | /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone. |
4112 | Queue all callees within block BB. */ | |
4113 | ||
4114 | static void | |
4115 | ipa_tm_scan_calls_block (cgraph_node_queue *callees_p, | |
4116 | basic_block bb, bool for_clone) | |
4117 | { | |
4118 | gimple_stmt_iterator gsi; | |
4119 | ||
4120 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4121 | { | |
4122 | gimple stmt = gsi_stmt (gsi); | |
4123 | if (is_gimple_call (stmt) && !is_tm_pure_call (stmt)) | |
4124 | { | |
4125 | tree fndecl = gimple_call_fndecl (stmt); | |
4126 | if (fndecl) | |
4127 | { | |
4128 | struct tm_ipa_cg_data *d; | |
4129 | unsigned *pcallers; | |
4130 | struct cgraph_node *node; | |
4131 | ||
4132 | if (is_tm_ending_fndecl (fndecl)) | |
4133 | continue; | |
4134 | if (find_tm_replacement_function (fndecl)) | |
4135 | continue; | |
4136 | ||
4137 | node = cgraph_get_node (fndecl); | |
4138 | gcc_assert (node != NULL); | |
594ec92f | 4139 | d = get_cg_data (&node, true); |
0a35513e AH |
4140 | |
4141 | pcallers = (for_clone ? &d->tm_callers_clone | |
4142 | : &d->tm_callers_normal); | |
4143 | *pcallers += 1; | |
4144 | ||
4145 | maybe_push_queue (node, callees_p, &d->in_callee_queue); | |
4146 | } | |
4147 | } | |
4148 | } | |
4149 | } | |
4150 | ||
4151 | /* Scan all calls in NODE that are within a transaction region, | |
4152 | and push the resulting nodes into the callee queue. */ | |
4153 | ||
4154 | static void | |
4155 | ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d, | |
4156 | cgraph_node_queue *callees_p) | |
4157 | { | |
4158 | struct tm_region *r; | |
4159 | ||
4160 | d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack); | |
4161 | d->all_tm_regions = all_tm_regions; | |
4162 | ||
4163 | for (r = all_tm_regions; r; r = r->next) | |
4164 | { | |
9771b263 | 4165 | vec<basic_block> bbs; |
0a35513e AH |
4166 | basic_block bb; |
4167 | unsigned i; | |
4168 | ||
4169 | bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL, | |
4170 | d->transaction_blocks_normal, false); | |
4171 | ||
398b1daa AH |
4172 | // Generate the uninstrumented code path for this transaction. |
4173 | ipa_uninstrument_transaction (r, bbs); | |
4174 | ||
9771b263 | 4175 | FOR_EACH_VEC_ELT (bbs, i, bb) |
0a35513e AH |
4176 | ipa_tm_scan_calls_block (callees_p, bb, false); |
4177 | ||
9771b263 | 4178 | bbs.release (); |
0a35513e | 4179 | } |
398b1daa AH |
4180 | |
4181 | // ??? copy_bbs should maintain cgraph edges for the blocks as it is | |
4182 | // copying them, rather than forcing us to do this externally. | |
4183 | rebuild_cgraph_edges (); | |
4184 | ||
4185 | // ??? In ipa_uninstrument_transaction we don't try to update dominators | |
4186 | // because copy_bbs doesn't return a VEC like iterate_fix_dominators expects. | |
4187 | // Instead, just release dominators here so update_ssa recomputes them. | |
4188 | free_dominance_info (CDI_DOMINATORS); | |
4189 | ||
4190 | // When building the uninstrumented code path, copy_bbs will have invoked | |
4191 | // create_new_def_for starting an "ssa update context". There is only one | |
4192 | // instance of this context, so resolve ssa updates before moving on to | |
4193 | // the next function. | |
4194 | update_ssa (TODO_update_ssa); | |
0a35513e AH |
4195 | } |
4196 | ||
4197 | /* Scan all calls in NODE as if this is the transactional clone, | |
4198 | and push the destinations into the callee queue. */ | |
4199 | ||
4200 | static void | |
4201 | ipa_tm_scan_calls_clone (struct cgraph_node *node, | |
4202 | cgraph_node_queue *callees_p) | |
4203 | { | |
67348ccc | 4204 | struct function *fn = DECL_STRUCT_FUNCTION (node->decl); |
0a35513e AH |
4205 | basic_block bb; |
4206 | ||
4207 | FOR_EACH_BB_FN (bb, fn) | |
4208 | ipa_tm_scan_calls_block (callees_p, bb, true); | |
4209 | } | |
4210 | ||
4211 | /* The function NODE has been detected to be irrevocable. Push all | |
4212 | of its callers onto WORKLIST for the purpose of re-scanning them. */ | |
4213 | ||
4214 | static void | |
4215 | ipa_tm_note_irrevocable (struct cgraph_node *node, | |
4216 | cgraph_node_queue *worklist_p) | |
4217 | { | |
594ec92f | 4218 | struct tm_ipa_cg_data *d = get_cg_data (&node, true); |
0a35513e AH |
4219 | struct cgraph_edge *e; |
4220 | ||
4221 | d->is_irrevocable = true; | |
4222 | ||
4223 | for (e = node->callers; e ; e = e->next_caller) | |
4224 | { | |
4225 | basic_block bb; | |
594ec92f | 4226 | struct cgraph_node *caller; |
0a35513e AH |
4227 | |
4228 | /* Don't examine recursive calls. */ | |
4229 | if (e->caller == node) | |
4230 | continue; | |
4231 | /* Even if we think we can go irrevocable, believe the user | |
4232 | above all. */ | |
67348ccc | 4233 | if (is_tm_safe_or_pure (e->caller->decl)) |
0a35513e AH |
4234 | continue; |
4235 | ||
594ec92f AH |
4236 | caller = e->caller; |
4237 | d = get_cg_data (&caller, true); | |
0a35513e AH |
4238 | |
4239 | /* Check if the callee is in a transactional region. If so, | |
4240 | schedule the function for normal re-scan as well. */ | |
4241 | bb = gimple_bb (e->call_stmt); | |
4242 | gcc_assert (bb != NULL); | |
4243 | if (d->transaction_blocks_normal | |
4244 | && bitmap_bit_p (d->transaction_blocks_normal, bb->index)) | |
4245 | d->want_irr_scan_normal = true; | |
4246 | ||
594ec92f | 4247 | maybe_push_queue (caller, worklist_p, &d->in_worklist); |
0a35513e AH |
4248 | } |
4249 | } | |
4250 | ||
4251 | /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement | |
4252 | within the block is irrevocable. */ | |
4253 | ||
4254 | static bool | |
4255 | ipa_tm_scan_irr_block (basic_block bb) | |
4256 | { | |
4257 | gimple_stmt_iterator gsi; | |
4258 | tree fn; | |
4259 | ||
4260 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4261 | { | |
4262 | gimple stmt = gsi_stmt (gsi); | |
4263 | switch (gimple_code (stmt)) | |
4264 | { | |
a3770d3b AH |
4265 | case GIMPLE_ASSIGN: |
4266 | if (gimple_assign_single_p (stmt)) | |
4267 | { | |
4268 | tree lhs = gimple_assign_lhs (stmt); | |
4269 | tree rhs = gimple_assign_rhs1 (stmt); | |
4270 | if (volatile_var_p (lhs) || volatile_var_p (rhs)) | |
4271 | return true; | |
4272 | } | |
4273 | break; | |
4274 | ||
0a35513e | 4275 | case GIMPLE_CALL: |
a3770d3b AH |
4276 | { |
4277 | tree lhs = gimple_call_lhs (stmt); | |
4278 | if (lhs && volatile_var_p (lhs)) | |
4279 | return true; | |
0a35513e | 4280 | |
a3770d3b AH |
4281 | if (is_tm_pure_call (stmt)) |
4282 | break; | |
0a35513e | 4283 | |
a3770d3b | 4284 | fn = gimple_call_fn (stmt); |
0a35513e | 4285 | |
a3770d3b AH |
4286 | /* Functions with the attribute are by definition irrevocable. */ |
4287 | if (is_tm_irrevocable (fn)) | |
4288 | return true; | |
0a35513e | 4289 | |
a3770d3b AH |
4290 | /* For direct function calls, go ahead and check for replacement |
4291 | functions, or transitive irrevocable functions. For indirect | |
4292 | functions, we'll ask the runtime. */ | |
4293 | if (TREE_CODE (fn) == ADDR_EXPR) | |
4294 | { | |
4295 | struct tm_ipa_cg_data *d; | |
4296 | struct cgraph_node *node; | |
0a35513e | 4297 | |
a3770d3b AH |
4298 | fn = TREE_OPERAND (fn, 0); |
4299 | if (is_tm_ending_fndecl (fn)) | |
4300 | break; | |
4301 | if (find_tm_replacement_function (fn)) | |
4302 | break; | |
80fd8eba | 4303 | |
c3284718 | 4304 | node = cgraph_get_node (fn); |
a3770d3b AH |
4305 | d = get_cg_data (&node, true); |
4306 | ||
4307 | /* Return true if irrevocable, but above all, believe | |
4308 | the user. */ | |
4309 | if (d->is_irrevocable | |
4310 | && !is_tm_safe_or_pure (fn)) | |
4311 | return true; | |
4312 | } | |
4313 | break; | |
4314 | } | |
0a35513e AH |
4315 | |
4316 | case GIMPLE_ASM: | |
4317 | /* ??? The Approved Method of indicating that an inline | |
4318 | assembly statement is not relevant to the transaction | |
4319 | is to wrap it in a __tm_waiver block. This is not | |
4320 | yet implemented, so we can't check for it. */ | |
a4d031c7 AH |
4321 | if (is_tm_safe (current_function_decl)) |
4322 | { | |
4323 | tree t = build1 (NOP_EXPR, void_type_node, size_zero_node); | |
4324 | SET_EXPR_LOCATION (t, gimple_location (stmt)); | |
a4d031c7 AH |
4325 | error ("%Kasm not allowed in %<transaction_safe%> function", t); |
4326 | } | |
0a35513e AH |
4327 | return true; |
4328 | ||
4329 | default: | |
4330 | break; | |
4331 | } | |
4332 | } | |
4333 | ||
4334 | return false; | |
4335 | } | |
4336 | ||
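/* Examples of statements the scan below flags (schematic source, not
   GIMPLE):

     volatile int v;
     __transaction_relaxed {
       v = 1;         // volatile store: irrevocable
       asm ("");      // asm: irrevocable, and an error inside a
                      //      transaction_safe function
       unsafe ();     // callee attributed transaction_unsafe:
                      //      irrevocable by definition
       (*fp) ();      // indirect call: left for the runtime to decide
     }
  */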
4337 | /* For each of the blocks seeded within PQUEUE, walk the CFG looking | |
4338 | for new irrevocable blocks, marking them in NEW_IRR. Don't bother | |
4339 | scanning past OLD_IRR or EXIT_BLOCKS. */ | |
4340 | ||
4341 | static bool | |
9771b263 | 4342 | ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr, |
0a35513e AH |
4343 | bitmap old_irr, bitmap exit_blocks) |
4344 | { | |
4345 | bool any_new_irr = false; | |
4346 | edge e; | |
4347 | edge_iterator ei; | |
4348 | bitmap visited_blocks = BITMAP_ALLOC (NULL); | |
4349 | ||
4350 | do | |
4351 | { | |
9771b263 | 4352 | basic_block bb = pqueue->pop (); |
0a35513e AH |
4353 | |
4354 | /* Don't re-scan blocks we know already are irrevocable. */ | |
4355 | if (old_irr && bitmap_bit_p (old_irr, bb->index)) | |
4356 | continue; | |
4357 | ||
4358 | if (ipa_tm_scan_irr_block (bb)) | |
4359 | { | |
4360 | bitmap_set_bit (new_irr, bb->index); | |
4361 | any_new_irr = true; | |
4362 | } | |
4363 | else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index)) | |
4364 | { | |
4365 | FOR_EACH_EDGE (e, ei, bb->succs) | |
4366 | if (!bitmap_bit_p (visited_blocks, e->dest->index)) | |
4367 | { | |
4368 | bitmap_set_bit (visited_blocks, e->dest->index); | |
9771b263 | 4369 | pqueue->safe_push (e->dest); |
0a35513e AH |
4370 | } |
4371 | } | |
4372 | } | |
9771b263 | 4373 | while (!pqueue->is_empty ()); |
0a35513e AH |
4374 | |
4375 | BITMAP_FREE (visited_blocks); | |
4376 | ||
4377 | return any_new_irr; | |
4378 | } | |
4379 | ||
4380 | /* Propagate the irrevocable property both up and down the dominator tree. | |
4381 | ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS | |
4382 | are the exit blocks of the TM region; OLD_IRR are the results of a previous | |
4383 | scan of the dominator tree, fully propagated; NEW_IRR is the set of new blocks | |
4384 | which are gaining the irrevocable property during the current scan. */ | |
4385 | ||
4386 | static void | |
4387 | ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr, | |
4388 | bitmap old_irr, bitmap exit_blocks) | |
4389 | { | |
9771b263 | 4390 | vec<basic_block> bbs; |
0a35513e AH |
4391 | bitmap all_region_blocks; |
4392 | ||
4393 | /* If this block is in the old set, no need to rescan. */ | |
4394 | if (old_irr && bitmap_bit_p (old_irr, entry_block->index)) | |
4395 | return; | |
4396 | ||
4397 | all_region_blocks = BITMAP_ALLOC (&tm_obstack); | |
4398 | bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL, | |
4399 | all_region_blocks, false); | |
4400 | do | |
4401 | { | |
9771b263 | 4402 | basic_block bb = bbs.pop (); |
0a35513e AH |
4403 | bool this_irr = bitmap_bit_p (new_irr, bb->index); |
4404 | bool all_son_irr = false; | |
4405 | edge_iterator ei; | |
4406 | edge e; | |
4407 | ||
4408 | /* Propagate up. If all of my successors are irrevocable, so am I, | |
4409 | provided I have at least one successor. */ | |
4410 | if (!this_irr) | |
4411 | { | |
4412 | FOR_EACH_EDGE (e, ei, bb->succs) | |
4413 | { | |
4414 | if (!bitmap_bit_p (new_irr, e->dest->index)) | |
4415 | { | |
4416 | all_son_irr = false; | |
4417 | break; | |
4418 | } | |
4419 | else | |
4420 | all_son_irr = true; | |
4421 | } | |
4422 | if (all_son_irr) | |
4423 | { | |
4424 | /* Add block to new_irr if it hasn't already been processed. */ | |
4425 | if (!old_irr || !bitmap_bit_p (old_irr, bb->index)) | |
4426 | { | |
4427 | bitmap_set_bit (new_irr, bb->index); | |
4428 | this_irr = true; | |
4429 | } | |
4430 | } | |
4431 | } | |
4432 | ||
4433 | /* Propagate down to everyone we immediately dominate. */ | |
4434 | if (this_irr) | |
4435 | { | |
4436 | basic_block son; | |
4437 | for (son = first_dom_son (CDI_DOMINATORS, bb); | |
4438 | son; | |
4439 | son = next_dom_son (CDI_DOMINATORS, son)) | |
4440 | { | |
4441 | /* Make sure block is actually in a TM region, and it | |
4442 | isn't already in old_irr. */ | |
4443 | if ((!old_irr || !bitmap_bit_p (old_irr, son->index)) | |
4444 | && bitmap_bit_p (all_region_blocks, son->index)) | |
4445 | bitmap_set_bit (new_irr, son->index); | |
4446 | } | |
4447 | } | |
4448 | } | |
9771b263 | 4449 | while (!bbs.is_empty ()); |
0a35513e AH |
4450 | |
4451 | BITMAP_FREE (all_region_blocks); | |
9771b263 | 4452 | bbs.release (); |
0a35513e AH |
4453 | } |
4454 | ||
4455 | static void | |
4456 | ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone) | |
4457 | { | |
4458 | gimple_stmt_iterator gsi; | |
4459 | ||
4460 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4461 | { | |
4462 | gimple stmt = gsi_stmt (gsi); | |
4463 | if (is_gimple_call (stmt) && !is_tm_pure_call (stmt)) | |
4464 | { | |
4465 | tree fndecl = gimple_call_fndecl (stmt); | |
4466 | if (fndecl) | |
4467 | { | |
4468 | struct tm_ipa_cg_data *d; | |
4469 | unsigned *pcallers; | |
594ec92f | 4470 | struct cgraph_node *tnode; |
0a35513e AH |
4471 | |
4472 | if (is_tm_ending_fndecl (fndecl)) | |
4473 | continue; | |
4474 | if (find_tm_replacement_function (fndecl)) | |
4475 | continue; | |
4476 | ||
594ec92f AH |
4477 | tnode = cgraph_get_node (fndecl); |
4478 | d = get_cg_data (&tnode, true); | |
4479 | ||
0a35513e AH |
4480 | pcallers = (for_clone ? &d->tm_callers_clone |
4481 | : &d->tm_callers_normal); | |
4482 | ||
4483 | gcc_assert (*pcallers > 0); | |
4484 | *pcallers -= 1; | |
4485 | } | |
4486 | } | |
4487 | } | |
4488 | } | |
4489 | ||
4490 | /* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions, | |
4491 | as well as other irrevocable actions such as inline assembly. Mark all | |
4492 | such blocks as irrevocable and decrement the number of calls to | |
4493 | transactional clones. Return true if, for the transactional clone, the | |
4494 | entire function is irrevocable. */ | |
4495 | ||
4496 | static bool | |
4497 | ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone) | |
4498 | { | |
4499 | struct tm_ipa_cg_data *d; | |
4500 | bitmap new_irr, old_irr; | |
0a35513e AH |
4501 | bool ret = false; |
4502 | ||
80fd8eba | 4503 | /* Builtin operators (operator new, and such). */ |
67348ccc DM |
4504 | if (DECL_STRUCT_FUNCTION (node->decl) == NULL |
4505 | || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL) | |
80fd8eba AH |
4506 | return false; |
4507 | ||
67348ccc | 4508 | push_cfun (DECL_STRUCT_FUNCTION (node->decl)); |
0a35513e AH |
4509 | calculate_dominance_info (CDI_DOMINATORS); |
4510 | ||
594ec92f | 4511 | d = get_cg_data (&node, true); |
00f96dc9 | 4512 | auto_vec<basic_block, 10> queue; |
0a35513e AH |
4513 | new_irr = BITMAP_ALLOC (&tm_obstack); |
4514 | ||
4515 | /* Scan each tm region, propagating irrevocable status through the tree. */ | |
4516 | if (for_clone) | |
4517 | { | |
4518 | old_irr = d->irrevocable_blocks_clone; | |
fefa31b5 | 4519 | queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))); |
0a35513e AH |
4520 | if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL)) |
4521 | { | |
fefa31b5 DM |
4522 | ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)), |
4523 | new_irr, | |
0a35513e | 4524 | old_irr, NULL); |
fefa31b5 DM |
4525 | ret = bitmap_bit_p (new_irr, |
4526 | single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index); | |
0a35513e AH |
4527 | } |
4528 | } | |
4529 | else | |
4530 | { | |
4531 | struct tm_region *region; | |
4532 | ||
4533 | old_irr = d->irrevocable_blocks_normal; | |
4534 | for (region = d->all_tm_regions; region; region = region->next) | |
4535 | { | |
9771b263 | 4536 | queue.quick_push (region->entry_block); |
0a35513e AH |
4537 | if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, |
4538 | region->exit_blocks)) | |
4539 | ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr, | |
4540 | region->exit_blocks); | |
4541 | } | |
4542 | } | |
4543 | ||
4544 | /* If we found any new irrevocable blocks, reduce the call count for | |
4545 | transactional clones within the irrevocable blocks. Save the new | |
4546 | set of irrevocable blocks for next time. */ | |
4547 | if (!bitmap_empty_p (new_irr)) | |
4548 | { | |
4549 | bitmap_iterator bmi; | |
4550 | unsigned i; | |
4551 | ||
4552 | EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi) | |
06e28de2 DM |
4553 | ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i), |
4554 | for_clone); | |
0a35513e AH |
4555 | |
4556 | if (old_irr) | |
4557 | { | |
4558 | bitmap_ior_into (old_irr, new_irr); | |
4559 | BITMAP_FREE (new_irr); | |
4560 | } | |
4561 | else if (for_clone) | |
4562 | d->irrevocable_blocks_clone = new_irr; | |
4563 | else | |
4564 | d->irrevocable_blocks_normal = new_irr; | |
4565 | ||
4566 | if (dump_file && new_irr) | |
4567 | { | |
4568 | const char *dname; | |
4569 | bitmap_iterator bmi; | |
4570 | unsigned i; | |
4571 | ||
4572 | dname = lang_hooks.decl_printable_name (current_function_decl, 2); | |
4573 | EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi) | |
4574 | fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i); | |
4575 | } | |
4576 | } | |
4577 | else | |
4578 | BITMAP_FREE (new_irr); | |
4579 | ||
0a35513e | 4580 | pop_cfun (); |
0a35513e AH |
4581 | |
4582 | return ret; | |
4583 | } | |
4584 | ||
4585 | /* Return true if, for the transactional clone of NODE, any call | |
4586 | may enter irrevocable mode. */ | |
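/* In outline: TM builtins and functions marked pure or safe never do;
   functions marked irrevocable or callable, wrapped functions, and
   functions whose final body we cannot see always may; otherwise the
   answer follows from the irrevocability scan results recorded on the
   node.  */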
4587 | ||
4588 | static bool | |
4589 | ipa_tm_mayenterirr_function (struct cgraph_node *node) | |
4590 | { | |
594ec92f AH |
4591 | struct tm_ipa_cg_data *d; |
4592 | tree decl; | |
4593 | unsigned flags; | |
4594 | ||
4595 | d = get_cg_data (&node, true); | |
67348ccc | 4596 | decl = node->decl; |
594ec92f | 4597 | flags = flags_from_decl_or_type (decl); |
0a35513e AH |
4598 | |
4599 | /* Handle some TM builtins. Ordinarily these aren't actually generated | |
4600 | at this point, but handling them when written directly by the | |
4601 | user makes it easier to build unit tests. */ | |
4602 | if (flags & ECF_TM_BUILTIN) | |
4603 | return false; | |
4604 | ||
4605 | /* Filter out functions explicitly marked with TM attributes. */ | |
4606 | if (flags & ECF_TM_PURE) | |
4607 | return false; | |
4608 | if (is_tm_safe (decl)) | |
4609 | return false; | |
4610 | if (is_tm_irrevocable (decl)) | |
4611 | return true; | |
4612 | if (is_tm_callable (decl)) | |
4613 | return true; | |
4614 | if (find_tm_replacement_function (decl)) | |
4615 | return true; | |
4616 | ||
4617 | /* If we aren't seeing the final version of the function, we don't | |
4618 | know what it will contain at runtime. */ | |
4619 | if (cgraph_function_body_availability (node) < AVAIL_AVAILABLE) | |
4620 | return true; | |
4621 | ||
4622 | /* If the function must go irrevocable, then of course true. */ | |
4623 | if (d->is_irrevocable) | |
4624 | return true; | |
4625 | ||
4626 | /* If there are any blocks marked irrevocable, then the function | |
4627 | as a whole may enter irrevocable. */ | |
4628 | if (d->irrevocable_blocks_clone) | |
4629 | return true; | |
4630 | ||
4631 | /* We may have previously marked this function as tm_may_enter_irr; | |
4632 | see pass_diagnose_tm_blocks. */ | |
4633 | if (node->local.tm_may_enter_irr) | |
4634 | return true; | |
4635 | ||
4636 | /* Recurse on the main body for aliases. In general, this will | |
4637 | result in one of the bits above being set so that we will not | |
4638 | have to recurse next time. */ | |
67348ccc | 4639 | if (node->alias) |
0a35513e AH |
4640 | return ipa_tm_mayenterirr_function (cgraph_get_node (node->thunk.alias)); |
4641 | ||
4642 | /* What remains is unmarked local functions without items that force | |
4643 | the function to go irrevocable. */ | |
4644 | return false; | |
4645 | } | |
4646 | ||
4647 | /* Diagnose calls from transaction_safe functions to unmarked | |
4648 | functions that are determined not to be safe. */ | |
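/* For example, assuming an unmarked callee that may enter irrevocable
   mode, a user-level snippet such as

     void unmarked (void);
     __attribute__((transaction_safe)) void f (void) { unmarked (); }

   draws "unsafe function call 'unmarked' within 'transaction_safe'
   function".  */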
4649 | ||
4650 | static void | |
4651 | ipa_tm_diagnose_tm_safe (struct cgraph_node *node) | |
4652 | { | |
4653 | struct cgraph_edge *e; | |
4654 | ||
4655 | for (e = node->callees; e ; e = e->next_callee) | |
67348ccc | 4656 | if (!is_tm_callable (e->callee->decl) |
0a35513e AH |
4657 | && e->callee->local.tm_may_enter_irr) |
4658 | error_at (gimple_location (e->call_stmt), | |
4659 | "unsafe function call %qD within " | |
67348ccc | 4660 | "%<transaction_safe%> function", e->callee->decl); |
0a35513e AH |
4661 | } |
4662 | ||
4663 | /* Diagnose calls from atomic transactions to unmarked functions | |
4664 | that are determined not to be safe. */ | |
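/* For example, an asm statement inside a __transaction_atomic block
   draws "asm not allowed in atomic transaction", and a call to an
   unmarked function that may go irrevocable draws "unsafe function
   call ... within atomic transaction"; both correspond to the
   error_at calls below.  */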
4665 | ||
4666 | static void | |
4667 | ipa_tm_diagnose_transaction (struct cgraph_node *node, | |
4668 | struct tm_region *all_tm_regions) | |
4669 | { | |
4670 | struct tm_region *r; | |
4671 | ||
4672 | for (r = all_tm_regions; r ; r = r->next) | |
4673 | if (gimple_transaction_subcode (r->transaction_stmt) & GTMA_IS_RELAXED) | |
4674 | { | |
4675 | /* Atomic transactions can be nested inside relaxed. */ | |
4676 | if (r->inner) | |
4677 | ipa_tm_diagnose_transaction (node, r->inner); | |
4678 | } | |
4679 | else | |
4680 | { | |
9771b263 | 4681 | vec<basic_block> bbs; |
0a35513e AH |
4682 | gimple_stmt_iterator gsi; |
4683 | basic_block bb; | |
4684 | size_t i; | |
4685 | ||
4686 | bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, | |
4687 | r->irr_blocks, NULL, false); | |
4688 | ||
9771b263 | 4689 | for (i = 0; bbs.iterate (i, &bb); ++i) |
0a35513e AH |
4690 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
4691 | { | |
4692 | gimple stmt = gsi_stmt (gsi); | |
4693 | tree fndecl; | |
4694 | ||
4695 | if (gimple_code (stmt) == GIMPLE_ASM) | |
4696 | { | |
4697 | error_at (gimple_location (stmt), | |
4698 | "asm not allowed in atomic transaction"); | |
4699 | continue; | |
4700 | } | |
4701 | ||
4702 | if (!is_gimple_call (stmt)) | |
4703 | continue; | |
4704 | fndecl = gimple_call_fndecl (stmt); | |
4705 | ||
4706 | /* Indirect function calls have been diagnosed already. */ | |
4707 | if (!fndecl) | |
4708 | continue; | |
4709 | ||
4710 | /* Stop at the end of the transaction. */ | |
4711 | if (is_tm_ending_fndecl (fndecl)) | |
4712 | { | |
4713 | if (bitmap_bit_p (r->exit_blocks, bb->index)) | |
4714 | break; | |
4715 | continue; | |
4716 | } | |
4717 | ||
4718 | /* Marked functions have been diagnosed already. */ | |
4719 | if (is_tm_pure_call (stmt)) | |
4720 | continue; | |
4721 | if (is_tm_callable (fndecl)) | |
4722 | continue; | |
4723 | ||
4724 | if (cgraph_local_info (fndecl)->tm_may_enter_irr) | |
4725 | error_at (gimple_location (stmt), | |
4726 | "unsafe function call %qD within " | |
4727 | "atomic transaction", fndecl); | |
4728 | } | |
4729 | ||
9771b263 | 4730 | bbs.release (); |
0a35513e AH |
4731 | } |
4732 | } | |
4733 | ||
4734 | /* Return a transactional mangled name for the identifier OLD_ASM_ID, | |
4735 | normally a decl's DECL_ASSEMBLER_NAME. The returned value is an | |
4736 | identifier node, so the caller need not free anything. */ | |
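/* For example, a sketch of the scheme implemented below:

     plain identifier:  foo      ->  _ZGTt3foo   (length-prefixed)
     C++ mangled name:  _Z3foov  ->  _ZGTt3foov  ("_Z" is replaced)  */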
4737 | ||
4738 | static tree | |
4739 | tm_mangle (tree old_asm_id) | |
4740 | { | |
4741 | const char *old_asm_name; | |
4742 | char *tm_name; | |
4743 | void *alloc = NULL; | |
4744 | struct demangle_component *dc; | |
4745 | tree new_asm_id; | |
4746 | ||
4747 | /* Determine if the symbol is already a valid C++ mangled name. Do this | |
4748 | even for C, which might be interfacing with C++ code via appropriately | |
4749 | ugly identifiers. */ | |
4750 | /* ??? We could probably do just as well checking for "_Z" and be done. */ | |
4751 | old_asm_name = IDENTIFIER_POINTER (old_asm_id); | |
4752 | dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc); | |
4753 | ||
4754 | if (dc == NULL) | |
4755 | { | |
4756 | char length[8]; | |
4757 | ||
4758 | do_unencoded: | |
4759 | sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id)); | |
4760 | tm_name = concat ("_ZGTt", length, old_asm_name, NULL); | |
4761 | } | |
4762 | else | |
4763 | { | |
4764 | old_asm_name += 2; /* Skip _Z */ | |
4765 | ||
4766 | switch (dc->type) | |
4767 | { | |
4768 | case DEMANGLE_COMPONENT_TRANSACTION_CLONE: | |
4769 | case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE: | |
4770 | /* Don't play silly games, you! Never re-mangle a clone name. */ | |
4771 | goto do_unencoded; | |
4772 | ||
4773 | case DEMANGLE_COMPONENT_HIDDEN_ALIAS: | |
4774 | /* I'd really like to know if we can ever be passed one of | |
4775 | these from the C++ front end. The Logical Thing would | |
4776 | seem that hidden-alias should be outer-most, so that we | |
4777 | get hidden-alias of a transaction-clone and not vice-versa. */ | |
4778 | old_asm_name += 2; | |
4779 | break; | |
4780 | ||
4781 | default: | |
4782 | break; | |
4783 | } | |
4784 | ||
4785 | tm_name = concat ("_ZGTt", old_asm_name, NULL); | |
4786 | } | |
4787 | free (alloc); | |
4788 | ||
4789 | new_asm_id = get_identifier (tm_name); | |
4790 | free (tm_name); | |
4791 | ||
4792 | return new_asm_id; | |
4793 | } | |
4794 | ||
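/* Mark NODE to be output and as analyzed, so that the freshly created
   clone symbol survives symbol table cleanups.  */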
4795 | static inline void | |
ead84f73 | 4796 | ipa_tm_mark_force_output_node (struct cgraph_node *node) |
0a35513e | 4797 | { |
ead84f73 | 4798 | cgraph_mark_force_output_node (node); |
67348ccc | 4799 | node->analyzed = true; |
0a35513e AH |
4800 | } |
4801 | ||
edb983b2 JH |
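/* Likewise, but mark NODE as forced by the ABI rather than
   unconditionally forced output.  */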
4802 | static inline void |
4803 | ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node) | |
4804 | { | |
67348ccc DM |
4805 | node->forced_by_abi = true; |
4806 | node->analyzed = true; | |
edb983b2 JH |
4807 | } |
4808 | ||
0a35513e AH |
4809 | /* Callback data for ipa_tm_create_version_alias. */ |
4810 | struct create_version_alias_info | |
4811 | { | |
4812 | struct cgraph_node *old_node; | |
4813 | tree new_decl; | |
4814 | }; | |
4815 | ||
e617b554 | 4816 | /* A subroutine of ipa_tm_create_version, called via |
0a35513e AH |
4817 | cgraph_for_node_and_aliases. Create new tm clones for each of |
4818 | the existing aliases. */ | |
4819 | static bool | |
4820 | ipa_tm_create_version_alias (struct cgraph_node *node, void *data) | |
4821 | { | |
4822 | struct create_version_alias_info *info | |
4823 | = (struct create_version_alias_info *)data; | |
4824 | tree old_decl, new_decl, tm_name; | |
4825 | struct cgraph_node *new_node; | |
4826 | ||
67348ccc | 4827 | if (!node->cpp_implicit_alias) |
0a35513e AH |
4828 | return false; |
4829 | ||
67348ccc | 4830 | old_decl = node->decl; |
0a35513e AH |
4831 | tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl)); |
4832 | new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl), | |
4833 | TREE_CODE (old_decl), tm_name, | |
4834 | TREE_TYPE (old_decl)); | |
4835 | ||
4836 | SET_DECL_ASSEMBLER_NAME (new_decl, tm_name); | |
4837 | SET_DECL_RTL (new_decl, NULL); | |
4838 | ||
4839 | /* Based loosely on C++'s make_alias_for(). */ | |
4840 | TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl); | |
54c39478 AH |
4841 | DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl); |
4842 | DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl); | |
0a35513e AH |
4843 | TREE_READONLY (new_decl) = TREE_READONLY (old_decl); |
4844 | DECL_EXTERNAL (new_decl) = 0; | |
4845 | DECL_ARTIFICIAL (new_decl) = 1; | |
4846 | TREE_ADDRESSABLE (new_decl) = 1; | |
4847 | TREE_USED (new_decl) = 1; | |
4848 | TREE_SYMBOL_REFERENCED (tm_name) = 1; | |
4849 | ||
4850 | /* Perform the same remapping to the comdat group. */ | |
43042ea7 | 4851 | if (DECL_ONE_ONLY (new_decl)) |
d67ff7b7 | 4852 | varpool_get_node (new_decl)->set_comdat_group (tm_mangle (decl_comdat_group_id (old_decl))); |
0a35513e AH |
4853 | |
4854 | new_node = cgraph_same_body_alias (NULL, new_decl, info->new_decl); | |
4855 | new_node->tm_clone = true; | |
67348ccc | 4856 | new_node->externally_visible = info->old_node->externally_visible; |
594ec92f AH |
4857 | /* ?? Do not traverse aliases here. */ |
4858 | get_cg_data (&node, false)->clone = new_node; | |
0a35513e AH |
4859 | |
4860 | record_tm_clone_pair (old_decl, new_decl); | |
4861 | ||
67348ccc | 4862 | if (info->old_node->force_output |
d122681a | 4863 | || info->old_node->ref_list.first_referring ()) |
ead84f73 | 4864 | ipa_tm_mark_force_output_node (new_node); |
67348ccc | 4865 | if (info->old_node->forced_by_abi) |
edb983b2 | 4866 | ipa_tm_mark_forced_by_abi_node (new_node); |
0a35513e AH |
4867 | return false; |
4868 | } | |
4869 | ||
4870 | /* Create a copy of the function (possibly declaration only) of OLD_NODE, | |
4871 | appropriate for the transactional clone. */ | |
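/* For instance, cloning a function whose assembler name is _Z3foov
   yields a clone named _ZGTt3foov (see tm_mangle above); the pair is
   recorded with record_tm_clone_pair so that calls can be redirected
   to the clone later.  */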
4872 | ||
4873 | static void | |
4874 | ipa_tm_create_version (struct cgraph_node *old_node) | |
4875 | { | |
4876 | tree new_decl, old_decl, tm_name; | |
4877 | struct cgraph_node *new_node; | |
4878 | ||
67348ccc | 4879 | old_decl = old_node->decl; |
0a35513e AH |
4880 | new_decl = copy_node (old_decl); |
4881 | ||
4882 | /* DECL_ASSEMBLER_NAME needs to be set before we call | |
4883 | cgraph_copy_node_for_versioning below, because cgraph_node will | |
4884 | fill the assembler_name_hash. */ | |
4885 | tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl)); | |
4886 | SET_DECL_ASSEMBLER_NAME (new_decl, tm_name); | |
4887 | SET_DECL_RTL (new_decl, NULL); | |
4888 | TREE_SYMBOL_REFERENCED (tm_name) = 1; | |
4889 | ||
4890 | /* Perform the same remapping to the comdat group. */ | |
43042ea7 | 4891 | if (DECL_ONE_ONLY (new_decl)) |
aede2c10 | 4892 | varpool_get_node (new_decl)->set_comdat_group (tm_mangle (DECL_COMDAT_GROUP (old_decl))); |
0a35513e | 4893 | |
ca860d03 | 4894 | gcc_assert (!old_node->ipa_transforms_to_apply.exists ()); |
6e1aa848 | 4895 | new_node = cgraph_copy_node_for_versioning (old_node, new_decl, vNULL, NULL); |
51a5c0c2 | 4896 | new_node->local.local = false; |
67348ccc | 4897 | new_node->externally_visible = old_node->externally_visible; |
0a35513e AH |
4898 | new_node->lowered = true; |
4899 | new_node->tm_clone = 1; | |
594ec92f | 4900 | get_cg_data (&old_node, true)->clone = new_node; |
0a35513e AH |
4901 | |
4902 | if (cgraph_function_body_availability (old_node) >= AVAIL_OVERWRITABLE) | |
4903 | { | |
4904 | /* Remap extern inline to static inline. */ | |
4905 | /* ??? Is it worth trying to use make_decl_one_only? */ | |
4906 | if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl)) | |
4907 | { | |
4908 | DECL_EXTERNAL (new_decl) = 0; | |
4909 | TREE_PUBLIC (new_decl) = 0; | |
e0b22991 | 4910 | DECL_WEAK (new_decl) = 0; |
0a35513e AH |
4911 | } |
4912 | ||
9771b263 DN |
4913 | tree_function_versioning (old_decl, new_decl, |
4914 | NULL, false, NULL, | |
4915 | false, NULL, NULL); | |
0a35513e AH |
4916 | } |
4917 | ||
4918 | record_tm_clone_pair (old_decl, new_decl); | |
4919 | ||
4920 | cgraph_call_function_insertion_hooks (new_node); | |
67348ccc | 4921 | if (old_node->force_output |
d122681a | 4922 | || old_node->ref_list.first_referring ()) |
ead84f73 | 4923 | ipa_tm_mark_force_output_node (new_node); |
67348ccc | 4924 | if (old_node->forced_by_abi) |
edb983b2 | 4925 | ipa_tm_mark_forced_by_abi_node (new_node); |
0a35513e AH |
4926 | |
4927 | /* Do the same thing, but for any aliases of the original node. */ | |
4928 | { | |
4929 | struct create_version_alias_info data; | |
4930 | data.old_node = old_node; | |
4931 | data.new_decl = new_decl; | |
4932 | cgraph_for_node_and_aliases (old_node, ipa_tm_create_version_alias, | |
4933 | &data, true); | |
4934 | } | |
4935 | } | |
4936 | ||
4937 | /* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */ | |
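/* Conceptually, the start of BB gains a call like

     _ITM_changeTransactionMode (MODE_SERIALIRREVOCABLE);

   where _ITM_changeTransactionMode is the libitm entry point behind
   BUILT_IN_TM_IRREVOCABLE.  */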
4938 | ||
4939 | static void | |
4940 | ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region, | |
4941 | basic_block bb) | |
4942 | { | |
4943 | gimple_stmt_iterator gsi; | |
4944 | gimple g; | |
4945 | ||
4946 | transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); | |
4947 | ||
4948 | g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE), | |
4949 | 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE)); | |
4950 | ||
4951 | split_block_after_labels (bb); | |
4952 | gsi = gsi_after_labels (bb); | |
4953 | gsi_insert_before (&gsi, g, GSI_SAME_STMT); | |
4954 | ||
4955 | cgraph_create_edge (node, | |
4956 | cgraph_get_create_node | |
4957 | (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)), | |
4958 | g, 0, | |
67348ccc | 4959 | compute_call_stmt_bb_frequency (node->decl, |
0a35513e AH |
4960 | gimple_bb (g))); |
4961 | } | |
4962 | ||
4963 | /* Construct a call to TM_GETTMCLONE and insert it before GSI. */ | |
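/* Roughly, an indirect call inside a transaction,

     x = f (args);

   is rewritten into GIMPLE equivalent to

     clone = _ITM_getTMCloneSafe (f);
     callfn = (typeof (f)) clone;
     x = callfn (args);

   using the Safe variant when the function type is transaction_safe
   and the Irrevocable variant otherwise (a sketch; the statements
   are built below).  */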
4964 | ||
4965 | static bool | |
4966 | ipa_tm_insert_gettmclone_call (struct cgraph_node *node, | |
4967 | struct tm_region *region, | |
4968 | gimple_stmt_iterator *gsi, gimple stmt) | |
4969 | { | |
4970 | tree gettm_fn, ret, old_fn, callfn; | |
4971 | gimple g, g2; | |
4972 | bool safe; | |
4973 | ||
4974 | old_fn = gimple_call_fn (stmt); | |
4975 | ||
4976 | if (TREE_CODE (old_fn) == ADDR_EXPR) | |
4977 | { | |
4978 | tree fndecl = TREE_OPERAND (old_fn, 0); | |
4979 | tree clone = get_tm_clone_pair (fndecl); | |
4980 | ||
4981 | /* By transforming the call into a TM_GETTMCLONE, we are | |
4982 | technically taking the address of the original function and | |
4983 | its clone. Record this so the inliner knows both functions | |
4984 | are needed. */ | |
4985 | cgraph_mark_address_taken_node (cgraph_get_node (fndecl)); | |
4986 | if (clone) | |
4987 | cgraph_mark_address_taken_node (cgraph_get_node (clone)); | |
4988 | } | |
4989 | ||
4990 | safe = is_tm_safe (TREE_TYPE (old_fn)); | |
4991 | gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE | |
4992 | : BUILT_IN_TM_GETTMCLONE_IRR); | |
4993 | ret = create_tmp_var (ptr_type_node, NULL); | |
0a35513e AH |
4994 | |
4995 | if (!safe) | |
4996 | transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); | |
4997 | ||
4998 | /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */ | |
4999 | if (TREE_CODE (old_fn) == OBJ_TYPE_REF) | |
5000 | old_fn = OBJ_TYPE_REF_EXPR (old_fn); | |
5001 | ||
5002 | g = gimple_build_call (gettm_fn, 1, old_fn); | |
5003 | ret = make_ssa_name (ret, g); | |
5004 | gimple_call_set_lhs (g, ret); | |
5005 | ||
5006 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
5007 | ||
5008 | cgraph_create_edge (node, cgraph_get_create_node (gettm_fn), g, 0, | |
67348ccc | 5009 | compute_call_stmt_bb_frequency (node->decl, |
c3284718 | 5010 | gimple_bb (g))); |
0a35513e AH |
5011 | |
5012 | /* Cast return value from tm_gettmclone* into appropriate function | |
5013 | pointer. */ | |
5014 | callfn = create_tmp_var (TREE_TYPE (old_fn), NULL); | |
0a35513e AH |
5015 | g2 = gimple_build_assign (callfn, |
5016 | fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret)); | |
5017 | callfn = make_ssa_name (callfn, g2); | |
5018 | gimple_assign_set_lhs (g2, callfn); | |
5019 | gsi_insert_before (gsi, g2, GSI_SAME_STMT); | |
5020 | ||
5021 | /* ??? This is a hack to preserve the NOTHROW bit on the call, | |
5022 | which we would have derived from the decl. Failure to save | |
5023 | this bit means we might have to split the basic block. */ | |
5024 | if (gimple_call_nothrow_p (stmt)) | |
5025 | gimple_call_set_nothrow (stmt, true); | |
5026 | ||
5027 | gimple_call_set_fn (stmt, callfn); | |
5028 | ||
5029 | /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS | |
5030 | for a call statement. Fix it. */ | |
5031 | { | |
5032 | tree lhs = gimple_call_lhs (stmt); | |
5033 | tree rettype = TREE_TYPE (gimple_call_fntype (stmt)); | |
5034 | if (lhs | |
5035 | && !useless_type_conversion_p (TREE_TYPE (lhs), rettype)) | |
5036 | { | |
5037 | tree temp; | |
5038 | ||
7cc434a3 | 5039 | temp = create_tmp_reg (rettype, 0); |
0a35513e AH |
5040 | gimple_call_set_lhs (stmt, temp); |
5041 | ||
5042 | g2 = gimple_build_assign (lhs, | |
5043 | fold_build1 (VIEW_CONVERT_EXPR, | |
5044 | TREE_TYPE (lhs), temp)); | |
5045 | gsi_insert_after (gsi, g2, GSI_SAME_STMT); | |
5046 | } | |
5047 | } | |
5048 | ||
5049 | update_stmt (stmt); | |
5050 | ||
5051 | return true; | |
5052 | } | |
5053 | ||
5054 | /* Helper function for ipa_tm_transform_calls*. Given a call | |
5055 | statement in GSI which resides inside transaction REGION, redirect | |
5056 | the call to either its wrapper function, or its clone. */ | |
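/* For example, within a transaction a direct call foo () is redirected
   to the generated clone _ZGTt3foo (), a wrapped function such as
   memcpy is redirected to its TM runtime replacement, and an indirect
   call goes through the runtime via ipa_tm_insert_gettmclone_call.  */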
5057 | ||
5058 | static void | |
5059 | ipa_tm_transform_calls_redirect (struct cgraph_node *node, | |
5060 | struct tm_region *region, | |
5061 | gimple_stmt_iterator *gsi, | |
5062 | bool *need_ssa_rename_p) | |
5063 | { | |
5064 | gimple stmt = gsi_stmt (*gsi); | |
5065 | struct cgraph_node *new_node; | |
5066 | struct cgraph_edge *e = cgraph_edge (node, stmt); | |
5067 | tree fndecl = gimple_call_fndecl (stmt); | |
5068 | ||
5069 | /* For indirect calls, pass the address through the runtime. */ | |
5070 | if (fndecl == NULL) | |
5071 | { | |
5072 | *need_ssa_rename_p |= | |
5073 | ipa_tm_insert_gettmclone_call (node, region, gsi, stmt); | |
5074 | return; | |
5075 | } | |
5076 | ||
5077 | /* Handle some TM builtins. Ordinarily these aren't actually generated | |
5078 | at this point, but handling them when written directly by the | |
5079 | user makes it easier to build unit tests. */ | |
5080 | if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN) | |
5081 | return; | |
5082 | ||
5083 | /* Fixup recursive calls inside clones. */ | |
5084 | /* ??? Why did cgraph_copy_node_for_versioning update the call edges | |
5085 | for recursion but not update the call statements themselves? */ | |
5086 | if (e->caller == e->callee && decl_is_tm_clone (current_function_decl)) | |
5087 | { | |
5088 | gimple_call_set_fndecl (stmt, current_function_decl); | |
5089 | return; | |
5090 | } | |
5091 | ||
5092 | /* If there is a replacement, use it. */ | |
5093 | fndecl = find_tm_replacement_function (fndecl); | |
5094 | if (fndecl) | |
5095 | { | |
5096 | new_node = cgraph_get_create_node (fndecl); | |
5097 | ||
5098 | /* ??? Mark all transaction_wrap functions tm_may_enter_irr. | |
5099 | ||
5100 | We can't do this earlier in record_tm_replacement because | |
5101 | cgraph_remove_unreachable_nodes is called before we inject | |
5102 | references to the node. Further, we can't do this in some | |
5103 | nice central place in ipa_tm_execute because we don't have | |
5104 | the exact list of wrapper functions that would be used. | |
5105 | Marking more wrappers than necessary results in the creation | |
5106 | of unnecessary cgraph_nodes, which can cause some of the | |
5107 | other IPA passes to crash. | |
5108 | ||
5109 | We do need to mark these nodes so that we get the proper | |
5110 | result in expand_call_tm. */ | |
5111 | /* ??? This seems broken. How is it that we're marking the | |
5112 | CALLEE as may_enter_irr? Surely we should be marking the | |
5113 | CALLER. Also note that find_tm_replacement_function also | |
5114 | contains mappings into the TM runtime, e.g. memcpy. These | |
5115 | we know won't go irrevocable. */ | |
5116 | new_node->local.tm_may_enter_irr = 1; | |
5117 | } | |
5118 | else | |
5119 | { | |
594ec92f AH |
5120 | struct tm_ipa_cg_data *d; |
5121 | struct cgraph_node *tnode = e->callee; | |
5122 | ||
5123 | d = get_cg_data (&tnode, true); | |
0a35513e AH |
5124 | new_node = d->clone; |
5125 | ||
5126 | /* As we've already skipped pure calls and appropriate builtins, | |
5127 | and we've already marked irrevocable blocks, if we can't come | |
5128 | up with a static replacement, then ask the runtime. */ | |
5129 | if (new_node == NULL) | |
5130 | { | |
5131 | *need_ssa_rename_p |= | |
5132 | ipa_tm_insert_gettmclone_call (node, region, gsi, stmt); | |
0a35513e AH |
5133 | return; |
5134 | } | |
5135 | ||
67348ccc | 5136 | fndecl = new_node->decl; |
0a35513e AH |
5137 | } |
5138 | ||
5139 | cgraph_redirect_edge_callee (e, new_node); | |
5140 | gimple_call_set_fndecl (stmt, fndecl); | |
5141 | } | |
5142 | ||
5143 | /* Helper function for ipa_tm_transform_calls. For a given BB, | |
5144 | install calls to tm_irrevocable when IRR_BLOCKS are reached, | |
5145 | redirect other calls to the generated transactional clone. */ | |
5146 | ||
5147 | static bool | |
5148 | ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region, | |
5149 | basic_block bb, bitmap irr_blocks) | |
5150 | { | |
5151 | gimple_stmt_iterator gsi; | |
5152 | bool need_ssa_rename = false; | |
5153 | ||
5154 | if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index)) | |
5155 | { | |
5156 | ipa_tm_insert_irr_call (node, region, bb); | |
5157 | return true; | |
5158 | } | |
5159 | ||
5160 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
5161 | { | |
5162 | gimple stmt = gsi_stmt (gsi); | |
5163 | ||
5164 | if (!is_gimple_call (stmt)) | |
5165 | continue; | |
5166 | if (is_tm_pure_call (stmt)) | |
5167 | continue; | |
5168 | ||
5169 | /* Redirect edges to the appropriate replacement or clone. */ | |
5170 | ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename); | |
5171 | } | |
5172 | ||
5173 | return need_ssa_rename; | |
5174 | } | |
5175 | ||
5176 | /* Walk the CFG for REGION, beginning at BB. Install calls to | |
5177 | tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to | |
5178 | the generated transactional clone. */ | |
5179 | ||
5180 | static bool | |
5181 | ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region, | |
5182 | basic_block bb, bitmap irr_blocks) | |
5183 | { | |
5184 | bool need_ssa_rename = false; | |
5185 | edge e; | |
5186 | edge_iterator ei; | |
ef062b13 | 5187 | auto_vec<basic_block> queue; |
0a35513e AH |
5188 | bitmap visited_blocks = BITMAP_ALLOC (NULL); |
5189 | ||
9771b263 | 5190 | queue.safe_push (bb); |
0a35513e AH |
5191 | do |
5192 | { | |
9771b263 | 5193 | bb = queue.pop (); |
0a35513e AH |
5194 | |
5195 | need_ssa_rename |= | |
5196 | ipa_tm_transform_calls_1 (node, region, bb, irr_blocks); | |
5197 | ||
5198 | if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index)) | |
5199 | continue; | |
5200 | ||
5201 | if (region && bitmap_bit_p (region->exit_blocks, bb->index)) | |
5202 | continue; | |
5203 | ||
5204 | FOR_EACH_EDGE (e, ei, bb->succs) | |
5205 | if (!bitmap_bit_p (visited_blocks, e->dest->index)) | |
5206 | { | |
5207 | bitmap_set_bit (visited_blocks, e->dest->index); | |
9771b263 | 5208 | queue.safe_push (e->dest); |
0a35513e AH |
5209 | } |
5210 | } | |
9771b263 | 5211 | while (!queue.is_empty ()); |
0a35513e | 5212 | |
0a35513e AH |
5213 | BITMAP_FREE (visited_blocks); |
5214 | ||
5215 | return need_ssa_rename; | |
5216 | } | |
5217 | ||
5218 | /* Transform the calls within the TM regions of NODE. */ | |
5219 | ||
5220 | static void | |
5221 | ipa_tm_transform_transaction (struct cgraph_node *node) | |
5222 | { | |
594ec92f | 5223 | struct tm_ipa_cg_data *d; |
0a35513e AH |
5224 | struct tm_region *region; |
5225 | bool need_ssa_rename = false; | |
5226 | ||
594ec92f AH |
5227 | d = get_cg_data (&node, true); |
5228 | ||
67348ccc | 5229 | push_cfun (DECL_STRUCT_FUNCTION (node->decl)); |
0a35513e AH |
5230 | calculate_dominance_info (CDI_DOMINATORS); |
5231 | ||
5232 | for (region = d->all_tm_regions; region; region = region->next) | |
5233 | { | |
5234 | /* If we're sure to go irrevocable, don't transform anything. */ | |
5235 | if (d->irrevocable_blocks_normal | |
5236 | && bitmap_bit_p (d->irrevocable_blocks_normal, | |
5237 | region->entry_block->index)) | |
5238 | { | |
b7a78683 AH |
5239 | transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE |
5240 | | GTMA_MAY_ENTER_IRREVOCABLE | |
5241 | | GTMA_HAS_NO_INSTRUMENTATION); | |
0a35513e AH |
5242 | continue; |
5243 | } | |
5244 | ||
5245 | need_ssa_rename |= | |
5246 | ipa_tm_transform_calls (node, region, region->entry_block, | |
5247 | d->irrevocable_blocks_normal); | |
5248 | } | |
5249 | ||
5250 | if (need_ssa_rename) | |
5251 | update_ssa (TODO_update_ssa_only_virtuals); | |
5252 | ||
5253 | pop_cfun (); | |
0a35513e AH |
5254 | } |
5255 | ||
5256 | /* Transform the calls within the transactional clone of NODE. */ | |
5257 | ||
5258 | static void | |
5259 | ipa_tm_transform_clone (struct cgraph_node *node) | |
5260 | { | |
594ec92f | 5261 | struct tm_ipa_cg_data *d; |
0a35513e AH |
5262 | bool need_ssa_rename; |
5263 | ||
594ec92f AH |
5264 | d = get_cg_data (&node, true); |
5265 | ||
0a35513e AH |
5266 | /* If this function makes no calls and has no irrevocable blocks, |
5267 | then there's nothing to do. */ | |
5268 | /* ??? Remove non-aborting top-level transactions. */ | |
8730965e | 5269 | if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone) |
0a35513e AH |
5270 | return; |
5271 | ||
67348ccc | 5272 | push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl)); |
0a35513e AH |
5273 | calculate_dominance_info (CDI_DOMINATORS); |
5274 | ||
5275 | need_ssa_rename = | |
fefa31b5 DM |
5276 | ipa_tm_transform_calls (d->clone, NULL, |
5277 | single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)), | |
0a35513e AH |
5278 | d->irrevocable_blocks_clone); |
5279 | ||
5280 | if (need_ssa_rename) | |
5281 | update_ssa (TODO_update_ssa_only_virtuals); | |
5282 | ||
5283 | pop_cfun (); | |
0a35513e AH |
5284 | } |
5285 | ||
5286 | /* Main entry point for the transactional memory IPA pass. */ | |
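/* The pass proceeds in the phases marked by the step comments below:
   queue the local tm_callable functions; scan reachable functions for
   transaction regions and transactional calls; iterate the
   irrevocability scans and propagate the tm_may_enter_irr bit to a
   fixed point; diagnose tm_safe functions and atomic regions; create
   the transactional clones; and finally redirect calls to the clones
   and insert irrevocable marks.  */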
5287 | ||
5288 | static unsigned int | |
5289 | ipa_tm_execute (void) | |
5290 | { | |
c3284718 | 5291 | cgraph_node_queue tm_callees = cgraph_node_queue (); |
0a35513e | 5292 | /* List of functions that will go irrevocable. */ |
c3284718 | 5293 | cgraph_node_queue irr_worklist = cgraph_node_queue (); |
0a35513e AH |
5294 | |
5295 | struct cgraph_node *node; | |
5296 | struct tm_ipa_cg_data *d; | |
5297 | enum availability a; | |
5298 | unsigned int i; | |
5299 | ||
5300 | #ifdef ENABLE_CHECKING | |
5301 | verify_cgraph (); | |
5302 | #endif | |
5303 | ||
5304 | bitmap_obstack_initialize (&tm_obstack); | |
398b1daa | 5305 | initialize_original_copy_tables (); |
0a35513e AH |
5306 | |
5307 | /* For all local functions marked tm_callable, queue them. */ | |
65c70e6b | 5308 | FOR_EACH_DEFINED_FUNCTION (node) |
67348ccc | 5309 | if (is_tm_callable (node->decl) |
0a35513e AH |
5310 | && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE) |
5311 | { | |
594ec92f | 5312 | d = get_cg_data (&node, true); |
0a35513e AH |
5313 | maybe_push_queue (node, &tm_callees, &d->in_callee_queue); |
5314 | } | |
5315 | ||
5316 | /* For all local reachable functions... */ | |
65c70e6b | 5317 | FOR_EACH_DEFINED_FUNCTION (node) |
93a18a70 | 5318 | if (node->lowered |
0a35513e AH |
5319 | && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE) |
5320 | { | |
5321 | /* ... marked tm_pure, record that fact for the runtime by | |
5322 | indicating that the pure function is its own tm_callable. | |
5323 | No need to do this if the function's address can't be taken. */ | |
67348ccc | 5324 | if (is_tm_pure (node->decl)) |
0a35513e AH |
5325 | { |
5326 | if (!node->local.local) | |
67348ccc | 5327 | record_tm_clone_pair (node->decl, node->decl); |
0a35513e AH |
5328 | continue; |
5329 | } | |
5330 | ||
67348ccc | 5331 | push_cfun (DECL_STRUCT_FUNCTION (node->decl)); |
0a35513e AH |
5332 | calculate_dominance_info (CDI_DOMINATORS); |
5333 | ||
5334 | tm_region_init (NULL); | |
5335 | if (all_tm_regions) | |
5336 | { | |
594ec92f | 5337 | d = get_cg_data (&node, true); |
0a35513e | 5338 | |
398b1daa AH |
5339 | /* Scan for calls that are in each transaction, and |
5340 | generate the uninstrumented code path. */ | |
0a35513e AH |
5341 | ipa_tm_scan_calls_transaction (d, &tm_callees); |
5342 | ||
80fd8eba AH |
5343 | /* Put it in the worklist so we can scan the function |
5344 | later (ipa_tm_scan_irr_function) and mark the | |
5345 | irrevocable blocks. */ | |
5346 | maybe_push_queue (node, &irr_worklist, &d->in_worklist); | |
5347 | d->want_irr_scan_normal = true; | |
0a35513e AH |
5348 | } |
5349 | ||
5350 | pop_cfun (); | |
0a35513e AH |
5351 | } |
5352 | ||
5353 | /* For every local function on the callee list, scan as if we will be | |
5354 | creating a transactional clone, queueing all new functions we find | |
5355 | along the way. */ | |
9771b263 | 5356 | for (i = 0; i < tm_callees.length (); ++i) |
0a35513e | 5357 | { |
9771b263 | 5358 | node = tm_callees[i]; |
0a35513e | 5359 | a = cgraph_function_body_availability (node); |
594ec92f | 5360 | d = get_cg_data (&node, true); |
0a35513e | 5361 | |
80fd8eba AH |
5362 | /* Put it in the worklist so we can scan the function later |
5363 | (ipa_tm_scan_irr_function) and mark the irrevocable | |
5364 | blocks. */ | |
5365 | maybe_push_queue (node, &irr_worklist, &d->in_worklist); | |
0a35513e AH |
5366 | |
5367 | /* Some callees cannot be arbitrarily cloned. These will always be | |
5368 | irrevocable. Mark these now, so that we need not scan them. */ | |
67348ccc | 5369 | if (is_tm_irrevocable (node->decl)) |
0a35513e AH |
5370 | ipa_tm_note_irrevocable (node, &irr_worklist); |
5371 | else if (a <= AVAIL_NOT_AVAILABLE | |
67348ccc | 5372 | && !is_tm_safe_or_pure (node->decl)) |
0a35513e AH |
5373 | ipa_tm_note_irrevocable (node, &irr_worklist); |
5374 | else if (a >= AVAIL_OVERWRITABLE) | |
5375 | { | |
67348ccc | 5376 | if (!tree_versionable_function_p (node->decl)) |
0a35513e AH |
5377 | ipa_tm_note_irrevocable (node, &irr_worklist); |
5378 | else if (!d->is_irrevocable) | |
5379 | { | |
5380 | /* If this is an alias, make sure its base is queued as well. | |
5381 | We need not scan the callees now, as the base will do. */ | |
67348ccc | 5382 | if (node->alias) |
0a35513e AH |
5383 | { |
5384 | node = cgraph_get_node (node->thunk.alias); | |
594ec92f | 5385 | d = get_cg_data (&node, true); |
0a35513e AH |
5386 | maybe_push_queue (node, &tm_callees, &d->in_callee_queue); |
5387 | continue; | |
5388 | } | |
5389 | ||
5390 | /* Add all nodes called by this function into | |
5391 | tm_callees as well. */ | |
5392 | ipa_tm_scan_calls_clone (node, &tm_callees); | |
5393 | } | |
5394 | } | |
5395 | } | |
5396 | ||
5397 | /* Iterate scans until no more work to be done. Prefer not to use | |
9771b263 | 5398 | vec::pop because the worklist tends to follow a breadth-first |
0a35513e AH |
5399 | search of the callgraph, which should allow convergence with a | |
5400 | minimum number of scans. But we also don't want the worklist | |
5401 | array to grow without bound, so we shift the array up periodically. */ | |
9771b263 | 5402 | for (i = 0; i < irr_worklist.length (); ++i) |
0a35513e | 5403 | { |
9771b263 | 5404 | if (i > 256 && i == irr_worklist.length () / 8) |
0a35513e | 5405 | { |
9771b263 | 5406 | irr_worklist.block_remove (0, i); |
0a35513e AH |
5407 | i = 0; |
5408 | } | |
5409 | ||
9771b263 | 5410 | node = irr_worklist[i]; |
594ec92f | 5411 | d = get_cg_data (&node, true); |
0a35513e AH |
5412 | d->in_worklist = false; |
5413 | ||
5414 | if (d->want_irr_scan_normal) | |
5415 | { | |
5416 | d->want_irr_scan_normal = false; | |
5417 | ipa_tm_scan_irr_function (node, false); | |
5418 | } | |
5419 | if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true)) | |
5420 | ipa_tm_note_irrevocable (node, &irr_worklist); | |
5421 | } | |
5422 | ||
5423 | /* For every function on the callee list, collect the tm_may_enter_irr | |
5424 | bit on the node. */ | |
9771b263 DN |
5425 | irr_worklist.truncate (0); |
5426 | for (i = 0; i < tm_callees.length (); ++i) | |
0a35513e | 5427 | { |
9771b263 | 5428 | node = tm_callees[i]; |
0a35513e AH |
5429 | if (ipa_tm_mayenterirr_function (node)) |
5430 | { | |
594ec92f | 5431 | d = get_cg_data (&node, true); |
0a35513e AH |
5432 | gcc_assert (d->in_worklist == false); |
5433 | maybe_push_queue (node, &irr_worklist, &d->in_worklist); | |
5434 | } | |
5435 | } | |
5436 | ||
5437 | /* Propagate the tm_may_enter_irr bit to callers until stable. */ | |
9771b263 | 5438 | for (i = 0; i < irr_worklist.length (); ++i) |
0a35513e AH |
5439 | { |
5440 | struct cgraph_node *caller; | |
5441 | struct cgraph_edge *e; | |
d122681a | 5442 | struct ipa_ref *ref = NULL; |
0a35513e AH |
5443 | unsigned j; |
5444 | ||
9771b263 | 5445 | if (i > 256 && i == irr_worklist.length () / 8) |
0a35513e | 5446 | { |
9771b263 | 5447 | irr_worklist.block_remove (0, i); |
0a35513e AH |
5448 | i = 0; |
5449 | } | |
5450 | ||
9771b263 | 5451 | node = irr_worklist[i]; |
594ec92f | 5452 | d = get_cg_data (&node, true); |
0a35513e AH |
5453 | d->in_worklist = false; |
5454 | node->local.tm_may_enter_irr = true; | |
5455 | ||
5456 | /* Propagate back to normal callers. */ | |
5457 | for (e = node->callers; e ; e = e->next_caller) | |
5458 | { | |
5459 | caller = e->caller; | |
67348ccc | 5460 | if (!is_tm_safe_or_pure (caller->decl) |
0a35513e AH |
5461 | && !caller->local.tm_may_enter_irr) |
5462 | { | |
594ec92f | 5463 | d = get_cg_data (&caller, true); |
0a35513e AH |
5464 | maybe_push_queue (caller, &irr_worklist, &d->in_worklist); |
5465 | } | |
5466 | } | |
5467 | ||
5468 | /* Propagate back to referring aliases as well. */ | |
d122681a | 5469 | for (j = 0; node->iterate_referring (j, ref); j++) |
0a35513e | 5470 | { |
5932a4d4 | 5471 | caller = cgraph (ref->referring); |
0a35513e AH |
5472 | if (ref->use == IPA_REF_ALIAS |
5473 | && !caller->local.tm_may_enter_irr) | |
5474 | { | |
594ec92f AH |
5475 | /* ?? Do not traverse aliases here. */ |
5476 | d = get_cg_data (&caller, false); | |
0a35513e AH |
5477 | maybe_push_queue (caller, &irr_worklist, &d->in_worklist); |
5478 | } | |
5479 | } | |
5480 | } | |
5481 | ||
5482 | /* Now validate all tm_safe functions, and all atomic regions in | |
5483 | other functions. */ | |
65c70e6b | 5484 | FOR_EACH_DEFINED_FUNCTION (node) |
93a18a70 | 5485 | if (node->lowered |
0a35513e AH |
5486 | && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE) |
5487 | { | |
594ec92f | 5488 | d = get_cg_data (&node, true); |
67348ccc | 5489 | if (is_tm_safe (node->decl)) |
0a35513e AH |
5490 | ipa_tm_diagnose_tm_safe (node); |
5491 | else if (d->all_tm_regions) | |
5492 | ipa_tm_diagnose_transaction (node, d->all_tm_regions); | |
5493 | } | |
5494 | ||
5495 | /* Create clones. Do those that are not irrevocable and have a | |
5496 | positive call count. Do those publicly visible functions that | |
5497 | the user directed us to clone. */ | |
9771b263 | 5498 | for (i = 0; i < tm_callees.length (); ++i) |
0a35513e AH |
5499 | { |
5500 | bool doit = false; | |
5501 | ||
9771b263 | 5502 | node = tm_callees[i]; |
67348ccc | 5503 | if (node->cpp_implicit_alias) |
0a35513e AH |
5504 | continue; |
5505 | ||
5506 | a = cgraph_function_body_availability (node); | |
594ec92f | 5507 | d = get_cg_data (&node, true); |
0a35513e AH |
5508 | |
5509 | if (a <= AVAIL_NOT_AVAILABLE) | |
67348ccc DM |
5510 | doit = is_tm_callable (node->decl); |
5511 | else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl)) | |
0a35513e AH |
5512 | doit = true; |
5513 | else if (!d->is_irrevocable | |
5514 | && d->tm_callers_normal + d->tm_callers_clone > 0) | |
5515 | doit = true; | |
5516 | ||
5517 | if (doit) | |
5518 | ipa_tm_create_version (node); | |
5519 | } | |
5520 | ||
5521 | /* Redirect calls to the new clones, and insert irrevocable marks. */ | |
9771b263 | 5522 | for (i = 0; i < tm_callees.length (); ++i) |
0a35513e | 5523 | { |
9771b263 | 5524 | node = tm_callees[i]; |
67348ccc | 5525 | if (node->analyzed) |
0a35513e | 5526 | { |
594ec92f | 5527 | d = get_cg_data (&node, true); |
0a35513e AH |
5528 | if (d->clone) |
5529 | ipa_tm_transform_clone (node); | |
5530 | } | |
5531 | } | |
65c70e6b | 5532 | FOR_EACH_DEFINED_FUNCTION (node) |
93a18a70 | 5533 | if (node->lowered |
0a35513e AH |
5534 | && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE) |
5535 | { | |
594ec92f | 5536 | d = get_cg_data (&node, true); |
0a35513e AH |
5537 | if (d->all_tm_regions) |
5538 | ipa_tm_transform_transaction (node); | |
5539 | } | |
5540 | ||
5541 | /* Free and clear all data structures. */ | |
9771b263 DN |
5542 | tm_callees.release (); |
5543 | irr_worklist.release (); | |
0a35513e | 5544 | bitmap_obstack_release (&tm_obstack); |
398b1daa | 5545 | free_original_copy_tables (); |
0a35513e | 5546 | |
65c70e6b | 5547 | FOR_EACH_FUNCTION (node) |
67348ccc | 5548 | node->aux = NULL; |
0a35513e AH |
5549 | |
5550 | #ifdef ENABLE_CHECKING | |
5551 | verify_cgraph (); | |
5552 | #endif | |
5553 | ||
5554 | return 0; | |
5555 | } | |
5556 | ||
27a4cd48 DM |
5557 | namespace { |
5558 | ||
5559 | const pass_data pass_data_ipa_tm = | |
5560 | { | |
5561 | SIMPLE_IPA_PASS, /* type */ | |
5562 | "tmipa", /* name */ | |
5563 | OPTGROUP_NONE, /* optinfo_flags */ | |
27a4cd48 DM |
5564 | true, /* has_execute */ |
5565 | TV_TRANS_MEM, /* tv_id */ | |
5566 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
5567 | 0, /* properties_provided */ | |
5568 | 0, /* properties_destroyed */ | |
5569 | 0, /* todo_flags_start */ | |
5570 | 0, /* todo_flags_finish */ | |
0a35513e AH |
5571 | }; |
5572 | ||
27a4cd48 DM |
5573 | class pass_ipa_tm : public simple_ipa_opt_pass |
5574 | { | |
5575 | public: | |
c3284718 RS |
5576 | pass_ipa_tm (gcc::context *ctxt) |
5577 | : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt) | |
27a4cd48 DM |
5578 | {} |
5579 | ||
5580 | /* opt_pass methods: */ | |
1a3d085c | 5581 | virtual bool gate (function *) { return flag_tm; } |
be55bfe6 | 5582 | virtual unsigned int execute (function *) { return ipa_tm_execute (); } |
27a4cd48 DM |
5583 | |
5584 | }; // class pass_ipa_tm | |
5585 | ||
5586 | } // anon namespace | |
5587 | ||
5588 | simple_ipa_opt_pass * | |
5589 | make_pass_ipa_tm (gcc::context *ctxt) | |
5590 | { | |
5591 | return new pass_ipa_tm (ctxt); | |
5592 | } | |
5593 | ||
0a35513e | 5594 | #include "gt-trans-mem.h" |