/* Passes for transactional memory support.
   Copyright (C) 2008-2014 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tree.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "calls.h"
#include "rtl.h"
#include "emit-rtl.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "gimple-ssa.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
#include "tree-inline.h"
#include "diagnostic-core.h"
#include "demangle.h"
#include "output.h"
#include "trans-mem.h"
#include "params.h"
#include "target.h"
#include "langhooks.h"
#include "gimple-pretty-print.h"
#include "cfgloop.h"
#include "tree-ssa-address.h"


#define A_RUNINSTRUMENTEDCODE   0x0001
#define A_RUNUNINSTRUMENTEDCODE 0x0002
#define A_SAVELIVEVARIABLES     0x0004
#define A_RESTORELIVEVARIABLES  0x0008
#define A_ABORTTRANSACTION      0x0010

#define AR_USERABORT            0x0001
#define AR_USERRETRY            0x0002
#define AR_TMCONFLICT           0x0004
#define AR_EXCEPTIONBLOCKABORT  0x0008
#define AR_OUTERABORT           0x0010

#define MODE_SERIALIRREVOCABLE  0x0000

/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

	__transaction {
	  local++;
	  if (++global == 10)
	    __tm_abort;
	}

   During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
   trivially replaced with a GIMPLE_TRANSACTION node.

   During pass_lower_tm, we examine the body of transactions looking
   for aborts.  Transactions that do not contain an abort may be
   merged into an outer transaction.  We also add a TRY-FINALLY node
   to arrange for the transaction to be committed on any exit.

   [??? Think about how this arrangement affects throw-with-commit
   and throw-with-abort operations.  In this case we want the TRY to
   handle gotos, but not to catch any exceptions because the transaction
   will already be closed.]

	GIMPLE_TRANSACTION [label=NULL] {
	  try {
	    local = local + 1;
	    t0 = global;
	    t1 = t0 + 1;
	    global = t1;
	    if (t1 == 10)
	      __builtin___tm_abort ();
	  } finally {
	    __builtin___tm_commit ();
	  }
	}

   During pass_lower_eh, we create EH regions for the transactions,
   intermixed with the regular EH stuff.  This gives us a nice persistent
   mapping (all the way through rtl) from transactional memory operation
   back to the transaction, which allows us to get the abnormal edges
   correct to model transaction aborts and restarts:

	GIMPLE_TRANSACTION [label=over]
	local = local + 1;
	t0 = global;
	t1 = t0 + 1;
	global = t1;
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:

   This is the end of all_lowering_passes, and so is what is present
   during the IPA passes, and through all of the optimization passes.

   During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
   functions and mark functions for cloning.

   At the end of gimple optimization, before exiting SSA form,
   pass_tm_edges replaces statements that perform transactional
   memory operations with the appropriate TM builtins, and swaps
   out function calls with their transactional clones.  At this
   point we introduce the abnormal transaction restart edges and
   complete lowering of the GIMPLE_TRANSACTION node.

	x = __builtin___tm_start (MAY_ABORT);
	eh_label:
	if (x & abort_transaction)
	  goto over;
	local = local + 1;
	t0 = __builtin___tm_load (global);
	t1 = t0 + 1;
	__builtin___tm_store (&global, t1);
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:
*/

static void *expand_regions (struct tm_region *,
			     void *(*callback)(struct tm_region *, void *),
			     void *, bool);

\f
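/* For orientation, the predicates below test the GNU TM attributes a
   user can place on declarations.  An illustrative sketch (not part of
   this file) of how each attribute is spelled in source:

     void f_safe (void) __attribute__((transaction_safe));
     void f_call (void) __attribute__((transaction_callable));
     void f_unsafe (void) __attribute__((transaction_unsafe));
     void f_pure (void) __attribute__((transaction_pure));
     void f_outer (void) __attribute__((transaction_may_cancel_outer));  */
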
/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));
      break;

    default:
      if (TYPE_P (x))
	return NULL;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return NULL;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return NULL;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}

/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      break;

    default:
      if (TYPE_P (x))
	return false;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}

/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is, by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}

/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  if (flag_tm)
    {
      tree attrs = get_attrs_for (x);
      if (attrs)
	{
	  if (lookup_attribute ("transaction_safe", attrs))
	    return true;
	  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	    return true;
	}
    }
  return false;
}

/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple call)
{
  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }
  else
    fn = TREE_TYPE (fn);

  return is_tm_pure (fn);
}

/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
	return true;
      if (lookup_attribute ("transaction_safe", attrs))
	return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	return true;
    }
  return false;
}

/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
}

/* Return true for built-in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
	return true;
      default:
	break;
      }

  return false;
}

/* Return true if STMT is a built-in function call that "ends" a
   transaction.  */

bool
is_tm_ending (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl != NULL_TREE
	  && is_tm_ending_fndecl (fndecl));
}

/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
	      || fcode == BUILT_IN_TM_LOAD_2
	      || fcode == BUILT_IN_TM_LOAD_4
	      || fcode == BUILT_IN_TM_LOAD_8
	      || fcode == BUILT_IN_TM_LOAD_FLOAT
	      || fcode == BUILT_IN_TM_LOAD_DOUBLE
	      || fcode == BUILT_IN_TM_LOAD_LDOUBLE
	      || fcode == BUILT_IN_TM_LOAD_M64
	      || fcode == BUILT_IN_TM_LOAD_M128
	      || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}

/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
	      || fcode == BUILT_IN_TM_STORE_2
	      || fcode == BUILT_IN_TM_STORE_4
	      || fcode == BUILT_IN_TM_STORE_8
	      || fcode == BUILT_IN_TM_STORE_FLOAT
	      || fcode == BUILT_IN_TM_STORE_DOUBLE
	      || fcode == BUILT_IN_TM_STORE_LDOUBLE
	      || fcode == BUILT_IN_TM_STORE_M64
	      || fcode == BUILT_IN_TM_STORE_M128
	      || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}

/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}

/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */

tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
			      build_int_cst (integer_type_node,
					     AR_USERABORT
					     | (is_outer ? AR_OUTERABORT : 0)));
}
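
/* A sketch (not part of this file) of the source form that reaches
   build_tm_abort_call, using the example syntax from the header comment:

     __transaction {
       if (++global == 10)
	 __tm_abort;
     }

   The call built above becomes __builtin___tm_abort () carrying the
   AR_USERABORT flag, plus AR_OUTERABORT for an outer abort.  */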
\f
/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */

struct tm_wrapper_hasher : ggc_cache_hasher<tree_map *>
{
  static inline hashval_t hash (tree_map *m) { return m->hash; }
  static inline bool
  equal (tree_map *a, tree_map *b)
  {
    return a->base.from == b->base.from;
  }

  static void
  handle_cache_entry (tree_map *&m)
  {
    extern void gt_ggc_mx (tree_map *&);
    if (m == HTAB_EMPTY_ENTRY || m == HTAB_DELETED_ENTRY)
      return;
    else if (ggc_marked_p (m->base.from))
      gt_ggc_mx (m);
    else
      m = static_cast<tree_map *> (HTAB_DELETED_ENTRY);
  }
};

static GTY((cache)) hash_table<tm_wrapper_hasher> *tm_wrap_map;

void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = hash_table<tm_wrapper_hasher>::create_ggc (32);

  h = ggc_alloc<tree_map> ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = tm_wrap_map->find_slot_with_hash (h, h->hash, INSERT);
  *slot = h;
}

/* Return a TM-aware replacement function for DECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = tm_wrap_map->find_with_hash (&in, in.hash);
      if (h)
	return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, we already have these few defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here: */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
	return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
	return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
	return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
	return NULL;
      }

  return NULL;
}

/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */
void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They better know what they're
     doing... */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}
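
/* A hypothetical sketch (not part of this file) of how a user requests
   a wrapper explicitly; the front end then records the pair through
   record_tm_replacement above.  The attribute is spelled
   transaction_wrap at the source level:

     void *xmalloc (size_t);
     void *txn_xmalloc (size_t)
       __attribute__((transaction_wrap (xmalloc)));
*/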
\f
/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER   1
#define DIAG_TM_SAFE    2
#define DIAG_TM_RELAXED 4

struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple stmt;
};

/* Return true if T is a volatile variable of some kind.  */

static bool
volatile_var_p (tree t)
{
  return (SSA_VAR_P (t)
	  && TREE_THIS_VOLATILE (TREE_TYPE (t)));
}

/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  if (volatile_var_p (*tp)
      && d->block_flags & DIAG_TM_SAFE
      && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      error_at (gimple_location (d->stmt),
		"invalid volatile use of %qD inside transaction",
		*tp);
    }

  return NULL_TREE;
}

static inline bool
is_tm_safe_or_pure (const_tree x)
{
  return is_tm_safe (x) || is_tm_pure (x);
}

static tree
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  /* Save stmt for use in leaf analysis.  */
  d->stmt = stmt;

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fn = gimple_call_fn (stmt);

	if ((d->summary_flags & DIAG_TM_OUTER) == 0
	    && is_tm_may_cancel_outer (fn))
	  error_at (gimple_location (stmt),
		    "%<transaction_may_cancel_outer%> function call not within"
		    " outer transaction or %<transaction_may_cancel_outer%>");

	if (d->summary_flags & DIAG_TM_SAFE)
	  {
	    bool is_safe, direct_call_p;
	    tree replacement;

	    if (TREE_CODE (fn) == ADDR_EXPR
		&& TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
	      {
		direct_call_p = true;
		replacement = TREE_OPERAND (fn, 0);
		replacement = find_tm_replacement_function (replacement);
		if (replacement)
		  fn = replacement;
	      }
	    else
	      {
		direct_call_p = false;
		replacement = NULL_TREE;
	      }

	    if (is_tm_safe_or_pure (fn))
	      is_safe = true;
	    else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
	      {
		/* A function explicitly marked transaction_callable as
		   opposed to transaction_safe is being defined to be
		   unsafe as part of its ABI, regardless of its contents.  */
		is_safe = false;
	      }
	    else if (direct_call_p)
	      {
		if (IS_TYPE_OR_DECL_P (fn)
		    && flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
		  is_safe = true;
		else if (replacement)
		  {
		    /* ??? At present we've been considering replacements
		       merely transaction_callable, and therefore might
		       enter irrevocable.  The tm_wrap attribute has not
		       yet made it into the new language spec.  */
		    is_safe = false;
		  }
		else
		  {
		    /* ??? Diagnostics for unmarked direct calls moved into
		       the IPA pass.  Section 3.2 of the spec details how
		       functions not marked should be considered "implicitly
		       safe" based on having examined the function body.  */
		    is_safe = true;
		  }
	      }
	    else
	      {
		/* An unmarked indirect call.  Consider it unsafe even
		   though optimization may yet figure out how to inline.  */
		is_safe = false;
	      }

	    if (!is_safe)
	      {
		if (TREE_CODE (fn) == ADDR_EXPR)
		  fn = TREE_OPERAND (fn, 0);
		if (d->block_flags & DIAG_TM_SAFE)
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"atomic transaction", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "atomic transaction", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "atomic transaction");
		      }
		  }
		else
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"%<transaction_safe%> function", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "%<transaction_safe%> function", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "%<transaction_safe%> function");
		      }
		  }
	      }
	  }
      }
      break;

    case GIMPLE_ASM:
      /* ??? We ought to come up with a way to add attributes to
	 asm statements, and then add "transaction_safe" to it.
	 Either that or get the language spec to resurrect __tm_waiver.  */
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in atomic transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in %<transaction_safe%> function");
      break;

    case GIMPLE_TRANSACTION:
      {
	gtransaction *trans_stmt = as_a <gtransaction *> (stmt);
	unsigned char inner_flags = DIAG_TM_SAFE;

	if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_RELAXED)
	  {
	    if (d->block_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in atomic transaction");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in %<transaction_safe%> function");
	    inner_flags = DIAG_TM_RELAXED;
	  }
	else if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_OUTER)
	  {
	    if (d->block_flags)
	      error_at (gimple_location (stmt),
			"outer transaction in transaction");
	    else if (d->func_flags & DIAG_TM_OUTER)
	      error_at (gimple_location (stmt),
			"outer transaction in "
			"%<transaction_may_cancel_outer%> function");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"outer transaction in %<transaction_safe%> function");
	    inner_flags |= DIAG_TM_OUTER;
	  }

	*handled_ops_p = true;
	if (gimple_transaction_body (trans_stmt))
	  {
	    struct walk_stmt_info wi_inner;
	    struct diagnose_tm d_inner;

	    memset (&d_inner, 0, sizeof (d_inner));
	    d_inner.func_flags = d->func_flags;
	    d_inner.block_flags = d->block_flags | inner_flags;
	    d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;

	    memset (&wi_inner, 0, sizeof (wi_inner));
	    wi_inner.info = &d_inner;

	    walk_gimple_seq (gimple_transaction_body (trans_stmt),
			     diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
	  }
      }
      break;

    default:
      break;
    }

  return NULL_TREE;
}
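
/* Illustrative sketch (not part of this file) of code the checks above
   reject inside an atomic transaction:

     void (*fp) (void);
     __transaction_atomic {
       __asm__ ("");   // "asm not allowed in atomic transaction"
       fp ();          // "unsafe indirect function call ..."
     }
*/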

static unsigned int
diagnose_tm_blocks (void)
{
  struct walk_stmt_info wi;
  struct diagnose_tm d;

  memset (&d, 0, sizeof (d));
  if (is_tm_may_cancel_outer (current_function_decl))
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
  else if (is_tm_safe (current_function_decl))
    d.func_flags = DIAG_TM_SAFE;
  d.summary_flags = d.func_flags;

  memset (&wi, 0, sizeof (wi));
  wi.info = &d;

  walk_gimple_seq (gimple_body (current_function_decl),
		   diagnose_tm_1, diagnose_tm_1_op, &wi);

  return 0;
}

namespace {

const pass_data pass_data_diagnose_tm_blocks =
{
  GIMPLE_PASS, /* type */
  "*diagnose_tm_blocks", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_diagnose_tm_blocks : public gimple_opt_pass
{
public:
  pass_diagnose_tm_blocks (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return diagnose_tm_blocks (); }

}; // class pass_diagnose_tm_blocks

} // anon namespace

gimple_opt_pass *
make_pass_diagnose_tm_blocks (gcc::context *ctxt)
{
  return new pass_diagnose_tm_blocks (ctxt);
}
\f
/* Instead of instrumenting thread private memory, we save the
   addresses in a log that we later use to save/restore them upon
   transaction start/restart.

   The log is keyed by address, where each element contains the
   individual statements, on different code paths, that perform the
   store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

     struct large { int x[1000]; };
     struct large lala = { 0 };
     __transaction {
       lala.x[i] = 123;
       ...
     }

   We can either save/restore:

     lala = { 0 };
     trxn = _ITM_startTransaction ();
     if (trxn & a_saveLiveVariables)
       tmp_lala1 = lala.x[i];
     else if (a & a_restoreLiveVariables)
       lala.x[i] = tmp_lala1;

   or use the logging functions:

     lala = { 0 };
     trxn = _ITM_startTransaction ();
     _ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree as possible, to shadow all of the writes
   to a given location (thus reducing the total number of logging
   calls), but not so high as to be called on a path that does not
   perform a write.  */

/* One individual log entry.  We may have multiple statements for the
   same location if neither dominates the other (on different
   execution paths).  */
typedef struct tm_log_entry
{
  /* Address to save.  */
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  vec<gimple> stmts;
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
  tree save_var;
} *tm_log_entry_t;


/* Log entry hashtable helpers.  */

struct log_entry_hasher
{
  typedef tm_log_entry value_type;
  typedef tm_log_entry compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};

/* Htab support.  Return hash value for a `tm_log_entry'.  */
inline hashval_t
log_entry_hasher::hash (const value_type *log)
{
  return iterative_hash_expr (log->addr, 0);
}

/* Htab support.  Return true if two log entries are the same.  */
inline bool
log_entry_hasher::equal (const value_type *log1, const compare_type *log2)
{
  /* FIXME:

     rth: I suggest that we get rid of the component refs etc.
     I.e. resolve the reference to base + offset.

     We may need to actually finish a merge with mainline for this,
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     often than not.  But in the meantime your tm_log_entry could save
     the results of get_inner_reference.

     See: g++.dg/tm/pr46653.C
  */

  /* Special case plain equality because operand_equal_p() below will
     return FALSE if the addresses are equal but they have
     side-effects (e.g. a volatile address).  */
  if (log1->addr == log2->addr)
    return true;

  return operand_equal_p (log1->addr, log2->addr, 0);
}

/* Htab support.  Free one tm_log_entry.  */
inline void
log_entry_hasher::remove (value_type *lp)
{
  lp->stmts.release ();
  free (lp);
}


/* The actual log.  */
static hash_table<log_entry_hasher> *tm_log;

/* Addresses to log with a save/restore sequence.  These should be in
   dominator order.  */
static vec<tree> tm_log_save_addresses;

enum thread_memory_type
  {
    mem_non_local = 0,
    mem_thread_local,
    mem_transaction_local,
    mem_max
  };

typedef struct tm_new_mem_map
{
  /* SSA_NAME being dereferenced.  */
  tree val;
  enum thread_memory_type local_new_memory;
} tm_new_mem_map_t;

/* Hashtable helpers.  */

struct tm_mem_map_hasher : typed_free_remove <tm_new_mem_map_t>
{
  typedef tm_new_mem_map_t value_type;
  typedef tm_new_mem_map_t compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
tm_mem_map_hasher::hash (const value_type *v)
{
  return (intptr_t)v->val >> 4;
}

inline bool
tm_mem_map_hasher::equal (const value_type *v, const compare_type *c)
{
  return v->val == c->val;
}

/* Map for an SSA_NAME originally pointing to a non-aliased new piece
   of memory (malloc, alloca, etc).  */
static hash_table<tm_mem_map_hasher> *tm_new_mem_hash;

/* Initialize logging data structures.  */
static void
tm_log_init (void)
{
  tm_log = new hash_table<log_entry_hasher> (10);
  tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5);
  tm_log_save_addresses.create (5);
}

/* Free logging data structures.  */
static void
tm_log_delete (void)
{
  delete tm_log;
  tm_log = NULL;
  delete tm_new_mem_hash;
  tm_new_mem_hash = NULL;
  tm_log_save_addresses.release ();
}

/* Return true if MEM is a transaction invariant memory for the TM
   region starting at REGION_ENTRY_BLOCK.  */
static bool
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
{
  if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
      && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
    {
      basic_block def_bb;

      def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
      return def_bb != region_entry_block
	&& dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
    }

  mem = strip_invariant_refs (mem);
  return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
}
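
/* For instance (a sketch, not from this file): a dereference *p_1 is
   transaction invariant when p_1's definition strictly dominates the
   region entry:

     p_1 = &lala;
     __transaction {   // entry block dominated by p_1's def
       *p_1 = 123;     // invariant; eligible for save/restore
     }
*/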

/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry_block for the transaction.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.

   If known, ENTRY_BLOCK is the entry block for the region, otherwise
   NULL.  */
static void
tm_log_add (basic_block entry_block, tree addr, gimple stmt)
{
  tm_log_entry **slot;
  struct tm_log_entry l, *lp;

  l.addr = addr;
  slot = tm_log->find_slot (&l, INSERT);
  if (!*slot)
    {
      tree type = TREE_TYPE (addr);

      lp = XNEW (struct tm_log_entry);
      lp->addr = addr;
      *slot = lp;

      /* Small invariant addresses can be handled as save/restores.  */
      if (entry_block
	  && transaction_invariant_address_p (lp->addr, entry_block)
	  && TYPE_SIZE_UNIT (type) != NULL
	  && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
	  && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
	      < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
	  /* We must be able to copy this type normally.  I.e., no
	     special constructors and the like.  */
	  && !TREE_ADDRESSABLE (type))
	{
	  lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
	  lp->stmts.create (0);
	  lp->entry_block = entry_block;
	  /* Save addresses separately in dominator order so we don't
	     get confused by overlapping addresses in the save/restore
	     sequence.  */
	  tm_log_save_addresses.safe_push (lp->addr);
	}
      else
	{
	  /* Use the logging functions.  */
	  lp->stmts.create (5);
	  lp->stmts.quick_push (stmt);
	  lp->save_var = NULL;
	}
    }
  else
    {
      size_t i;
      gimple oldstmt;

      lp = *slot;

      /* If we're generating a save/restore sequence, we don't care
	 about statements.  */
      if (lp->save_var)
	return;

      for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
	{
	  if (stmt == oldstmt)
	    return;
	  /* We already have a store to the same address, higher up the
	     dominator tree.  Nothing to do.  */
	  if (dominated_by_p (CDI_DOMINATORS,
			      gimple_bb (stmt), gimple_bb (oldstmt)))
	    return;
	  /* We should be processing blocks in dominator tree order.  */
	  gcc_assert (!dominated_by_p (CDI_DOMINATORS,
				       gimple_bb (oldstmt), gimple_bb (stmt)));
	}
      /* Store is on a different code path.  */
      lp->stmts.safe_push (stmt);
    }
}

/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
   result, insert the new statements before GSI.  */

static tree
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
{
  if (TREE_CODE (x) == TARGET_MEM_REF)
    x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
  else
    x = build_fold_addr_expr (x);
  return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
}

/* Instrument one address with the logging functions.
   ADDR is the address to save.
   STMT is the statement before which to place it.  */
static void
tm_log_emit_stmt (tree addr, gimple stmt)
{
  tree type = TREE_TYPE (addr);
  tree size = TYPE_SIZE_UNIT (type);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple log;
  enum built_in_function code = BUILT_IN_TM_LOG;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOG_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOG_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOG_LDOUBLE;
  else if (tree_fits_uhwi_p (size))
    {
      unsigned int n = tree_to_uhwi (size);
      switch (n)
	{
	case 1:
	  code = BUILT_IN_TM_LOG_1;
	  break;
	case 2:
	  code = BUILT_IN_TM_LOG_2;
	  break;
	case 4:
	  code = BUILT_IN_TM_LOG_4;
	  break;
	case 8:
	  code = BUILT_IN_TM_LOG_8;
	  break;
	default:
	  code = BUILT_IN_TM_LOG;
	  if (TREE_CODE (type) == VECTOR_TYPE)
	    {
	      if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
		code = BUILT_IN_TM_LOG_M64;
	      else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
		code = BUILT_IN_TM_LOG_M128;
	      else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
		code = BUILT_IN_TM_LOG_M256;
	    }
	  break;
	}
    }

  addr = gimplify_addr (&gsi, addr);
  if (code == BUILT_IN_TM_LOG)
    log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
  else
    log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
  gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}
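
/* Sketch of the dispatch above: a 4-byte scalar picks the sized entry
   point BUILT_IN_TM_LOG_4 and is emitted as a one-argument call, while
   an aggregate of unmatched size falls back to BUILT_IN_TM_LOG, emitted
   with both the address and the size.  The comment at the top of this
   section shows the corresponding libitm call, _ITM_LU4 (&lala.x[i]).  */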

/* Go through the log and instrument the addresses that must be
   instrumented with the logging functions.  Leave the save/restore
   addresses for later.  */
static void
tm_log_emit (void)
{
  hash_table<log_entry_hasher>::iterator hi;
  struct tm_log_entry *lp;

  FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi)
    {
      size_t i;
      gimple stmt;

      if (dump_file)
	{
	  fprintf (dump_file, "TM thread private mem logging: ");
	  print_generic_expr (dump_file, lp->addr, 0);
	  fprintf (dump_file, "\n");
	}

      if (lp->save_var)
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING to variable\n");
	  continue;
	}
      else
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING with logging functions\n");
	  for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
	    tm_log_emit_stmt (lp->addr, stmt);
	}
    }
}

/* Emit the save sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_saves (basic_block entry_block, basic_block bb)
{
  size_t i;
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple stmt;
  struct tm_log_entry l, *lp;

  for (i = 0; i < tm_log_save_addresses.length (); ++i)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));

      /* Make sure we can create an SSA_NAME for this type.  For
	 instance, aggregates aren't allowed, in which case the system
	 will create a VOP for us and everything will just work.  */
      if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
	{
	  lp->save_var = make_ssa_name (lp->save_var, stmt);
	  gimple_assign_set_lhs (stmt, lp->save_var);
	}

      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
}

/* Emit the restore sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_restores (basic_block entry_block, basic_block bb)
{
  int i;
  struct tm_log_entry l, *lp;
  gimple_stmt_iterator gsi;
  gimple stmt;

  for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      /* Restores are in LIFO order from the saves in case we have
	 overlaps.  */
      gsi = gsi_start_bb (bb);

      stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
}

\f
static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
			       struct walk_stmt_info *);
static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
				  struct walk_stmt_info *);

/* Evaluate an address X being dereferenced and determine if it
   originally points to a non aliased new chunk of memory (malloc,
   alloca, etc).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
{
  gimple stmt = NULL;
  enum tree_code code;
  tm_new_mem_map_t **slot;
  tm_new_mem_map_t elt, *elt_p;
  tree val = x;
  enum thread_memory_type retval = mem_transaction_local;

  if (!entry_block
      || TREE_CODE (x) != SSA_NAME
      /* Possible uninitialized use, or a function argument.  In
	 either case, we don't care.  */
      || SSA_NAME_IS_DEFAULT_DEF (x))
    return mem_non_local;

  /* Look in cache first.  */
  elt.val = x;
  slot = tm_new_mem_hash->find_slot (&elt, INSERT);
  elt_p = *slot;
  if (elt_p)
    return elt_p->local_new_memory;

  /* Optimistically assume the memory is transaction local during
     processing.  This catches recursion into this variable.  */
  *slot = elt_p = XNEW (tm_new_mem_map_t);
  elt_p->val = val;
  elt_p->local_new_memory = mem_transaction_local;

  /* Search DEF chain to find the original definition of this address.  */
  do
    {
      if (ptr_deref_may_alias_global_p (x))
	{
	  /* Address escapes.  This is not thread-private.  */
	  retval = mem_non_local;
	  goto new_memory_ret;
	}

      stmt = SSA_NAME_DEF_STMT (x);

      /* If the malloc call is outside the transaction, this is
	 thread-local.  */
      if (retval != mem_thread_local
	  && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
	retval = mem_thread_local;

      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  /* x = foo ==> foo */
	  if (code == SSA_NAME)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = foo + n ==> foo */
	  else if (code == POINTER_PLUS_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = (cast*) foo ==> foo */
	  else if (code == VIEW_CONVERT_EXPR || CONVERT_EXPR_CODE_P (code))
	    x = gimple_assign_rhs1 (stmt);
	  /* x = c ? op1 : op2 ==> op1 or op2, just like a PHI */
	  else if (code == COND_EXPR)
	    {
	      tree op1 = gimple_assign_rhs2 (stmt);
	      tree op2 = gimple_assign_rhs3 (stmt);
	      enum thread_memory_type mem;
	      retval = thread_private_new_memory (entry_block, op1);
	      if (retval == mem_non_local)
		goto new_memory_ret;
	      mem = thread_private_new_memory (entry_block, op2);
	      retval = MIN (retval, mem);
	      goto new_memory_ret;
	    }
	  else
	    {
	      retval = mem_non_local;
	      goto new_memory_ret;
	    }
	}
      else
	{
	  if (gimple_code (stmt) == GIMPLE_PHI)
	    {
	      unsigned int i;
	      enum thread_memory_type mem;
	      tree phi_result = gimple_phi_result (stmt);

	      /* If any of the ancestors are non-local, we are sure to
		 be non-local.  Otherwise we can avoid doing anything
		 and inherit what has already been generated.  */
	      retval = mem_max;
	      for (i = 0; i < gimple_phi_num_args (stmt); ++i)
		{
		  tree op = PHI_ARG_DEF (stmt, i);

		  /* Exclude self-assignment.  */
		  if (phi_result == op)
		    continue;

		  mem = thread_private_new_memory (entry_block, op);
		  if (mem == mem_non_local)
		    {
		      retval = mem;
		      goto new_memory_ret;
		    }
		  retval = MIN (retval, mem);
		}
	      goto new_memory_ret;
	    }
	  break;
	}
    }
  while (TREE_CODE (x) == SSA_NAME);

  if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
    /* Thread-local or transaction-local.  */
    ;
  else
    retval = mem_non_local;

 new_memory_ret:
  elt_p->local_new_memory = retval;
  return retval;
}
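
/* Worked example (a sketch, not from this file) of the DEF-chain walk
   above:

     p_1 = malloc (64);          // ECF_MALLOC call ends the walk
     q_2 = p_1 + 8;              // POINTER_PLUS_EXPR, walk to p_1
     *q_2 = 0;                   // dereference being classified

   If the malloc call is outside the transaction (its block is not
   dominated by the entry block), the answer is mem_thread_local;
   if it is inside the transaction, mem_transaction_local.  */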

/* Determine whether X has to be instrumented using a read
   or write barrier.

   ENTRY_BLOCK is the entry block for the region where STMT resides.
   NULL if unknown.

   STMT is the statement in which X occurs.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
static bool
requires_barrier (basic_block entry_block, tree x, gimple stmt)
{
  tree orig = x;
  while (handled_component_p (x))
    x = TREE_OPERAND (x, 0);

  switch (TREE_CODE (x))
    {
    case INDIRECT_REF:
    case MEM_REF:
      {
	enum thread_memory_type ret;

	ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
	if (ret == mem_non_local)
	  return true;
	if (stmt && ret == mem_thread_local)
	  /* ?? Should we pass `orig', or the INDIRECT_REF X. ?? */
	  tm_log_add (entry_block, orig, stmt);

	/* Transaction-locals require nothing at all.  For malloc, a
	   transaction restart frees the memory and we reallocate.
	   For alloca, the stack pointer gets reset by the retry and
	   we reallocate.  */
	return false;
      }

    case TARGET_MEM_REF:
      if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
	return true;
      x = TREE_OPERAND (TMR_BASE (x), 0);
      if (TREE_CODE (x) == PARM_DECL)
	return false;
      gcc_assert (TREE_CODE (x) == VAR_DECL);
      /* FALLTHRU */

    case PARM_DECL:
    case RESULT_DECL:
    case VAR_DECL:
      if (DECL_BY_REFERENCE (x))
	{
	  /* ??? This value is a pointer, but aggregate_value_p has been
	     jigged to return true, which confuses needs_to_live_in_memory.
	     This ought to be cleaned up generically.

	     FIXME: Verify this still happens after the next mainline
	     merge.  Testcase is g++.dg/tm/pr47554.C.
	  */
	  return false;
	}

      if (is_global_var (x))
	return !TREE_READONLY (x);
      if (/* FIXME: This condition should actually go below in the
	     tm_log_add() call, however is_call_clobbered() depends on
	     aliasing info which is not available during
	     gimplification.  Since requires_barrier() gets called
	     during lower_sequence_tm/gimplification, leave the call
	     to needs_to_live_in_memory until we eliminate
	     lower_sequence_tm altogether.  */
	  needs_to_live_in_memory (x))
	return true;
      else
	{
	  /* For local memory that doesn't escape (aka thread private
	     memory), we can either save the value at the beginning of
	     the transaction and restore on restart, or call a tm
	     function to dynamically save and restore on restart
	     (ITM_L*).  */
	  if (stmt)
	    tm_log_add (entry_block, orig, stmt);
	  return false;
	}

    default:
      return false;
    }
}
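
/* Illustrative sketch (not from this file) of what the predicate above
   decides inside a transaction:

     int g;                      // global: needs TM load/store barriers
     __transaction {
       int l = 0;                // local, non-escaping: logged or
       g = l;                    //   save/restored instead of barriers
     }
*/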

/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
   a transaction region.  */

static void
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
    *state |= GTMA_HAVE_LOAD;
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
    *state |= GTMA_HAVE_STORE;
}

/* Mark a GIMPLE_CALL as appropriate for being inside a transaction.  */

static void
examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree fn;

  if (is_tm_pure_call (stmt))
    return;

  /* Check if this call is a transaction abort.  */
  fn = gimple_call_fndecl (stmt);
  if (is_tm_abort (fn))
    *state |= GTMA_HAVE_ABORT;

  /* Note that something may happen.  */
  *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
}

/* Lower a GIMPLE_TRANSACTION statement.  */

static void
lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
{
  gimple g;
  gtransaction *stmt = as_a <gtransaction *> (gsi_stmt (*gsi));
  unsigned int *outer_state = (unsigned int *) wi->info;
  unsigned int this_state = 0;
  struct walk_stmt_info this_wi;

  /* First, lower the body.  The scanning that we do inside gives
     us some idea of what we're dealing with.  */
  memset (&this_wi, 0, sizeof (this_wi));
  this_wi.info = (void *) &this_state;
  walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
		       lower_sequence_tm, NULL, &this_wi);

  /* If there was absolutely nothing transaction related inside the
     transaction, we may elide it.  Likewise if this is a nested
     transaction and does not contain an abort.  */
  if (this_state == 0
      || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
    {
      if (outer_state)
	*outer_state |= this_state;

      gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
			     GSI_SAME_STMT);
      gimple_transaction_set_body (stmt, NULL);

      gsi_remove (gsi, true);
      wi->removed_stmt = true;
      return;
    }

  /* Wrap the body of the transaction in a try-finally node so that
     the commit call is always properly called.  */
  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
  if (flag_exceptions)
    {
      tree ptr;
      gimple_seq n_seq, e_seq;

      n_seq = gimple_seq_alloc_with_stmt (g);
      e_seq = NULL;

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
			     1, integer_zero_node);
      ptr = create_tmp_var (ptr_type_node);
      gimple_call_set_lhs (g, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
			     1, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_eh_else (n_seq, e_seq);
    }
1688 | ||
1689 | g = gimple_build_try (gimple_transaction_body (stmt), | |
1690 | gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY); | |
1691 | gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING); | |
1692 | ||
1693 | gimple_transaction_set_body (stmt, NULL); | |
1694 | ||
1695 | /* If the transaction calls abort or if this is an outer transaction, | |
1696 | add an "over" label afterwards. */ | |
1697 | if ((this_state & (GTMA_HAVE_ABORT)) | |
9af5ce0c | 1698 | || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)) |
4c0315d0 | 1699 | { |
1700 | tree label = create_artificial_label (UNKNOWN_LOCATION); | |
1701 | gimple_transaction_set_label (stmt, label); | |
1702 | gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING); | |
1703 | } | |
1704 | ||
1705 | /* Record the set of operations found for use later. */ | |
1706 | this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK; | |
1707 | gimple_transaction_set_subcode (stmt, this_state); | |
1708 | } | |
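/* A minimal sketch (assuming -fexceptions) of the shape produced above for
   a transaction that cannot be elided; the builtin names follow the libitm
   ABI behind BUILT_IN_TM_COMMIT and BUILT_IN_TM_COMMIT_EH:

     GIMPLE_TRANSACTION (label=<over>)    // body moved out, set to NULL
     try
       {
         <transaction body>
       }
     finally
       {
         eh_else
           { __builtin__ITM_commitTransaction (); }
           { ptr = __builtin_eh_pointer (0);
             __builtin__ITM_commitTransactionEH (ptr); }
       }
     <over>:                              // only for aborts/outer txns
*/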
1709 | ||
1710 | /* Iterate through the statements in the sequence, lowering them all | |
1711 | as appropriate for being in a transaction. */ | |
1712 | ||
1713 | static tree | |
1714 | lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p, | |
1715 | struct walk_stmt_info *wi) | |
1716 | { | |
1717 | unsigned int *state = (unsigned int *) wi->info; | |
1718 | gimple stmt = gsi_stmt (*gsi); | |
1719 | ||
1720 | *handled_ops_p = true; | |
1721 | switch (gimple_code (stmt)) | |
1722 | { | |
1723 | case GIMPLE_ASSIGN: | |
1724 | /* Only memory reads/writes need to be instrumented. */ | |
1725 | if (gimple_assign_single_p (stmt)) | |
1726 | examine_assign_tm (state, gsi); | |
1727 | break; | |
1728 | ||
1729 | case GIMPLE_CALL: | |
1730 | examine_call_tm (state, gsi); | |
1731 | break; | |
1732 | ||
1733 | case GIMPLE_ASM: | |
1734 | *state |= GTMA_MAY_ENTER_IRREVOCABLE; | |
1735 | break; | |
1736 | ||
1737 | case GIMPLE_TRANSACTION: | |
1738 | lower_transaction (gsi, wi); | |
1739 | break; | |
1740 | ||
1741 | default: | |
1742 | *handled_ops_p = !gimple_has_substatements (stmt); | |
1743 | break; | |
1744 | } | |
1745 | ||
1746 | return NULL_TREE; | |
1747 | } | |
1748 | ||
1749 | /* Iterate through the statements in the sequence, lowering them all | |
1750 | as appropriate for being outside of a transaction. */ | |
1751 | ||
1752 | static tree | |
1753 | lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p, | |
1754 | struct walk_stmt_info * wi) | |
1755 | { | |
1756 | gimple stmt = gsi_stmt (*gsi); | |
1757 | ||
1758 | if (gimple_code (stmt) == GIMPLE_TRANSACTION) | |
1759 | { | |
1760 | *handled_ops_p = true; | |
1761 | lower_transaction (gsi, wi); | |
1762 | } | |
1763 | else | |
1764 | *handled_ops_p = !gimple_has_substatements (stmt); | |
1765 | ||
1766 | return NULL_TREE; | |
1767 | } | |
1768 | ||
1769 | /* Main entry point for flattening GIMPLE_TRANSACTION constructs. After | |
1770 | this, GIMPLE_TRANSACTION nodes still exist, but the nested body has | |
1771 | been moved out, and all the data required for constructing a proper | |
1772 | CFG has been recorded. */ | |
1773 | ||
1774 | static unsigned int | |
1775 | execute_lower_tm (void) | |
1776 | { | |
1777 | struct walk_stmt_info wi; | |
e3a19533 | 1778 | gimple_seq body; |
4c0315d0 | 1779 | |
1780 | /* Transactional clones aren't created until a later pass. */ | |
1781 | gcc_assert (!decl_is_tm_clone (current_function_decl)); | |
1782 | ||
e3a19533 | 1783 | body = gimple_body (current_function_decl); |
4c0315d0 | 1784 | memset (&wi, 0, sizeof (wi)); |
e3a19533 | 1785 | walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi); |
1786 | gimple_set_body (current_function_decl, body); | |
4c0315d0 | 1787 | |
1788 | return 0; | |
1789 | } | |
1790 | ||
cbe8bda8 | 1791 | namespace { |
1792 | ||
1793 | const pass_data pass_data_lower_tm = | |
1794 | { | |
1795 | GIMPLE_PASS, /* type */ | |
1796 | "tmlower", /* name */ | |
1797 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 1798 | TV_TRANS_MEM, /* tv_id */ |
1799 | PROP_gimple_lcf, /* properties_required */ | |
1800 | 0, /* properties_provided */ | |
1801 | 0, /* properties_destroyed */ | |
1802 | 0, /* todo_flags_start */ | |
1803 | 0, /* todo_flags_finish */ | |
4c0315d0 | 1804 | }; |
cbe8bda8 | 1805 | |
1806 | class pass_lower_tm : public gimple_opt_pass | |
1807 | { | |
1808 | public: | |
9af5ce0c | 1809 | pass_lower_tm (gcc::context *ctxt) |
1810 | : gimple_opt_pass (pass_data_lower_tm, ctxt) | |
cbe8bda8 | 1811 | {} |
1812 | ||
1813 | /* opt_pass methods: */ | |
31315c24 | 1814 | virtual bool gate (function *) { return flag_tm; } |
65b0537f | 1815 | virtual unsigned int execute (function *) { return execute_lower_tm (); } |
cbe8bda8 | 1816 | |
1817 | }; // class pass_lower_tm | |
1818 | ||
1819 | } // anon namespace | |
1820 | ||
1821 | gimple_opt_pass * | |
1822 | make_pass_lower_tm (gcc::context *ctxt) | |
1823 | { | |
1824 | return new pass_lower_tm (ctxt); | |
1825 | } | |
4c0315d0 | 1826 | \f |
1827 | /* Collect region information for each transaction. */ | |
1828 | ||
1829 | struct tm_region | |
1830 | { | |
1a91d914 | 1831 | public: |
1832 | ||
1833 | /* The field "transaction_stmt" is initially a gtransaction *, | |
1834 | but eventually gets lowered to a gcall * (to BUILT_IN_TM_START). | |
1835 | ||
1836 | Helper method to get it as a gtransaction *, with code-checking | |
1837 | in a checked-build. */ | |
1838 | ||
1839 | gtransaction * | |
1840 | get_transaction_stmt () const | |
1841 | { | |
1842 | return as_a <gtransaction *> (transaction_stmt); | |
1843 | } | |
1844 | ||
1845 | public: | |
1846 | ||
4c0315d0 | 1847 | /* Link to the next unnested transaction. */ |
1848 | struct tm_region *next; | |
1849 | ||
1850 | /* Link to the next inner transaction. */ | |
1851 | struct tm_region *inner; | |
1852 | ||
1853 | /* Link to the next outer transaction. */ | |
1854 | struct tm_region *outer; | |
1855 | ||
0cd02a19 | 1856 | /* The GIMPLE_TRANSACTION statement beginning this transaction. |
1857 | After TM_MARK, this gets replaced by a call to | |
1a91d914 | 1858 | BUILT_IN_TM_START. |
1859 | Hence this will be either a gtransaction * or a gcall *. */ | |
4c0315d0 | 1860 | gimple transaction_stmt; |
1861 | ||
0cd02a19 | 1862 | /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to |
1863 | BUILT_IN_TM_START, this field is true if the transaction is an | |
1864 | outer transaction. */ | |
1865 | bool original_transaction_was_outer; | |
1866 | ||
1867 | /* Return value from BUILT_IN_TM_START. */ | |
1868 | tree tm_state; | |
1869 | ||
1870 | /* The entry block to this region. This will always be the first | |
1871 | block of the body of the transaction. */ | |
4c0315d0 | 1872 | basic_block entry_block; |
1873 | ||
0cd02a19 | 1874 | /* The first block after an expanded call to _ITM_beginTransaction. */ |
1875 | basic_block restart_block; | |
1876 | ||
4c0315d0 | 1877 | /* The set of all blocks that end the region; NULL if only EXIT_BLOCK. |
1878 | These blocks are still a part of the region (i.e., the border is | |
1879 | inclusive). Note that this set is only complete for paths in the CFG | |
1880 | starting at ENTRY_BLOCK, and that there is no exit block recorded for | |
1881 | the edge to the "over" label. */ | |
1882 | bitmap exit_blocks; | |
1883 | ||
1884 | /* The set of all blocks that have a TM_IRREVOCABLE call. */ | |
1885 | bitmap irr_blocks; | |
1886 | }; | |
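/* A small illustration (a sketch, not a real dump) of how the links above
   encode nesting:

     __transaction_atomic {          // region A, A->outer == NULL
       __transaction_atomic { ... }  // region B, B->outer == A
       __transaction_atomic { ... }  // region C, C->outer == A, C->next == B
     }

   Siblings are chained through NEXT, so after scanning, A->inner == C and
   all_tm_regions points at the chain of outermost regions.  */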
1887 | ||
5ab54790 | 1888 | typedef struct tm_region *tm_region_p; |
5ab54790 | 1889 | |
4c0315d0 | 1890 | /* True if there are pending edge statements to be committed for the |
1891 | current function being scanned in the tmmark pass. */ | |
1892 | bool pending_edge_inserts_p; | |
1893 | ||
1894 | static struct tm_region *all_tm_regions; | |
1895 | static bitmap_obstack tm_obstack; | |
1896 | ||
1897 | ||
9d75589a | 1898 | /* A subroutine of tm_region_init. Record the existence of the |
4c0315d0 | 1899 | GIMPLE_TRANSACTION statement in a tree of tm_region elements. */ |
1900 | ||
1901 | static struct tm_region * | |
1a91d914 | 1902 | tm_region_init_0 (struct tm_region *outer, basic_block bb, |
1903 | gtransaction *stmt) | |
4c0315d0 | 1904 | { |
1905 | struct tm_region *region; | |
1906 | ||
1907 | region = (struct tm_region *) | |
1908 | obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region)); | |
1909 | ||
1910 | if (outer) | |
1911 | { | |
1912 | region->next = outer->inner; | |
1913 | outer->inner = region; | |
1914 | } | |
1915 | else | |
1916 | { | |
1917 | region->next = all_tm_regions; | |
1918 | all_tm_regions = region; | |
1919 | } | |
1920 | region->inner = NULL; | |
1921 | region->outer = outer; | |
1922 | ||
1923 | region->transaction_stmt = stmt; | |
0cd02a19 | 1924 | region->original_transaction_was_outer = false; |
1925 | region->tm_state = NULL; | |
4c0315d0 | 1926 | |
1927 | /* There are either one or two edges out of the block containing | |
1928 | the GIMPLE_TRANSACTION, one to the actual region and one to the | |
1929 | "over" label if the region contains an abort. The former will | |
1930 | always be the one marked FALLTHRU. */ | |
1931 | region->entry_block = FALLTHRU_EDGE (bb)->dest; | |
1932 | ||
1933 | region->exit_blocks = BITMAP_ALLOC (&tm_obstack); | |
1934 | region->irr_blocks = BITMAP_ALLOC (&tm_obstack); | |
1935 | ||
1936 | return region; | |
1937 | } | |
1938 | ||
1939 | /* A subroutine of tm_region_init. Record all the exit and | |
1940 | irrevocable blocks in BB into the region's exit_blocks and | |
1941 | irr_blocks bitmaps. Returns the new region being scanned. */ | |
1942 | ||
1943 | static struct tm_region * | |
1944 | tm_region_init_1 (struct tm_region *region, basic_block bb) | |
1945 | { | |
1946 | gimple_stmt_iterator gsi; | |
1947 | gimple g; | |
1948 | ||
1949 | if (!region | |
1950 | || (!region->irr_blocks && !region->exit_blocks)) | |
1951 | return region; | |
1952 | ||
1953 | /* Check to see if this is the end of a region by seeing if it | |
1954 | contains a call to __builtin_tm_commit{,_eh}. Note that the | |
1955 | outermost region for DECL_IS_TM_CLONE need not collect this. */ | |
1956 | for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi)) | |
1957 | { | |
1958 | g = gsi_stmt (gsi); | |
1959 | if (gimple_code (g) == GIMPLE_CALL) | |
1960 | { | |
1961 | tree fn = gimple_call_fndecl (g); | |
1962 | if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL) | |
1963 | { | |
1964 | if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT | |
1965 | || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH) | |
1966 | && region->exit_blocks) | |
1967 | { | |
1968 | bitmap_set_bit (region->exit_blocks, bb->index); | |
1969 | region = region->outer; | |
1970 | break; | |
1971 | } | |
1972 | if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE) | |
1973 | bitmap_set_bit (region->irr_blocks, bb->index); | |
1974 | } | |
1975 | } | |
1976 | } | |
1977 | return region; | |
1978 | } | |
1979 | ||
1980 | /* Collect all of the transaction regions within the current function | |
1981 | and record them in ALL_TM_REGIONS. The REGION parameter may specify | |
1982 | an "outermost" region for use by tm clones. */ | |
1983 | ||
1984 | static void | |
1985 | tm_region_init (struct tm_region *region) | |
1986 | { | |
1987 | gimple g; | |
1988 | edge_iterator ei; | |
1989 | edge e; | |
1990 | basic_block bb; | |
c2078b80 | 1991 | auto_vec<basic_block> queue; |
4c0315d0 | 1992 | bitmap visited_blocks = BITMAP_ALLOC (NULL); |
1993 | struct tm_region *old_region; | |
c2078b80 | 1994 | auto_vec<tm_region_p> bb_regions; |
4c0315d0 | 1995 | |
1996 | all_tm_regions = region; | |
34154e27 | 1997 | bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); |
4c0315d0 | 1998 | |
de60f90c | 1999 | /* We could store this information in bb->aux, but we may get called |
2000 | through get_all_tm_blocks() from another pass that may already | |
2001 | be using bb->aux. */ | |
fe672ac0 | 2002 | bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun)); |
de60f90c | 2003 | |
f1f41a6c | 2004 | queue.safe_push (bb); |
2005 | bb_regions[bb->index] = region; | |
4c0315d0 | 2006 | do |
2007 | { | |
f1f41a6c | 2008 | bb = queue.pop (); |
2009 | region = bb_regions[bb->index]; | |
2010 | bb_regions[bb->index] = NULL; | |
4c0315d0 | 2011 | |
2012 | /* Record exit and irrevocable blocks. */ | |
2013 | region = tm_region_init_1 (region, bb); | |
2014 | ||
2015 | /* Check for the last statement in the block beginning a new region. */ | |
2016 | g = last_stmt (bb); | |
2017 | old_region = region; | |
1a91d914 | 2018 | if (g) |
2019 | if (gtransaction *trans_stmt = dyn_cast <gtransaction *> (g)) | |
2020 | region = tm_region_init_0 (region, bb, trans_stmt); | |
4c0315d0 | 2021 | |
2022 | /* Process subsequent blocks. */ | |
2023 | FOR_EACH_EDGE (e, ei, bb->succs) | |
2024 | if (!bitmap_bit_p (visited_blocks, e->dest->index)) | |
2025 | { | |
2026 | bitmap_set_bit (visited_blocks, e->dest->index); | |
f1f41a6c | 2027 | queue.safe_push (e->dest); |
4c0315d0 | 2028 | |
2029 | /* If the current block started a new region, make sure that only | |
2030 | the entry block of the new region is associated with this region. | |
2031 | Other successors are still part of the old region. */ | |
2032 | if (old_region != region && e->dest != region->entry_block) | |
f1f41a6c | 2033 | bb_regions[e->dest->index] = old_region; |
4c0315d0 | 2034 | else |
f1f41a6c | 2035 | bb_regions[e->dest->index] = region; |
4c0315d0 | 2036 | } |
2037 | } | |
f1f41a6c | 2038 | while (!queue.is_empty ()); |
4c0315d0 | 2039 | BITMAP_FREE (visited_blocks); |
2040 | } | |
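/* A condensed sketch of the worklist traversal above (pseudo-code, for
   orientation only):

     queue = { single_succ (ENTRY_BLOCK) }
     while queue not empty:
       bb = pop (queue)
       region = bb_regions[bb->index]
       region = tm_region_init_1 (region, bb)         // note exit/irr blocks
       if bb ends in a GIMPLE_TRANSACTION:
         region = tm_region_init_0 (region, bb, stmt)  // open a new region
       push unvisited successors, recording their region in bb_regions  */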
2041 | ||
2042 | /* The "gate" function for all transactional memory expansion and optimization | |
2043 | passes. We collect region information for each top-level transaction, and | |
2044 | if we don't find any, we skip all of the TM passes. Each region will have | |
2045 | all of the exit blocks recorded, and the originating statement. */ | |
2046 | ||
2047 | static bool | |
2048 | gate_tm_init (void) | |
2049 | { | |
2050 | if (!flag_tm) | |
2051 | return false; | |
2052 | ||
2053 | calculate_dominance_info (CDI_DOMINATORS); | |
2054 | bitmap_obstack_initialize (&tm_obstack); | |
2055 | ||
2056 | /* If the function is a TM_CLONE, then the entire function is the region. */ | |
2057 | if (decl_is_tm_clone (current_function_decl)) | |
2058 | { | |
2059 | struct tm_region *region = (struct tm_region *) | |
2060 | obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region)); | |
2061 | memset (region, 0, sizeof (*region)); | |
34154e27 | 2062 | region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); |
4c0315d0 | 2063 | /* For a clone, the entire function is the region. But even if |
2064 | we don't need to record any exit blocks, we may need to | |
2065 | record irrevocable blocks. */ | |
2066 | region->irr_blocks = BITMAP_ALLOC (&tm_obstack); | |
2067 | ||
2068 | tm_region_init (region); | |
2069 | } | |
2070 | else | |
2071 | { | |
2072 | tm_region_init (NULL); | |
2073 | ||
2074 | /* If we didn't find any regions, cleanup and skip the whole tree | |
2075 | of tm-related optimizations. */ | |
2076 | if (all_tm_regions == NULL) | |
2077 | { | |
2078 | bitmap_obstack_release (&tm_obstack); | |
2079 | return false; | |
2080 | } | |
2081 | } | |
2082 | ||
2083 | return true; | |
2084 | } | |
2085 | ||
cbe8bda8 | 2086 | namespace { |
2087 | ||
2088 | const pass_data pass_data_tm_init = | |
2089 | { | |
2090 | GIMPLE_PASS, /* type */ | |
2091 | "*tminit", /* name */ | |
2092 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 2093 | TV_TRANS_MEM, /* tv_id */ |
2094 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
2095 | 0, /* properties_provided */ | |
2096 | 0, /* properties_destroyed */ | |
2097 | 0, /* todo_flags_start */ | |
2098 | 0, /* todo_flags_finish */ | |
4c0315d0 | 2099 | }; |
cbe8bda8 | 2100 | |
2101 | class pass_tm_init : public gimple_opt_pass | |
2102 | { | |
2103 | public: | |
9af5ce0c | 2104 | pass_tm_init (gcc::context *ctxt) |
2105 | : gimple_opt_pass (pass_data_tm_init, ctxt) | |
cbe8bda8 | 2106 | {} |
2107 | ||
2108 | /* opt_pass methods: */ | |
31315c24 | 2109 | virtual bool gate (function *) { return gate_tm_init (); } |
cbe8bda8 | 2110 | |
2111 | }; // class pass_tm_init | |
2112 | ||
2113 | } // anon namespace | |
2114 | ||
2115 | gimple_opt_pass * | |
2116 | make_pass_tm_init (gcc::context *ctxt) | |
2117 | { | |
2118 | return new pass_tm_init (ctxt); | |
2119 | } | |
4c0315d0 | 2120 | \f |
2121 | /* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region | |
2122 | represented by REGION. */ | |
2123 | ||
2124 | static inline void | |
2125 | transaction_subcode_ior (struct tm_region *region, unsigned flags) | |
2126 | { | |
2127 | if (region && region->transaction_stmt) | |
2128 | { | |
1a91d914 | 2129 | gtransaction *transaction_stmt = region->get_transaction_stmt (); |
2130 | flags |= gimple_transaction_subcode (transaction_stmt); | |
2131 | gimple_transaction_set_subcode (transaction_stmt, flags); | |
4c0315d0 | 2132 | } |
2133 | } | |
2134 | ||
2135 | /* Construct a memory load in a transactional context. Return the | |
2136 | gimple statement performing the load, or NULL if there is no | |
2137 | TM_LOAD builtin of the appropriate size to do the load. | |
2138 | ||
2139 | LOC is the location to use for the new statement(s). */ | |
2140 | ||
1a91d914 | 2141 | static gcall * |
4c0315d0 | 2142 | build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi) |
2143 | { | |
2144 | enum built_in_function code = END_BUILTINS; | |
2145 | tree t, type = TREE_TYPE (rhs), decl; | |
1a91d914 | 2146 | gcall *gcall; |
4c0315d0 | 2147 | |
2148 | if (type == float_type_node) | |
2149 | code = BUILT_IN_TM_LOAD_FLOAT; | |
2150 | else if (type == double_type_node) | |
2151 | code = BUILT_IN_TM_LOAD_DOUBLE; | |
2152 | else if (type == long_double_type_node) | |
2153 | code = BUILT_IN_TM_LOAD_LDOUBLE; | |
2154 | else if (TYPE_SIZE_UNIT (type) != NULL | |
cd4547bf | 2155 | && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) |
4c0315d0 | 2156 | { |
6a0712d4 | 2157 | switch (tree_to_uhwi (TYPE_SIZE_UNIT (type))) |
4c0315d0 | 2158 | { |
2159 | case 1: | |
2160 | code = BUILT_IN_TM_LOAD_1; | |
2161 | break; | |
2162 | case 2: | |
2163 | code = BUILT_IN_TM_LOAD_2; | |
2164 | break; | |
2165 | case 4: | |
2166 | code = BUILT_IN_TM_LOAD_4; | |
2167 | break; | |
2168 | case 8: | |
2169 | code = BUILT_IN_TM_LOAD_8; | |
2170 | break; | |
2171 | } | |
2172 | } | |
2173 | ||
2174 | if (code == END_BUILTINS) | |
2175 | { | |
2176 | decl = targetm.vectorize.builtin_tm_load (type); | |
2177 | if (!decl) | |
2178 | return NULL; | |
2179 | } | |
2180 | else | |
2181 | decl = builtin_decl_explicit (code); | |
2182 | ||
2183 | t = gimplify_addr (gsi, rhs); | |
2184 | gcall = gimple_build_call (decl, 1, t); | |
2185 | gimple_set_location (gcall, loc); | |
2186 | ||
2187 | t = TREE_TYPE (TREE_TYPE (decl)); | |
2188 | if (useless_type_conversion_p (type, t)) | |
2189 | { | |
2190 | gimple_call_set_lhs (gcall, lhs); | |
2191 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2192 | } | |
2193 | else | |
2194 | { | |
2195 | gimple g; | |
2196 | tree temp; | |
2197 | ||
f9e245b2 | 2198 | temp = create_tmp_reg (t); |
4c0315d0 | 2199 | gimple_call_set_lhs (gcall, temp); |
2200 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2201 | ||
2202 | t = fold_build1 (VIEW_CONVERT_EXPR, type, temp); | |
2203 | g = gimple_build_assign (lhs, t); | |
2204 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
2205 | } | |
2206 | ||
2207 | return gcall; | |
2208 | } | |
2209 | ||
2210 | ||
2211 | /* Similarly for storing TYPE in a transactional context. */ | |
2212 | ||
1a91d914 | 2213 | static gcall * |
4c0315d0 | 2214 | build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi) |
2215 | { | |
2216 | enum built_in_function code = END_BUILTINS; | |
2217 | tree t, fn, type = TREE_TYPE (rhs), simple_type; | |
1a91d914 | 2218 | gcall *gcall; |
4c0315d0 | 2219 | |
2220 | if (type == float_type_node) | |
2221 | code = BUILT_IN_TM_STORE_FLOAT; | |
2222 | else if (type == double_type_node) | |
2223 | code = BUILT_IN_TM_STORE_DOUBLE; | |
2224 | else if (type == long_double_type_node) | |
2225 | code = BUILT_IN_TM_STORE_LDOUBLE; | |
2226 | else if (TYPE_SIZE_UNIT (type) != NULL | |
cd4547bf | 2227 | && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) |
4c0315d0 | 2228 | { |
6a0712d4 | 2229 | switch (tree_to_uhwi (TYPE_SIZE_UNIT (type))) |
4c0315d0 | 2230 | { |
2231 | case 1: | |
2232 | code = BUILT_IN_TM_STORE_1; | |
2233 | break; | |
2234 | case 2: | |
2235 | code = BUILT_IN_TM_STORE_2; | |
2236 | break; | |
2237 | case 4: | |
2238 | code = BUILT_IN_TM_STORE_4; | |
2239 | break; | |
2240 | case 8: | |
2241 | code = BUILT_IN_TM_STORE_8; | |
2242 | break; | |
2243 | } | |
2244 | } | |
2245 | ||
2246 | if (code == END_BUILTINS) | |
2247 | { | |
2248 | fn = targetm.vectorize.builtin_tm_store (type); | |
2249 | if (!fn) | |
2250 | return NULL; | |
2251 | } | |
2252 | else | |
2253 | fn = builtin_decl_explicit (code); | |
2254 | ||
2255 | simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn)))); | |
2256 | ||
2257 | if (TREE_CODE (rhs) == CONSTRUCTOR) | |
2258 | { | |
2259 | /* Handle the easy initialization to zero. */ | |
f1f41a6c | 2260 | if (!CONSTRUCTOR_ELTS (rhs)) |
4c0315d0 | 2261 | rhs = build_int_cst (simple_type, 0); |
2262 | else | |
2263 | { | |
2264 | /* ...otherwise punt to the caller and probably use | |
2265 | BUILT_IN_TM_MEMMOVE, because we can't wrap a | |
2266 | VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce | |
2267 | valid gimple. */ | |
2268 | return NULL; | |
2269 | } | |
2270 | } | |
2271 | else if (!useless_type_conversion_p (simple_type, type)) | |
2272 | { | |
2273 | gimple g; | |
2274 | tree temp; | |
2275 | ||
f9e245b2 | 2276 | temp = create_tmp_reg (simple_type); |
4c0315d0 | 2277 | t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs); |
2278 | g = gimple_build_assign (temp, t); | |
2279 | gimple_set_location (g, loc); | |
2280 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
2281 | ||
2282 | rhs = temp; | |
2283 | } | |
2284 | ||
2285 | t = gimplify_addr (gsi, lhs); | |
2286 | gcall = gimple_build_call (fn, 2, t, rhs); | |
2287 | gimple_set_location (gcall, loc); | |
2288 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2289 | ||
2290 | return gcall; | |
2291 | } | |
2292 | ||
2293 | ||
2294 | /* Expand an assignment statement into transactional builtins. */ | |
2295 | ||
2296 | static void | |
2297 | expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi) | |
2298 | { | |
2299 | gimple stmt = gsi_stmt (*gsi); | |
2300 | location_t loc = gimple_location (stmt); | |
2301 | tree lhs = gimple_assign_lhs (stmt); | |
2302 | tree rhs = gimple_assign_rhs1 (stmt); | |
2303 | bool store_p = requires_barrier (region->entry_block, lhs, NULL); | |
2304 | bool load_p = requires_barrier (region->entry_block, rhs, NULL); | |
2305 | gimple gcall = NULL; | |
2306 | ||
2307 | if (!load_p && !store_p) | |
2308 | { | |
2309 | /* Add thread private addresses to log if applicable. */ | |
2310 | requires_barrier (region->entry_block, lhs, stmt); | |
2311 | gsi_next (gsi); | |
2312 | return; | |
2313 | } | |
2314 | ||
0cd02a19 | 2315 | // Remove original load/store statement. |
4c0315d0 | 2316 | gsi_remove (gsi, true); |
2317 | ||
2318 | if (load_p && !store_p) | |
2319 | { | |
2320 | transaction_subcode_ior (region, GTMA_HAVE_LOAD); | |
2321 | gcall = build_tm_load (loc, lhs, rhs, gsi); | |
2322 | } | |
2323 | else if (store_p && !load_p) | |
2324 | { | |
2325 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2326 | gcall = build_tm_store (loc, lhs, rhs, gsi); | |
2327 | } | |
2328 | if (!gcall) | |
2329 | { | |
ea580cf7 | 2330 | tree lhs_addr, rhs_addr, tmp; |
4c0315d0 | 2331 | |
2332 | if (load_p) | |
2333 | transaction_subcode_ior (region, GTMA_HAVE_LOAD); | |
2334 | if (store_p) | |
2335 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2336 | ||
2337 | /* ??? Figure out if there's any possible overlap between the LHS | |
2338 | and the RHS and if not, use MEMCPY. */ | |
ea580cf7 | 2339 | |
28098b5b | 2340 | if (load_p && is_gimple_reg (lhs)) |
ea580cf7 | 2341 | { |
f9e245b2 | 2342 | tmp = create_tmp_var (TREE_TYPE (lhs)); |
ea580cf7 | 2343 | lhs_addr = build_fold_addr_expr (tmp); |
2344 | } | |
2345 | else | |
2346 | { | |
2347 | tmp = NULL_TREE; | |
2348 | lhs_addr = gimplify_addr (gsi, lhs); | |
2349 | } | |
4c0315d0 | 2350 | rhs_addr = gimplify_addr (gsi, rhs); |
2351 | gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE), | |
2352 | 3, lhs_addr, rhs_addr, | |
2353 | TYPE_SIZE_UNIT (TREE_TYPE (lhs))); | |
2354 | gimple_set_location (gcall, loc); | |
2355 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
ea580cf7 | 2356 | |
2357 | if (tmp) | |
2358 | { | |
2359 | gcall = gimple_build_assign (lhs, tmp); | |
2360 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2361 | } | |
4c0315d0 | 2362 | } |
2363 | ||
2364 | /* Now that we have the load/store in its instrumented form, add | |
2365 | thread private addresses to the log if applicable. */ | |
2366 | if (!store_p) | |
2367 | requires_barrier (region->entry_block, lhs, gcall); | |
2368 | ||
0cd02a19 | 2369 | // The calls to build_tm_{store,load} above inserted the instrumented |
2370 | // call into the stream. | |
2371 | // gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
4c0315d0 | 2372 | } |
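/* A sketch of what the expansion above does to simple assignments inside a
   transaction, assuming 4-byte ints and the usual libitm builtin names
   (illustrative only):

     local = g;   // only the RHS needs a barrier
                  //   =>  local = __builtin__ITM_RU4 (&g);
     g = local;   // only the LHS needs a barrier
                  //   =>  __builtin__ITM_WU4 (&g, local);
     g1 = g2;     // both sides need barriers: no single builtin applies,
                  //   =>  __builtin__ITM_memmoveRtWt (&g1, &g2, 4);
*/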
2373 | ||
2374 | ||
2375 | /* Expand a call statement as appropriate for a transaction. That is, | |
2376 | either verify that the call does not affect the transaction, or | |
2377 | redirect the call to a clone that handles transactions, or change | |
2378 | the transaction state to IRREVOCABLE. Return true if the call is | |
2379 | one of the builtins that end a transaction. */ | |
2380 | ||
2381 | static bool | |
2382 | expand_call_tm (struct tm_region *region, | |
2383 | gimple_stmt_iterator *gsi) | |
2384 | { | |
1a91d914 | 2385 | gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi)); |
4c0315d0 | 2386 | tree lhs = gimple_call_lhs (stmt); |
2387 | tree fn_decl; | |
2388 | struct cgraph_node *node; | |
2389 | bool retval = false; | |
2390 | ||
2391 | fn_decl = gimple_call_fndecl (stmt); | |
2392 | ||
2393 | if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY) | |
2394 | || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE)) | |
2395 | transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD); | |
2396 | if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET)) | |
2397 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2398 | ||
2399 | if (is_tm_pure_call (stmt)) | |
2400 | return false; | |
2401 | ||
2402 | if (fn_decl) | |
2403 | retval = is_tm_ending_fndecl (fn_decl); | |
2404 | if (!retval) | |
2405 | { | |
2406 | /* Assume all non-const/pure calls write to memory, except | |
2407 | transaction ending builtins. */ | |
2408 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2409 | } | |
2410 | ||
2411 | /* For indirect calls, we already generated a call into the runtime. */ | |
2412 | if (!fn_decl) | |
2413 | { | |
2414 | tree fn = gimple_call_fn (stmt); | |
2415 | ||
2416 | /* We are guaranteed never to go irrevocable on a safe or pure | |
2417 | call, and the pure call was handled above. */ | |
2418 | if (is_tm_safe (fn)) | |
2419 | return false; | |
2420 | else | |
2421 | transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); | |
2422 | ||
2423 | return false; | |
2424 | } | |
2425 | ||
415d1b9a | 2426 | node = cgraph_node::get (fn_decl); |
fce5ce8e | 2427 | /* All calls should have cgraph here. */ |
2428 | if (!node) | |
2429 | { | |
2430 | /* We can have a nodeless call here if some pass after IPA-tm | |
2431 | added uninstrumented calls. For example, loop distribution | |
2432 | can transform certain loop constructs into __builtin_mem* | |
2433 | calls. In this case, see if we have a suitable TM | |
2434 | replacement and fill in the gaps. */ | |
2435 | gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL); | |
2436 | enum built_in_function code = DECL_FUNCTION_CODE (fn_decl); | |
2437 | gcc_assert (code == BUILT_IN_MEMCPY | |
2438 | || code == BUILT_IN_MEMMOVE | |
2439 | || code == BUILT_IN_MEMSET); | |
2440 | ||
2441 | tree repl = find_tm_replacement_function (fn_decl); | |
2442 | if (repl) | |
2443 | { | |
2444 | gimple_call_set_fndecl (stmt, repl); | |
2445 | update_stmt (stmt); | |
415d1b9a | 2446 | node = cgraph_node::create (repl); |
fce5ce8e | 2447 | node->local.tm_may_enter_irr = false; |
2448 | return expand_call_tm (region, gsi); | |
2449 | } | |
2450 | gcc_unreachable (); | |
2451 | } | |
4c0315d0 | 2452 | if (node->local.tm_may_enter_irr) |
2453 | transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); | |
2454 | ||
2455 | if (is_tm_abort (fn_decl)) | |
2456 | { | |
2457 | transaction_subcode_ior (region, GTMA_HAVE_ABORT); | |
2458 | return true; | |
2459 | } | |
2460 | ||
2461 | /* Instrument the store if needed. | |
2462 | ||
2463 | If the assignment happens inside the function call (return slot | |
2464 | optimization), there is no instrumentation to be done, since | |
2465 | the callee should have done the right thing. */ | |
2466 | if (lhs && requires_barrier (region->entry_block, lhs, stmt) | |
2467 | && !gimple_call_return_slot_opt_p (stmt)) | |
2468 | { | |
f9e245b2 | 2469 | tree tmp = create_tmp_reg (TREE_TYPE (lhs)); |
4c0315d0 | 2470 | location_t loc = gimple_location (stmt); |
2471 | edge fallthru_edge = NULL; | |
1a91d914 | 2472 | gassign *assign_stmt; |
4c0315d0 | 2473 | |
2474 | /* Remember if the call was going to throw. */ | |
2475 | if (stmt_can_throw_internal (stmt)) | |
2476 | { | |
2477 | edge_iterator ei; | |
2478 | edge e; | |
2479 | basic_block bb = gimple_bb (stmt); | |
2480 | ||
2481 | FOR_EACH_EDGE (e, ei, bb->succs) | |
2482 | if (e->flags & EDGE_FALLTHRU) | |
2483 | { | |
2484 | fallthru_edge = e; | |
2485 | break; | |
2486 | } | |
2487 | } | |
2488 | ||
2489 | gimple_call_set_lhs (stmt, tmp); | |
2490 | update_stmt (stmt); | |
1a91d914 | 2491 | assign_stmt = gimple_build_assign (lhs, tmp); |
2492 | gimple_set_location (assign_stmt, loc); | |
4c0315d0 | 2493 | |
2494 | /* We cannot throw in the middle of a BB. If the call was going | |
2495 | to throw, place the instrumentation on the fallthru edge, so | |
2496 | the call remains the last statement in the block. */ | |
2497 | if (fallthru_edge) | |
2498 | { | |
1a91d914 | 2499 | gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (assign_stmt); |
4c0315d0 | 2500 | gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq); |
2501 | expand_assign_tm (region, &fallthru_gsi); | |
2502 | gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq); | |
2503 | pending_edge_inserts_p = true; | |
2504 | } | |
2505 | else | |
2506 | { | |
1a91d914 | 2507 | gsi_insert_after (gsi, assign_stmt, GSI_CONTINUE_LINKING); |
4c0315d0 | 2508 | expand_assign_tm (region, gsi); |
2509 | } | |
2510 | ||
2511 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2512 | } | |
2513 | ||
2514 | return retval; | |
2515 | } | |
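/* For illustration, a sketch of the return value handling above, assuming
   the LHS requires a barrier and a 4-byte result:

     g = foo ();

   becomes approximately

     tmp = foo ();
     __builtin__ITM_WU4 (&g, tmp);    // expanded via expand_assign_tm

   If foo can throw internally, the instrumented store is queued on the
   fallthru edge instead, so the call remains last in its block.  */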
2516 | ||
2517 | ||
2518 | /* Expand all statements in BB as appropriate for being inside | |
2519 | a transaction. */ | |
2520 | ||
2521 | static void | |
2522 | expand_block_tm (struct tm_region *region, basic_block bb) | |
2523 | { | |
2524 | gimple_stmt_iterator gsi; | |
2525 | ||
2526 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); ) | |
2527 | { | |
2528 | gimple stmt = gsi_stmt (gsi); | |
2529 | switch (gimple_code (stmt)) | |
2530 | { | |
2531 | case GIMPLE_ASSIGN: | |
2532 | /* Only memory reads/writes need to be instrumented. */ | |
73fd1e9a | 2533 | if (gimple_assign_single_p (stmt) |
2534 | && !gimple_clobber_p (stmt)) | |
4c0315d0 | 2535 | { |
2536 | expand_assign_tm (region, &gsi); | |
2537 | continue; | |
2538 | } | |
2539 | break; | |
2540 | ||
2541 | case GIMPLE_CALL: | |
2542 | if (expand_call_tm (region, &gsi)) | |
2543 | return; | |
2544 | break; | |
2545 | ||
2546 | case GIMPLE_ASM: | |
2547 | gcc_unreachable (); | |
2548 | ||
2549 | default: | |
2550 | break; | |
2551 | } | |
2552 | if (!gsi_end_p (gsi)) | |
2553 | gsi_next (&gsi); | |
2554 | } | |
2555 | } | |
2556 | ||
2557 | /* Return the list of basic-blocks in REGION. | |
2558 | ||
2559 | STOP_AT_IRREVOCABLE_P is true if the caller is not interested in blocks | |
79f4a793 | 2560 | following a TM_IRREVOCABLE call. |
2561 | ||
2562 | INCLUDE_UNINSTRUMENTED_P is TRUE if we should include the | |
2563 | uninstrumented code path blocks in the list of basic blocks | |
2564 | returned, false otherwise. */ | |
4c0315d0 | 2565 | |
f1f41a6c | 2566 | static vec<basic_block> |
4c0315d0 | 2567 | get_tm_region_blocks (basic_block entry_block, |
2568 | bitmap exit_blocks, | |
2569 | bitmap irr_blocks, | |
2570 | bitmap all_region_blocks, | |
79f4a793 | 2571 | bool stop_at_irrevocable_p, |
2572 | bool include_uninstrumented_p = true) | |
4c0315d0 | 2573 | { |
1e094109 | 2574 | vec<basic_block> bbs = vNULL; |
4c0315d0 | 2575 | unsigned i; |
2576 | edge e; | |
2577 | edge_iterator ei; | |
2578 | bitmap visited_blocks = BITMAP_ALLOC (NULL); | |
2579 | ||
2580 | i = 0; | |
f1f41a6c | 2581 | bbs.safe_push (entry_block); |
4c0315d0 | 2582 | bitmap_set_bit (visited_blocks, entry_block->index); |
2583 | ||
2584 | do | |
2585 | { | |
f1f41a6c | 2586 | basic_block bb = bbs[i++]; |
4c0315d0 | 2587 | |
2588 | if (exit_blocks && | |
2589 | bitmap_bit_p (exit_blocks, bb->index)) | |
2590 | continue; | |
2591 | ||
2592 | if (stop_at_irrevocable_p | |
2593 | && irr_blocks | |
2594 | && bitmap_bit_p (irr_blocks, bb->index)) | |
2595 | continue; | |
2596 | ||
2597 | FOR_EACH_EDGE (e, ei, bb->succs) | |
79f4a793 | 2598 | if ((include_uninstrumented_p |
2599 | || !(e->flags & EDGE_TM_UNINSTRUMENTED)) | |
2600 | && !bitmap_bit_p (visited_blocks, e->dest->index)) | |
4c0315d0 | 2601 | { |
2602 | bitmap_set_bit (visited_blocks, e->dest->index); | |
f1f41a6c | 2603 | bbs.safe_push (e->dest); |
4c0315d0 | 2604 | } |
2605 | } | |
f1f41a6c | 2606 | while (i < bbs.length ()); |
4c0315d0 | 2607 | |
2608 | if (all_region_blocks) | |
2609 | bitmap_ior_into (all_region_blocks, visited_blocks); | |
2610 | ||
2611 | BITMAP_FREE (visited_blocks); | |
2612 | return bbs; | |
2613 | } | |
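/* Typical use, as in the callers below (a sketch):

     vec<basic_block> bbs
       = get_tm_region_blocks (region->entry_block, region->exit_blocks,
                               region->irr_blocks, NULL,
                               true);   // stop_at_irrevocable_p
     ...iterate over bbs...
     bbs.release ();
*/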
2614 | ||
79f4a793 | 2615 | // Callback data for collect_bb2reg. |
2616 | struct bb2reg_stuff | |
2617 | { | |
2618 | vec<tm_region_p> *bb2reg; | |
2619 | bool include_uninstrumented_p; | |
2620 | }; | |
2621 | ||
0cd02a19 | 2622 | // Callback for expand_regions, collect innermost region data for each bb. |
2623 | static void * | |
2624 | collect_bb2reg (struct tm_region *region, void *data) | |
2625 | { | |
79f4a793 | 2626 | struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data; |
2627 | vec<tm_region_p> *bb2reg = stuff->bb2reg; | |
f1f41a6c | 2628 | vec<basic_block> queue; |
0cd02a19 | 2629 | unsigned int i; |
2630 | basic_block bb; | |
2631 | ||
2632 | queue = get_tm_region_blocks (region->entry_block, | |
2633 | region->exit_blocks, | |
2634 | region->irr_blocks, | |
2635 | NULL, | |
79f4a793 | 2636 | /*stop_at_irr_p=*/true, |
2637 | stuff->include_uninstrumented_p); | |
0cd02a19 | 2638 | |
2639 | // We expect expand_regions to perform a post-order traversal of the region | |
2640 | // tree. Therefore the last region seen for any bb is the innermost. | |
f1f41a6c | 2641 | FOR_EACH_VEC_ELT (queue, i, bb) |
2642 | (*bb2reg)[bb->index] = region; | |
0cd02a19 | 2643 | |
f1f41a6c | 2644 | queue.release (); |
0cd02a19 | 2645 | return NULL; |
2646 | } | |
2647 | ||
2648 | // Returns a vector, indexed by BB->INDEX, of the innermost tm_region to | |
2649 | // which a basic block belongs. Note that we only consider the instrumented | |
79f4a793 | 2650 | // code paths for the region; the uninstrumented code paths are ignored if |
2651 | // INCLUDE_UNINSTRUMENTED_P is false. | |
0cd02a19 | 2652 | // |
2653 | // ??? This data is very similar to the bb_regions array that is collected | |
2654 | // during tm_region_init. Or, rather, this data is similar to what could | |
2655 | // be used within tm_region_init. The actual computation in tm_region_init | |
2656 | // begins and ends with bb_regions entirely full of NULL pointers, due to | |
2657 | // the way in which pointers are swapped in and out of the array. | |
2658 | // | |
2659 | // ??? Our callers expect that blocks are not shared between transactions. | |
2660 | // When the optimizers get too smart, and blocks are shared, then during | |
2661 | // the tm_mark phase we'll add log entries to only one of the two transactions, | |
2662 | // and in the tm_edge phase we'll add edges to the CFG that create invalid | |
2663 | // cycles. The symptom being SSA defs that do not dominate their uses. | |
2664 | // Note that the optimizers were locally correct with their transformation, | |
2665 | // as we have no info within the program that suggests that the blocks cannot | |
2666 | // be shared. | |
2667 | // | |
2668 | // ??? There is currently a hack inside tree-ssa-pre.c to work around the | |
2669 | // only known instance of this block sharing. | |
2670 | ||
f1f41a6c | 2671 | static vec<tm_region_p> |
79f4a793 | 2672 | get_bb_regions_instrumented (bool traverse_clones, |
2673 | bool include_uninstrumented_p) | |
0cd02a19 | 2674 | { |
fe672ac0 | 2675 | unsigned n = last_basic_block_for_fn (cfun); |
79f4a793 | 2676 | struct bb2reg_stuff stuff; |
f1f41a6c | 2677 | vec<tm_region_p> ret; |
0cd02a19 | 2678 | |
f1f41a6c | 2679 | ret.create (n); |
2680 | ret.safe_grow_cleared (n); | |
79f4a793 | 2681 | stuff.bb2reg = &ret; |
2682 | stuff.include_uninstrumented_p = include_uninstrumented_p; | |
2683 | expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones); | |
0cd02a19 | 2684 | |
2685 | return ret; | |
2686 | } | |
2687 | ||
de60f90c | 2688 | /* Set the BB_IN_TRANSACTION flag on all basic blocks that appear |
2689 | inside a transaction. */ | |
2690 | ||
2691 | void | |
2692 | compute_transaction_bits (void) | |
2693 | { | |
2694 | struct tm_region *region; | |
f1f41a6c | 2695 | vec<basic_block> queue; |
de60f90c | 2696 | unsigned int i; |
de60f90c | 2697 | basic_block bb; |
2698 | ||
2699 | /* ?? Perhaps we need to abstract gate_tm_init further, because we | |
2700 | certainly don't need it to calculate CDI_DOMINATOR info. */ | |
2701 | gate_tm_init (); | |
2702 | ||
fc00614f | 2703 | FOR_EACH_BB_FN (bb, cfun) |
6ad451f8 | 2704 | bb->flags &= ~BB_IN_TRANSACTION; |
2705 | ||
de60f90c | 2706 | for (region = all_tm_regions; region; region = region->next) |
2707 | { | |
2708 | queue = get_tm_region_blocks (region->entry_block, | |
2709 | region->exit_blocks, | |
2710 | region->irr_blocks, | |
2711 | NULL, | |
2712 | /*stop_at_irr_p=*/true); | |
f1f41a6c | 2713 | for (i = 0; queue.iterate (i, &bb); ++i) |
6ad451f8 | 2714 | bb->flags |= BB_IN_TRANSACTION; |
f1f41a6c | 2715 | queue.release (); |
de60f90c | 2716 | } |
2717 | ||
2718 | if (all_tm_regions) | |
2719 | bitmap_obstack_release (&tm_obstack); | |
2720 | } | |
2721 | ||
0cd02a19 | 2722 | /* Replace the GIMPLE_TRANSACTION in this region with the corresponding |
2723 | call to BUILT_IN_TM_START. */ | |
2724 | ||
2725 | static void * | |
2726 | expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED) | |
2727 | { | |
2728 | tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START); | |
2729 | basic_block transaction_bb = gimple_bb (region->transaction_stmt); | |
2730 | tree tm_state = region->tm_state; | |
2731 | tree tm_state_type = TREE_TYPE (tm_state); | |
2732 | edge abort_edge = NULL; | |
2733 | edge inst_edge = NULL; | |
2734 | edge uninst_edge = NULL; | |
2735 | edge fallthru_edge = NULL; | |
2736 | ||
2737 | // Identify the various successors of the transaction start. | |
2738 | { | |
2739 | edge_iterator i; | |
2740 | edge e; | |
2741 | FOR_EACH_EDGE (e, i, transaction_bb->succs) | |
2742 | { | |
2743 | if (e->flags & EDGE_TM_ABORT) | |
2744 | abort_edge = e; | |
2745 | else if (e->flags & EDGE_TM_UNINSTRUMENTED) | |
2746 | uninst_edge = e; | |
2747 | else | |
2748 | inst_edge = e; | |
2749 | if (e->flags & EDGE_FALLTHRU) | |
2750 | fallthru_edge = e; | |
2751 | } | |
2752 | } | |
2753 | ||
2754 | /* ??? There are plenty of bits here we're not computing. */ | |
2755 | { | |
1a91d914 | 2756 | int subcode = gimple_transaction_subcode (region->get_transaction_stmt ()); |
0cd02a19 | 2757 | int flags = 0; |
2758 | if (subcode & GTMA_DOES_GO_IRREVOCABLE) | |
2759 | flags |= PR_DOESGOIRREVOCABLE; | |
2760 | if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0) | |
2761 | flags |= PR_HASNOIRREVOCABLE; | |
2762 | /* If the transaction does not have an abort in lexical scope and is not | |
2763 | marked as an outer transaction, then it will never abort. */ | |
2764 | if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0) | |
2765 | flags |= PR_HASNOABORT; | |
2766 | if ((subcode & GTMA_HAVE_STORE) == 0) | |
2767 | flags |= PR_READONLY; | |
1910089e | 2768 | if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION)) |
0cd02a19 | 2769 | flags |= PR_INSTRUMENTEDCODE; |
2770 | if (uninst_edge) | |
2771 | flags |= PR_UNINSTRUMENTEDCODE; | |
2772 | if (subcode & GTMA_IS_OUTER) | |
2773 | region->original_transaction_was_outer = true; | |
2774 | tree t = build_int_cst (tm_state_type, flags); | |
1a91d914 | 2775 | gcall *call = gimple_build_call (tm_start, 1, t); |
0cd02a19 | 2776 | gimple_call_set_lhs (call, tm_state); |
2777 | gimple_set_location (call, gimple_location (region->transaction_stmt)); | |
2778 | ||
2779 | // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START. | |
2780 | gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb); | |
2781 | gcc_assert (gsi_stmt (gsi) == region->transaction_stmt); | |
2782 | gsi_insert_before (&gsi, call, GSI_SAME_STMT); | |
2783 | gsi_remove (&gsi, true); | |
2784 | region->transaction_stmt = call; | |
2785 | } | |
2786 | ||
2787 | // Generate log saves. | |
f1f41a6c | 2788 | if (!tm_log_save_addresses.is_empty ()) |
0cd02a19 | 2789 | tm_log_emit_saves (region->entry_block, transaction_bb); |
2790 | ||
2791 | // In the beginning, we have no tests to perform on transaction restart. | |
2792 | // Note that after this point, transaction_bb becomes the "most recent | |
2793 | // block containing tests for the transaction". | |
2794 | region->restart_block = region->entry_block; | |
2795 | ||
2796 | // Generate log restores. | |
f1f41a6c | 2797 | if (!tm_log_save_addresses.is_empty ()) |
0cd02a19 | 2798 | { |
2799 | basic_block test_bb = create_empty_bb (transaction_bb); | |
2800 | basic_block code_bb = create_empty_bb (test_bb); | |
2801 | basic_block join_bb = create_empty_bb (code_bb); | |
b3083327 | 2802 | add_bb_to_loop (test_bb, transaction_bb->loop_father); |
2803 | add_bb_to_loop (code_bb, transaction_bb->loop_father); | |
2804 | add_bb_to_loop (join_bb, transaction_bb->loop_father); | |
0cd02a19 | 2805 | if (region->restart_block == region->entry_block) |
2806 | region->restart_block = test_bb; | |
2807 | ||
f9e245b2 | 2808 | tree t1 = create_tmp_reg (tm_state_type); |
0cd02a19 | 2809 | tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES); |
e9cf809e | 2810 | gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2); |
0cd02a19 | 2811 | gimple_stmt_iterator gsi = gsi_last_bb (test_bb); |
2812 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2813 | ||
2814 | t2 = build_int_cst (tm_state_type, 0); | |
2815 | stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); | |
2816 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2817 | ||
2818 | tm_log_emit_restores (region->entry_block, code_bb); | |
2819 | ||
2820 | edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU); | |
2821 | edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE); | |
2822 | edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE); | |
2823 | redirect_edge_pred (fallthru_edge, join_bb); | |
2824 | ||
2825 | join_bb->frequency = test_bb->frequency = transaction_bb->frequency; | |
2826 | join_bb->count = test_bb->count = transaction_bb->count; | |
2827 | ||
2828 | ei->probability = PROB_ALWAYS; | |
2829 | et->probability = PROB_LIKELY; | |
2830 | ef->probability = PROB_UNLIKELY; | |
9af5ce0c | 2831 | et->count = apply_probability (test_bb->count, et->probability); |
2832 | ef->count = apply_probability (test_bb->count, ef->probability); | |
0cd02a19 | 2833 | |
2834 | code_bb->count = et->count; | |
2835 | code_bb->frequency = EDGE_FREQUENCY (et); | |
2836 | ||
2837 | transaction_bb = join_bb; | |
2838 | } | |
2839 | ||
2840 | // If we have an ABORT edge, create a test to perform the abort. | |
2841 | if (abort_edge) | |
2842 | { | |
2843 | basic_block test_bb = create_empty_bb (transaction_bb); | |
b3083327 | 2844 | add_bb_to_loop (test_bb, transaction_bb->loop_father); |
0cd02a19 | 2845 | if (region->restart_block == region->entry_block) |
2846 | region->restart_block = test_bb; | |
2847 | ||
f9e245b2 | 2848 | tree t1 = create_tmp_reg (tm_state_type); |
0cd02a19 | 2849 | tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION); |
e9cf809e | 2850 | gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2); |
0cd02a19 | 2851 | gimple_stmt_iterator gsi = gsi_last_bb (test_bb); |
2852 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2853 | ||
2854 | t2 = build_int_cst (tm_state_type, 0); | |
2855 | stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); | |
2856 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2857 | ||
2858 | edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU); | |
2859 | test_bb->frequency = transaction_bb->frequency; | |
2860 | test_bb->count = transaction_bb->count; | |
2861 | ei->probability = PROB_ALWAYS; | |
2862 | ||
2863 | // Not the abort edge. If both are live, choose one at random as | |
2864 | // we'll be fixing that up below. | |
2865 | redirect_edge_pred (fallthru_edge, test_bb); | |
2866 | fallthru_edge->flags = EDGE_FALSE_VALUE; | |
2867 | fallthru_edge->probability = PROB_VERY_LIKELY; | |
2868 | fallthru_edge->count | |
9af5ce0c | 2869 | = apply_probability (test_bb->count, fallthru_edge->probability); |
0cd02a19 | 2870 | |
2871 | // Abort/over edge. | |
2872 | redirect_edge_pred (abort_edge, test_bb); | |
2873 | abort_edge->flags = EDGE_TRUE_VALUE; | |
2874 | abort_edge->probability = PROB_VERY_UNLIKELY; | |
2875 | abort_edge->count | |
9af5ce0c | 2876 | = apply_probability (test_bb->count, abort_edge->probability); |
0cd02a19 | 2877 | |
2878 | transaction_bb = test_bb; | |
2879 | } | |
2880 | ||
2881 | // If we have both instrumented and uninstrumented code paths, select one. | |
2882 | if (inst_edge && uninst_edge) | |
2883 | { | |
2884 | basic_block test_bb = create_empty_bb (transaction_bb); | |
b3083327 | 2885 | add_bb_to_loop (test_bb, transaction_bb->loop_father); |
0cd02a19 | 2886 | if (region->restart_block == region->entry_block) |
2887 | region->restart_block = test_bb; | |
2888 | ||
f9e245b2 | 2889 | tree t1 = create_tmp_reg (tm_state_type); |
0cd02a19 | 2890 | tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE); |
2891 | ||
e9cf809e | 2892 | gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2); |
0cd02a19 | 2893 | gimple_stmt_iterator gsi = gsi_last_bb (test_bb); |
2894 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2895 | ||
2896 | t2 = build_int_cst (tm_state_type, 0); | |
2897 | stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); | |
2898 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2899 | ||
2900 | // Create the edge into test_bb first, as we want to copy values | |
2901 | // out of the fallthru edge. | |
2902 | edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags); | |
2903 | e->probability = fallthru_edge->probability; | |
2904 | test_bb->count = e->count = fallthru_edge->count; | |
2905 | test_bb->frequency = EDGE_FREQUENCY (e); | |
2906 | ||
2907 | // Now update the edges to the inst/uninst implementations. | |
2908 | // For now assume that the paths are equally likely. When using HTM, | |
2909 | // we'll try the uninst path first and fall back to the inst path if HTM | |
2910 | // buffers are exceeded. Without HTM we start with the inst path and | |
2911 | // use the uninst path when falling back to serial mode. | |
2912 | redirect_edge_pred (inst_edge, test_bb); | |
2913 | inst_edge->flags = EDGE_FALSE_VALUE; | |
2914 | inst_edge->probability = REG_BR_PROB_BASE / 2; | |
2915 | inst_edge->count | |
9af5ce0c | 2916 | = apply_probability (test_bb->count, inst_edge->probability); |
0cd02a19 | 2917 | |
2918 | redirect_edge_pred (uninst_edge, test_bb); | |
2919 | uninst_edge->flags = EDGE_TRUE_VALUE; | |
2920 | uninst_edge->probability = REG_BR_PROB_BASE / 2; | |
2921 | uninst_edge->count | |
9af5ce0c | 2922 | = apply_probability (test_bb->count, uninst_edge->probability); |
0cd02a19 | 2923 | } |
2924 | ||
2925 | // If we have no previous special cases, and we have PHIs at the beginning | |
2926 | // of the atomic region, this means we have a loop at the beginning of the | |
2927 | // atomic region that shares the first block. This can cause problems with | |
2928 | // the abnormal transaction-restart edges that are added in the tm_edges pass. | |
2929 | // Solve this by adding a new empty block to receive the abnormal edges. | |
2930 | if (region->restart_block == region->entry_block | |
2931 | && phi_nodes (region->entry_block)) | |
2932 | { | |
2933 | basic_block empty_bb = create_empty_bb (transaction_bb); | |
2934 | region->restart_block = empty_bb; | |
b3083327 | 2935 | add_bb_to_loop (empty_bb, transaction_bb->loop_father); |
0cd02a19 | 2936 | |
2937 | redirect_edge_pred (fallthru_edge, empty_bb); | |
2938 | make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU); | |
2939 | } | |
2940 | ||
2941 | return NULL; | |
2942 | } | |
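/* A rough sketch of the control flow built above for a transaction with
   log saves and a lexical abort (illustrative; flag names as used here):

     tm_state = __builtin__ITM_beginTransaction (PR_INSTRUMENTEDCODE | ...);
     if (tm_state & A_RESTORELIVEVARIABLES)
       <restore logged values>;
     if (tm_state & A_ABORTTRANSACTION)
       goto over;                       // the EDGE_TM_ABORT successor
     <instrumented body>
   over:

   When both instrumented and uninstrumented code paths exist, a further
   test on A_RUNUNINSTRUMENTEDCODE selects between them.  */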
2943 | ||
2944 | /* Generate the temporary to be used for the return value of | |
2945 | BUILT_IN_TM_START. */ | |
2946 | ||
2947 | static void * | |
2948 | generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED) | |
2949 | { | |
2950 | tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START); | |
2951 | region->tm_state = | |
2952 | create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state"); | |
2953 | ||
2954 | // Reset the subcode, post optimizations. We'll fill this in | |
2955 | // again as we process blocks. | |
2956 | if (region->exit_blocks) | |
2957 | { | |
1a91d914 | 2958 | gtransaction *transaction_stmt = region->get_transaction_stmt (); |
2959 | unsigned int subcode = gimple_transaction_subcode (transaction_stmt); | |
0cd02a19 | 2960 | |
2961 | if (subcode & GTMA_DOES_GO_IRREVOCABLE) | |
2962 | subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE | |
1910089e | 2963 | | GTMA_MAY_ENTER_IRREVOCABLE |
2964 | | GTMA_HAS_NO_INSTRUMENTATION); | |
0cd02a19 | 2965 | else |
2966 | subcode &= GTMA_DECLARATION_MASK; | |
1a91d914 | 2967 | gimple_transaction_set_subcode (transaction_stmt, subcode); |
0cd02a19 | 2968 | } |
2969 | ||
2970 | return NULL; | |
2971 | } | |
2972 | ||
2973 | // Propagate flags from inner transactions outwards. | |
2974 | static void | |
2975 | propagate_tm_flags_out (struct tm_region *region) | |
2976 | { | |
2977 | if (region == NULL) | |
2978 | return; | |
2979 | propagate_tm_flags_out (region->inner); | |
2980 | ||
2981 | if (region->outer && region->outer->transaction_stmt) | |
2982 | { | |
1a91d914 | 2983 | unsigned s |
2984 | = gimple_transaction_subcode (region->get_transaction_stmt ()); | |
0cd02a19 | 2985 | s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE |
2986 | | GTMA_MAY_ENTER_IRREVOCABLE); | |
1a91d914 | 2987 | s |= gimple_transaction_subcode (region->outer->get_transaction_stmt ()); |
2988 | gimple_transaction_set_subcode (region->outer->get_transaction_stmt (), | |
2989 | s); | |
0cd02a19 | 2990 | } |
2991 | ||
2992 | propagate_tm_flags_out (region->next); | |
2993 | } | |
2994 | ||
4c0315d0 | 2995 | /* Entry point to the MARK phase of TM expansion. Here we replace |
2996 | transactional memory statements with calls to builtins, and function | |
2997 | calls with their transactional clones (if available). But we don't | |
2998 | yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */ | |
2999 | ||
3000 | static unsigned int | |
3001 | execute_tm_mark (void) | |
3002 | { | |
4c0315d0 | 3003 | pending_edge_inserts_p = false; |
3004 | ||
00d83cc8 | 3005 | expand_regions (all_tm_regions, generate_tm_state, NULL, |
3006 | /*traverse_clones=*/true); | |
4c0315d0 | 3007 | |
0cd02a19 | 3008 | tm_log_init (); |
4c0315d0 | 3009 | |
00d83cc8 | 3010 | vec<tm_region_p> bb_regions |
79f4a793 | 3011 | = get_bb_regions_instrumented (/*traverse_clones=*/true, |
3012 | /*include_uninstrumented_p=*/false); | |
0cd02a19 | 3013 | struct tm_region *r; |
3014 | unsigned i; | |
4c0315d0 | 3015 | |
0cd02a19 | 3016 | // Expand memory operations into calls into the runtime. |
3017 | // This collects log entries as well. | |
f1f41a6c | 3018 | FOR_EACH_VEC_ELT (bb_regions, i, r) |
ded1a556 | 3019 | { |
3020 | if (r != NULL) | |
3021 | { | |
3022 | if (r->transaction_stmt) | |
3023 | { | |
1a91d914 | 3024 | unsigned sub |
3025 | = gimple_transaction_subcode (r->get_transaction_stmt ()); | |
ded1a556 | 3026 | |
3027 | /* If we're sure to go irrevocable, there won't be | |
3028 | anything to expand, since the run-time will go | |
3029 | irrevocable right away. */ | |
3030 | if (sub & GTMA_DOES_GO_IRREVOCABLE | |
3031 | && sub & GTMA_MAY_ENTER_IRREVOCABLE) | |
3032 | continue; | |
3033 | } | |
f5a6b05f | 3034 | expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i)); |
ded1a556 | 3035 | } |
3036 | } | |
0cd02a19 | 3037 | |
4aac6cf8 | 3038 | bb_regions.release (); |
3039 | ||
0cd02a19 | 3040 | // Propagate flags from inner transactions outwards. |
3041 | propagate_tm_flags_out (all_tm_regions); | |
3042 | ||
3043 | // Expand GIMPLE_TRANSACTIONs into calls into the runtime. | |
00d83cc8 | 3044 | expand_regions (all_tm_regions, expand_transaction, NULL, |
3045 | /*traverse_clones=*/false); | |
0cd02a19 | 3046 | |
3047 | tm_log_emit (); | |
3048 | tm_log_delete (); | |
4c0315d0 | 3049 | |
3050 | if (pending_edge_inserts_p) | |
3051 | gsi_commit_edge_inserts (); | |
0cd02a19 | 3052 | free_dominance_info (CDI_DOMINATORS); |
4c0315d0 | 3053 | return 0; |
3054 | } | |
3055 | ||
cbe8bda8 | 3056 | namespace { |
3057 | ||
3058 | const pass_data pass_data_tm_mark = | |
3059 | { | |
3060 | GIMPLE_PASS, /* type */ | |
3061 | "tmmark", /* name */ | |
3062 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 3063 | TV_TRANS_MEM, /* tv_id */ |
3064 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
3065 | 0, /* properties_provided */ | |
3066 | 0, /* properties_destroyed */ | |
3067 | 0, /* todo_flags_start */ | |
8b88439e | 3068 | TODO_update_ssa, /* todo_flags_finish */ |
4c0315d0 | 3069 | }; |
cbe8bda8 | 3070 | |
3071 | class pass_tm_mark : public gimple_opt_pass | |
3072 | { | |
3073 | public: | |
9af5ce0c | 3074 | pass_tm_mark (gcc::context *ctxt) |
3075 | : gimple_opt_pass (pass_data_tm_mark, ctxt) | |
cbe8bda8 | 3076 | {} |
3077 | ||
3078 | /* opt_pass methods: */ | |
65b0537f | 3079 | virtual unsigned int execute (function *) { return execute_tm_mark (); } |
cbe8bda8 | 3080 | |
3081 | }; // class pass_tm_mark | |
3082 | ||
3083 | } // anon namespace | |
3084 | ||
3085 | gimple_opt_pass * | |
3086 | make_pass_tm_mark (gcc::context *ctxt) | |
3087 | { | |
3088 | return new pass_tm_mark (ctxt); | |
3089 | } | |
4c0315d0 | 3090 | \f |
0cd02a19 | 3091 | |
/* Create an abnormal edge from STMT (at iterator ITER) to DEST_BB,
   splitting the block as necessary.  Adjust *PNEXT as needed for the
   split block.  */
4c0315d0 | 3094 | |
3095 | static inline void | |
0cd02a19 | 3096 | split_bb_make_tm_edge (gimple stmt, basic_block dest_bb, |
3097 | gimple_stmt_iterator iter, gimple_stmt_iterator *pnext) | |
4c0315d0 | 3098 | { |
0cd02a19 | 3099 | basic_block bb = gimple_bb (stmt); |
3100 | if (!gsi_one_before_end_p (iter)) | |
3101 | { | |
3102 | edge e = split_block (bb, stmt); | |
3103 | *pnext = gsi_start_bb (e->dest); | |
3104 | } | |
3105 | make_edge (bb, dest_bb, EDGE_ABNORMAL); | |
4c0315d0 | 3106 | |
0cd02a19 | 3107 | // Record the need for the edge for the benefit of the rtl passes. |
4c0315d0 | 3108 | if (cfun->gimple_df->tm_restart == NULL) |
b7aa58e4 | 3109 | cfun->gimple_df->tm_restart |
3110 | = hash_table<tm_restart_hasher>::create_ggc (31); | |
4c0315d0 | 3111 | |
0cd02a19 | 3112 | struct tm_restart_node dummy; |
4c0315d0 | 3113 | dummy.stmt = stmt; |
0cd02a19 | 3114 | dummy.label_or_list = gimple_block_label (dest_bb); |
3115 | ||
b7aa58e4 | 3116 | tm_restart_node **slot = cfun->gimple_df->tm_restart->find_slot (&dummy, |
3117 | INSERT); | |
3118 | struct tm_restart_node *n = *slot; | |
4c0315d0 | 3119 | if (n == NULL) |
3120 | { | |
25a27413 | 3121 | n = ggc_alloc<tm_restart_node> (); |
4c0315d0 | 3122 | *n = dummy; |
3123 | } | |
3124 | else | |
3125 | { | |
3126 | tree old = n->label_or_list; | |
3127 | if (TREE_CODE (old) == LABEL_DECL) | |
0cd02a19 | 3128 | old = tree_cons (NULL, old, NULL); |
4c0315d0 | 3129 | n->label_or_list = tree_cons (NULL, dummy.label_or_list, old); |
3130 | } | |
4c0315d0 | 3131 | } |
3132 | ||
4c0315d0 | 3133 | /* Split block BB as necessary for every builtin function we added, and |
3134 | wire up the abnormal back edges implied by the transaction restart. */ | |
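
/* For instance (a sketch, not from any particular testcase): after the
   mark phase a transaction region may look like

       <bb 3>:
	 tm_state_1 = __builtin__ITM_beginTransaction (...);
	 ...
       <bb 7>:
	 __builtin__ITM_commitTransaction ();

   and the code below adds an abnormal edge from bb 7 (and from the block
   of every other TM builtin in the region) back to the block containing
   the _ITM_beginTransaction call, since the runtime may restart the
   transaction through a longjmp-style mechanism.  */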
3135 | ||
3136 | static void | |
0cd02a19 | 3137 | expand_block_edges (struct tm_region *const region, basic_block bb) |
4c0315d0 | 3138 | { |
0cd02a19 | 3139 | gimple_stmt_iterator gsi, next_gsi; |
4c0315d0 | 3140 | |
0cd02a19 | 3141 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi) |
4c0315d0 | 3142 | { |
3143 | gimple stmt = gsi_stmt (gsi); | |
1a91d914 | 3144 | gcall *call_stmt; |
4c0315d0 | 3145 | |
0cd02a19 | 3146 | next_gsi = gsi; |
3147 | gsi_next (&next_gsi); | |
3148 | ||
3149 | // ??? Shouldn't we split for any non-pure, non-irrevocable function? | |
1a91d914 | 3150 | call_stmt = dyn_cast <gcall *> (stmt); |
3151 | if ((!call_stmt) | |
3152 | || (gimple_call_flags (call_stmt) & ECF_TM_BUILTIN) == 0) | |
0cd02a19 | 3153 | continue; |
3154 | ||
1a91d914 | 3155 | if (DECL_FUNCTION_CODE (gimple_call_fndecl (call_stmt)) |
3156 | == BUILT_IN_TM_ABORT) | |
4c0315d0 | 3157 | { |
	  // If we have a ``__transaction_cancel [[outer]]'', there is only
3159 | // one abnormal edge: to the transaction marked OUTER. | |
3160 | // All compiler-generated instances of BUILT_IN_TM_ABORT have a | |
3161 | // constant argument, which we can examine here. Users invoking | |
3162 | // TM_ABORT directly get what they deserve. | |
1a91d914 | 3163 | tree arg = gimple_call_arg (call_stmt, 0); |
0cd02a19 | 3164 | if (TREE_CODE (arg) == INTEGER_CST |
3165 | && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0 | |
3166 | && !decl_is_tm_clone (current_function_decl)) | |
4c0315d0 | 3167 | { |
0cd02a19 | 3168 | // Find the GTMA_IS_OUTER transaction. |
3169 | for (struct tm_region *o = region; o; o = o->outer) | |
3170 | if (o->original_transaction_was_outer) | |
3171 | { | |
1a91d914 | 3172 | split_bb_make_tm_edge (call_stmt, o->restart_block, |
0cd02a19 | 3173 | gsi, &next_gsi); |
3174 | break; | |
3175 | } | |
3176 | ||
3177 | // Otherwise, the front-end should have semantically checked | |
3178 | // outer aborts, but in either case the target region is not | |
3179 | // within this function. | |
3180 | continue; | |
4c0315d0 | 3181 | } |
3182 | ||
	  // Non-outer TM aborts have an abnormal edge to the innermost
	  // transaction, the one being aborted.
1a91d914 | 3185 | split_bb_make_tm_edge (call_stmt, region->restart_block, gsi, |
3186 | &next_gsi); | |
4c0315d0 | 3187 | } |
3188 | ||
0cd02a19 | 3189 | // All TM builtins have an abnormal edge to the outer-most transaction. |
      // We never restart inner transactions.  For tm clones, we know a priori
3191 | // that the outer-most transaction is outside the function. | |
3192 | if (decl_is_tm_clone (current_function_decl)) | |
3193 | continue; | |
4c0315d0 | 3194 | |
0cd02a19 | 3195 | if (cfun->gimple_df->tm_restart == NULL) |
3196 | cfun->gimple_df->tm_restart | |
b7aa58e4 | 3197 | = hash_table<tm_restart_hasher>::create_ggc (31); |
4c0315d0 | 3198 | |
0cd02a19 | 3199 | // All TM builtins have an abnormal edge to the outer-most transaction. |
3200 | // We never restart inner transactions. | |
3201 | for (struct tm_region *o = region; o; o = o->outer) | |
3202 | if (!o->outer) | |
3203 | { | |
1a91d914 | 3204 | split_bb_make_tm_edge (call_stmt, o->restart_block, gsi, &next_gsi); |
0cd02a19 | 3205 | break; |
3206 | } | |
4c0315d0 | 3207 | |
0cd02a19 | 3208 | // Delete any tail-call annotation that may have been added. |
3209 | // The tail-call pass may have mis-identified the commit as being | |
3210 | // a candidate because we had not yet added this restart edge. | |
1a91d914 | 3211 | gimple_call_set_tail (call_stmt, false); |
4c0315d0 | 3212 | } |
3213 | } | |
3214 | ||
3215 | /* Entry point to the final expansion of transactional nodes. */ | |
3216 | ||
cbe8bda8 | 3217 | namespace { |
3218 | ||
3219 | const pass_data pass_data_tm_edges = | |
3220 | { | |
3221 | GIMPLE_PASS, /* type */ | |
3222 | "tmedge", /* name */ | |
3223 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 3224 | TV_TRANS_MEM, /* tv_id */ |
3225 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
3226 | 0, /* properties_provided */ | |
3227 | 0, /* properties_destroyed */ | |
3228 | 0, /* todo_flags_start */ | |
8b88439e | 3229 | TODO_update_ssa, /* todo_flags_finish */ |
4c0315d0 | 3230 | }; |
cbe8bda8 | 3231 | |
3232 | class pass_tm_edges : public gimple_opt_pass | |
3233 | { | |
3234 | public: | |
9af5ce0c | 3235 | pass_tm_edges (gcc::context *ctxt) |
3236 | : gimple_opt_pass (pass_data_tm_edges, ctxt) | |
cbe8bda8 | 3237 | {} |
3238 | ||
3239 | /* opt_pass methods: */ | |
65b0537f | 3240 | virtual unsigned int execute (function *); |
cbe8bda8 | 3241 | |
3242 | }; // class pass_tm_edges | |
3243 | ||
65b0537f | 3244 | unsigned int |
3245 | pass_tm_edges::execute (function *fun) | |
3246 | { | |
3247 | vec<tm_region_p> bb_regions | |
3248 | = get_bb_regions_instrumented (/*traverse_clones=*/false, | |
3249 | /*include_uninstrumented_p=*/true); | |
3250 | struct tm_region *r; | |
3251 | unsigned i; | |
3252 | ||
3253 | FOR_EACH_VEC_ELT (bb_regions, i, r) | |
3254 | if (r != NULL) | |
3255 | expand_block_edges (r, BASIC_BLOCK_FOR_FN (fun, i)); | |
3256 | ||
3257 | bb_regions.release (); | |
3258 | ||
3259 | /* We've got to release the dominance info now, to indicate that it | |
3260 | must be rebuilt completely. Otherwise we'll crash trying to update | |
3261 | the SSA web in the TODO section following this pass. */ | |
3262 | free_dominance_info (CDI_DOMINATORS); | |
3263 | bitmap_obstack_release (&tm_obstack); | |
3264 | all_tm_regions = NULL; | |
3265 | ||
3266 | return 0; | |
3267 | } | |
3268 | ||
cbe8bda8 | 3269 | } // anon namespace |
3270 | ||
3271 | gimple_opt_pass * | |
3272 | make_pass_tm_edges (gcc::context *ctxt) | |
3273 | { | |
3274 | return new pass_tm_edges (ctxt); | |
3275 | } | |
0cd02a19 | 3276 | \f |
3277 | /* Helper function for expand_regions. Expand REGION and recurse to | |
3278 | the inner region. Call CALLBACK on each region. CALLBACK returns | |
3279 | NULL to continue the traversal, otherwise a non-null value which | |
00d83cc8 | 3280 | this function will return as well. TRAVERSE_CLONES is true if we |
3281 | should traverse transactional clones. */ | |
0cd02a19 | 3282 | |
3283 | static void * | |
3284 | expand_regions_1 (struct tm_region *region, | |
3285 | void *(*callback)(struct tm_region *, void *), | |
00d83cc8 | 3286 | void *data, |
3287 | bool traverse_clones) | |
0cd02a19 | 3288 | { |
3289 | void *retval = NULL; | |
00d83cc8 | 3290 | if (region->exit_blocks |
3291 | || (traverse_clones && decl_is_tm_clone (current_function_decl))) | |
0cd02a19 | 3292 | { |
3293 | retval = callback (region, data); | |
3294 | if (retval) | |
3295 | return retval; | |
3296 | } | |
3297 | if (region->inner) | |
3298 | { | |
00d83cc8 | 3299 | retval = expand_regions (region->inner, callback, data, traverse_clones); |
0cd02a19 | 3300 | if (retval) |
3301 | return retval; | |
3302 | } | |
3303 | return retval; | |
3304 | } | |
3305 | ||
3306 | /* Traverse the regions enclosed and including REGION. Execute | |
3307 | CALLBACK for each region, passing DATA. CALLBACK returns NULL to | |
3308 | continue the traversal, otherwise a non-null value which this | |
00d83cc8 | 3309 | function will return as well. TRAVERSE_CLONES is true if we should |
3310 | traverse transactional clones. */ | |
0cd02a19 | 3311 | |
3312 | static void * | |
3313 | expand_regions (struct tm_region *region, | |
3314 | void *(*callback)(struct tm_region *, void *), | |
00d83cc8 | 3315 | void *data, |
3316 | bool traverse_clones) | |
0cd02a19 | 3317 | { |
3318 | void *retval = NULL; | |
3319 | while (region) | |
3320 | { | |
00d83cc8 | 3321 | retval = expand_regions_1 (region, callback, data, traverse_clones); |
0cd02a19 | 3322 | if (retval) |
3323 | return retval; | |
3324 | region = region->next; | |
3325 | } | |
3326 | return retval; | |
3327 | } | |
3328 | ||
4c0315d0 | 3329 | \f |
3330 | /* A unique TM memory operation. */ | |
3331 | typedef struct tm_memop | |
3332 | { | |
3333 | /* Unique ID that all memory operations to the same location have. */ | |
3334 | unsigned int value_id; | |
3335 | /* Address of load/store. */ | |
3336 | tree addr; | |
3337 | } *tm_memop_t; | |
3338 | ||
d9dd21a8 | 3339 | /* TM memory operation hashtable helpers. */ |
3340 | ||
3341 | struct tm_memop_hasher : typed_free_remove <tm_memop> | |
3342 | { | |
3343 | typedef tm_memop value_type; | |
3344 | typedef tm_memop compare_type; | |
3345 | static inline hashval_t hash (const value_type *); | |
3346 | static inline bool equal (const value_type *, const compare_type *); | |
3347 | }; | |
3348 | ||
3349 | /* Htab support. Return a hash value for a `tm_memop'. */ | |
3350 | inline hashval_t | |
3351 | tm_memop_hasher::hash (const value_type *mem) | |
3352 | { | |
3353 | tree addr = mem->addr; | |
3354 | /* We drill down to the SSA_NAME/DECL for the hash, but equality is | |
3355 | actually done with operand_equal_p (see tm_memop_eq). */ | |
3356 | if (TREE_CODE (addr) == ADDR_EXPR) | |
3357 | addr = TREE_OPERAND (addr, 0); | |
3358 | return iterative_hash_expr (addr, 0); | |
3359 | } | |
3360 | ||
3361 | /* Htab support. Return true if two tm_memop's are the same. */ | |
3362 | inline bool | |
3363 | tm_memop_hasher::equal (const value_type *mem1, const compare_type *mem2) | |
3364 | { | |
3365 | return operand_equal_p (mem1->addr, mem2->addr, 0); | |
3366 | } | |
3367 | ||
4c0315d0 | 3368 | /* Sets for solving data flow equations in the memory optimization pass. */ |
3369 | struct tm_memopt_bitmaps | |
3370 | { | |
3371 | /* Stores available to this BB upon entry. Basically, stores that | |
3372 | dominate this BB. */ | |
3373 | bitmap store_avail_in; | |
3374 | /* Stores available at the end of this BB. */ | |
3375 | bitmap store_avail_out; | |
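  /* Stores anticipated on entry to this BB; seeded from the
     STORE_ANTIC_OUT sets of the successors (see
     tm_memopt_compute_antic below).  */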
3376 | bitmap store_antic_in; | |
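  /* Stores anticipated at the end of this BB: the union of
     STORE_ANTIC_IN and the stores local to this BB.  */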
3377 | bitmap store_antic_out; | |
3378 | /* Reads available to this BB upon entry. Basically, reads that | |
3379 | dominate this BB. */ | |
3380 | bitmap read_avail_in; | |
3381 | /* Reads available at the end of this BB. */ | |
3382 | bitmap read_avail_out; | |
3383 | /* Reads performed in this BB. */ | |
3384 | bitmap read_local; | |
3385 | /* Writes performed in this BB. */ | |
3386 | bitmap store_local; | |
3387 | ||
3388 | /* Temporary storage for pass. */ | |
3389 | /* Is the current BB in the worklist? */ | |
3390 | bool avail_in_worklist_p; | |
3391 | /* Have we visited this BB? */ | |
3392 | bool visited_p; | |
3393 | }; | |
3394 | ||
3395 | static bitmap_obstack tm_memopt_obstack; | |
3396 | ||
3397 | /* Unique counter for TM loads and stores. Loads and stores of the | |
3398 | same address get the same ID. */ | |
3399 | static unsigned int tm_memopt_value_id; | |
c1f445d2 | 3400 | static hash_table<tm_memop_hasher> *tm_memopt_value_numbers; |
4c0315d0 | 3401 | |
3402 | #define STORE_AVAIL_IN(BB) \ | |
3403 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in | |
3404 | #define STORE_AVAIL_OUT(BB) \ | |
3405 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out | |
3406 | #define STORE_ANTIC_IN(BB) \ | |
3407 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in | |
3408 | #define STORE_ANTIC_OUT(BB) \ | |
3409 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out | |
3410 | #define READ_AVAIL_IN(BB) \ | |
3411 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in | |
3412 | #define READ_AVAIL_OUT(BB) \ | |
3413 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out | |
3414 | #define READ_LOCAL(BB) \ | |
3415 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local | |
3416 | #define STORE_LOCAL(BB) \ | |
3417 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local | |
3418 | #define AVAIL_IN_WORKLIST_P(BB) \ | |
3419 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p | |
3420 | #define BB_VISITED_P(BB) \ | |
3421 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p | |
3422 | ||
4c0315d0 | 3423 | /* Given a TM load/store in STMT, return the value number for the address |
3424 | it accesses. */ | |
3425 | ||
3426 | static unsigned int | |
3427 | tm_memopt_value_number (gimple stmt, enum insert_option op) | |
3428 | { | |
3429 | struct tm_memop tmpmem, *mem; | |
d9dd21a8 | 3430 | tm_memop **slot; |
4c0315d0 | 3431 | |
3432 | gcc_assert (is_tm_load (stmt) || is_tm_store (stmt)); | |
3433 | tmpmem.addr = gimple_call_arg (stmt, 0); | |
c1f445d2 | 3434 | slot = tm_memopt_value_numbers->find_slot (&tmpmem, op); |
4c0315d0 | 3435 | if (*slot) |
d9dd21a8 | 3436 | mem = *slot; |
4c0315d0 | 3437 | else if (op == INSERT) |
3438 | { | |
3439 | mem = XNEW (struct tm_memop); | |
3440 | *slot = mem; | |
3441 | mem->value_id = tm_memopt_value_id++; | |
3442 | mem->addr = tmpmem.addr; | |
3443 | } | |
3444 | else | |
3445 | gcc_unreachable (); | |
3446 | return mem->value_id; | |
3447 | } | |
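
/* As an illustration (a sketch, not from any testcase): given

     _ITM_WU4 (&x, 1);		// store to x: value id 0
     tmp_1 = _ITM_RU4 (&x);	// load of x: same value id 0
     tmp_2 = _ITM_RU4 (&y);	// load of y: new value id 1

   both accesses to &x map to the same bit position in the dataflow
   bitmaps below, so the AVAIL/ANTIC sets track memory locations rather
   than individual statements.  */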
3448 | ||
3449 | /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */ | |
3450 | ||
3451 | static void | |
3452 | tm_memopt_accumulate_memops (basic_block bb) | |
3453 | { | |
3454 | gimple_stmt_iterator gsi; | |
3455 | ||
3456 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
3457 | { | |
3458 | gimple stmt = gsi_stmt (gsi); | |
3459 | bitmap bits; | |
3460 | unsigned int loc; | |
3461 | ||
3462 | if (is_tm_store (stmt)) | |
3463 | bits = STORE_LOCAL (bb); | |
3464 | else if (is_tm_load (stmt)) | |
3465 | bits = READ_LOCAL (bb); | |
3466 | else | |
3467 | continue; | |
3468 | ||
3469 | loc = tm_memopt_value_number (stmt, INSERT); | |
3470 | bitmap_set_bit (bits, loc); | |
3471 | if (dump_file) | |
3472 | { | |
3473 | fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=", | |
3474 | is_tm_load (stmt) ? "LOAD" : "STORE", loc, | |
3475 | gimple_bb (stmt)->index); | |
3476 | print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0); | |
3477 | fprintf (dump_file, "\n"); | |
3478 | } | |
3479 | } | |
3480 | } | |
3481 | ||
3482 | /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */ | |
3483 | ||
3484 | static void | |
3485 | dump_tm_memopt_set (const char *set_name, bitmap bits) | |
3486 | { | |
3487 | unsigned i; | |
3488 | bitmap_iterator bi; | |
3489 | const char *comma = ""; | |
3490 | ||
3491 | fprintf (dump_file, "TM memopt: %s: [", set_name); | |
3492 | EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi) | |
3493 | { | |
c1f445d2 | 3494 | hash_table<tm_memop_hasher>::iterator hi; |
d9dd21a8 | 3495 | struct tm_memop *mem = NULL; |
4c0315d0 | 3496 | |
3497 | /* Yeah, yeah, yeah. Whatever. This is just for debugging. */ | |
c1f445d2 | 3498 | FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi) |
4c0315d0 | 3499 | if (mem->value_id == i) |
3500 | break; | |
3501 | gcc_assert (mem->value_id == i); | |
3502 | fprintf (dump_file, "%s", comma); | |
3503 | comma = ", "; | |
3504 | print_generic_expr (dump_file, mem->addr, 0); | |
3505 | } | |
3506 | fprintf (dump_file, "]\n"); | |
3507 | } | |
3508 | ||
3509 | /* Prettily dump all of the memopt sets in BLOCKS. */ | |
3510 | ||
3511 | static void | |
f1f41a6c | 3512 | dump_tm_memopt_sets (vec<basic_block> blocks) |
4c0315d0 | 3513 | { |
3514 | size_t i; | |
3515 | basic_block bb; | |
3516 | ||
f1f41a6c | 3517 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3518 | { |
3519 | fprintf (dump_file, "------------BB %d---------\n", bb->index); | |
3520 | dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb)); | |
3521 | dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb)); | |
3522 | dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb)); | |
3523 | dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb)); | |
3524 | dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb)); | |
3525 | dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb)); | |
3526 | } | |
3527 | } | |
3528 | ||
3529 | /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */ | |
3530 | ||
3531 | static void | |
3532 | tm_memopt_compute_avin (basic_block bb) | |
3533 | { | |
3534 | edge e; | |
3535 | unsigned ix; | |
3536 | ||
3537 | /* Seed with the AVOUT of any predecessor. */ | |
3538 | for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++) | |
3539 | { | |
3540 | e = EDGE_PRED (bb, ix); | |
	/* Make sure we have already visited this BB, and that it has
	   thus been initialized.
3543 | ||
3544 | If e->src->aux is NULL, this predecessor is actually on an | |
3545 | enclosing transaction. We only care about the current | |
3546 | transaction, so ignore it. */ | |
3547 | if (e->src->aux && BB_VISITED_P (e->src)) | |
3548 | { | |
3549 | bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src)); | |
3550 | bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src)); | |
3551 | break; | |
3552 | } | |
3553 | } | |
3554 | ||
3555 | for (; ix < EDGE_COUNT (bb->preds); ix++) | |
3556 | { | |
3557 | e = EDGE_PRED (bb, ix); | |
3558 | if (e->src->aux && BB_VISITED_P (e->src)) | |
3559 | { | |
3560 | bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src)); | |
3561 | bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src)); | |
3562 | } | |
3563 | } | |
3564 | ||
3565 | BB_VISITED_P (bb) = true; | |
3566 | } | |
3567 | ||
3568 | /* Compute the STORE_ANTIC_IN for the basic block BB. */ | |
3569 | ||
3570 | static void | |
3571 | tm_memopt_compute_antin (basic_block bb) | |
3572 | { | |
3573 | edge e; | |
3574 | unsigned ix; | |
3575 | ||
3576 | /* Seed with the ANTIC_OUT of any successor. */ | |
3577 | for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++) | |
3578 | { | |
3579 | e = EDGE_SUCC (bb, ix); | |
	/* Make sure we have already visited this BB, and that it has
	   thus been initialized.  */
3582 | if (BB_VISITED_P (e->dest)) | |
3583 | { | |
3584 | bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest)); | |
3585 | break; | |
3586 | } | |
3587 | } | |
3588 | ||
3589 | for (; ix < EDGE_COUNT (bb->succs); ix++) | |
3590 | { | |
3591 | e = EDGE_SUCC (bb, ix); | |
3592 | if (BB_VISITED_P (e->dest)) | |
3593 | bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest)); | |
3594 | } | |
3595 | ||
3596 | BB_VISITED_P (bb) = true; | |
3597 | } | |
3598 | ||
3599 | /* Compute the AVAIL sets for every basic block in BLOCKS. | |
3600 | ||
3601 | We compute {STORE,READ}_AVAIL_{OUT,IN} as follows: | |
3602 | ||
3603 | AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb]) | |
3604 | AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors]) | |
3605 | ||
3606 | This is basically what we do in lcm's compute_available(), but here | |
3607 | we calculate two sets of sets (one for STOREs and one for READs), | |
3608 | and we work on a region instead of the entire CFG. | |
3609 | ||
3610 | REGION is the TM region. | |
3611 | BLOCKS are the basic blocks in the region. */ | |
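
/* As a worked example (a sketch): in the diamond

	     BB1 (store A)
	    /		  \
      BB2 (store B)   BB3 (stores A and B)
	    \		  /
	     BB4

   STORE_AVAIL_OUT is {A} for BB1 and {A,B} for both BB2 and BB3, so
   STORE_AVAIL_IN (BB4) = {A,B} & {A,B} = {A,B}: whichever path was
   taken, both locations have certainly been written on entry to BB4.  */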
3612 | ||
3613 | static void | |
3614 | tm_memopt_compute_available (struct tm_region *region, | |
f1f41a6c | 3615 | vec<basic_block> blocks) |
4c0315d0 | 3616 | { |
3617 | edge e; | |
3618 | basic_block *worklist, *qin, *qout, *qend, bb; | |
3619 | unsigned int qlen, i; | |
3620 | edge_iterator ei; | |
3621 | bool changed; | |
3622 | ||
3623 | /* Allocate a worklist array/queue. Entries are only added to the | |
3624 | list if they were not already on the list. So the size is | |
3625 | bounded by the number of basic blocks in the region. */ | |
f1f41a6c | 3626 | qlen = blocks.length () - 1; |
4c0315d0 | 3627 | qin = qout = worklist = |
3628 | XNEWVEC (basic_block, qlen); | |
3629 | ||
3630 | /* Put every block in the region on the worklist. */ | |
f1f41a6c | 3631 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3632 | { |
3633 | /* Seed AVAIL_OUT with the LOCAL set. */ | |
3634 | bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb)); | |
3635 | bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb)); | |
3636 | ||
3637 | AVAIL_IN_WORKLIST_P (bb) = true; | |
3638 | /* No need to insert the entry block, since it has an AVIN of | |
3639 | null, and an AVOUT that has already been seeded in. */ | |
3640 | if (bb != region->entry_block) | |
3641 | *qin++ = bb; | |
3642 | } | |
3643 | ||
3644 | /* The entry block has been initialized with the local sets. */ | |
3645 | BB_VISITED_P (region->entry_block) = true; | |
3646 | ||
3647 | qin = worklist; | |
3648 | qend = &worklist[qlen]; | |
3649 | ||
3650 | /* Iterate until the worklist is empty. */ | |
3651 | while (qlen) | |
3652 | { | |
3653 | /* Take the first entry off the worklist. */ | |
3654 | bb = *qout++; | |
3655 | qlen--; | |
3656 | ||
3657 | if (qout >= qend) | |
3658 | qout = worklist; | |
3659 | ||
3660 | /* This block can be added to the worklist again if necessary. */ | |
3661 | AVAIL_IN_WORKLIST_P (bb) = false; | |
3662 | tm_memopt_compute_avin (bb); | |
3663 | ||
3664 | /* Note: We do not add the LOCAL sets here because we already | |
3665 | seeded the AVAIL_OUT sets with them. */ | |
3666 | changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb)); | |
3667 | changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb)); | |
3668 | if (changed | |
3669 | && (region->exit_blocks == NULL | |
3670 | || !bitmap_bit_p (region->exit_blocks, bb->index))) | |
3671 | /* If the out state of this block changed, then we need to add | |
3672 | its successors to the worklist if they are not already in. */ | |
3673 | FOR_EACH_EDGE (e, ei, bb->succs) | |
34154e27 | 3674 | if (!AVAIL_IN_WORKLIST_P (e->dest) |
3675 | && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) | |
4c0315d0 | 3676 | { |
3677 | *qin++ = e->dest; | |
3678 | AVAIL_IN_WORKLIST_P (e->dest) = true; | |
3679 | qlen++; | |
3680 | ||
3681 | if (qin >= qend) | |
3682 | qin = worklist; | |
3683 | } | |
3684 | } | |
3685 | ||
3686 | free (worklist); | |
3687 | ||
3688 | if (dump_file) | |
3689 | dump_tm_memopt_sets (blocks); | |
3690 | } | |
3691 | ||
3692 | /* Compute ANTIC sets for every basic block in BLOCKS. | |
3693 | ||
3694 | We compute STORE_ANTIC_OUT as follows: | |
3695 | ||
3696 | STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb]) | |
3697 | STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors]) | |
3698 | ||
3699 | REGION is the TM region. | |
3700 | BLOCKS are the basic blocks in the region. */ | |
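
/* Continuing the sketch above, but flowing backwards: if BB4 itself
   stores A, then STORE_ANTIC_OUT (BB4) = {A}, and since BB4 is the only
   successor of BB2 and BB3, STORE_ANTIC_IN (BB2) and STORE_ANTIC_IN (BB3)
   are both {A}: a store to A is certain to follow no matter which path
   is taken.  */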
3701 | ||
3702 | static void | |
3703 | tm_memopt_compute_antic (struct tm_region *region, | |
f1f41a6c | 3704 | vec<basic_block> blocks) |
4c0315d0 | 3705 | { |
3706 | edge e; | |
3707 | basic_block *worklist, *qin, *qout, *qend, bb; | |
3708 | unsigned int qlen; | |
3709 | int i; | |
3710 | edge_iterator ei; | |
3711 | ||
3712 | /* Allocate a worklist array/queue. Entries are only added to the | |
3713 | list if they were not already on the list. So the size is | |
3714 | bounded by the number of basic blocks in the region. */ | |
f1f41a6c | 3715 | qin = qout = worklist = XNEWVEC (basic_block, blocks.length ()); |
4c0315d0 | 3716 | |
f1f41a6c | 3717 | for (qlen = 0, i = blocks.length () - 1; i >= 0; --i) |
4c0315d0 | 3718 | { |
f1f41a6c | 3719 | bb = blocks[i]; |
4c0315d0 | 3720 | |
3721 | /* Seed ANTIC_OUT with the LOCAL set. */ | |
3722 | bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb)); | |
3723 | ||
3724 | /* Put every block in the region on the worklist. */ | |
3725 | AVAIL_IN_WORKLIST_P (bb) = true; | |
3726 | /* No need to insert exit blocks, since their ANTIC_IN is NULL, | |
3727 | and their ANTIC_OUT has already been seeded in. */ | |
3728 | if (region->exit_blocks | |
3729 | && !bitmap_bit_p (region->exit_blocks, bb->index)) | |
3730 | { | |
3731 | qlen++; | |
3732 | *qin++ = bb; | |
3733 | } | |
3734 | } | |
3735 | ||
3736 | /* The exit blocks have been initialized with the local sets. */ | |
3737 | if (region->exit_blocks) | |
3738 | { | |
3739 | unsigned int i; | |
3740 | bitmap_iterator bi; | |
3741 | EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi) | |
f5a6b05f | 3742 | BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true; |
4c0315d0 | 3743 | } |
3744 | ||
3745 | qin = worklist; | |
3746 | qend = &worklist[qlen]; | |
3747 | ||
3748 | /* Iterate until the worklist is empty. */ | |
3749 | while (qlen) | |
3750 | { | |
3751 | /* Take the first entry off the worklist. */ | |
3752 | bb = *qout++; | |
3753 | qlen--; | |
3754 | ||
3755 | if (qout >= qend) | |
3756 | qout = worklist; | |
3757 | ||
3758 | /* This block can be added to the worklist again if necessary. */ | |
3759 | AVAIL_IN_WORKLIST_P (bb) = false; | |
3760 | tm_memopt_compute_antin (bb); | |
3761 | ||
3762 | /* Note: We do not add the LOCAL sets here because we already | |
3763 | seeded the ANTIC_OUT sets with them. */ | |
3764 | if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb)) | |
3765 | && bb != region->entry_block) | |
3766 | /* If the out state of this block changed, then we need to add | |
3767 | its predecessors to the worklist if they are not already in. */ | |
3768 | FOR_EACH_EDGE (e, ei, bb->preds) | |
3769 | if (!AVAIL_IN_WORKLIST_P (e->src)) | |
3770 | { | |
3771 | *qin++ = e->src; | |
3772 | AVAIL_IN_WORKLIST_P (e->src) = true; | |
3773 | qlen++; | |
3774 | ||
3775 | if (qin >= qend) | |
3776 | qin = worklist; | |
3777 | } | |
3778 | } | |
3779 | ||
3780 | free (worklist); | |
3781 | ||
3782 | if (dump_file) | |
3783 | dump_tm_memopt_sets (blocks); | |
3784 | } | |
3785 | ||
3786 | /* Offsets of load variants from TM_LOAD. For example, | |
3787 | BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*. | |
3788 | See gtm-builtins.def. */ | |
3789 | #define TRANSFORM_RAR 1 | |
3790 | #define TRANSFORM_RAW 2 | |
3791 | #define TRANSFORM_RFW 3 | |
3792 | /* Offsets of store variants from TM_STORE. */ | |
3793 | #define TRANSFORM_WAR 1 | |
3794 | #define TRANSFORM_WAW 2 | |
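
/* For example (assuming the grouping in gtm-builtins.def): starting
   from BUILT_IN_TM_LOAD_4 (_ITM_RU4),

     BUILT_IN_TM_LOAD_4 + TRANSFORM_RAW  => BUILT_IN_TM_LOAD_RAW_4
					    (_ITM_RaWU4)

   which is how tm_memopt_transform_stmt below retargets a call once
   the dataflow shows the location was already written within the
   transaction.  */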
3795 | ||
3796 | /* Inform about a load/store optimization. */ | |
3797 | ||
3798 | static void | |
3799 | dump_tm_memopt_transform (gimple stmt) | |
3800 | { | |
3801 | if (dump_file) | |
3802 | { | |
3803 | fprintf (dump_file, "TM memopt: transforming: "); | |
3804 | print_gimple_stmt (dump_file, stmt, 0, 0); | |
3805 | fprintf (dump_file, "\n"); | |
3806 | } | |
3807 | } | |
3808 | ||
3809 | /* Perform a read/write optimization. Replaces the TM builtin in STMT | |
3810 | by a builtin that is OFFSET entries down in the builtins table in | |
3811 | gtm-builtins.def. */ | |
3812 | ||
3813 | static void | |
3814 | tm_memopt_transform_stmt (unsigned int offset, | |
1a91d914 | 3815 | gcall *stmt, |
4c0315d0 | 3816 | gimple_stmt_iterator *gsi) |
3817 | { | |
3818 | tree fn = gimple_call_fn (stmt); | |
3819 | gcc_assert (TREE_CODE (fn) == ADDR_EXPR); | |
3820 | TREE_OPERAND (fn, 0) | |
3821 | = builtin_decl_explicit ((enum built_in_function) | |
3822 | (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0)) | |
3823 | + offset)); | |
3824 | gimple_call_set_fn (stmt, fn); | |
3825 | gsi_replace (gsi, stmt, true); | |
3826 | dump_tm_memopt_transform (stmt); | |
3827 | } | |
3828 | ||
3829 | /* Perform the actual TM memory optimization transformations in the | |
3830 | basic blocks in BLOCKS. */ | |
3831 | ||
3832 | static void | |
f1f41a6c | 3833 | tm_memopt_transform_blocks (vec<basic_block> blocks) |
4c0315d0 | 3834 | { |
3835 | size_t i; | |
3836 | basic_block bb; | |
3837 | gimple_stmt_iterator gsi; | |
3838 | ||
f1f41a6c | 3839 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3840 | { |
3841 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
3842 | { | |
3843 | gimple stmt = gsi_stmt (gsi); | |
3844 | bitmap read_avail = READ_AVAIL_IN (bb); | |
3845 | bitmap store_avail = STORE_AVAIL_IN (bb); | |
3846 | bitmap store_antic = STORE_ANTIC_OUT (bb); | |
3847 | unsigned int loc; | |
3848 | ||
3849 | if (is_tm_simple_load (stmt)) | |
3850 | { | |
1a91d914 | 3851 | gcall *call_stmt = as_a <gcall *> (stmt); |
4c0315d0 | 3852 | loc = tm_memopt_value_number (stmt, NO_INSERT); |
3853 | if (store_avail && bitmap_bit_p (store_avail, loc)) | |
1a91d914 | 3854 | tm_memopt_transform_stmt (TRANSFORM_RAW, call_stmt, &gsi); |
4c0315d0 | 3855 | else if (store_antic && bitmap_bit_p (store_antic, loc)) |
3856 | { | |
1a91d914 | 3857 | tm_memopt_transform_stmt (TRANSFORM_RFW, call_stmt, &gsi); |
4c0315d0 | 3858 | bitmap_set_bit (store_avail, loc); |
3859 | } | |
3860 | else if (read_avail && bitmap_bit_p (read_avail, loc)) | |
1a91d914 | 3861 | tm_memopt_transform_stmt (TRANSFORM_RAR, call_stmt, &gsi); |
4c0315d0 | 3862 | else |
3863 | bitmap_set_bit (read_avail, loc); | |
3864 | } | |
3865 | else if (is_tm_simple_store (stmt)) | |
3866 | { | |
1a91d914 | 3867 | gcall *call_stmt = as_a <gcall *> (stmt); |
4c0315d0 | 3868 | loc = tm_memopt_value_number (stmt, NO_INSERT); |
3869 | if (store_avail && bitmap_bit_p (store_avail, loc)) | |
1a91d914 | 3870 | tm_memopt_transform_stmt (TRANSFORM_WAW, call_stmt, &gsi); |
4c0315d0 | 3871 | else |
3872 | { | |
3873 | if (read_avail && bitmap_bit_p (read_avail, loc)) | |
1a91d914 | 3874 | tm_memopt_transform_stmt (TRANSFORM_WAR, call_stmt, &gsi); |
4c0315d0 | 3875 | bitmap_set_bit (store_avail, loc); |
3876 | } | |
3877 | } | |
3878 | } | |
3879 | } | |
3880 | } | |
3881 | ||
3882 | /* Return a new set of bitmaps for a BB. */ | |
3883 | ||
3884 | static struct tm_memopt_bitmaps * | |
3885 | tm_memopt_init_sets (void) | |
3886 | { | |
3887 | struct tm_memopt_bitmaps *b | |
3888 | = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps); | |
3889 | b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3890 | b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3891 | b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3892 | b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3894 | b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3895 | b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3896 | b->read_local = BITMAP_ALLOC (&tm_memopt_obstack); | |
3897 | b->store_local = BITMAP_ALLOC (&tm_memopt_obstack); | |
3898 | return b; | |
3899 | } | |
3900 | ||
3901 | /* Free sets computed for each BB. */ | |
3902 | ||
3903 | static void | |
f1f41a6c | 3904 | tm_memopt_free_sets (vec<basic_block> blocks) |
4c0315d0 | 3905 | { |
3906 | size_t i; | |
3907 | basic_block bb; | |
3908 | ||
f1f41a6c | 3909 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3910 | bb->aux = NULL; |
3911 | } | |
3912 | ||
3913 | /* Clear the visited bit for every basic block in BLOCKS. */ | |
3914 | ||
3915 | static void | |
f1f41a6c | 3916 | tm_memopt_clear_visited (vec<basic_block> blocks) |
4c0315d0 | 3917 | { |
3918 | size_t i; | |
3919 | basic_block bb; | |
3920 | ||
f1f41a6c | 3921 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3922 | BB_VISITED_P (bb) = false; |
3923 | } | |
3924 | ||
3925 | /* Replace TM load/stores with hints for the runtime. We handle | |
3926 | things like read-after-write, write-after-read, read-after-read, | |
3927 | read-for-write, etc. */ | |
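
/* A sketch of the effect on a single block (not from any testcase):

     tmp_1 = _ITM_RU4 (&a);	// first read of 'a': left alone
     tmp_2 = _ITM_RU4 (&a);	// read-after-read: becomes _ITM_RaRU4
     _ITM_WU4 (&b, tmp_1);	// first write to 'b': left alone
     _ITM_WU4 (&b, tmp_2);	// write-after-write: becomes _ITM_WaWU4

   letting the runtime skip redundant logging and conflict checks for
   the later accesses.  */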
3928 | ||
3929 | static unsigned int | |
3930 | execute_tm_memopt (void) | |
3931 | { | |
3932 | struct tm_region *region; | |
f1f41a6c | 3933 | vec<basic_block> bbs; |
4c0315d0 | 3934 | |
3935 | tm_memopt_value_id = 0; | |
c1f445d2 | 3936 | tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10); |
4c0315d0 | 3937 | |
3938 | for (region = all_tm_regions; region; region = region->next) | |
3939 | { | |
3940 | /* All the TM stores/loads in the current region. */ | |
3941 | size_t i; | |
3942 | basic_block bb; | |
3943 | ||
3944 | bitmap_obstack_initialize (&tm_memopt_obstack); | |
3945 | ||
3946 | /* Save all BBs for the current region. */ | |
3947 | bbs = get_tm_region_blocks (region->entry_block, | |
3948 | region->exit_blocks, | |
3949 | region->irr_blocks, | |
3950 | NULL, | |
3951 | false); | |
3952 | ||
3953 | /* Collect all the memory operations. */ | |
f1f41a6c | 3954 | for (i = 0; bbs.iterate (i, &bb); ++i) |
4c0315d0 | 3955 | { |
3956 | bb->aux = tm_memopt_init_sets (); | |
3957 | tm_memopt_accumulate_memops (bb); | |
3958 | } | |
3959 | ||
3960 | /* Solve data flow equations and transform each block accordingly. */ | |
3961 | tm_memopt_clear_visited (bbs); | |
3962 | tm_memopt_compute_available (region, bbs); | |
3963 | tm_memopt_clear_visited (bbs); | |
3964 | tm_memopt_compute_antic (region, bbs); | |
3965 | tm_memopt_transform_blocks (bbs); | |
3966 | ||
3967 | tm_memopt_free_sets (bbs); | |
f1f41a6c | 3968 | bbs.release (); |
4c0315d0 | 3969 | bitmap_obstack_release (&tm_memopt_obstack); |
c1f445d2 | 3970 | tm_memopt_value_numbers->empty (); |
4c0315d0 | 3971 | } |
3972 | ||
c1f445d2 | 3973 | delete tm_memopt_value_numbers; |
3974 | tm_memopt_value_numbers = NULL; | |
4c0315d0 | 3975 | return 0; |
3976 | } | |
3977 | ||
cbe8bda8 | 3978 | namespace { |
3979 | ||
3980 | const pass_data pass_data_tm_memopt = | |
3981 | { | |
3982 | GIMPLE_PASS, /* type */ | |
3983 | "tmmemopt", /* name */ | |
3984 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 3985 | TV_TRANS_MEM, /* tv_id */ |
3986 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
3987 | 0, /* properties_provided */ | |
3988 | 0, /* properties_destroyed */ | |
3989 | 0, /* todo_flags_start */ | |
3990 | 0, /* todo_flags_finish */ | |
4c0315d0 | 3991 | }; |
3992 | ||
cbe8bda8 | 3993 | class pass_tm_memopt : public gimple_opt_pass |
3994 | { | |
3995 | public: | |
9af5ce0c | 3996 | pass_tm_memopt (gcc::context *ctxt) |
3997 | : gimple_opt_pass (pass_data_tm_memopt, ctxt) | |
cbe8bda8 | 3998 | {} |
3999 | ||
4000 | /* opt_pass methods: */ | |
31315c24 | 4001 | virtual bool gate (function *) { return flag_tm && optimize > 0; } |
65b0537f | 4002 | virtual unsigned int execute (function *) { return execute_tm_memopt (); } |
cbe8bda8 | 4003 | |
4004 | }; // class pass_tm_memopt | |
4005 | ||
4006 | } // anon namespace | |
4007 | ||
4008 | gimple_opt_pass * | |
4009 | make_pass_tm_memopt (gcc::context *ctxt) | |
4010 | { | |
4011 | return new pass_tm_memopt (ctxt); | |
4012 | } | |
4013 | ||
4c0315d0 | 4014 | \f |
4015 | /* Interprocedual analysis for the creation of transactional clones. | |
4016 | The aim of this pass is to find which functions are referenced in | |
4017 | a non-irrevocable transaction context, and for those over which | |
4018 | we have control (or user directive), create a version of the | |
4019 | function which uses only the transactional interface to reference | |
4020 | protected memories. This analysis proceeds in several steps: | |
4021 | ||
4022 | (1) Collect the set of all possible transactional clones: | |
4023 | ||
4024 | (a) For all local public functions marked tm_callable, push | |
4025 | it onto the tm_callee queue. | |
4026 | ||
4027 | (b) For all local functions, scan for calls in transaction blocks. | |
4028 | Push the caller and callee onto the tm_caller and tm_callee | |
4029 | queues. Count the number of callers for each callee. | |
4030 | ||
4031 | (c) For each local function on the callee list, assume we will | |
4032 | create a transactional clone. Push *all* calls onto the | |
4033 | callee queues; count the number of clone callers separately | |
	 from the number of original callers.
4035 | ||
4036 | (2) Propagate irrevocable status up the dominator tree: | |
4037 | ||
4038 | (a) Any external function on the callee list that is not marked | |
4039 | tm_callable is irrevocable. Push all callers of such onto | |
4040 | a worklist. | |
4041 | ||
4042 | (b) For each function on the worklist, mark each block that | |
4043 | contains an irrevocable call. Use the AND operator to | |
4044 | propagate that mark up the dominator tree. | |
4045 | ||
4046 | (c) If we reach the entry block for a possible transactional | |
4047 | clone, then the transactional clone is irrevocable, and | |
4048 | we should not create the clone after all. Push all | |
4049 | callers onto the worklist. | |
4050 | ||
4051 | (d) Place tm_irrevocable calls at the beginning of the relevant | |
4052 | blocks. Special case here is the entry block for the entire | |
4053 | transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for | |
4054 | the library to begin the region in serial mode. Decrement | |
4055 | the call count for all callees in the irrevocable region. | |
4056 | ||
4057 | (3) Create the transactional clones: | |
4058 | ||
4059 | Any tm_callee that still has a non-zero call count is cloned. | |
4060 | */ | |
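
/* For example (a sketch): given

     int g;
     __attribute__((transaction_callable)) void f (void) { g++; }
     void h (void) { __transaction_atomic { f (); } }

   step (1) queues `f' as a possible transactional clone with one
   caller; step (2) finds no irrevocable blocks in it; and step (3)
   therefore creates a clone of `f' whose increment of `g' goes through
   the TM load/store builtins, and the call inside the transaction in
   `h' is redirected to that clone.  */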
4061 | ||
4062 | /* This structure is stored in the AUX field of each cgraph_node. */ | |
4063 | struct tm_ipa_cg_data | |
4064 | { | |
4065 | /* The clone of the function that got created. */ | |
4066 | struct cgraph_node *clone; | |
4067 | ||
4068 | /* The tm regions in the normal function. */ | |
4069 | struct tm_region *all_tm_regions; | |
4070 | ||
4071 | /* The blocks of the normal/clone functions that contain irrevocable | |
4072 | calls, or blocks that are post-dominated by irrevocable calls. */ | |
4073 | bitmap irrevocable_blocks_normal; | |
4074 | bitmap irrevocable_blocks_clone; | |
4075 | ||
4076 | /* The blocks of the normal function that are involved in transactions. */ | |
4077 | bitmap transaction_blocks_normal; | |
4078 | ||
4079 | /* The number of callers to the transactional clone of this function | |
4080 | from normal and transactional clones respectively. */ | |
4081 | unsigned tm_callers_normal; | |
4082 | unsigned tm_callers_clone; | |
4083 | ||
4084 | /* True if all calls to this function's transactional clone | |
4085 | are irrevocable. Also automatically true if the function | |
4086 | has no transactional clone. */ | |
4087 | bool is_irrevocable; | |
4088 | ||
4089 | /* Flags indicating the presence of this function in various queues. */ | |
4090 | bool in_callee_queue; | |
4091 | bool in_worklist; | |
4092 | ||
4093 | /* Flags indicating the kind of scan desired while in the worklist. */ | |
4094 | bool want_irr_scan_normal; | |
4095 | }; | |
4096 | ||
415d1b9a | 4097 | typedef vec<cgraph_node *> cgraph_node_queue; |
4c0315d0 | 4098 | |
4099 | /* Return the ipa data associated with NODE, allocating zeroed memory | |
3e426b86 | 4100 | if necessary. TRAVERSE_ALIASES is true if we must traverse aliases |
4101 | and set *NODE accordingly. */ | |
4c0315d0 | 4102 | |
4103 | static struct tm_ipa_cg_data * | |
3e426b86 | 4104 | get_cg_data (struct cgraph_node **node, bool traverse_aliases) |
4c0315d0 | 4105 | { |
3e426b86 | 4106 | struct tm_ipa_cg_data *d; |
4107 | ||
02774f2d | 4108 | if (traverse_aliases && (*node)->alias) |
415d1b9a | 4109 | *node = (*node)->get_alias_target (); |
3e426b86 | 4110 | |
02774f2d | 4111 | d = (struct tm_ipa_cg_data *) (*node)->aux; |
4c0315d0 | 4112 | |
4113 | if (d == NULL) | |
4114 | { | |
4115 | d = (struct tm_ipa_cg_data *) | |
4116 | obstack_alloc (&tm_obstack.obstack, sizeof (*d)); | |
02774f2d | 4117 | (*node)->aux = (void *) d; |
4c0315d0 | 4118 | memset (d, 0, sizeof (*d)); |
4119 | } | |
4120 | ||
4121 | return d; | |
4122 | } | |
4123 | ||
4124 | /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that | |
4125 | it is already present. */ | |
4126 | ||
4127 | static void | |
4128 | maybe_push_queue (struct cgraph_node *node, | |
4129 | cgraph_node_queue *queue_p, bool *in_queue_p) | |
4130 | { | |
4131 | if (!*in_queue_p) | |
4132 | { | |
4133 | *in_queue_p = true; | |
f1f41a6c | 4134 | queue_p->safe_push (node); |
4c0315d0 | 4135 | } |
4136 | } | |
4137 | ||
0cd02a19 | 4138 | /* Duplicate the basic blocks in QUEUE for use in the uninstrumented |
4139 | code path. QUEUE are the basic blocks inside the transaction | |
4140 | represented in REGION. | |
4141 | ||
4142 | Later in split_code_paths() we will add the conditional to choose | |
4143 | between the two alternatives. */ | |
4144 | ||
4145 | static void | |
4146 | ipa_uninstrument_transaction (struct tm_region *region, | |
f1f41a6c | 4147 | vec<basic_block> queue) |
0cd02a19 | 4148 | { |
4149 | gimple transaction = region->transaction_stmt; | |
4150 | basic_block transaction_bb = gimple_bb (transaction); | |
f1f41a6c | 4151 | int n = queue.length (); |
0cd02a19 | 4152 | basic_block *new_bbs = XNEWVEC (basic_block, n); |
4153 | ||
d99f53b2 | 4154 | copy_bbs (queue.address (), n, new_bbs, NULL, 0, NULL, NULL, transaction_bb, |
4155 | true); | |
0cd02a19 | 4156 | edge e = make_edge (transaction_bb, new_bbs[0], EDGE_TM_UNINSTRUMENTED); |
4157 | add_phi_args_after_copy (new_bbs, n, e); | |
4158 | ||
  // Now we will have a GIMPLE_TRANSACTION with 3 possible edges out of it:
4160 | // a) EDGE_FALLTHRU into the transaction | |
4161 | // b) EDGE_TM_ABORT out of the transaction | |
4162 | // c) EDGE_TM_UNINSTRUMENTED into the uninstrumented blocks. | |
4163 | ||
4164 | free (new_bbs); | |
4165 | } | |
4166 | ||
4c0315d0 | 4167 | /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone. |
4168 | Queue all callees within block BB. */ | |
4169 | ||
4170 | static void | |
4171 | ipa_tm_scan_calls_block (cgraph_node_queue *callees_p, | |
4172 | basic_block bb, bool for_clone) | |
4173 | { | |
4174 | gimple_stmt_iterator gsi; | |
4175 | ||
4176 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4177 | { | |
4178 | gimple stmt = gsi_stmt (gsi); | |
4179 | if (is_gimple_call (stmt) && !is_tm_pure_call (stmt)) | |
4180 | { | |
4181 | tree fndecl = gimple_call_fndecl (stmt); | |
4182 | if (fndecl) | |
4183 | { | |
4184 | struct tm_ipa_cg_data *d; | |
4185 | unsigned *pcallers; | |
4186 | struct cgraph_node *node; | |
4187 | ||
4188 | if (is_tm_ending_fndecl (fndecl)) | |
4189 | continue; | |
4190 | if (find_tm_replacement_function (fndecl)) | |
4191 | continue; | |
4192 | ||
415d1b9a | 4193 | node = cgraph_node::get (fndecl); |
4c0315d0 | 4194 | gcc_assert (node != NULL); |
3e426b86 | 4195 | d = get_cg_data (&node, true); |
4c0315d0 | 4196 | |
4197 | pcallers = (for_clone ? &d->tm_callers_clone | |
4198 | : &d->tm_callers_normal); | |
4199 | *pcallers += 1; | |
4200 | ||
4201 | maybe_push_queue (node, callees_p, &d->in_callee_queue); | |
4202 | } | |
4203 | } | |
4204 | } | |
4205 | } | |
4206 | ||
4207 | /* Scan all calls in NODE that are within a transaction region, | |
4208 | and push the resulting nodes into the callee queue. */ | |
4209 | ||
4210 | static void | |
4211 | ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d, | |
4212 | cgraph_node_queue *callees_p) | |
4213 | { | |
4214 | struct tm_region *r; | |
4215 | ||
4216 | d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack); | |
4217 | d->all_tm_regions = all_tm_regions; | |
4218 | ||
4219 | for (r = all_tm_regions; r; r = r->next) | |
4220 | { | |
f1f41a6c | 4221 | vec<basic_block> bbs; |
4c0315d0 | 4222 | basic_block bb; |
4223 | unsigned i; | |
4224 | ||
4225 | bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL, | |
4226 | d->transaction_blocks_normal, false); | |
4227 | ||
0cd02a19 | 4228 | // Generate the uninstrumented code path for this transaction. |
4229 | ipa_uninstrument_transaction (r, bbs); | |
4230 | ||
f1f41a6c | 4231 | FOR_EACH_VEC_ELT (bbs, i, bb) |
4c0315d0 | 4232 | ipa_tm_scan_calls_block (callees_p, bb, false); |
4233 | ||
f1f41a6c | 4234 | bbs.release (); |
4c0315d0 | 4235 | } |
0cd02a19 | 4236 | |
4237 | // ??? copy_bbs should maintain cgraph edges for the blocks as it is | |
4238 | // copying them, rather than forcing us to do this externally. | |
35ee1c66 | 4239 | cgraph_edge::rebuild_edges (); |
0cd02a19 | 4240 | |
4241 | // ??? In ipa_uninstrument_transaction we don't try to update dominators | |
4242 | // because copy_bbs doesn't return a VEC like iterate_fix_dominators expects. | |
4243 | // Instead, just release dominators here so update_ssa recomputes them. | |
4244 | free_dominance_info (CDI_DOMINATORS); | |
4245 | ||
4246 | // When building the uninstrumented code path, copy_bbs will have invoked | |
4247 | // create_new_def_for starting an "ssa update context". There is only one | |
4248 | // instance of this context, so resolve ssa updates before moving on to | |
4249 | // the next function. | |
4250 | update_ssa (TODO_update_ssa); | |
4c0315d0 | 4251 | } |
4252 | ||
4253 | /* Scan all calls in NODE as if this is the transactional clone, | |
4254 | and push the destinations into the callee queue. */ | |
4255 | ||
4256 | static void | |
4257 | ipa_tm_scan_calls_clone (struct cgraph_node *node, | |
4258 | cgraph_node_queue *callees_p) | |
4259 | { | |
02774f2d | 4260 | struct function *fn = DECL_STRUCT_FUNCTION (node->decl); |
4c0315d0 | 4261 | basic_block bb; |
4262 | ||
4263 | FOR_EACH_BB_FN (bb, fn) | |
4264 | ipa_tm_scan_calls_block (callees_p, bb, true); | |
4265 | } | |
4266 | ||
4267 | /* The function NODE has been detected to be irrevocable. Push all | |
4268 | of its callers onto WORKLIST for the purpose of re-scanning them. */ | |
4269 | ||
4270 | static void | |
4271 | ipa_tm_note_irrevocable (struct cgraph_node *node, | |
4272 | cgraph_node_queue *worklist_p) | |
4273 | { | |
3e426b86 | 4274 | struct tm_ipa_cg_data *d = get_cg_data (&node, true); |
4c0315d0 | 4275 | struct cgraph_edge *e; |
4276 | ||
4277 | d->is_irrevocable = true; | |
4278 | ||
4279 | for (e = node->callers; e ; e = e->next_caller) | |
4280 | { | |
4281 | basic_block bb; | |
3e426b86 | 4282 | struct cgraph_node *caller; |
4c0315d0 | 4283 | |
4284 | /* Don't examine recursive calls. */ | |
4285 | if (e->caller == node) | |
4286 | continue; | |
4287 | /* Even if we think we can go irrevocable, believe the user | |
4288 | above all. */ | |
02774f2d | 4289 | if (is_tm_safe_or_pure (e->caller->decl)) |
4c0315d0 | 4290 | continue; |
4291 | ||
3e426b86 | 4292 | caller = e->caller; |
4293 | d = get_cg_data (&caller, true); | |
4c0315d0 | 4294 | |
      /* Check if the call site is within a transactional region of the
	 caller.  If so, schedule the caller for normal re-scan as well.  */
4297 | bb = gimple_bb (e->call_stmt); | |
4298 | gcc_assert (bb != NULL); | |
4299 | if (d->transaction_blocks_normal | |
4300 | && bitmap_bit_p (d->transaction_blocks_normal, bb->index)) | |
4301 | d->want_irr_scan_normal = true; | |
4302 | ||
3e426b86 | 4303 | maybe_push_queue (caller, worklist_p, &d->in_worklist); |
4c0315d0 | 4304 | } |
4305 | } | |
4306 | ||
4307 | /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement | |
4308 | within the block is irrevocable. */ | |
4309 | ||
4310 | static bool | |
4311 | ipa_tm_scan_irr_block (basic_block bb) | |
4312 | { | |
4313 | gimple_stmt_iterator gsi; | |
4314 | tree fn; | |
4315 | ||
4316 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4317 | { | |
4318 | gimple stmt = gsi_stmt (gsi); | |
4319 | switch (gimple_code (stmt)) | |
4320 | { | |
e153bd50 | 4321 | case GIMPLE_ASSIGN: |
4322 | if (gimple_assign_single_p (stmt)) | |
4323 | { | |
4324 | tree lhs = gimple_assign_lhs (stmt); | |
4325 | tree rhs = gimple_assign_rhs1 (stmt); | |
4326 | if (volatile_var_p (lhs) || volatile_var_p (rhs)) | |
4327 | return true; | |
4328 | } | |
4329 | break; | |
4330 | ||
4c0315d0 | 4331 | case GIMPLE_CALL: |
e153bd50 | 4332 | { |
4333 | tree lhs = gimple_call_lhs (stmt); | |
4334 | if (lhs && volatile_var_p (lhs)) | |
4335 | return true; | |
4c0315d0 | 4336 | |
e153bd50 | 4337 | if (is_tm_pure_call (stmt)) |
4338 | break; | |
4c0315d0 | 4339 | |
e153bd50 | 4340 | fn = gimple_call_fn (stmt); |
4c0315d0 | 4341 | |
e153bd50 | 4342 | /* Functions with the attribute are by definition irrevocable. */ |
4343 | if (is_tm_irrevocable (fn)) | |
4344 | return true; | |
4c0315d0 | 4345 | |
e153bd50 | 4346 | /* For direct function calls, go ahead and check for replacement |
4347 | functions, or transitive irrevocable functions. For indirect | |
4348 | functions, we'll ask the runtime. */ | |
4349 | if (TREE_CODE (fn) == ADDR_EXPR) | |
4350 | { | |
4351 | struct tm_ipa_cg_data *d; | |
4352 | struct cgraph_node *node; | |
4c0315d0 | 4353 | |
e153bd50 | 4354 | fn = TREE_OPERAND (fn, 0); |
4355 | if (is_tm_ending_fndecl (fn)) | |
4356 | break; | |
4357 | if (find_tm_replacement_function (fn)) | |
4358 | break; | |
40879ac6 | 4359 | |
415d1b9a | 4360 | node = cgraph_node::get (fn); |
e153bd50 | 4361 | d = get_cg_data (&node, true); |
4362 | ||
4363 | /* Return true if irrevocable, but above all, believe | |
4364 | the user. */ | |
4365 | if (d->is_irrevocable | |
4366 | && !is_tm_safe_or_pure (fn)) | |
4367 | return true; | |
4368 | } | |
4369 | break; | |
4370 | } | |
4c0315d0 | 4371 | |
4372 | case GIMPLE_ASM: | |
4373 | /* ??? The Approved Method of indicating that an inline | |
4374 | assembly statement is not relevant to the transaction | |
4375 | is to wrap it in a __tm_waiver block. This is not | |
4376 | yet implemented, so we can't check for it. */ | |
43156aa3 | 4377 | if (is_tm_safe (current_function_decl)) |
4378 | { | |
4379 | tree t = build1 (NOP_EXPR, void_type_node, size_zero_node); | |
4380 | SET_EXPR_LOCATION (t, gimple_location (stmt)); | |
43156aa3 | 4381 | error ("%Kasm not allowed in %<transaction_safe%> function", t); |
4382 | } | |
4c0315d0 | 4383 | return true; |
4384 | ||
4385 | default: | |
4386 | break; | |
4387 | } | |
4388 | } | |
4389 | ||
4390 | return false; | |
4391 | } | |
4392 | ||
/* For each of the blocks seeded within PQUEUE, walk the CFG looking
4394 | for new irrevocable blocks, marking them in NEW_IRR. Don't bother | |
4395 | scanning past OLD_IRR or EXIT_BLOCKS. */ | |
4396 | ||
4397 | static bool | |
f1f41a6c | 4398 | ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr, |
4c0315d0 | 4399 | bitmap old_irr, bitmap exit_blocks) |
4400 | { | |
4401 | bool any_new_irr = false; | |
4402 | edge e; | |
4403 | edge_iterator ei; | |
4404 | bitmap visited_blocks = BITMAP_ALLOC (NULL); | |
4405 | ||
4406 | do | |
4407 | { | |
f1f41a6c | 4408 | basic_block bb = pqueue->pop (); |
4c0315d0 | 4409 | |
4410 | /* Don't re-scan blocks we know already are irrevocable. */ | |
4411 | if (old_irr && bitmap_bit_p (old_irr, bb->index)) | |
4412 | continue; | |
4413 | ||
4414 | if (ipa_tm_scan_irr_block (bb)) | |
4415 | { | |
4416 | bitmap_set_bit (new_irr, bb->index); | |
4417 | any_new_irr = true; | |
4418 | } | |
4419 | else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index)) | |
4420 | { | |
4421 | FOR_EACH_EDGE (e, ei, bb->succs) | |
4422 | if (!bitmap_bit_p (visited_blocks, e->dest->index)) | |
4423 | { | |
4424 | bitmap_set_bit (visited_blocks, e->dest->index); | |
f1f41a6c | 4425 | pqueue->safe_push (e->dest); |
4c0315d0 | 4426 | } |
4427 | } | |
4428 | } | |
f1f41a6c | 4429 | while (!pqueue->is_empty ()); |
4c0315d0 | 4430 | |
4431 | BITMAP_FREE (visited_blocks); | |
4432 | ||
4433 | return any_new_irr; | |
4434 | } | |
4435 | ||
4436 | /* Propagate the irrevocable property both up and down the dominator tree. | |
   ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
   are the exit blocks of the TM region; OLD_IRR are the results of a
   previous scan of the dominator
4439 | tree which has been fully propagated; NEW_IRR is the set of new blocks | |
4440 | which are gaining the irrevocable property during the current scan. */ | |
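
/* For instance (a sketch): if every successor of a block has turned
   irrevocable (say each contains an asm), the block itself is marked
   irrevocable on the way up; and once a block is irrevocable, every
   block it dominates within the region is marked on the way down,
   since those blocks can only be reached through it.  */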
4441 | ||
4442 | static void | |
4443 | ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr, | |
4444 | bitmap old_irr, bitmap exit_blocks) | |
4445 | { | |
f1f41a6c | 4446 | vec<basic_block> bbs; |
4c0315d0 | 4447 | bitmap all_region_blocks; |
4448 | ||
4449 | /* If this block is in the old set, no need to rescan. */ | |
4450 | if (old_irr && bitmap_bit_p (old_irr, entry_block->index)) | |
4451 | return; | |
4452 | ||
4453 | all_region_blocks = BITMAP_ALLOC (&tm_obstack); | |
4454 | bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL, | |
4455 | all_region_blocks, false); | |
4456 | do | |
4457 | { | |
f1f41a6c | 4458 | basic_block bb = bbs.pop (); |
4c0315d0 | 4459 | bool this_irr = bitmap_bit_p (new_irr, bb->index); |
4460 | bool all_son_irr = false; | |
4461 | edge_iterator ei; | |
4462 | edge e; | |
4463 | ||
      /* Propagate up.  If all of my successors are irrevocable, I am
	 too (there must be at least one such successor).  */
4466 | if (!this_irr) | |
4467 | { | |
4468 | FOR_EACH_EDGE (e, ei, bb->succs) | |
4469 | { | |
4470 | if (!bitmap_bit_p (new_irr, e->dest->index)) | |
4471 | { | |
4472 | all_son_irr = false; | |
4473 | break; | |
4474 | } | |
4475 | else | |
4476 | all_son_irr = true; | |
4477 | } | |
4478 | if (all_son_irr) | |
4479 | { | |
4480 | /* Add block to new_irr if it hasn't already been processed. */ | |
4481 | if (!old_irr || !bitmap_bit_p (old_irr, bb->index)) | |
4482 | { | |
4483 | bitmap_set_bit (new_irr, bb->index); | |
4484 | this_irr = true; | |
4485 | } | |
4486 | } | |
4487 | } | |
4488 | ||
4489 | /* Propagate down to everyone we immediately dominate. */ | |
4490 | if (this_irr) | |
4491 | { | |
4492 | basic_block son; | |
4493 | for (son = first_dom_son (CDI_DOMINATORS, bb); | |
4494 | son; | |
4495 | son = next_dom_son (CDI_DOMINATORS, son)) | |
4496 | { | |
4497 | /* Make sure block is actually in a TM region, and it | |
4498 | isn't already in old_irr. */ | |
4499 | if ((!old_irr || !bitmap_bit_p (old_irr, son->index)) | |
4500 | && bitmap_bit_p (all_region_blocks, son->index)) | |
4501 | bitmap_set_bit (new_irr, son->index); | |
4502 | } | |
4503 | } | |
4504 | } | |
f1f41a6c | 4505 | while (!bbs.is_empty ()); |
4c0315d0 | 4506 | |
4507 | BITMAP_FREE (all_region_blocks); | |
f1f41a6c | 4508 | bbs.release (); |
4c0315d0 | 4509 | } |
4510 | ||
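/* Subtract one from the recorded caller counts of the transactional
   clone (when FOR_CLONE) or of the normal version of each function
   called from BB.  TM-pure calls, transaction-ending builtins, and
   calls that have a TM replacement function are skipped, mirroring
   the checks below.  Used when BB has newly been marked irrevocable,
   since its calls will no longer be redirected to transactional
   clones.  */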
static void
ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl)
	    {
	      struct tm_ipa_cg_data *d;
	      unsigned *pcallers;
	      struct cgraph_node *tnode;

	      if (is_tm_ending_fndecl (fndecl))
		continue;
	      if (find_tm_replacement_function (fndecl))
		continue;

	      tnode = cgraph_node::get (fndecl);
	      d = get_cg_data (&tnode, true);

	      pcallers = (for_clone ? &d->tm_callers_clone
			  : &d->tm_callers_normal);

	      gcc_assert (*pcallers > 0);
	      *pcallers -= 1;
	    }
	}
    }
}

/* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
   as well as other irrevocable actions such as inline assembly.  Mark all
   such blocks as irrevocable and decrement the number of calls to
   transactional clones.  Return true if, for the transactional clone, the
   entire function is irrevocable.  */

static bool
ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
{
  struct tm_ipa_cg_data *d;
  bitmap new_irr, old_irr;
  bool ret = false;

  /* Builtin operators (operator new, and such).  */
  if (DECL_STRUCT_FUNCTION (node->decl) == NULL
      || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
    return false;

  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  d = get_cg_data (&node, true);
  auto_vec<basic_block, 10> queue;
  new_irr = BITMAP_ALLOC (&tm_obstack);

  /* Scan each tm region, propagating irrevocable status through the tree.  */
  if (for_clone)
    {
      old_irr = d->irrevocable_blocks_clone;
      queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
	{
	  ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
				new_irr,
				old_irr, NULL);
	  ret = bitmap_bit_p (new_irr,
			      single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
	}
    }
  else
    {
      struct tm_region *region;

      old_irr = d->irrevocable_blocks_normal;
      for (region = d->all_tm_regions; region; region = region->next)
	{
	  queue.quick_push (region->entry_block);
	  if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
				      region->exit_blocks))
	    ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
				  region->exit_blocks);
	}
    }

  /* If we found any new irrevocable blocks, reduce the call count for
     transactional clones within the irrevocable blocks.  Save the new
     set of irrevocable blocks for next time.  */
  if (!bitmap_empty_p (new_irr))
    {
      bitmap_iterator bmi;
      unsigned i;

      EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
	ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i),
				       for_clone);

      if (old_irr)
	{
	  bitmap_ior_into (old_irr, new_irr);
	  BITMAP_FREE (new_irr);
	}
      else if (for_clone)
	d->irrevocable_blocks_clone = new_irr;
      else
	d->irrevocable_blocks_normal = new_irr;

      if (dump_file && new_irr)
	{
	  const char *dname;
	  bitmap_iterator bmi;
	  unsigned i;

	  dname = lang_hooks.decl_printable_name (current_function_decl, 2);
	  EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
	    fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
	}
    }
  else
    BITMAP_FREE (new_irr);

  pop_cfun ();

  return ret;
}

/* Return true if, for the transactional clone of NODE, any call
   may enter irrevocable mode.  */

static bool
ipa_tm_mayenterirr_function (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  tree decl;
  unsigned flags;

  d = get_cg_data (&node, true);
  decl = node->decl;
  flags = flags_from_decl_or_type (decl);

  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags & ECF_TM_BUILTIN)
    return false;

  /* Filter out all functions that are marked.  */
  if (flags & ECF_TM_PURE)
    return false;
  if (is_tm_safe (decl))
    return false;
  if (is_tm_irrevocable (decl))
    return true;
  if (is_tm_callable (decl))
    return true;
  if (find_tm_replacement_function (decl))
    return true;

  /* If we aren't seeing the final version of the function we don't
     know what it will contain at runtime.  */
  if (node->get_availability () < AVAIL_AVAILABLE)
    return true;

  /* If the function must go irrevocable, then of course true.  */
  if (d->is_irrevocable)
    return true;

  /* If there are any blocks marked irrevocable, then the function
     as a whole may enter irrevocable.  */
  if (d->irrevocable_blocks_clone)
    return true;

  /* We may have previously marked this function as tm_may_enter_irr;
     see pass_diagnose_tm_blocks.  */
  if (node->local.tm_may_enter_irr)
    return true;

  /* Recurse on the main body for aliases.  In general, this will
     result in one of the bits above being set so that we will not
     have to recurse next time.  */
  if (node->alias)
    return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias));

  /* What remains is unmarked local functions without items that force
     the function to go irrevocable.  */
  return false;
}

/* Diagnose calls from transaction_safe functions to unmarked
   functions that are determined not to be safe.  */

static void
ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  for (e = node->callees; e ; e = e->next_callee)
    if (!is_tm_callable (e->callee->decl)
	&& e->callee->local.tm_may_enter_irr)
      error_at (gimple_location (e->call_stmt),
		"unsafe function call %qD within "
		"%<transaction_safe%> function", e->callee->decl);
}

/* Diagnose calls from atomic transactions to unmarked functions
   that are determined not to be safe.  */

static void
ipa_tm_diagnose_transaction (struct cgraph_node *node,
			     struct tm_region *all_tm_regions)
{
  struct tm_region *r;

  for (r = all_tm_regions; r ; r = r->next)
    if (gimple_transaction_subcode (r->get_transaction_stmt ())
	& GTMA_IS_RELAXED)
      {
	/* Atomic transactions can be nested inside relaxed.  */
	if (r->inner)
	  ipa_tm_diagnose_transaction (node, r->inner);
      }
    else
      {
	vec<basic_block> bbs;
	gimple_stmt_iterator gsi;
	basic_block bb;
	size_t i;

	bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
				    r->irr_blocks, NULL, false);

	for (i = 0; bbs.iterate (i, &bb); ++i)
	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple stmt = gsi_stmt (gsi);
	      tree fndecl;

	      if (gimple_code (stmt) == GIMPLE_ASM)
		{
		  error_at (gimple_location (stmt),
			    "asm not allowed in atomic transaction");
		  continue;
		}

	      if (!is_gimple_call (stmt))
		continue;
	      fndecl = gimple_call_fndecl (stmt);

	      /* Indirect function calls have been diagnosed already.  */
	      if (!fndecl)
		continue;

	      /* Stop at the end of the transaction.  */
	      if (is_tm_ending_fndecl (fndecl))
		{
		  if (bitmap_bit_p (r->exit_blocks, bb->index))
		    break;
		  continue;
		}

	      /* Marked functions have been diagnosed already.  */
	      if (is_tm_pure_call (stmt))
		continue;
	      if (is_tm_callable (fndecl))
		continue;

	      if (cgraph_node::local_info (fndecl)->tm_may_enter_irr)
		error_at (gimple_location (stmt),
			  "unsafe function call %qD within "
			  "atomic transaction", fndecl);
	    }

	bbs.release ();
      }
}

/* Return a transactional mangled name for the DECL_ASSEMBLER_NAME given
   in OLD_ASM_ID.  The returned value is an IDENTIFIER_NODE from the
   identifier table, so the caller need not free anything.  */
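/* For example (matching the two branches below): a plain C identifier
   "foo" of length 3 becomes "_ZGTt3foo", while an already-mangled C++
   name such as "_Z3foov" becomes "_ZGTt3foov", i.e. the "_Z" prefix
   is skipped and "_ZGTt" marks the transactional clone.  */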

static tree
tm_mangle (tree old_asm_id)
{
  const char *old_asm_name;
  char *tm_name;
  void *alloc = NULL;
  struct demangle_component *dc;
  tree new_asm_id;

  /* Determine if the symbol is already a valid C++ mangled name.  Do this
     even for C, which might be interfacing with C++ code via appropriately
     ugly identifiers.  */
  /* ??? We could probably do just as well checking for "_Z" and be done.  */
  old_asm_name = IDENTIFIER_POINTER (old_asm_id);
  dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);

  if (dc == NULL)
    {
      char length[8];

    do_unencoded:
      sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
      tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
    }
  else
    {
      old_asm_name += 2;	/* Skip _Z */

      switch (dc->type)
	{
	case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
	case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
	  /* Don't play silly games, you!  */
	  goto do_unencoded;

	case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
	  /* I'd really like to know if we can ever be passed one of
	     these from the C++ front end.  The Logical Thing would
	     seem that hidden-alias should be outer-most, so that we
	     get hidden-alias of a transaction-clone and not vice-versa.  */
	  old_asm_name += 2;
	  break;

	default:
	  break;
	}

      tm_name = concat ("_ZGTt", old_asm_name, NULL);
    }
  free (alloc);

  new_asm_id = get_identifier (tm_name);
  free (tm_name);

  return new_asm_id;
}

static inline void
ipa_tm_mark_force_output_node (struct cgraph_node *node)
{
  node->mark_force_output ();
  node->analyzed = true;
}

static inline void
ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
{
  node->forced_by_abi = true;
  node->analyzed = true;
}

/* Callback data for ipa_tm_create_version_alias.  */
struct create_version_alias_info
{
  struct cgraph_node *old_node;
  tree new_decl;
};

/* A subroutine of ipa_tm_create_version, called via
   cgraph_node::call_for_symbol_thunks_and_aliases.  Create new tm clones
   for each of the existing aliases.  */
static bool
ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
{
  struct create_version_alias_info *info
    = (struct create_version_alias_info *)data;
  tree old_decl, new_decl, tm_name;
  struct cgraph_node *new_node;

  if (!node->cpp_implicit_alias)
    return false;

  old_decl = node->decl;
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
			 TREE_CODE (old_decl), tm_name,
			 TREE_TYPE (old_decl));

  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);

  /* Based loosely on C++'s make_alias_for().  */
  TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
  DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
  DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
  TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
  DECL_EXTERNAL (new_decl) = 0;
  DECL_ARTIFICIAL (new_decl) = 1;
  TREE_ADDRESSABLE (new_decl) = 1;
  TREE_USED (new_decl) = 1;
  TREE_SYMBOL_REFERENCED (tm_name) = 1;

  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    varpool_node::get (new_decl)->set_comdat_group
      (tm_mangle (decl_comdat_group_id (old_decl)));

  new_node = cgraph_node::create_same_body_alias (new_decl, info->new_decl);
  new_node->tm_clone = true;
  new_node->externally_visible = info->old_node->externally_visible;
  new_node->no_reorder = info->old_node->no_reorder;
  /* ?? Do not traverse aliases here.  */
  get_cg_data (&node, false)->clone = new_node;

  record_tm_clone_pair (old_decl, new_decl);

  if (info->old_node->force_output
      || info->old_node->ref_list.first_referring ())
    ipa_tm_mark_force_output_node (new_node);
  if (info->old_node->forced_by_abi)
    ipa_tm_mark_forced_by_abi_node (new_node);
  return false;
}

/* Create a copy of the function (possibly declaration only) of OLD_NODE,
   appropriate for the transactional clone.  */
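/* For instance, cloning a function whose assembler name is "_Z3foov"
   yields a new decl named "_ZGTt3foov" (see tm_mangle above); the
   old/new pair is recorded via record_tm_clone_pair so that calls in
   transactions can later be redirected to the clone.  */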

static void
ipa_tm_create_version (struct cgraph_node *old_node)
{
  tree new_decl, old_decl, tm_name;
  struct cgraph_node *new_node;

  old_decl = old_node->decl;
  new_decl = copy_node (old_decl);

  /* DECL_ASSEMBLER_NAME needs to be set before we call
     cgraph_node::create_version_clone below, because cgraph_node will
     fill the assembler_name_hash.  */
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);
  TREE_SYMBOL_REFERENCED (tm_name) = 1;

  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    varpool_node::get (new_decl)->set_comdat_group
      (tm_mangle (DECL_COMDAT_GROUP (old_decl)));

  gcc_assert (!old_node->ipa_transforms_to_apply.exists ());
  new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
  new_node->local.local = false;
  new_node->externally_visible = old_node->externally_visible;
  new_node->lowered = true;
  new_node->tm_clone = 1;
  get_cg_data (&old_node, true)->clone = new_node;

  if (old_node->get_availability () >= AVAIL_INTERPOSABLE)
    {
      /* Remap extern inline to static inline.  */
      /* ??? Is it worth trying to use make_decl_one_only?  */
      if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
	{
	  DECL_EXTERNAL (new_decl) = 0;
	  TREE_PUBLIC (new_decl) = 0;
	  DECL_WEAK (new_decl) = 0;
	}

      tree_function_versioning (old_decl, new_decl,
				NULL, false, NULL,
				false, NULL, NULL);
    }

  record_tm_clone_pair (old_decl, new_decl);

  symtab->call_cgraph_insertion_hooks (new_node);
  if (old_node->force_output
      || old_node->ref_list.first_referring ())
    ipa_tm_mark_force_output_node (new_node);
  if (old_node->forced_by_abi)
    ipa_tm_mark_forced_by_abi_node (new_node);

  /* Do the same thing, but for any aliases of the original node.  */
  {
    struct create_version_alias_info data;
    data.old_node = old_node;
    data.new_decl = new_decl;
    old_node->call_for_symbol_thunks_and_aliases (ipa_tm_create_version_alias,
						  &data, true);
  }
}

/* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB.  */

static void
ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
			basic_block bb)
{
  gimple_stmt_iterator gsi;
  gcall *g;

  transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
			 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));

  split_block_after_labels (bb);
  gsi = gsi_after_labels (bb);
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);

  node->create_edge (cgraph_node::get_create
		       (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
		     g, 0,
		     compute_call_stmt_bb_frequency (node->decl,
						     gimple_bb (g)));
}

/* Construct a call to TM_GETTMCLONE and insert it before GSI.  */
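/* Roughly, the transformation below rewrites

       x = fnptr (args);

   into (the SSA names are illustrative only)

       ret_1 = gettm_fn (fnptr);
       callfn_2 = (function type of fnptr) ret_1;
       x = callfn_2 (args);

   where gettm_fn is the safe or irrevocable TM_GETTMCLONE builtin,
   letting the TM runtime supply the transactional clone at run time.  */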

static bool
ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
			       struct tm_region *region,
			       gimple_stmt_iterator *gsi, gcall *stmt)
{
  tree gettm_fn, ret, old_fn, callfn;
  gcall *g;
  gassign *g2;
  bool safe;

  old_fn = gimple_call_fn (stmt);

  if (TREE_CODE (old_fn) == ADDR_EXPR)
    {
      tree fndecl = TREE_OPERAND (old_fn, 0);
      tree clone = get_tm_clone_pair (fndecl);

      /* By transforming the call into a TM_GETTMCLONE, we are
	 technically taking the address of the original function and
	 its clone.  Explain this so inlining will know this function
	 is needed.  */
      cgraph_node::get (fndecl)->mark_address_taken ();
      if (clone)
	cgraph_node::get (clone)->mark_address_taken ();
    }

  safe = is_tm_safe (TREE_TYPE (old_fn));
  gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
				    : BUILT_IN_TM_GETTMCLONE_IRR);
  ret = create_tmp_var (ptr_type_node);

  if (!safe)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  /* Discard OBJ_TYPE_REF, since we weren't able to fold it.  */
  if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
    old_fn = OBJ_TYPE_REF_EXPR (old_fn);

  g = gimple_build_call (gettm_fn, 1, old_fn);
  ret = make_ssa_name (ret, g);
  gimple_call_set_lhs (g, ret);

  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  node->create_edge (cgraph_node::get_create (gettm_fn), g, 0,
		     compute_call_stmt_bb_frequency (node->decl,
						     gimple_bb (g)));

  /* Cast return value from tm_gettmclone* into appropriate function
     pointer.  */
  callfn = create_tmp_var (TREE_TYPE (old_fn));
  g2 = gimple_build_assign (callfn,
			    fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
  callfn = make_ssa_name (callfn, g2);
  gimple_assign_set_lhs (g2, callfn);
  gsi_insert_before (gsi, g2, GSI_SAME_STMT);

  /* ??? This is a hack to preserve the NOTHROW bit on the call,
     which we would have derived from the decl.  Failure to save
     this bit means we might have to split the basic block.  */
  if (gimple_call_nothrow_p (stmt))
    gimple_call_set_nothrow (stmt, true);

  gimple_call_set_fn (stmt, callfn);

  /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
     for a call statement.  Fix it.  */
  {
    tree lhs = gimple_call_lhs (stmt);
    tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
    if (lhs
	&& !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
      {
	tree temp;

	temp = create_tmp_reg (rettype);
	gimple_call_set_lhs (stmt, temp);

	g2 = gimple_build_assign (lhs,
				  fold_build1 (VIEW_CONVERT_EXPR,
					       TREE_TYPE (lhs), temp));
	gsi_insert_after (gsi, g2, GSI_SAME_STMT);
      }
  }

  update_stmt (stmt);
  cgraph_edge *e = cgraph_node::get (current_function_decl)->get_edge (stmt);
  if (e && e->indirect_info)
    e->indirect_info->polymorphic = false;

  return true;
}

/* Helper function for ipa_tm_transform_calls*.  Given a call
   statement in GSI which resides inside transaction REGION, redirect
   the call to either its wrapper function, or its clone.  */
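/* The cases below are tried in order: indirect calls are passed
   through the runtime (ipa_tm_insert_gettmclone_call); TM builtins
   are left untouched; recursive calls inside a clone are retargeted
   at the clone itself; calls with a registered replacement use that
   wrapper; anything else is redirected to the callee's transactional
   clone, falling back to the runtime lookup when no clone exists.  */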

static void
ipa_tm_transform_calls_redirect (struct cgraph_node *node,
				 struct tm_region *region,
				 gimple_stmt_iterator *gsi,
				 bool *need_ssa_rename_p)
{
  gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
  struct cgraph_node *new_node;
  struct cgraph_edge *e = node->get_edge (stmt);
  tree fndecl = gimple_call_fndecl (stmt);

  /* For indirect calls, pass the address through the runtime.  */
  if (fndecl == NULL)
    {
      *need_ssa_rename_p |=
	ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
      return;
    }

  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
    return;

  /* Fixup recursive calls inside clones.  */
  /* ??? Why did cgraph_copy_node_for_versioning update the call edges
     for recursion but not update the call statements themselves?  */
  if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
    {
      gimple_call_set_fndecl (stmt, current_function_decl);
      return;
    }

  /* If there is a replacement, use it.  */
  fndecl = find_tm_replacement_function (fndecl);
  if (fndecl)
    {
      new_node = cgraph_node::get_create (fndecl);

      /* ??? Mark all transaction_wrap functions tm_may_enter_irr.

	 We can't do this earlier in record_tm_replacement because
	 cgraph_remove_unreachable_nodes is called before we inject
	 references to the node.  Further, we can't do this in some
	 nice central place in ipa_tm_execute because we don't have
	 the exact list of wrapper functions that would be used.
	 Marking more wrappers than necessary results in the creation
	 of unnecessary cgraph_nodes, which can cause some of the
	 other IPA passes to crash.

	 We do need to mark these nodes so that we get the proper
	 result in expand_call_tm.  */
      /* ??? This seems broken.  How is it that we're marking the
	 CALLEE as may_enter_irr?  Surely we should be marking the
	 CALLER.  Also note that find_tm_replacement_function also
	 contains mappings into the TM runtime, e.g. memcpy.  These
	 we know won't go irrevocable.  */
      new_node->local.tm_may_enter_irr = 1;
    }
  else
    {
      struct tm_ipa_cg_data *d;
      struct cgraph_node *tnode = e->callee;

      d = get_cg_data (&tnode, true);
      new_node = d->clone;

      /* As we've already skipped pure calls and appropriate builtins,
	 and we've already marked irrevocable blocks, if we can't come
	 up with a static replacement, then ask the runtime.  */
      if (new_node == NULL)
	{
	  *need_ssa_rename_p |=
	    ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
	  return;
	}

      fndecl = new_node->decl;
    }

  e->redirect_callee (new_node);
  gimple_call_set_fndecl (stmt, fndecl);
}

/* Helper function for ipa_tm_transform_calls.  For a given BB,
   install calls to tm_irrevocable when IRR_BLOCKS are reached,
   redirect other calls to the generated transactional clone.  */

static bool
ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
			  basic_block bb, bitmap irr_blocks)
{
  gimple_stmt_iterator gsi;
  bool need_ssa_rename = false;

  if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
    {
      ipa_tm_insert_irr_call (node, region, bb);
      return true;
    }

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      if (!is_gimple_call (stmt))
	continue;
      if (is_tm_pure_call (stmt))
	continue;

      /* Redirect edges to the appropriate replacement or clone.  */
      ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
    }

  return need_ssa_rename;
}

/* Walk the CFG for REGION, beginning at BB.  Install calls to
   tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
   the generated transactional clone.  */

static bool
ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
			basic_block bb, bitmap irr_blocks)
{
  bool need_ssa_rename = false;
  edge e;
  edge_iterator ei;
  auto_vec<basic_block> queue;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  queue.safe_push (bb);
  do
    {
      bb = queue.pop ();

      need_ssa_rename |=
	ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);

      if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
	continue;

      if (region && bitmap_bit_p (region->exit_blocks, bb->index))
	continue;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (!bitmap_bit_p (visited_blocks, e->dest->index))
	  {
	    bitmap_set_bit (visited_blocks, e->dest->index);
	    queue.safe_push (e->dest);
	  }
    }
  while (!queue.is_empty ());

  BITMAP_FREE (visited_blocks);

  return need_ssa_rename;
}

/* Transform the calls within the TM regions within NODE.  */

static void
ipa_tm_transform_transaction (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  struct tm_region *region;
  bool need_ssa_rename = false;

  d = get_cg_data (&node, true);

  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  for (region = d->all_tm_regions; region; region = region->next)
    {
      /* If we're sure to go irrevocable, don't transform anything.  */
      if (d->irrevocable_blocks_normal
	  && bitmap_bit_p (d->irrevocable_blocks_normal,
			   region->entry_block->index))
	{
	  transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
				   | GTMA_MAY_ENTER_IRREVOCABLE
				   | GTMA_HAS_NO_INSTRUMENTATION);
	  continue;
	}

      need_ssa_rename |=
	ipa_tm_transform_calls (node, region, region->entry_block,
				d->irrevocable_blocks_normal);
    }

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
}

/* Transform the calls within the transactional clone of NODE.  */

static void
ipa_tm_transform_clone (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  bool need_ssa_rename;

  d = get_cg_data (&node, true);

  /* If this function makes no calls and has no irrevocable blocks,
     then there's nothing to do.  */
  /* ??? Remove non-aborting top-level transactions.  */
  if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
    return;

  push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  need_ssa_rename =
    ipa_tm_transform_calls (d->clone, NULL,
			    single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
			    d->irrevocable_blocks_clone);

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
}

/* Main entry point for the transactional memory IPA pass.  */
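/* In outline, ipa_tm_execute proceeds in phases:
   1) queue all tm_callable functions and scan transaction regions,
   2) iteratively mark irrevocable blocks and whole functions,
   3) compute and propagate the tm_may_enter_irr bit to callers,
   4) diagnose unsafe calls in tm_safe functions and atomic regions,
   5) create transactional clones where needed,
   6) redirect calls within regions and clones to those versions.  */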

static unsigned int
ipa_tm_execute (void)
{
  cgraph_node_queue tm_callees = cgraph_node_queue ();
  /* List of functions that will go irrevocable.  */
  cgraph_node_queue irr_worklist = cgraph_node_queue ();

  struct cgraph_node *node;
  struct tm_ipa_cg_data *d;
  enum availability a;
  unsigned int i;

#ifdef ENABLE_CHECKING
  cgraph_node::verify_cgraph_nodes ();
#endif

  bitmap_obstack_initialize (&tm_obstack);
  initialize_original_copy_tables ();

  /* For all local functions marked tm_callable, queue them.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (is_tm_callable (node->decl)
	&& node->get_availability () >= AVAIL_INTERPOSABLE)
      {
	d = get_cg_data (&node, true);
	maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
      }

  /* For all local reachable functions...  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
	&& node->get_availability () >= AVAIL_INTERPOSABLE)
      {
	/* ... marked tm_pure, record that fact for the runtime by
	   indicating that the pure function is its own tm_callable.
	   No need to do this if the function's address can't be taken.  */
	if (is_tm_pure (node->decl))
	  {
	    if (!node->local.local)
	      record_tm_clone_pair (node->decl, node->decl);
	    continue;
	  }

	push_cfun (DECL_STRUCT_FUNCTION (node->decl));
	calculate_dominance_info (CDI_DOMINATORS);

	tm_region_init (NULL);
	if (all_tm_regions)
	  {
	    d = get_cg_data (&node, true);

	    /* Scan for calls that are in each transaction, and
	       generate the uninstrumented code path.  */
	    ipa_tm_scan_calls_transaction (d, &tm_callees);

	    /* Put it in the worklist so we can scan the function
	       later (ipa_tm_scan_irr_function) and mark the
	       irrevocable blocks.  */
	    maybe_push_queue (node, &irr_worklist, &d->in_worklist);
	    d->want_irr_scan_normal = true;
	  }

	pop_cfun ();
      }

  /* For every local function on the callee list, scan as if we will be
     creating a transactional clone, queueing all new functions we find
     along the way.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      a = node->get_availability ();
      d = get_cg_data (&node, true);

      /* Put it in the worklist so we can scan the function later
	 (ipa_tm_scan_irr_function) and mark the irrevocable
	 blocks.  */
      maybe_push_queue (node, &irr_worklist, &d->in_worklist);

      /* Some callees cannot be arbitrarily cloned.  These will always be
	 irrevocable.  Mark these now, so that we need not scan them.  */
      if (is_tm_irrevocable (node->decl))
	ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a <= AVAIL_NOT_AVAILABLE
	       && !is_tm_safe_or_pure (node->decl))
	ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a >= AVAIL_INTERPOSABLE)
	{
	  if (!tree_versionable_function_p (node->decl))
	    ipa_tm_note_irrevocable (node, &irr_worklist);
	  else if (!d->is_irrevocable)
	    {
	      /* If this is an alias, make sure its base is queued as well.
		 We need not scan the callees now, as the base will do.  */
	      if (node->alias)
		{
		  node = cgraph_node::get (node->thunk.alias);
		  d = get_cg_data (&node, true);
		  maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
		  continue;
		}

	      /* Add all nodes called by this function into
		 tm_callees as well.  */
	      ipa_tm_scan_calls_clone (node, &tm_callees);
	    }
	}
    }

  /* Iterate scans until no more work to be done.  Prefer not to use
     vec::pop because the worklist tends to follow a breadth-first
     search of the callgraph, which should allow convergence with a
     minimum number of scans.  But we also don't want the worklist
     array to grow without bound, so we shift the array up periodically.  */
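  /* For example: once more than 256 entries have been processed and
     the scan index I reaches one eighth of the worklist's current
     length, the already-processed prefix is removed with block_remove
     and the scan restarts at index 0, keeping the array bounded.  */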
  for (i = 0; i < irr_worklist.length (); ++i)
    {
      if (i > 256 && i == irr_worklist.length () / 8)
	{
	  irr_worklist.block_remove (0, i);
	  i = 0;
	}

      node = irr_worklist[i];
      d = get_cg_data (&node, true);
      d->in_worklist = false;

      if (d->want_irr_scan_normal)
	{
	  d->want_irr_scan_normal = false;
	  ipa_tm_scan_irr_function (node, false);
	}
      if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
	ipa_tm_note_irrevocable (node, &irr_worklist);
    }

  /* For every function on the callee list, collect the tm_may_enter_irr
     bit on the node.  */
  irr_worklist.truncate (0);
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      if (ipa_tm_mayenterirr_function (node))
	{
	  d = get_cg_data (&node, true);
	  gcc_assert (d->in_worklist == false);
	  maybe_push_queue (node, &irr_worklist, &d->in_worklist);
	}
    }

  /* Propagate the tm_may_enter_irr bit to callers until stable.  */
  for (i = 0; i < irr_worklist.length (); ++i)
    {
      struct cgraph_node *caller;
      struct cgraph_edge *e;
      struct ipa_ref *ref;

      if (i > 256 && i == irr_worklist.length () / 8)
	{
	  irr_worklist.block_remove (0, i);
	  i = 0;
	}

      node = irr_worklist[i];
      d = get_cg_data (&node, true);
      d->in_worklist = false;
      node->local.tm_may_enter_irr = true;

      /* Propagate back to normal callers.  */
      for (e = node->callers; e ; e = e->next_caller)
	{
	  caller = e->caller;
	  if (!is_tm_safe_or_pure (caller->decl)
	      && !caller->local.tm_may_enter_irr)
	    {
	      d = get_cg_data (&caller, true);
	      maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
	    }
	}

      /* Propagate back to referring aliases as well.  */
      FOR_EACH_ALIAS (node, ref)
	{
	  caller = dyn_cast<cgraph_node *> (ref->referring);
	  if (!caller->local.tm_may_enter_irr)
	    {
	      /* ?? Do not traverse aliases here.  */
	      d = get_cg_data (&caller, false);
	      maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
	    }
	}
    }

  /* Now validate all tm_safe functions, and all atomic regions in
     other functions.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
	&& node->get_availability () >= AVAIL_INTERPOSABLE)
      {
	d = get_cg_data (&node, true);
	if (is_tm_safe (node->decl))
	  ipa_tm_diagnose_tm_safe (node);
	else if (d->all_tm_regions)
	  ipa_tm_diagnose_transaction (node, d->all_tm_regions);
      }

  /* Create clones.  Do those that are not irrevocable and have a
     positive call count.  Do those publicly visible functions that
     the user directed us to clone.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      bool doit = false;

      node = tm_callees[i];
      if (node->cpp_implicit_alias)
	continue;

      a = node->get_availability ();
      d = get_cg_data (&node, true);

      if (a <= AVAIL_NOT_AVAILABLE)
	doit = is_tm_callable (node->decl);
      else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
	doit = true;
      else if (!d->is_irrevocable
	       && d->tm_callers_normal + d->tm_callers_clone > 0)
	doit = true;

      if (doit)
	ipa_tm_create_version (node);
    }

  /* Redirect calls to the new clones, and insert irrevocable marks.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      if (node->analyzed)
	{
	  d = get_cg_data (&node, true);
	  if (d->clone)
	    ipa_tm_transform_clone (node);
	}
    }
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
	&& node->get_availability () >= AVAIL_INTERPOSABLE)
      {
	d = get_cg_data (&node, true);
	if (d->all_tm_regions)
	  ipa_tm_transform_transaction (node);
      }

  /* Free and clear all data structures.  */
  tm_callees.release ();
  irr_worklist.release ();
  bitmap_obstack_release (&tm_obstack);
  free_original_copy_tables ();

  FOR_EACH_FUNCTION (node)
    node->aux = NULL;

#ifdef ENABLE_CHECKING
  cgraph_node::verify_cgraph_nodes ();
#endif

  return 0;
}

namespace {

const pass_data pass_data_ipa_tm =
{
  SIMPLE_IPA_PASS, /* type */
  "tmipa", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_tm : public simple_ipa_opt_pass
{
public:
  pass_ipa_tm (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return ipa_tm_execute (); }

}; // class pass_ipa_tm

} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_tm (gcc::context *ctxt)
{
  return new pass_ipa_tm (ctxt);
}

#include "gt-trans-mem.h"