]>
Commit | Line | Data |
---|---|---|
cbe34bb5 | 1 | /* Copyright (C) 2008-2017 Free Software Foundation, Inc. |
0a35513e AH |
2 | Contributed by Richard Henderson <rth@redhat.com>. |
3 | ||
4 | This file is part of the GNU Transactional Memory Library (libitm). | |
5 | ||
6 | Libitm is free software; you can redistribute it and/or modify it | |
7 | under the terms of the GNU General Public License as published by | |
8 | the Free Software Foundation; either version 3 of the License, or | |
9 | (at your option) any later version. | |
10 | ||
11 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
13 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
14 | more details. | |
15 | ||
16 | Under Section 7 of GPL version 3, you are granted additional | |
17 | permissions described in the GCC Runtime Library Exception, version | |
18 | 3.1, as published by the Free Software Foundation. | |
19 | ||
20 | You should have received a copy of the GNU General Public License and | |
21 | a copy of the GCC Runtime Library Exception along with this program; | |
22 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
23 | <http://www.gnu.org/licenses/>. */ | |
24 | ||
25 | #include "libitm_i.h" | |
26 | #include <pthread.h> | |
27 | ||
28 | ||
using namespace GTM;

#if !defined(HAVE_ARCH_GTM_THREAD) || !defined(HAVE_ARCH_GTM_THREAD_DISP)
// Fallback TLS slot for the per-thread transaction state, used when the
// target does not provide its own arch-specific accessors.
extern __thread gtm_thread_tls _gtm_thr_tls;
#endif

// Put this at the start of a cacheline so that serial_lock's writers and
// htm_fastpath fields are on the same cacheline, so that HW transactions
// only have to pay one cacheline capacity to monitor both.
gtm_rwlock GTM::gtm_thread::serial_lock
  __attribute__((aligned(HW_CACHELINE_SIZE)));
// Head and length of the singly-linked list of all threads' transaction
// objects; modified only while holding serial_lock's write lock (see the
// gtm_thread constructor/destructor).
gtm_thread *GTM::gtm_thread::list_of_threads = 0;
unsigned GTM::gtm_thread::number_of_threads = 0;

/* ??? Move elsewhere when we figure out library initialization.  */
uint64_t GTM::gtm_spin_count_var = 1000;

// Global source of transaction ids.  Threads grab whole blocks of ids at a
// time (see begin_transaction) to avoid hitting this shared counter on
// every transaction begin.
#ifdef HAVE_64BIT_SYNC_BUILTINS
static atomic<_ITM_transactionId_t> global_tid;
#else
// No 64-bit atomics available: fall back to a mutex-protected counter.
static _ITM_transactionId_t global_tid;
static pthread_mutex_t global_tid_lock = PTHREAD_MUTEX_INITIALIZER;
#endif


// Provides a on-thread-exit callback used to release per-thread data.
static pthread_key_t thr_release_key;
static pthread_once_t thr_release_once = PTHREAD_ONCE_INIT;
57 | ||
0a35513e AH |
58 | /* Allocate a transaction structure. */ |
59 | void * | |
60 | GTM::gtm_thread::operator new (size_t s) | |
61 | { | |
62 | void *tx; | |
63 | ||
64 | assert(s == sizeof(gtm_thread)); | |
65 | ||
66 | tx = xmalloc (sizeof (gtm_thread), true); | |
67 | memset (tx, 0, sizeof (gtm_thread)); | |
68 | ||
69 | return tx; | |
70 | } | |
71 | ||
72 | /* Free the given transaction. Raises an error if the transaction is still | |
73 | in use. */ | |
74 | void | |
75 | GTM::gtm_thread::operator delete(void *tx) | |
76 | { | |
77 | free(tx); | |
78 | } | |
79 | ||
80 | static void | |
81 | thread_exit_handler(void *) | |
82 | { | |
83 | gtm_thread *thr = gtm_thr(); | |
84 | if (thr) | |
85 | delete thr; | |
86 | set_gtm_thr(0); | |
87 | } | |
88 | ||
89 | static void | |
90 | thread_exit_init() | |
91 | { | |
92 | if (pthread_key_create(&thr_release_key, thread_exit_handler)) | |
93 | GTM_fatal("Creating thread release TLS key failed."); | |
94 | } | |
95 | ||
96 | ||
97 | GTM::gtm_thread::~gtm_thread() | |
98 | { | |
99 | if (nesting > 0) | |
100 | GTM_fatal("Thread exit while a transaction is still active."); | |
101 | ||
102 | // Deregister this transaction. | |
103 | serial_lock.write_lock (); | |
104 | gtm_thread **prev = &list_of_threads; | |
105 | for (; *prev; prev = &(*prev)->next_thread) | |
106 | { | |
107 | if (*prev == this) | |
108 | { | |
109 | *prev = (*prev)->next_thread; | |
110 | break; | |
111 | } | |
112 | } | |
113 | number_of_threads--; | |
114 | number_of_threads_changed(number_of_threads + 1, number_of_threads); | |
115 | serial_lock.write_unlock (); | |
116 | } | |
117 | ||
// Constructs the calling thread's transaction object: initializes the
// snapshot time, registers the object on the global thread list, and
// arranges for automatic destruction on thread exit.
GTM::gtm_thread::gtm_thread ()
{
  // This object's memory has been set to zero by operator new, so no need
  // to initialize any of the other primitive-type members that do not have
  // constructors.
  shared_state.store(-1, memory_order_relaxed);

  // Register this transaction with the list of all threads' transactions.
  serial_lock.write_lock ();
  next_thread = list_of_threads;
  list_of_threads = this;
  number_of_threads++;
  number_of_threads_changed(number_of_threads - 1, number_of_threads);
  serial_lock.write_unlock ();

  init_cpp_exceptions ();

  // Create the thread-exit TLS key exactly once across all threads.
  if (pthread_once(&thr_release_once, thread_exit_init))
    GTM_fatal("Initializing thread release TLS key failed.");
  // Any non-null value is sufficient to trigger destruction of this
  // transaction when the current thread terminates.
  if (pthread_setspecific(thr_release_key, this))
    GTM_fatal("Setting thread release TLS key failed.");
}
142 | ||
36cfbee1 RH |
143 | static inline uint32_t |
144 | choose_code_path(uint32_t prop, abi_dispatch *disp) | |
0a35513e AH |
145 | { |
146 | if ((prop & pr_uninstrumentedCode) && disp->can_run_uninstrumented_code()) | |
147 | return a_runUninstrumentedCode; | |
148 | else | |
149 | return a_runInstrumentedCode; | |
150 | } | |
151 | ||
edcbda7e DV |
#ifdef TARGET_BEGIN_TRANSACTION_ATTRIBUTE
/* This macro can be used to define target specific attributes for this
   function.  For example, S/390 requires floating point to be disabled in
   begin_transaction.  */
TARGET_BEGIN_TRANSACTION_ATTRIBUTE
#endif
// Starts a new (possibly nested) transaction for the calling thread.
// PROP carries the pr_* property bits the compiler declared for the
// transaction's code; JB is the jump buffer used to restart or abort.
// Returns the a_* action bits telling _ITM_beginTransaction which code
// path to run.
uint32_t
GTM::gtm_thread::begin_transaction (uint32_t prop, const gtm_jmpbuf *jb)
{
  // Transaction ids are handed out to threads in blocks of this size so
  // that the shared counter is touched only once per block.
  static const _ITM_transactionId_t tid_block_size = 1 << 16;

  gtm_thread *tx;
  abi_dispatch *disp;
  uint32_t ret;

  // ??? pr_undoLogCode is not properly defined in the ABI.  Are barriers
  // omitted because they are not necessary (e.g., a transaction on thread-
  // local data) or because the compiler thinks that some kind of global
  // synchronization might perform better?
  if (unlikely(prop & pr_undoLogCode))
    GTM_fatal("pr_undoLogCode not supported");

#ifdef USE_HTM_FASTPATH
  // HTM fastpath.  Only chosen in the absence of transaction_cancel to allow
  // using an uninstrumented code path.
  // The fastpath is enabled only by dispatch_htm's method group, which uses
  // serial-mode methods as fallback.  Serial-mode transactions cannot execute
  // concurrently with HW transactions because the latter monitor the serial
  // lock's writer flag and thus abort if another thread is or becomes a
  // serial transaction.  Therefore, if the fastpath is enabled, then a
  // transaction is not executing as a HW transaction iff the serial lock is
  // write-locked.  Also, HW transactions monitor the fastpath control
  // variable, so that they will only execute if dispatch_htm is still the
  // current method group.  This allows us to use htm_fastpath and the serial
  // lock's writers flag to reliable determine whether the current thread runs
  // a HW transaction, and thus we do not need to maintain this information in
  // per-thread state.
  // If an uninstrumented code path is not available, we can still run
  // instrumented code from a HW transaction because the HTM fastpath kicks
  // in early in both begin and commit, and the transaction is not canceled.
  // HW transactions might get requests to switch to serial-irrevocable mode,
  // but these can be ignored because the HTM provides all necessary
  // correctness guarantees.  Transactions cannot detect whether they are
  // indeed in serial mode, and HW transactions should never need serial mode
  // for any internal changes (e.g., they never abort visibly to the STM code
  // and thus do not trigger the standard retry handling).
#ifndef HTM_CUSTOM_FASTPATH
  if (likely(serial_lock.get_htm_fastpath() && (prop & pr_hasNoAbort)))
    {
      // Note that the snapshot of htm_fastpath that we take here could be
      // outdated, and a different method group than dispatch_htm may have
      // been chosen in the meantime.  Therefore, take care not not touch
      // anything besides the serial lock, which is independent of method
      // groups.
      for (uint32_t t = serial_lock.get_htm_fastpath(); t; t--)
        {
          uint32_t ret = htm_begin();
          if (htm_begin_success(ret))
            {
              // We are executing a transaction now.
              // Monitor the writer flag in the serial-mode lock, and abort
              // if there is an active or waiting serial-mode transaction.
              // Also checks that htm_fastpath is still nonzero and thus
              // HW transactions are allowed to run.
              // Note that this can also happen due to an enclosing
              // serial-mode transaction; we handle this case below.
              if (unlikely(serial_lock.htm_fastpath_disabled()))
                htm_abort();
              else
                // We do not need to set a_saveLiveVariables because of HTM.
                return (prop & pr_uninstrumentedCode) ?
                    a_runUninstrumentedCode : a_runInstrumentedCode;
            }
          // The transaction has aborted.  Don't retry if it's unlikely that
          // retrying the transaction will be successful.
          if (!htm_abort_should_retry(ret))
            break;
          // Check whether the HTM fastpath has been disabled.
          if (!serial_lock.get_htm_fastpath())
            break;
          // Wait until any concurrent serial-mode transactions have finished.
          // This is an empty critical section, but won't be elided.
          if (serial_lock.htm_fastpath_disabled())
            {
              tx = gtm_thr();
              if (unlikely(tx == NULL))
                {
                  // See below.
                  tx = new gtm_thread();
                  set_gtm_thr(tx);
                }
              // Check whether there is an enclosing serial-mode transaction;
              // if so, we just continue as a nested transaction and don't
              // try to use the HTM fastpath.  This case can happen when an
              // outermost relaxed transaction calls unsafe code that starts
              // a transaction.
              if (tx->nesting > 0)
                break;
              // Another thread is running a serial-mode transaction.  Wait.
              serial_lock.read_lock(tx);
              serial_lock.read_unlock(tx);
              // TODO We should probably reset the retry count t here, unless
              // we have retried so often that we should go serial to avoid
              // starvation.
            }
        }
    }
#else
  // If we have a custom HTM fastpath in ITM_beginTransaction, we implement
  // just the retry policy here.  We communicate with the custom fastpath
  // through additional property bits and return codes, and either transfer
  // control back to the custom fastpath or run the fallback mechanism.  The
  // fastpath synchronization algorithm itself is the same.
  // pr_HTMRetryableAbort states that a HW transaction started by the custom
  // HTM fastpath aborted, and that we thus have to decide whether to retry
  // the fastpath (returning a_tryHTMFastPath) or just proceed with the
  // fallback method.
  if (likely(serial_lock.get_htm_fastpath() && (prop & pr_HTMRetryableAbort)))
    {
      tx = gtm_thr();
      if (unlikely(tx == NULL))
        {
          // See below.
          tx = new gtm_thread();
          set_gtm_thr(tx);
        }
      // If this is the first abort, reset the retry count.  We abuse
      // restart_total for the retry count, which is fine because our only
      // other fallback will use serial transactions, which don't use
      // restart_total but will reset it when committing.
      if (!(prop & pr_HTMRetriedAfterAbort))
        tx->restart_total = gtm_thread::serial_lock.get_htm_fastpath();

      if (--tx->restart_total > 0)
        {
          // Wait until any concurrent serial-mode transactions have finished.
          // Essentially the same code as above.
          if (!serial_lock.get_htm_fastpath())
            goto stop_custom_htm_fastpath;
          if (serial_lock.htm_fastpath_disabled())
            {
              if (tx->nesting > 0)
                goto stop_custom_htm_fastpath;
              serial_lock.read_lock(tx);
              serial_lock.read_unlock(tx);
            }
          // Let ITM_beginTransaction retry the custom HTM fastpath.
          return a_tryHTMFastPath;
        }
    }
 stop_custom_htm_fastpath:
#endif
#endif

  tx = gtm_thr();
  if (unlikely(tx == NULL))
    {
      // Create the thread object.  The constructor will also set up automatic
      // deletion on thread termination.
      tx = new gtm_thread();
      set_gtm_thr(tx);
    }

  if (tx->nesting > 0)
    {
      // This is a nested transaction.
      // Check prop compatibility:
      // The ABI requires pr_hasNoFloatUpdate, pr_hasNoVectorUpdate,
      // pr_hasNoIrrevocable, pr_aWBarriersOmitted, pr_RaRBarriersOmitted, and
      // pr_hasNoSimpleReads to hold for the full dynamic scope of a
      // transaction.  We could check that these are set for the nested
      // transaction if they are also set for the parent transaction, but the
      // ABI does not require these flags to be set if they could be set,
      // so the check could be too strict.
      // ??? For pr_readOnly, lexical or dynamic scope is unspecified.

      if (prop & pr_hasNoAbort)
        {
          // We can use flat nesting, so elide this transaction.
          if (!(prop & pr_instrumentedCode))
            {
              if (!(tx->state & STATE_SERIAL) ||
                  !(tx->state & STATE_IRREVOCABLE))
                tx->serialirr_mode();
            }
          // Increment nesting level after checking that we have a method that
          // allows us to continue.
          tx->nesting++;
          return choose_code_path(prop, abi_disp());
        }

      // The transaction might abort, so use closed nesting if possible.
      // pr_hasNoAbort has lexical scope, so the compiler should really have
      // generated an instrumented code path.
      assert(prop & pr_instrumentedCode);

      // Create a checkpoint of the current transaction.
      gtm_transaction_cp *cp = tx->parent_txns.push();
      cp->save(tx);
      new (&tx->alloc_actions) aa_tree<uintptr_t, gtm_alloc_action>();

      // Check whether the current method actually supports closed nesting.
      // If we can switch to another one, do so.
      // If not, we assume that actual aborts are infrequent, and rather
      // restart in _ITM_abortTransaction when we really have to.
      disp = abi_disp();
      if (!disp->closed_nesting())
        {
          // ??? Should we elide the transaction if there is no alternative
          // method that supports closed nesting?  If we do, we need to set
          // some flag to prevent _ITM_abortTransaction from aborting the
          // wrong transaction (i.e., some parent transaction).
          abi_dispatch *cn_disp = disp->closed_nesting_alternative();
          if (cn_disp)
            {
              disp = cn_disp;
              set_abi_disp(disp);
            }
        }
    }
  else
    {
      // Outermost transaction
      disp = tx->decide_begin_dispatch (prop);
      set_abi_disp (disp);
    }

  // Initialization that is common for outermost and nested transactions.
  tx->prop = prop;
  tx->nesting++;

  tx->jb = *jb;

  // As long as we have not exhausted a previously allocated block of TIDs,
  // we can avoid an atomic operation on a shared cacheline.
  if (tx->local_tid & (tid_block_size - 1))
    tx->id = tx->local_tid++;
  else
    {
#ifdef HAVE_64BIT_SYNC_BUILTINS
      // We don't really care which block of TIDs we get but only that we
      // acquire one atomically; therefore, relaxed memory order is
      // sufficient.
      tx->id = global_tid.fetch_add(tid_block_size, memory_order_relaxed);
      tx->local_tid = tx->id + 1;
#else
      pthread_mutex_lock (&global_tid_lock);
      global_tid += tid_block_size;
      tx->id = global_tid;
      tx->local_tid = tx->id + 1;
      pthread_mutex_unlock (&global_tid_lock);
#endif
    }

  // Log the number of uncaught exceptions if we might have to roll back this
  // state.
  if (tx->cxa_uncaught_count_ptr != 0)
    tx->cxa_uncaught_count = *tx->cxa_uncaught_count_ptr;

  // Run dispatch-specific restart code.  Retry until we succeed.
  GTM::gtm_restart_reason rr;
  while ((rr = disp->begin_or_restart()) != NO_RESTART)
    {
      tx->decide_retry_strategy(rr);
      disp = abi_disp();
    }

  // Determine the code path to run.  Only irrevocable transactions cannot be
  // restarted, so all other transactions need to save live variables.
  ret = choose_code_path(prop, disp);
  if (!(tx->state & STATE_IRREVOCABLE))
    ret |= a_saveLiveVariables;
  return ret;
}
426 | ||
427 | ||
428 | void | |
429 | GTM::gtm_transaction_cp::save(gtm_thread* tx) | |
430 | { | |
431 | // Save everything that we might have to restore on restarts or aborts. | |
432 | jb = tx->jb; | |
433 | undolog_size = tx->undolog.size(); | |
434 | memcpy(&alloc_actions, &tx->alloc_actions, sizeof(alloc_actions)); | |
435 | user_actions_size = tx->user_actions.size(); | |
436 | id = tx->id; | |
437 | prop = tx->prop; | |
438 | cxa_catch_count = tx->cxa_catch_count; | |
258c1d07 | 439 | cxa_uncaught_count = tx->cxa_uncaught_count; |
0a35513e AH |
440 | disp = abi_disp(); |
441 | nesting = tx->nesting; | |
442 | } | |
443 | ||
444 | void | |
445 | GTM::gtm_transaction_cp::commit(gtm_thread* tx) | |
446 | { | |
447 | // Restore state that is not persistent across commits. Exception handling, | |
448 | // information, nesting level, and any logs do not need to be restored on | |
449 | // commits of nested transactions. Allocation actions must be committed | |
450 | // before committing the snapshot. | |
451 | tx->jb = jb; | |
452 | memcpy(&tx->alloc_actions, &alloc_actions, sizeof(alloc_actions)); | |
453 | tx->id = id; | |
454 | tx->prop = prop; | |
455 | } | |
456 | ||
457 | ||
// Rolls the current transaction back to checkpoint CP, or to the outermost
// transaction if CP is null.  ABORTING distinguishes a user abort from a
// restart (the nesting level is reset differently for the two cases).
void
GTM::gtm_thread::rollback (gtm_transaction_cp *cp, bool aborting)
{
  // The undo log is special in that it used for both thread-local and shared
  // data.  Because of the latter, we have to roll it back before any
  // dispatch-specific rollback (which handles synchronization with other
  // transactions).
  undolog.rollback (this, cp ? cp->undolog_size : 0);

  // Perform dispatch-specific rollback.
  abi_disp()->rollback (cp);

  // Roll back all actions that are supposed to happen around the transaction.
  rollback_user_actions (cp ? cp->user_actions_size : 0);
  commit_allocations (true, (cp ? &cp->alloc_actions : 0));
  revert_cpp_exceptions (cp);

  if (cp)
    {
      // We do not yet handle restarts of nested transactions.  To do that, we
      // would have to restore some state (jb, id, prop, nesting) not to the
      // checkpoint but to the transaction that was started from this
      // checkpoint (e.g., nesting = cp->nesting + 1);
      assert(aborting);
      // Roll back the rest of the state to the checkpoint.
      jb = cp->jb;
      id = cp->id;
      prop = cp->prop;
      if (cp->disp != abi_disp())
        set_abi_disp(cp->disp);
      memcpy(&alloc_actions, &cp->alloc_actions, sizeof(alloc_actions));
      nesting = cp->nesting;
    }
  else
    {
      // Roll back to the outermost transaction.
      // Restore the jump buffer and transaction properties, which we will
      // need for the longjmp used to restart or abort the transaction.
      if (parent_txns.size() > 0)
        {
          jb = parent_txns[0].jb;
          id = parent_txns[0].id;
          prop = parent_txns[0].prop;
        }
      // Reset the transaction.  Do not reset this->state, which is handled by
      // the callers.  Note that if we are not aborting, we reset the
      // transaction to the point after having executed begin_transaction
      // (we will return from it), so the nesting level must be one, not zero.
      nesting = (aborting ? 0 : 1);
      parent_txns.clear();
    }

  // Dispose of any exception that was being propagated out of the
  // transaction when the rollback was triggered.
  if (this->eh_in_flight)
    {
      _Unwind_DeleteException ((_Unwind_Exception *) this->eh_in_flight);
      this->eh_in_flight = NULL;
    }
}
516 | ||
// User-requested abort of the innermost transaction (or the outermost one
// when outerAbort is set).  Does not return; longjmps back into the
// transaction's abort continuation.
void ITM_REGPARM
_ITM_abortTransaction (_ITM_abortReason reason)
{
  gtm_thread *tx = gtm_thr();

  assert (reason == userAbort || reason == (userAbort | outerAbort));
  assert ((tx->prop & pr_hasNoAbort) == 0);

  // An irrevocable transaction's effects cannot be undone; abort the
  // whole process.
  if (tx->state & gtm_thread::STATE_IRREVOCABLE)
    abort ();

  // Roll back to innermost transaction.
  if (tx->parent_txns.size() > 0 && !(reason & outerAbort))
    {
      // If the current method does not support closed nesting but we are
      // nested and must only roll back the innermost transaction, then
      // restart with a method that supports closed nesting.
      abi_dispatch *disp = abi_disp();
      if (!disp->closed_nesting())
        tx->restart(RESTART_CLOSED_NESTING);

      // The innermost transaction is a closed nested transaction.
      gtm_transaction_cp *cp = tx->parent_txns.pop();
      // Copy jb/prop before rollback overwrites them with the checkpoint.
      uint32_t longjmp_prop = tx->prop;
      gtm_jmpbuf longjmp_jb = tx->jb;

      tx->rollback (cp, true);

      // Jump to nested transaction (use the saved jump buffer).
      GTM_longjmp (a_abortTransaction | a_restoreLiveVariables,
                   &longjmp_jb, longjmp_prop);
    }
  else
    {
      // There is no nested transaction or an abort of the outermost
      // transaction was requested, so roll back to the outermost transaction.
      tx->rollback (0, true);

      // Aborting an outermost transaction finishes execution of the whole
      // transaction.  Therefore, reset transaction state.
      if (tx->state & gtm_thread::STATE_SERIAL)
        gtm_thread::serial_lock.write_unlock ();
      else
        gtm_thread::serial_lock.read_unlock (tx);
      tx->state = 0;

      GTM_longjmp (a_abortTransaction | a_restoreLiveVariables,
                   &tx->jb, tx->prop);
    }
}
567 | ||
// Tries to commit the innermost active transaction.  Returns true on
// success (including elided and closed-nested commits); returns false if
// the dispatch-specific commit failed, in which case the caller must
// restart the transaction.
bool
GTM::gtm_thread::trycommit ()
{
  nesting--;

  // Skip any real commit for elided transactions.
  if (nesting > 0 && (parent_txns.size() == 0 ||
      nesting > parent_txns[parent_txns.size() - 1].nesting))
    return true;

  if (nesting > 0)
    {
      // Commit of a closed-nested transaction.  Remove one checkpoint and add
      // any effects of this transaction to the parent transaction.
      gtm_transaction_cp *cp = parent_txns.pop();
      commit_allocations(false, &cp->alloc_actions);
      cp->commit(this);
      return true;
    }

  // Commit of an outermost transaction.
  gtm_word priv_time = 0;
  if (abi_disp()->trycommit (priv_time))
    {
      // The transaction is now finished but we will still access some shared
      // data if we have to ensure privatization safety.
      bool do_read_unlock = false;
      if (state & gtm_thread::STATE_SERIAL)
        {
          gtm_thread::serial_lock.write_unlock ();
          // There are no other active transactions, so there's no need to
          // enforce privatization safety.
          priv_time = 0;
        }
      else
        {
          // If we have to ensure privatization safety, we must not yet
          // release the read lock and become inactive because (1) we still
          // have to go through the list of all transactions, which can be
          // modified by serial mode threads, and (2) we interpret each
          // transactions' shared_state in the context of what we believe to
          // be the current method group (and serial mode transactions can
          // change the method group).  Therefore, if we have to ensure
          // privatization safety, delay becoming inactive but set a maximum
          // snapshot time (we have committed and thus have an empty snapshot,
          // so it will always be most recent).  Use release MO so that this
          // synchronizes with other threads observing our snapshot time.
          if (priv_time)
            {
              do_read_unlock = true;
              shared_state.store((~(typeof gtm_thread::shared_state)0) - 1,
                  memory_order_release);
            }
          else
            gtm_thread::serial_lock.read_unlock (this);
        }
      state = 0;

      // We can commit the undo log after dispatch-specific commit and after
      // making the transaction inactive because we only have to reset
      // gtm_thread state.
      undolog.commit ();
      // Reset further transaction state.
      cxa_catch_count = 0;
      restart_total = 0;

      // Ensure privatization safety, if necessary.
      if (priv_time)
        {
          // There must be a seq_cst fence between the following loads of the
          // other transactions' shared_state and the dispatch-specific stores
          // that signal updates by this transaction (e.g., lock
          // acquisitions).  This ensures that if we read prior to other
          // reader transactions setting their shared_state to 0, then those
          // readers will observe our updates.  We can reuse the seq_cst fence
          // in serial_lock.read_unlock() if we performed that; if not, we
          // issue the fence.
          if (do_read_unlock)
            atomic_thread_fence (memory_order_seq_cst);
          // TODO Don't just spin but also block using cond vars / futexes
          // here.  Should probably be integrated with the serial lock code.
          for (gtm_thread *it = gtm_thread::list_of_threads; it != 0;
              it = it->next_thread)
            {
              if (it == this) continue;
              // We need to load other threads' shared_state using acquire
              // semantics (matching the release semantics of the respective
              // updates).  This is necessary to ensure that the other
              // threads' memory accesses happen before our actions that
              // assume privatization safety.
              // TODO Are there any platform-specific optimizations (e.g.,
              // merging barriers)?
              while (it->shared_state.load(memory_order_acquire) < priv_time)
                cpu_relax();
            }
        }

      // After ensuring privatization safety, we are now truly inactive and
      // thus can release the read lock.  We will also execute potentially
      // privatizing actions (e.g., calling free()).  User actions are first.
      if (do_read_unlock)
        gtm_thread::serial_lock.read_unlock (this);
      commit_user_actions ();
      commit_allocations (false, 0);

      return true;
    }
  return false;
}
677 | ||
// Restarts the current (outermost) transaction because of reason R.  Never
// returns: after choosing a retry strategy it longjmps back into the
// transaction's code path.
void ITM_NORETURN
GTM::gtm_thread::restart (gtm_restart_reason r, bool finish_serial_upgrade)
{
  // Roll back to outermost transaction.  Do not reset transaction state because
  // we will continue executing this transaction.
  rollback ();

  // If we have to restart while an upgrade of the serial lock is happening,
  // we need to finish this here, after rollback (to ensure privatization
  // safety despite undo writes) and before deciding about the retry strategy
  // (which could switch to/from serial mode).
  if (finish_serial_upgrade)
    gtm_thread::serial_lock.write_upgrade_finish(this);

  decide_retry_strategy (r);

  // Run dispatch-specific restart code.  Retry until we succeed.
  abi_dispatch* disp = abi_disp();
  GTM::gtm_restart_reason rr;
  while ((rr = disp->begin_or_restart()) != NO_RESTART)
    {
      decide_retry_strategy(rr);
      disp = abi_disp();
    }

  GTM_longjmp (choose_code_path(prop, disp) | a_restoreLiveVariables,
               &jb, prop);
}
706 | ||
707 | void ITM_REGPARM | |
708 | _ITM_commitTransaction(void) | |
709 | { | |
64fbcc74 TR |
710 | #if defined(USE_HTM_FASTPATH) |
711 | // HTM fastpath. If we are not executing a HW transaction, then we will be | |
712 | // a serial-mode transaction. If we are, then there will be no other | |
713 | // concurrent serial-mode transaction. | |
714 | // See gtm_thread::begin_transaction. | |
6041f70a | 715 | if (likely(!gtm_thread::serial_lock.htm_fastpath_disabled())) |
64fbcc74 TR |
716 | { |
717 | htm_commit(); | |
718 | return; | |
719 | } | |
720 | #endif | |
0a35513e AH |
721 | gtm_thread *tx = gtm_thr(); |
722 | if (!tx->trycommit ()) | |
723 | tx->restart (RESTART_VALIDATE_COMMIT); | |
724 | } | |
725 | ||
726 | void ITM_REGPARM | |
727 | _ITM_commitTransactionEH(void *exc_ptr) | |
728 | { | |
64fbcc74 TR |
729 | #if defined(USE_HTM_FASTPATH) |
730 | // See _ITM_commitTransaction. | |
6041f70a | 731 | if (likely(!gtm_thread::serial_lock.htm_fastpath_disabled())) |
64fbcc74 TR |
732 | { |
733 | htm_commit(); | |
734 | return; | |
735 | } | |
736 | #endif | |
0a35513e AH |
737 | gtm_thread *tx = gtm_thr(); |
738 | if (!tx->trycommit ()) | |
739 | { | |
740 | tx->eh_in_flight = exc_ptr; | |
741 | tx->restart (RESTART_VALIDATE_COMMIT); | |
742 | } | |
743 | } |