]> git.ipfire.org Git - thirdparty/glibc.git/blob - sysdeps/powerpc/nptl/elide.h
powerpc: Enforce compiler barriers on hardware transactions
[thirdparty/glibc.git] / sysdeps / powerpc / nptl / elide.h
1 /* elide.h: Generic lock elision support for powerpc.
2 Copyright (C) 2015-2016 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
18
19 #ifndef ELIDE_PPC_H
20 # define ELIDE_PPC_H
21
22 #ifdef ENABLE_LOCK_ELISION
23 # include <htm.h>
24 # include <elision-conf.h>
25
26 /* Get the new value of adapt_count according to the elision
27 configurations. Returns true if the system should retry again or false
28 otherwise. */
29 static inline bool
30 __get_new_count (uint8_t *adapt_count, int attempt)
31 {
32 /* A persistent failure indicates that a retry will probably
33 result in another failure. Use normal locking now and
34 for the next couple of calls. */
35 if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
36 {
37 if (__elision_aconf.skip_lock_internal_abort > 0)
38 *adapt_count = __elision_aconf.skip_lock_internal_abort;
39 return false;
40 }
41 /* Same logic as above, but for a number of temporary failures in a
42 a row. */
43 else if (attempt <= 1 && __elision_aconf.skip_lock_out_of_tbegin_retries > 0
44 && __elision_aconf.try_tbegin > 0)
45 *adapt_count = __elision_aconf.skip_lock_out_of_tbegin_retries;
46 return true;
47 }
48
49 /* CONCURRENCY NOTES:
50
51 The evaluation of the macro expression is_lock_free encompasses one or
52 more loads from memory locations that are concurrently modified by other
53 threads. For lock elision to work, this evaluation and the rest of the
54 critical section protected by the lock must be atomic because an
55 execution with lock elision must be equivalent to an execution in which
56 the lock would have been actually acquired and released. Therefore, we
57 evaluate is_lock_free inside of the transaction that represents the
58 critical section for which we want to use lock elision, which ensures
59 the atomicity that we require. */
60
/* Try to acquire the lock through elision.  Evaluates to 1 if the
   lock was successfully elided (a transaction is now open and
   is_lock_free was observed true inside it), or 0 if the caller must
   acquire the lock normally.  ADAPT_COUNT is a per-lock adaptation
   counter; while nonzero, elision is skipped and the counter decays.
   IS_LOCK_FREE is an expression evaluated INSIDE the transaction —
   see the CONCURRENCY NOTES above.  */
# define ELIDE_LOCK(adapt_count, is_lock_free) \
({ \
  int ret = 0; \
  /* Elision was recently unprofitable for this lock: decay the \
     counter and take the normal locking path.  */ \
  if (adapt_count > 0) \
    (adapt_count)--; \
  else \
    for (int i = __elision_aconf.try_tbegin; i > 0; i--) \
      { \
	if (__libc_tbegin (0)) \
	  { \
	    /* Transaction started: checking the lock word here makes \
	       the check atomic with the elided critical section.  */ \
	    if (is_lock_free) \
	      { \
		ret = 1; \
		break; \
	      } \
	    /* Lock is busy — abort so we never run the critical \
	       section concurrently with a real lock holder.  */ \
	    __libc_tabort (_ABORT_LOCK_BUSY); \
	  } \
	else \
	  /* tbegin failed or the transaction aborted: consult the \
	     failure cause to decide whether to retry.  */ \
	  if (!__get_new_count (&adapt_count,i)) \
	    break; \
      } \
  ret; \
})
86
/* Try to elide a trylock.  Evaluates to 1 if the lock was elided,
   0 otherwise.  When elision is enabled and WRITE is nonzero, the
   macro aborts with _ABORT_NESTED_TRYLOCK instead of eliding —
   NOTE(review): presumably this guards against a write-trylock issued
   from inside an already-elided (transactional) section, where the
   abort unwinds the enclosing transaction; confirm against callers.  */
# define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) \
({ \
  int ret = 0; \
  /* try_tbegin <= 0 means elision is disabled by configuration.  */ \
  if (__elision_aconf.try_tbegin > 0) \
    { \
      if (write) \
	__libc_tabort (_ABORT_NESTED_TRYLOCK); \
      /* Otherwise a trylock elides exactly like a normal lock.  */ \
      ret = ELIDE_LOCK (adapt_count, is_lock_free); \
    } \
  ret; \
})
98
99
/* Commit the transaction opened by a successful ELIDE_LOCK.
   IS_LOCK_FREE must evaluate the same lock-free condition that was
   checked inside the transaction.  Returns true when the transaction
   was committed (the unlock was elided); false when the caller holds
   the lock for real and must release it normally.  */
static inline bool
__elide_unlock (int is_lock_free)
{
  if (!is_lock_free)
    return false;

  /* The lock was never acquired — we are inside a transaction, so
     ending it makes the whole critical section commit atomically.  */
  __libc_tend (0);
  return true;
}
110
/* Elided-unlock entry point: commits the open transaction when
   IS_LOCK_FREE is nonzero; evaluates to true iff the unlock was
   elided.  */
# define ELIDE_UNLOCK(is_lock_free) \
  __elide_unlock (is_lock_free)

# else

/* Lock elision disabled at configure time: every operation reports
   "not elided", so callers always take the normal locking path.  */
# define ELIDE_LOCK(adapt_count, is_lock_free) 0
# define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) 0
# define ELIDE_UNLOCK(is_lock_free) 0

#endif /* ENABLE_LOCK_ELISION */
121
122 #endif