/* elide.h: Generic lock elision support for powerpc.
   Copyright (C) 2015-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef ELIDE_PPC_H
# define ELIDE_PPC_H

# include <htm.h>
# include <elision-conf.h>

/* Get the new value of adapt_count according to the elision
   configurations.  Returns true if the system should retry the
   transaction, or false otherwise.  */
static inline bool
__get_new_count (uint8_t *adapt_count, int attempt)
{
  /* A persistent failure indicates that a retry will probably
     result in another failure.  Use normal locking now and
     for the next couple of calls.  */
  if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
    {
      if (__elision_aconf.skip_lock_internal_abort > 0)
        *adapt_count = __elision_aconf.skip_lock_internal_abort;
      return false;
    }
  /* Same logic as above, but for a number of temporary failures in a
     row.  */
  else if (attempt <= 1 && __elision_aconf.skip_lock_out_of_tbegin_retries > 0
           && __elision_aconf.try_tbegin > 0)
    *adapt_count = __elision_aconf.skip_lock_out_of_tbegin_retries;
  return true;
}
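
/* For illustration, assuming the default elision tunables in which
   skip_lock_internal_abort, skip_lock_out_of_tbegin_retries, and
   try_tbegin are all 3 (an assumption about the usual configuration,
   not something this header defines): a persistent abort stores 3 into
   *adapt_count, so the next three acquisitions of this lock skip
   elision entirely, while a transient abort on the last remaining
   tbegin attempt sets the same skip count before the caller falls back
   to normal locking.  */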

/* CONCURRENCY NOTES:

   The evaluation of the macro expression is_lock_free encompasses one or
   more loads from memory locations that are concurrently modified by other
   threads.  For lock elision to work, this evaluation and the rest of the
   critical section protected by the lock must be atomic because an
   execution with lock elision must be equivalent to an execution in which
   the lock would have been actually acquired and released.  Therefore, we
   evaluate is_lock_free inside of the transaction that represents the
   critical section for which we want to use lock elision, which ensures
   the atomicity that we require.  */

/* Returns 1 if the lock defined by is_lock_free was elided, 0 otherwise.
   ADAPT_COUNT is a per-lock state variable.  */
# define ELIDE_LOCK(adapt_count, is_lock_free) \
  ({ \
    int ret = 0; \
    if (adapt_count > 0) \
      (adapt_count)--; \
    else \
      for (int i = __elision_aconf.try_tbegin; i > 0; i--) \
        { \
          if (__libc_tbegin (0)) \
            { \
              if (is_lock_free) \
                { \
                  ret = 1; \
                  break; \
                } \
              __libc_tabort (_ABORT_LOCK_BUSY); \
            } \
          else \
            if (!__get_new_count (&adapt_count, i)) \
              break; \
        } \
    ret; \
  })
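
/* Usage sketch (hypothetical caller, loosely modelled on the pthread
   rwlock elision paths; "rwlock", "__rwelision" and the lock-free test
   are illustrative names, not definitions from this header):

     if (ELIDE_LOCK (rwlock->__data.__rwelision,
                     rwlock->__data.__lock == 0))
       return 0;   // critical section runs inside the transaction
     // otherwise fall back to acquiring the real lock

   Note that is_lock_free is an expression, not a precomputed value: it
   is evaluated only after __libc_tbegin has succeeded, which provides
   the atomicity described in the concurrency notes above.  */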

# define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) \
  ({ \
    int ret = 0; \
    if (__elision_aconf.try_tbegin > 0) \
      { \
        if (write) \
          __libc_tabort (_ABORT_NESTED_TRYLOCK); \
        ret = ELIDE_LOCK (adapt_count, is_lock_free); \
      } \
    ret; \
  })
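
/* Note on the write path above: before attempting elision of a write
   trylock, the macro issues __libc_tabort with _ABORT_NESTED_TRYLOCK.
   Judging from the abort code's name, the intent is that a trylock
   attempted while a transaction is already active on this lock aborts
   the enclosing transaction rather than being elided as a nested
   trylock.  */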

static inline bool
__elide_unlock (int is_lock_free)
{
  if (is_lock_free)
    {
      /* This code is expected to crash when trying to unlock a lock not
         held by this thread.  More information is available in the
         __pthread_rwlock_unlock() implementation.  */
      __libc_tend (0);
      return true;
    }
  return false;
}

# define ELIDE_UNLOCK(is_lock_free) \
  __elide_unlock (is_lock_free)
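
/* Matching unlock sketch (same hypothetical caller as above): an elided
   critical section never wrote to the lock, so inside the transaction
   the lock words still read as free, and the same kind of expression
   distinguishes an elided acquisition from a real one:

     if (ELIDE_UNLOCK (rwlock->__data.__lock == 0))
       return 0;   // transaction committed by __libc_tend
     // otherwise release the real lock as usual
   */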

#endif