]> git.ipfire.org Git - thirdparty/glibc.git/blob - hurd/hurdlock.c
y2038: Introduce struct __timespec64 - new internal glibc type
[thirdparty/glibc.git] / hurd / hurdlock.c
1 /* Hurd helpers for lowlevellocks.
2 Copyright (C) 1999-2019 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
18
#include "hurdlock.h"
#include <hurd.h>
#include <hurd/hurd.h>
#include <time.h>
#include <limits.h>
#include <errno.h>
#include <unistd.h>
25
26 /* Convert an absolute timeout in nanoseconds to a relative
27 timeout in milliseconds. */
28 static inline int __attribute__ ((gnu_inline))
29 compute_reltime (const struct timespec *abstime, clockid_t clk)
30 {
31 struct timespec ts;
32 __clock_gettime (clk, &ts);
33
34 ts.tv_sec = abstime->tv_sec - ts.tv_sec;
35 ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec;
36
37 if (ts.tv_nsec < 0)
38 {
39 --ts.tv_sec;
40 ts.tv_nsec += 1000000000;
41 }
42
43 return ts.tv_sec < 0 ? -1 : (int)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000);
44 }
45
46 int
47 __lll_abstimed_wait (void *ptr, int val,
48 const struct timespec *tsp, int flags, int clk)
49 {
50 int mlsec = compute_reltime (tsp, clk);
51 return mlsec < 0 ? KERN_TIMEDOUT : lll_timed_wait (ptr, val, mlsec, flags);
52 }
53
54 int
55 __lll_abstimed_xwait (void *ptr, int lo, int hi,
56 const struct timespec *tsp, int flags, int clk)
57 {
58 int mlsec = compute_reltime (tsp, clk);
59 return mlsec < 0 ? KERN_TIMEDOUT : lll_timed_xwait (ptr, lo, hi, mlsec,
60 flags);
61 }
62
/* Acquire the low-level lock at PTR, waiting no later than the absolute
   deadline TSP measured against clock CLK.  Returns 0 on success,
   EINVAL if TSP's nanosecond field is out of range, or ETIMEDOUT.  */
int
__lll_abstimed_lock (void *ptr,
                     const struct timespec *tsp, int flags, int clk)
{
  /* Fast path: grab the lock if it is uncontended.  */
  if (lll_trylock (ptr) == 0)
    return 0;

  while (1)
    {
      /* Mark the lock word as contended (2) while trying to take it; a
         previous value of 0 means it was free and we now own it.  The
         contended value ensures the eventual unlocker wakes waiters.  */
      if (atomic_exchange_acq ((int *)ptr, 2) == 0)
        return 0;
      else if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
        return EINVAL;

      /* A negative relative time means the deadline already passed.  */
      int mlsec = compute_reltime (tsp, clk);
      if (mlsec < 0 || lll_timed_wait (ptr, 2, mlsec, flags) == KERN_TIMEDOUT)
        return ETIMEDOUT;
    }
}
82
83 /* Robust locks. */
84
85 /* Test if a given process id is still valid. */
86 static inline int
87 valid_pid (int pid)
88 {
89 task_t task = __pid2task (pid);
90 if (task == MACH_PORT_NULL)
91 return 0;
92
93 __mach_port_deallocate (__mach_task_self (), task);
94 return 1;
95 }
96
97 /* Robust locks have currently no support from the kernel; they
98 are simply implemented with periodic polling. When sleeping, the
99 maximum blocking time is determined by this constant. */
100 #define MAX_WAIT_TIME 1500
101
/* Acquire the robust lock at PTR.  The lock word holds the owner's PID
   (possibly or'ed with LLL_WAITERS); if the recorded owner has died,
   the lock is stolen and EOWNERDEAD is returned so the caller can
   restore the protected state.  Returns 0 on plain success.  */
int
__lll_robust_lock (void *ptr, int flags)
{
  int *iptr = (int *)ptr;
  int id = __getpid ();
  int wait_time = 25;   /* Initial poll interval, in milliseconds.  */
  unsigned int val;

  /* Try to set the lock word to our PID if it's clear.  Otherwise,
     mark it as having waiters.  */
  while (1)
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return 0;
      else if (atomic_compare_and_exchange_bool_acq (iptr,
               val | LLL_WAITERS, val) == 0)
        break;
    }

  /* From here on we are an advertised waiter, so any PID we store keeps
     the LLL_WAITERS bit set for later unlockers to see.  */
  for (id |= LLL_WAITERS ; ; )
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return 0;
      else if (val && !valid_pid (val & LLL_OWNER_MASK))
        {
          /* The recorded owner died; try to steal the lock.  */
          if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
            return EOWNERDEAD;
        }
      else
        {
          /* No kernel support for robust locks (see comment above
             MAX_WAIT_TIME): poll with exponential backoff, capped at
             MAX_WAIT_TIME milliseconds.  */
          lll_timed_wait (iptr, val, wait_time, flags);
          if (wait_time < MAX_WAIT_TIME)
            wait_time <<= 1;
        }
    }
}
140
/* Acquire the robust lock at PTR, giving up once the absolute deadline
   TSP (measured against clock CLK) has passed.  Returns 0 on success,
   EOWNERDEAD if the lock was reclaimed from a dead owner, or
   ETIMEDOUT.  */
int
__lll_robust_abstimed_lock (void *ptr,
                            const struct timespec *tsp, int flags, int clk)
{
  int *iptr = (int *)ptr;
  int id = __getpid ();
  int wait_time = 25;   /* Initial poll interval, in milliseconds.  */
  unsigned int val;

  /* Try to set the lock word to our PID if it's clear; otherwise mark
     it as having waiters.  */
  while (1)
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return 0;
      else if (atomic_compare_and_exchange_bool_acq (iptr,
               val | LLL_WAITERS, val) == 0)
        break;
    }

  /* From here on any PID we store keeps the LLL_WAITERS bit set.  */
  for (id |= LLL_WAITERS ; ; )
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return 0;
      else if (val && !valid_pid (val & LLL_OWNER_MASK))
        {
          /* The recorded owner died; try to steal the lock.  */
          if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
            return EOWNERDEAD;
        }
      else
        {
          /* Poll with exponential backoff, but never sleep past the
             caller's deadline.  A negative relative time means the
             deadline has already passed.  */
          int mlsec = compute_reltime (tsp, clk);
          if (mlsec < 0)
            return ETIMEDOUT;
          else if (mlsec > wait_time)
            mlsec = wait_time;

          int res = lll_timed_wait (iptr, val, mlsec, flags);
          if (res == KERN_TIMEDOUT)
            return ETIMEDOUT;
          else if (wait_time < MAX_WAIT_TIME)
            wait_time <<= 1;
        }
    }
}
186
187 int
188 __lll_robust_trylock (void *ptr)
189 {
190 int *iptr = (int *)ptr;
191 int id = __getpid ();
192 unsigned int val = *iptr;
193
194 if (!val)
195 {
196 if (atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
197 return 0;
198 }
199 else if (!valid_pid (val & LLL_OWNER_MASK)
200 && atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
201 return EOWNERDEAD;
202
203 return EBUSY;
204 }
205
/* Release the robust lock at PTR.  */
void
__lll_robust_unlock (void *ptr, int flags)
{
  unsigned int val = atomic_load_relaxed ((unsigned int *)ptr);
  while (1)
    {
      if (val & LLL_WAITERS)
        {
          /* Waiters are (or were) queued: clear the lock word and wake
             them so one can take over.  */
          lll_set_wake (ptr, 0, flags);
          break;
        }
      else if (atomic_compare_exchange_weak_release ((unsigned int *)ptr, &val, 0))
        break;
      /* The CAS failed (possibly spuriously) and refreshed VAL with the
         current lock word; loop to re-examine the waiters bit.  */
    }
}