/*
   Unix SMB/CIFS implementation.
   Samba database functions
   Copyright (C) Anton Blanchard 2001

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* udev defines */
#define STANDALONE
#define TDB_DEBUG
#define HAVE_MMAP	1
#include "../udev.h"

#if HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef STANDALONE
#define _KLIBC_HAS_ARCH_SIG_ATOMIC_T
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include <time.h>
#include <signal.h>
#include <sched.h>	/* for sched_yield() when USE_SCHED_YIELD is defined */
#include "tdb.h"
#include "spinlock.h"

#else
#include "includes.h"
#endif

#ifdef USE_SPINLOCKS

/*
 * ARCH SPECIFIC
 */

#if defined(SPARC_SPINLOCKS)
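/*
 * SPARC: "ldstub" atomically loads the byte at [lock] and stores 0xff
 * into it, so reading back 0 means the lock was free and is now ours.
 * Convention on this arch: 0 = unlocked, non-zero = locked.
 */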

static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	asm volatile("ldstub    [%1], %0"
		     : "=r" (result)
		     : "r" (lock)
		     : "memory");

	return (result == 0) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#elif defined(POWERPC_SPINLOCKS)
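/*
 * PowerPC: a load-reserve/store-conditional (lwarx/stwcx.) loop.  The
 * conditional store only succeeds if nothing else wrote the word since
 * the lwarx, and "isync" keeps later accesses from being reordered
 * ahead of the lock acquisition.  Convention: 0 = unlocked, 1 = locked.
 */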

static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	li		%0,0\n\
	bne-		2f\n\
	li		%0,1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r"(result)
	: "r"(lock)
	: "cr0", "memory");

	return (result == 1) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("eieio":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#elif defined(INTEL_SPINLOCKS)
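/*
 * i386: "xchgl" with a memory operand is implicitly locked; it swaps 0
 * into *lock and returns the previous value.  Note the inverted
 * convention on this arch: 1 = unlocked, 0 = locked, so seeing an old
 * value of 1 means we took the lock.
 */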

static inline int __spin_trylock(spinlock_t *lock)
{
	int oldval;

	asm volatile("xchgl %0,%1"
		: "=r" (oldval), "=m" (*lock)
		: "0" (0)
		: "memory");

	return oldval > 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 1;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 1;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 1);
}

#elif defined(MIPS_SPINLOCKS)
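/*
 * MIPS: ll/sc (load-linked/store-conditional), the same idea as the
 * PowerPC version: retry the store until no other writer has touched
 * the word in between.  Convention: 0 = unlocked, 1 = locked.
 */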

static inline unsigned int load_linked(unsigned long addr)
{
	unsigned int res;

	__asm__ __volatile__("ll\t%0,(%1)"
		: "=r" (res)
		: "r" (addr));

	return res;
}

static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
	unsigned int res;

	__asm__ __volatile__("sc\t%0,(%2)"
		: "=r" (res)
		: "0" (value), "r" (addr));
	return res;
}

static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int mw;

	do {
		mw = load_linked((unsigned long)lock);
		if (mw)
			return EBUSY;
	} while (!store_conditional((unsigned long)lock, 1));

	asm volatile("":::"memory");

	return 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#else
#error Need to implement spinlock code in spinlock.c
#endif

/*
 * OS SPECIFIC
 */

static void yield_cpu(void)
{
#ifdef USE_SCHED_YIELD
	sched_yield();
#else
	/* Linux will busy loop for delays < 2ms on real time tasks */
	struct timespec tm;

	tm.tv_sec = 0;
	tm.tv_nsec = 2000000L + 1;
	nanosleep(&tm, NULL);
#endif
}

static int this_is_smp(void)
{
	/* SMP detection is stubbed out here: a uniprocessor is assumed,
	 * so contended lockers always yield instead of busy looping. */
	return 0;
}

/*
 * GENERIC
 */

static int smp_machine = 0;

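/*
 * Classic test-and-test-and-set: only retry the atomic trylock once a
 * plain read says the lock looks free.  On SMP we busy loop a bounded
 * number of times before yielding; on UP we yield immediately, since
 * spinning can never make progress there.
 */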
static inline void __spin_lock(spinlock_t *lock)
{
	int ntries = 0;

	while(__spin_trylock(lock)) {
		while(__spin_is_locked(lock)) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

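/*
 * Reader/writer locks built on the spinlock above: the spinlock guards
 * "count", readers increment it, and a writer claims exclusive
 * ownership by setting the RWLOCK_BIAS bit, which keeps new readers
 * out until it is cleared again.
 */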
static void __read_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (!(rwlock->count & RWLOCK_BIAS)) {
			rwlock->count++;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		while(rwlock->count & RWLOCK_BIAS) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

static void __write_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (rwlock->count == 0) {
			rwlock->count |= RWLOCK_BIAS;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		while(rwlock->count != 0) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

static void __write_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

	if (!(rwlock->count & RWLOCK_BIAS))
		dbg("bug: write_unlock");

	rwlock->count &= ~RWLOCK_BIAS;
	__spin_unlock(&rwlock->lock);
}

static void __read_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

	if (!rwlock->count)
		dbg("bug: read_unlock");

	if (rwlock->count & RWLOCK_BIAS)
		dbg("bug: read_unlock");

	rwlock->count--;
	__spin_unlock(&rwlock->lock);
}

/* TDB SPECIFIC */

/* lock a list in the database. list -1 is the alloc list */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_lock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_lock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}
	return 0;
}
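/*
 * Sketch of the intended call pattern (hash_of() stands in for whatever
 * bucket computation the caller uses; it is not part of this file):
 *
 *	if (tdb_spinlock(tdb, hash_of(key), F_RDLCK) == 0) {
 *		... read from the mapped database ...
 *		tdb_spinunlock(tdb, hash_of(key), F_RDLCK);
 *	}
 */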

/* unlock the database. */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_unlock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_unlock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}

	return 0;
}

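/*
 * One rwlock per hash chain plus one for the alloc list (hence the +1,
 * matching the list+1 indexing above).  The array is appended at the
 * file descriptor's current offset; the caller is presumed to record
 * that offset in tdb->header.rwlocks so the mmapped lookups above can
 * find it.
 */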
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
	unsigned size, i;
	tdb_rwlock_t *rwlocks;

	size = (hash_size + 1) * sizeof(tdb_rwlock_t);
	rwlocks = malloc(size);
	if (!rwlocks)
		return -1;

	for(i = 0; i < hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}

	/* Write it out (appending to end) */
	if (write(fd, rwlocks, size) != size) {
		free(rwlocks);
		return -1;
	}
	smp_machine = this_is_smp();
	free(rwlocks);
	return 0;
}

int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_rwlock_t *rwlocks;
	unsigned i;

	if (tdb->header.rwlocks == 0) return 0;
	if (!tdb->map_ptr) return -1;

	/* We're mmapped here */
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
	for(i = 0; i < tdb->header.hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}
	return 0;
}
#else
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }

/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
				- (char *)&tdb->header);

	tdb->header.rwlocks = 0;
	if (lseek(tdb->fd, off, SEEK_SET) != off
	    || write(tdb->fd, (void *)&tdb->header.rwlocks,
		     sizeof(tdb->header.rwlocks))
	    != sizeof(tdb->header.rwlocks))
		return -1;
	return 0;
}
#endif