/*
   Unix SMB/CIFS implementation.
   Samba database functions
   Copyright (C) Anton Blanchard 2001

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* udev defines */
#define STANDALONE
#define TDB_DEBUG
#define HAVE_MMAP 1
#include "../udev.h"

#if HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef STANDALONE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include <time.h>
#include <signal.h>
#include "tdb.h"
#include "spinlock.h"

#else
#include "includes.h"
#endif

#ifdef USE_SPINLOCKS

/*
 * ARCH SPECIFIC
 */

#if defined(SPARC_SPINLOCKS)

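/*
 * ldstub atomically loads the lock byte and stores 0xff into it, so a
 * result of 0 means the lock was free and is now held by us; anything
 * else means another CPU got there first.
 */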
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	asm volatile("ldstub [%1], %0"
		: "=r" (result)
		: "r" (lock)
		: "memory");

	return (result == 0) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#elif defined(POWERPC_SPINLOCKS)

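/*
 * lwarx/stwcx. form a load-linked/store-conditional pair: the stwcx.
 * only succeeds if nothing else has touched the reservation since the
 * lwarx.  The isync keeps later accesses from being reordered before
 * the lock acquisition.
 */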
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	li		%0,0\n\
	bne-		2f\n\
	li		%0,1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r"(result)
	: "r"(lock)
	: "cr0", "memory");

	return (result == 1) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("eieio":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#elif defined(INTEL_SPINLOCKS)

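/*
 * Note the inverted encoding on Intel: 1 means unlocked, 0 means locked.
 * xchgl with a memory operand is implicitly locked, so the exchange is
 * atomic; if we swap in a 0 and get the old value 1 back, the lock was
 * free and we now own it.
 */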
static inline int __spin_trylock(spinlock_t *lock)
{
	int oldval;

	asm volatile("xchgl %0,%1"
		: "=r" (oldval), "=m" (*lock)
		: "0" (0)
		: "memory");

	return oldval > 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 1;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 1;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 1);
}

#elif defined(MIPS_SPINLOCKS)

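/*
 * ll/sc (load-linked/store-conditional) work like lwarx/stwcx. above:
 * the sc writes back only if the location is untouched since the ll,
 * and returns whether the store succeeded.
 */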
static inline unsigned int load_linked(unsigned long addr)
{
	unsigned int res;

	__asm__ __volatile__("ll\t%0,(%1)"
		: "=r" (res)
		: "r" (addr));

	return res;
}

static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
	unsigned int res;

	__asm__ __volatile__("sc\t%0,(%2)"
		: "=r" (res)
		: "0" (value), "r" (addr));
	return res;
}

static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int mw;

	do {
		/* cast silences the pointer/integer mismatch warning */
		mw = load_linked((unsigned long)lock);
		if (mw)
			return EBUSY;
	} while (!store_conditional((unsigned long)lock, 1));

	asm volatile("":::"memory");

	return 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#else
#error Need to implement spinlock code in spinlock.c
#endif

/*
 * OS SPECIFIC
 */

static void yield_cpu(void)
{
#ifdef USE_SCHED_YIELD
	sched_yield();
#else
	/* Linux will busy loop for delays < 2ms on real time tasks */
	struct timespec tm;

	tm.tv_sec = 0;
	tm.tv_nsec = 2000000L + 1;
	nanosleep(&tm, NULL);
#endif
}

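/*
 * Always reports a uniprocessor machine here, so the busy-wait path in
 * the loops below is never taken and we always yield straight away.
 */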
static int this_is_smp(void)
{
	return 0;
}

/*
 * GENERIC
 */

static int smp_machine = 0;

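/*
 * Classic test-and-test-and-set: try the atomic operation once, then
 * spin on plain reads (which stay in the local cache) until the lock
 * looks free before trying again.  On SMP we burn a bounded number of
 * busy loops first; on UP we yield immediately, since spinning can only
 * waste the holder's time slice.
 */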
static inline void __spin_lock(spinlock_t *lock)
{
	int ntries = 0;

	while(__spin_trylock(lock)) {
		while(__spin_is_locked(lock)) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

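/*
 * The rwlock is a counter guarded by a spinlock: each reader increments
 * count, and a writer claims the whole lock by setting the RWLOCK_BIAS
 * bit while count is zero.  Waiters re-check the count outside the
 * spinlock so they don't hammer the lock word while blocked.
 */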
static void __read_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (!(rwlock->count & RWLOCK_BIAS)) {
			rwlock->count++;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		while(rwlock->count & RWLOCK_BIAS) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

static void __write_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (rwlock->count == 0) {
			rwlock->count |= RWLOCK_BIAS;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		while(rwlock->count != 0) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

static void __write_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

	if (!(rwlock->count & RWLOCK_BIAS))
		dbg("bug: write_unlock");

	rwlock->count &= ~RWLOCK_BIAS;
	__spin_unlock(&rwlock->lock);
}

static void __read_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

	if (!rwlock->count)
		dbg("bug: read_unlock");

	if (rwlock->count & RWLOCK_BIAS)
		dbg("bug: read_unlock");

	rwlock->count--;
	__spin_unlock(&rwlock->lock);
}

/* TDB SPECIFIC */

/* lock a list in the database. list -1 is the alloc list */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_lock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_lock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}
	return 0;
}

/* unlock the database. */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_unlock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_unlock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}

	return 0;
}
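
/*
 * A minimal usage sketch (hypothetical caller; "bucket" is just an
 * illustrative hash-chain index): lock and unlock must be paired on the
 * same list and rw_type, and passing list = -1 locks the alloc list,
 * since the functions above index rwlocks[list+1].
 *
 *	if (tdb_spinlock(tdb, bucket, F_RDLCK) == 0) {
 *		... walk the hash chain for bucket ...
 *		tdb_spinunlock(tdb, bucket, F_RDLCK);
 *	}
 */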
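/*
 * Allocate and write out hash_size + 1 unlocked rwlocks: one per hash
 * chain plus one extra for the alloc list (list -1), which ends up at
 * index 0.
 */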
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
	unsigned size, i;
	tdb_rwlock_t *rwlocks;

	size = (hash_size + 1) * sizeof(tdb_rwlock_t);
	rwlocks = malloc(size);
	if (!rwlocks)
		return -1;

	for(i = 0; i < hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}

	/* Write it out (appending to end) */
	if (write(fd, rwlocks, size) != (ssize_t)size) {
		free(rwlocks);
		return -1;
	}
	smp_machine = this_is_smp();
	free(rwlocks);
	return 0;
}

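/*
 * Re-initialise every lock word in an already-mapped database, e.g. to
 * clear locks left behind by a process that died while holding them.
 */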
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_rwlock_t *rwlocks;
	unsigned i;

	if (tdb->header.rwlocks == 0) return 0;
	if (!tdb->map_ptr) return -1;

	/* We're mmapped here */
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
	for(i = 0; i < tdb->header.hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}
	return 0;
}
#else
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }

/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
				- (char *)&tdb->header);

	tdb->header.rwlocks = 0;
	if (lseek(tdb->fd, off, SEEK_SET) != off
	    || write(tdb->fd, (void *)&tdb->header.rwlocks,
		     sizeof(tdb->header.rwlocks))
	    != sizeof(tdb->header.rwlocks))
		return -1;
	return 0;
}
#endif