/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
 * This code underwent a massive rewrite in order to solve some problems
 * with the original code. In particular the original code failed to
 * wake up processes that were waiting for semval to go to 0 if the
 * value went to 0 and was then incremented rapidly enough. In solving
 * this problem I have also modified the implementation so that it
 * processes pending operations in a FIFO manner, thus giving a guarantee
 * that processes waiting for a lock on the semaphore won't starve
 * unless another locking process fails to unlock.
 * In addition the following two changes in behavior have been introduced:
 * - The original implementation of semop returned the value of the
 *   last semaphore element examined on success. This does not
 *   match the manual page specifications, and effectively
 *   allows the user to read the semaphore even if they do not
 *   have read permissions. The implementation now returns 0
 *   on success as stated in the manual page.
 * - There is some confusion over whether the set of undo adjustments
 *   to be performed at exit should be done in an atomic manner.
 *   That is, if we are attempting to decrement the semval should we queue
 *   up and wait until we can do so legally?
 *   The original implementation attempted to do this.
 *   The current implementation does not do so. This is because I don't
 *   think it is the right thing (TM) to do, and because I couldn't
 *   see a clean way to get the old behavior with the new design.
 *   The POSIX standard and SVID should be consulted to determine
 *   what behavior is mandated.
 *
 * Further notes on refinement (Christoph Rohland, December 1998):
 * - The POSIX standard says that the undo adjustments simply should
 *   redo, so the current implementation is OK.
 * - The previous code had two flaws:
 *   1) It actively gave the semaphore to the next waiting process
 *      sleeping on the semaphore. Since this process did not have the
 *      cpu this led to many unnecessary context switches and bad
 *      performance. Now we only check which process should be able to
 *      get the semaphore and if this process wants to reduce some
 *      semaphore value we simply wake it up without doing the
 *      operation. So it has to try to get it later. Thus e.g. the
 *      running process may reacquire the semaphore during the current
 *      time slice. If it only waits for zero or increases the semaphore,
 *      we do the operation in advance and wake it up.
 *   2) It did not wake up all zero waiting processes. We try to do
 *      better but only get the semops right which only wait for zero or
 *      increase. If there are decrement operations in the operations
 *      array we do the same as before.
 *
 * With the incarnation of the O(1) scheduler, it became unnecessary to
 * perform the check/retry algorithm for waking up blocked processes, as
 * the new scheduler is better at handling thread switches than the old one.
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc <alan@redhat.com>
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
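/*
 * Illustrative userspace sketch (added for exposition, not part of the
 * original file): the FIFO and SEM_UNDO behavior described above as seen
 * through semop(2). Waiters on the "lock" operation are served in arrival
 * order as long as every locker unlocks, and SEM_UNDO makes exit_sem()
 * revert the decrement if the task dies inside the critical section.
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	void with_sem_mutex(int semid)
 *	{
 *		struct sembuf lock   = { .sem_num = 0, .sem_op = -1,
 *					 .sem_flg = SEM_UNDO };
 *		struct sembuf unlock = { .sem_num = 0, .sem_op =  1,
 *					 .sem_flg = SEM_UNDO };
 *
 *		semop(semid, &lock, 1);
 *		... critical section ...
 *		semop(semid, &unlock, 1);
 *	}
 */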

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>

#include <asm/uaccess.h>
#include "util.h"

#define sem_ids(ns)	(*((ns)->ids[IPC_SEM_IDS]))

#define sem_unlock(sma)	ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(ns, sma, semid)	\
	ipc_checkid(&sem_ids(ns), &sma->sem_perm, semid)
#define sem_buildid(ns, id, seq)	\
	ipc_buildid(&sem_ids(ns), id, seq)

static struct ipc_ids init_sem_ids;

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct sem_array *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * linked list protection:
 *	sem_undo.id_next,
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 */

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]

static void __sem_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
	ns->ids[IPC_SEM_IDS] = ids;
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(ids);
}

int sem_init_ns(struct ipc_namespace *ns)
{
	struct ipc_ids *ids;

	ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
	if (ids == NULL)
		return -ENOMEM;

	__sem_init_ns(ns, ids);
	return 0;
}

void sem_exit_ns(struct ipc_namespace *ns)
{
	struct sem_array *sma;
	int next_id;
	int total, in_use;

	mutex_lock(&sem_ids(ns).mutex);

	in_use = sem_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		sma = idr_find(&sem_ids(ns).ipcs_idr, next_id);
		if (sma == NULL)
			continue;
		ipc_lock_by_ptr(&sma->sem_perm);
		freeary(ns, sma);
		total++;
	}
	mutex_unlock(&sem_ids(ns).mutex);

	kfree(ns->ids[IPC_SEM_IDS]);
	ns->ids[IPC_SEM_IDS] = NULL;
}

void __init sem_init (void)
{
	__sem_init_ns(&init_ipc_ns, &init_sem_ids);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}

static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
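/*
 * Note (added for exposition): sem_perm is the first member of
 * struct sem_array, so the container_of() calls above are offset-zero
 * casts. That is why callers can apply IS_ERR()/PTR_ERR() directly to
 * the returned sem_array pointer when ipc_lock()/ipc_lock_check()
 * returns an error pointer.
 */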

/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from sma->sem_pending
 *	* setting queue.status to IN_WAKEUP
 *	  This is the notification for the blocked thread that a
 *	  result value is imminent.
 *	* call wake_up_process
 *	* set queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *	* if it's IN_WAKEUP, then it must wait until the value changes
 *	* if it's not -EINTR, then the operation was completed by
 *	  update_queue. semtimedop can return queue.status without
 *	  performing any operation on the sem array.
 *	* otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 *
 */
#define IN_WAKEUP	1
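/*
 * Condensed side-by-side sketch of the handshake above (illustrative,
 * distilled from update_queue() and sys_semtimedop() below):
 *
 *	waker (holds sma->lock)			waiter (lockless)
 *	-----------------------			-----------------
 *	remove_from_queue(sma, q);		error = queue.status;
 *	q->status = IN_WAKEUP;			while (error == IN_WAKEUP) {
 *	wake_up_process(q->sleeper);			cpu_relax();
 *	smp_wmb();					error = queue.status;
 *	q->status = error;			}
 */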

static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma) {
		return -ENOMEM;
	}
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma);
		return retval;
	}

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if(id == -1) {
		security_sem_free(sma);
		ipc_rcu_putref(sma);
		return -ENOSPC;
	}
	ns->used_sems += nsems;

	sma->sem_perm.id = sem_buildid(ns, id, sma->sem_perm.seq);
	sma->sem_base = (struct sem *) &sma[1];
	/* sma->sem_pending = NULL; */
	sma->sem_pending_last = &sma->sem_pending;
	/* sma->undo = NULL; */
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sma->sem_perm.id;
}

static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}

static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}

asmlinkage long sys_semget(key_t key, int nsems, int semflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops sem_ops;
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_ops.getnew = newary;
	sem_ops.associate = sem_security;
	sem_ops.more_checks = sem_more_checks;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}

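/*
 * Userspace view of the entry point above (illustrative sketch, not part
 * of the original file): create a one-semaphore set and raise it to 1 so
 * it can act as a mutex. The kernel zeroes semval at creation (see the
 * memset() in newary()), so the explicit SETVAL is required.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	int make_binary_sem(void)
 *	{
 *		int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *
 *		if (semid >= 0)
 *			semctl(semid, 0, SETVAL, 1);
 *		return semid;
 *	}
 */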
/* Manage the doubly linked list sma->sem_pending as a FIFO:
 * insert new queue elements at the tail sma->sem_pending_last.
 */
static inline void append_to_queue (struct sem_array * sma,
				    struct sem_queue * q)
{
	*(q->prev = sma->sem_pending_last) = q;
	*(sma->sem_pending_last = &q->next) = NULL;
}

static inline void prepend_to_queue (struct sem_array * sma,
				     struct sem_queue * q)
{
	q->next = sma->sem_pending;
	*(q->prev = &sma->sem_pending) = q;
	if (q->next)
		q->next->prev = &q->next;
	else /* sma->sem_pending_last == &sma->sem_pending */
		sma->sem_pending_last = &q->next;
}

static inline void remove_from_queue (struct sem_array * sma,
				      struct sem_queue * q)
{
	*(q->prev) = q->next;
	if (q->next)
		q->next->prev = q->prev;
	else /* sma->sem_pending_last == &q->next */
		sma->sem_pending_last = q->prev;
	q->prev = NULL; /* mark as removed */
}
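/*
 * Note on the list encoding above (added for exposition): q->prev does not
 * point at the previous element but at whatever pointer currently points
 * at q, i.e. either sma->sem_pending itself or the previous element's
 * ->next field. That is why removal needs no head special-case: the single
 * store *(q->prev) = q->next unlinks q wherever it sits, and only the tail
 * pointer sma->sem_pending_last needs fixing up.
 */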

/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if the task needs to sleep, and an
 * error code otherwise.
 */

static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	sma->sem_otime = get_seconds();
	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
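/*
 * Worked example of the semadj bookkeeping above (illustrative): a task
 * that applies { .sem_op = -2, .sem_flg = SEM_UNDO } to a semaphore with
 * semval == 3 leaves semval == 1 and its per-task semadj at +2 (note the
 * "-=" of a negative sem_op). If the task exits without reversing the
 * operation, exit_sem() adds the +2 back, restoring semval to 3 unless
 * other tasks changed it in the meantime.
 */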

/* Go through the pending queue for the indicated semaphore
 * looking for tasks that can be completed.
 */
static void update_queue (struct sem_array * sma)
{
	int error;
	struct sem_queue * q;

	q = sma->sem_pending;
	while(q) {
		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error <= 0) {
			struct sem_queue *n;
			remove_from_queue(sma,q);
			q->status = IN_WAKEUP;
			/*
			 * Continue scanning. The next operation
			 * that must be checked depends on the type of the
			 * completed operation:
			 * - if the operation modified the array, then
			 *   restart from the head of the queue and
			 *   check for threads that might be waiting
			 *   for semaphore values to become 0.
			 * - if the operation didn't modify the array,
			 *   then just continue.
			 */
			if (q->alter)
				n = sma->sem_pending;
			else
				n = q->next;
			wake_up_process(q->sleeper);
			/* hands-off: q will disappear immediately after
			 * writing q->status.
			 */
			smp_wmb();
			q->status = error;
			q = n;
		} else {
			q = q->next;
		}
	}
}

/* The following counts are associated to each semaphore:
 *	semncnt		number of tasks waiting on semval being nonzero
 *	semzcnt		number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * guarantee that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}
static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue * q;

	semzcnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}

/* Free a semaphore set. freeary() is called with sem_ids.mutex locked
 * and the spinlock for this semaphore set held. sem_ids.mutex remains
 * locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct sem_array *sma)
{
	struct sem_undo *un;
	struct sem_queue *q;

	/* Invalidate the existing undo structures for this semaphore set.
	 * (They will be freed without any further action in exit_sem()
	 * or during the next semop.)
	 */
	for (un = sma->undo; un; un = un->id_next)
		un->semid = -1;

	/* Wake up all pending processes and let them fail with EIDRM. */
	q = sma->sem_pending;
	while(q) {
		struct sem_queue *n;
		/* lazy remove_from_queue: we are killing the whole queue */
		q->prev = NULL;
		n = q->next;
		q->status = IN_WAKEUP;
		wake_up_process(q->sleeper); /* doesn't sleep */
		smp_wmb();
		q->status = -EIDRM;	/* hands-off q */
		q = n;
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma);

	ns->used_sems -= sma->sem_nsems;
	security_sem_free(sma);
	ipc_rcu_putref(sma);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	int err = -EINVAL;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		mutex_lock(&sem_ids(ns).mutex);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		mutex_unlock(&sem_ids(ns).mutex);
		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0: max_id;
	}
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		sma = sem_lock(ns, semid);
		if (IS_ERR(sma))
			return PTR_ERR(sma);

		err = -EACCES;
		if (ipcperms (&sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		id = sma->sem_perm.id;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime	= sma->sem_otime;
		tbuf.sem_ctime	= sma->sem_ctime;
		tbuf.sem_nsems	= sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
	return err;
out_unlock:
	sem_unlock(sma);
	return err;
}

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem* curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort* sem_io = fast_sem_io;
	int nsems;

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return PTR_ERR(sma);

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = arg.array;
		int i;

		if(nsems > SEMMSL_FAST) {
			ipc_rcu_getref(sma);
			sem_unlock(sma);

			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				return -ENOMEM;
			}

			ipc_lock_by_ptr(&sma->sem_perm);
			ipc_rcu_putref(sma);
			if (sma->sem_perm.deleted) {
				sem_unlock(sma);
				err = -EIDRM;
				goto out_free;
			}
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		ipc_rcu_getref(sma);
		sem_unlock(sma);

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				return -ENOMEM;
			}
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			ipc_lock_by_ptr(&sma->sem_perm);
			ipc_rcu_putref(sma);
			sem_unlock(sma);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				err = -ERANGE;
				goto out_free;
			}
		}
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma);
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];
		for (un = sma->undo; un; un = un->id_next)
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	case IPC_STAT:
	{
		struct semid64_ds tbuf;
		memset(&tbuf,0,sizeof(tbuf));
		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime	= sma->sem_otime;
		tbuf.sem_ctime	= sma->sem_ctime;
		tbuf.sem_nsems	= sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return 0;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if(semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;
		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		for (un = sma->undo; un; un = un->id_next)
			un->semadj[semnum] = 0;
		curr->semval = val;
		curr->sempid = task_tgid_vnr(current);
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(sma);
out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
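/*
 * Userspace view of the dispatch above (illustrative sketch, not part of
 * the original file): SETALL hits the SETALL case, GETVAL the GETVAL case.
 * Note that the caller must define union semun itself; the Linux headers
 * do not provide it.
 *
 *	#include <sys/sem.h>
 *
 *	union semun {
 *		int val;
 *		struct semid_ds *buf;
 *		unsigned short *array;
 *	};
 *
 *	void semctl_demo(int semid)
 *	{
 *		unsigned short vals[2] = { 1, 0 };
 *		union semun arg = { .array = vals };
 *		int v;
 *
 *		semctl(semid, 0, SETALL, arg);
 *		v = semctl(semid, 1, GETVAL);
 *	}
 */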

struct sem_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct semid64_ds tbuf;

		if(copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.sem_perm.uid;
		out->gid	= tbuf.sem_perm.gid;
		out->mode	= tbuf.sem_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.sem_perm.uid;
		out->gid	= tbuf_old.sem_perm.gid;
		out->mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static int semctl_down(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct sem_setbuf uninitialized_var(setbuf);
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if(copy_semid_from_user (&setbuf, arg.buf, version))
			return -EFAULT;
	}
	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return PTR_ERR(sma);

	ipcp = &sma->sem_perm;

	err = audit_ipc_obj(ipcp);
	if (err)
		goto out_unlock;

	if (cmd == IPC_SET) {
		err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
		if (err)
			goto out_unlock;
	}
	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
		err=-EPERM;
		goto out_unlock;
	}

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch(cmd){
	case IPC_RMID:
		freeary(ns, sma);
		err = 0;
		break;
	case IPC_SET:
		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
				| (setbuf.mode & S_IRWXUGO);
		sma->sem_ctime = get_seconds();
		sem_unlock(sma);
		err = 0;
		break;
	default:
		sem_unlock(sma);
		err = -EINVAL;
		break;
	}
	return err;

out_unlock:
	sem_unlock(sma);
	return err;
}

asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;
	struct ipc_namespace *ns;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case SEM_STAT:
		err = semctl_nolock(ns,semid,semnum,cmd,version,arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case IPC_STAT:
	case SETVAL:
	case SETALL:
		err = semctl_main(ns,semid,semnum,cmd,version,arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		mutex_lock(&sem_ids(ns).mutex);
		err = semctl_down(ns,semid,semnum,cmd,version,arg);
		mutex_unlock(&sem_ids(ns).mutex);
		return err;
	default:
		return -EINVAL;
	}
}

static inline void lock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (undo_list)
		spin_lock(&undo_list->lock);
}

/* This code has an interaction with copy_semundo().
 * Consider: two tasks are sharing the undo_list. task1
 * acquires the undo_list lock in lock_semundo(). If task2 now
 * exits before task1 releases the lock (by calling
 * unlock_semundo()), then task1 will never call spin_unlock().
 * This leaves the sem_undo_list in a locked state. If task1 now creates
 * task3 and once again shares the sem_undo_list, the sem_undo_list will
 * still be locked, and future SEM_UNDO operations will deadlock. This
 * case is dealt with in copy_semundo() by having it reinitialize the
 * spin lock when the refcnt goes from 1 to 2.
 */
static inline void unlock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (undo_list)
		spin_unlock(&undo_list->lock);
}


/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo **last, *un;

	last = &ulp->proc_list;
	un = *last;
	while(un != NULL) {
		if(un->semid==semid)
			break;
		if(un->semid==-1) {
			*last=un->proc_next;
			kfree(un);
		} else {
			last=&un->proc_next;
		}
		un=*last;
	}
	return un;
}

static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	lock_semundo();
	un = lookup_undo(ulp, semid);
	unlock_semundo();
	if (likely(un!=NULL))
		goto out;

	/* no undo structure around - allocate one. */
	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return ERR_PTR(PTR_ERR(sma));

	nsems = sma->sem_nsems;
	ipc_rcu_getref(sma);
	sem_unlock(sma);

	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		sem_unlock(sma);
		return ERR_PTR(-ENOMEM);
	}
	new->semadj = (short *) &new[1];
	new->semid = semid;

	lock_semundo();
	un = lookup_undo(ulp, semid);
	if (un) {
		unlock_semundo();
		kfree(new);
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		sem_unlock(sma);
		goto out;
	}
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		unlock_semundo();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	new->proc_next = ulp->proc_list;
	ulp->proc_list = new;
	new->id_next = sma->undo;
	sma->undo = new;
	sem_unlock(sma);
	un = new;
	unlock_semundo();
out:
	return un;
}

asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
			unsigned nsops, const struct timespec __user *timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf* sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error=-EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

retry_undos:
	if (undos) {
		un = find_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else
		un = NULL;

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma)) {
		error = PTR_ERR(sma);
		goto out_free;
	}

	/*
	 * semid identifiers are not unique - find_undo may have
	 * allocated an undo structure, it was invalidated by an RMID,
	 * and now a new array has received the same id. Check and retry.
	 */
	if (un && un->semid == -1) {
		sem_unlock(sma);
		goto retry_undos;
	}
	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_unlock_free;

	error = -EACCES;
	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_unlock_free;

	error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
	if (error <= 0) {
		if (alter && error == 0)
			update_queue (sma);
		goto out_unlock_free;
	}

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	queue.sma = sma;
	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.id = semid;
	queue.alter = alter;
	if (alter)
		append_to_queue(sma ,&queue);
	else
		prepend_to_queue(sma ,&queue);

	queue.status = -EINTR;
	queue.sleeper = current;
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma);

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = queue.status;
	while(unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = queue.status;
	}

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources */
		goto out_free;
	}

	sma = sem_lock(ns, semid);
	if (IS_ERR(sma)) {
		BUG_ON(queue.prev != NULL);
		error = -EIDRM;
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we were woken up by another process
	 */
	error = queue.status;
	if (error != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * If an interrupt occurred we have to clean up the queue
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;
	remove_from_queue(sma,&queue);
	goto out_unlock_free;

out_unlock_free:
	sem_unlock(sma);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}

asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
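/*
 * Userspace view of the two entry points above (illustrative sketch, not
 * part of the original file): wait for semaphore 0 to become zero, but
 * give up after two seconds. A NULL timeout degenerates to plain semop(2),
 * as the wrapper above shows on the kernel side. With glibc, semtimedop()
 * needs _GNU_SOURCE.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/sem.h>
 *	#include <time.h>
 *
 *	int wait_for_zero(int semid)
 *	{
 *		struct sembuf op = { .sem_num = 0, .sem_op = 0, .sem_flg = 0 };
 *		struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *
 *		return semtimedop(semid, &op, 1, &ts);
 *	}
 */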

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 *
 * See the notes above unlock_semundo() regarding the spin_lock_init()
 * in this code. Initialize the undo_list->lock here instead of get_undo_list()
 * because of the reasoning in the comment above unlock_semundo.
 */

int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}

/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	struct sem_undo *u, **up;
	struct ipc_namespace *ns;

	undo_list = tsk->sysvsem.undo_list;
	if (!undo_list)
		return;

	if (!atomic_dec_and_test(&undo_list->refcnt))
		return;

	ns = tsk->nsproxy->ipc_ns;
	/* There's no need to hold the semundo list lock, as current
	 * is the last task exiting for this undo list.
	 */
	for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
		struct sem_array *sma;
		int nsems, i;
		struct sem_undo *un, **unp;
		int semid;

		semid = u->semid;

		if(semid == -1)
			continue;
		sma = sem_lock(ns, semid);
		if (IS_ERR(sma))
			continue;

		if (u->semid == -1)
			goto next_entry;

		BUG_ON(sem_checkid(ns,sma,u->semid));

		/* remove u from the sma->undo list */
		for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
			if (u == un)
				goto found;
		}
		printk ("exit_sem undo list error id=%d\n", u->semid);
		goto next_entry;
found:
		*unp = un->id_next;
		/* perform adjustments registered in u */
		nsems = sma->sem_nsems;
		for (i = 0; i < nsems; i++) {
			struct sem * semaphore = &sma->sem_base[i];
			if (u->semadj[i]) {
				semaphore->semval += u->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by SUS:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		sma->sem_otime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
next_entry:
		sem_unlock(sma);
	}
	kfree(undo_list);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct sem_array *sma = it;

	return seq_printf(s,
			  "%10d %10d  %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  sma->sem_perm.uid,
			  sma->sem_perm.gid,
			  sma->sem_perm.cuid,
			  sma->sem_perm.cgid,
			  sma->sem_otime,
			  sma->sem_ctime);
}
#endif