/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"

/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};

#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4

#define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])

#define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)

static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif

/*
 * Scale msgmni with the available lowmem size: the memory dedicated to msg
 * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
 * Also take into account the number of nsproxies created so far.
 * This should be done staying within the (MSGMNI, IPCMNI/nr_ipc_ns) range.
 */
void recompute_msgmni(struct ipc_namespace *ns)
{
	struct sysinfo i;
	unsigned long allowed;
	int nb_ns;

	si_meminfo(&i);
	allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
		/ MSGMNB;
	nb_ns = atomic_read(&nr_ipc_ns);
	allowed /= nb_ns;

	if (allowed < MSGMNI) {
		ns->msg_ctlmni = MSGMNI;
		goto out_callback;
	}

	if (allowed > IPCMNI / nb_ns) {
		ns->msg_ctlmni = IPCMNI / nb_ns;
		goto out_callback;
	}

	ns->msg_ctlmni = allowed;

out_callback:

	printk(KERN_INFO "msgmni has been set to %d for ipc namespace %p\n",
		ns->msg_ctlmni, ns);
}

void msg_init_ns(struct ipc_namespace *ns)
{
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;

	recompute_msgmni(ns);

	atomic_set(&ns->msg_bytes, 0);
	atomic_set(&ns->msg_hdrs, 0);
	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}

#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &msg_ids(ns), freeque);
}
#endif

void __init msg_init(void)
{
	msg_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/msg",
			" key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
			IPC_MSG_IDS, sysvipc_msg_proc_show);
}

/*
 * This routine is called in the paths where the rw_mutex is held to protect
 * access to the idr tree.
 */
static inline struct msg_queue *msg_lock_check_down(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check_down(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}

/*
 * msg_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}

/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rw_mutex held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int id, retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	/*
	 * ipc_addid() locks msq
	 */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id < 0) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return id;
	}

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	msg_unlock(msq);

	return msq->q_perm.id;
}

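/*
 * Helpers for senders sleeping on a full queue: ss_add() queues the current
 * task on msq->q_senders, ss_del() removes it again, and ss_wakeup() wakes
 * every waiting sender; with 'kill' set (queue removal) it also clears
 * list.next so a later ss_del() does not unlink the entry twice.
 */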
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}

static void ss_wakeup(struct list_head *h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender *mss;

		mss = list_entry(tmp, struct msg_sender, list);
		tmp = tmp->next;
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}

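/*
 * Wake every receiver sleeping on this queue and hand it the error 'res'
 * (-EIDRM when the queue is removed, -EAGAIN after an IPC_SET that may have
 * tightened the permissions).
 */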
static void expunge_all(struct msg_queue *msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct list_head *tmp;
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msg_rmid(ns, msq);
	msg_unlock(msq);

	tmp = msq->q_messages.next;
	while (tmp != &msq->q_messages) {
		struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

		tmp = tmp->next;
		atomic_dec(&ns->msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
	security_msg_queue_free(msq);
	ipc_rcu_putref(msq);
}

/*
 * Called with msg_ids.rw_mutex and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}

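/*
 * msgget(2): find or create the message queue identified by 'key' according
 * to 'msgflg'. The generic ipcget() helper performs the lookup/creation
 * under msg_ids.rw_mutex and calls back into newque()/msg_security().
 *
 * Illustrative userspace usage (not part of this file):
 *
 *	int id = msgget(key, IPC_CREAT | 0600);
 *	struct { long mtype; char mtext[64]; } buf = { 1, "hello" };
 *	msgsnd(id, &buf, sizeof(buf.mtext), 0);
 *	msgrcv(id, &buf, sizeof(buf.mtext), 1, 0);
 */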
asmlinkage long sys_msgget(key_t key, int msgflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops msg_ops;
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_ops.getnew = newque;
	msg_ops.associate = msg_security;
	msg_ops.more_checks = NULL;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}

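/*
 * Copy a kernel struct msqid64_ds to user space, either natively (IPC_64)
 * or converted to the legacy struct msqid_ds layout (IPC_OLD), clamping the
 * wide counters to USHRT_MAX.
 */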
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

struct msq_setbuf {
	unsigned long	qbytes;
	uid_t		uid;
	gid_t		gid;
	mode_t		mode;
};

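/*
 * Pull the fields an IPC_SET may change (qbytes, uid, gid, mode) out of the
 * user-supplied buffer, accepting either the IPC_64 or the legacy IPC_OLD
 * layout.
 */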
static inline unsigned long
copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	{
		struct msqid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->qbytes = tbuf.msg_qbytes;
		out->uid = tbuf.msg_perm.uid;
		out->gid = tbuf.msg_perm.gid;
		out->mode = tbuf.msg_perm.mode;

		return 0;
	}
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid = tbuf_old.msg_perm.uid;
		out->gid = tbuf_old.msg_perm.gid;
		out->mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->qbytes = tbuf_old.msg_lqbytes;
		else
			out->qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some msgctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
		       struct msqid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct msq_setbuf setbuf;
	struct msg_queue *msq;
	int err;

	if (cmd == IPC_SET) {
		if (copy_msqid_from_user(&setbuf, buf, version))
			return -EFAULT;
	}

	down_write(&msg_ids(ns).rw_mutex);
	msq = msg_lock_check_down(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_up;
	}

	ipcp = &msq->q_perm;

	err = audit_ipc_obj(ipcp);
	if (err)
		goto out_unlock;

	if (cmd == IPC_SET) {
		err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid,
					 setbuf.mode);
		if (err)
			goto out_unlock;
	}

	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid &&
	    !capable(CAP_SYS_ADMIN)) {
		/* We _could_ check for CAP_CHOWN above, but we don't */
		err = -EPERM;
		goto out_unlock;
	}

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock;

	switch (cmd) {
	case IPC_RMID:
		freeque(ns, ipcp);
		goto out_up;
	case IPC_SET:
		if (setbuf.qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			goto out_unlock;
		}

		msq->q_qbytes = setbuf.qbytes;

		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
			     (S_IRWXUGO & setbuf.mode);
		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	msg_unlock(msq);
out_up:
	up_write(&msg_ids(ns).rw_mutex);
	return err;
}

asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
{
	struct msg_queue *msq;
	int err, version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;
		/*
		 * We must not return kernel stack data:
		 * due to padding, it's not enough
		 * to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rw_mutex);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
			msginfo.msgtql = atomic_read(&ns->msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rw_mutex);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		if (cmd == MSG_STAT) {
			msq = msg_lock(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = msq->q_perm.id;
		} else {
			msq = msg_lock_check(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
	case IPC_RMID:
		err = msgctl_down(ns, msqid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	msg_unlock(msq);
	return err;
}

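/*
 * Check whether 'msg' satisfies a receive request with the given type and
 * search mode (see the SEARCH_* constants above).
 */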
static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch(mode)
	{
		case SEARCH_ANY:
			return 1;
		case SEARCH_LESSEQUAL:
			if (msg->m_type <= type)
				return 1;
			break;
		case SEARCH_EQUAL:
			if (msg->m_type == type)
				return 1;
			break;
		case SEARCH_NOTEQUAL:
			if (msg->m_type != type)
				return 1;
			break;
	}
	return 0;
}

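/*
 * Try to hand the new message directly to a sleeping receiver instead of
 * queueing it. Returns 1 if a receiver took the message, 0 if it still has
 * to be queued; a matching receiver whose buffer is too small is woken up
 * with -E2BIG.
 */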
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}
	return 0;
}

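/*
 * Common implementation of msgsnd(2): copy the message in from user space,
 * then sleep (unless IPC_NOWAIT) until it fits within the queue limits and
 * either hand it to a waiting receiver or append it to the queue.
 */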
long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_free;
	}

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}

asmlinkage long
sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}

static inline int convert_mode(long *msgtyp, int msgflg)
{
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get message with the lowest type that is <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}

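/*
 * Common implementation of msgrcv(2): pick a matching message off the queue,
 * or register as a sleeping receiver (unless IPC_NOWAIT) and wait until
 * pipelined_send() or expunge_all() hands something over; the "Lockless
 * receive" comments below describe how that wakeup path is synchronized.
 */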
long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
		size_t msgsz, long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;
	struct ipc_namespace *ns;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);
	ns = current->nsproxy->ipc_ns;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq))
		return PTR_ERR(msq);

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {

				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
						walk_msg->m_type != 1) {
					msg = walk_msg;
					msgtyp = walk_msg->m_type - 1;
				} else {
					msg = walk_msg;
					break;
				}
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 3:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 4:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	*pmtype = msg->m_type;
	if (store_msg(mtext, msg, msgsz))
		msgsz = -EFAULT;

	free_msg(msg);

	return msgsz;
}

asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
			   long msgtyp, int msgflg)
{
	long err, mtype;

	err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
	if (err < 0)
		goto out;

	if (put_user(mtype, &msgp->mtype))
		err = -EFAULT;
out:
	return err;
}

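/*
 * Emit one line per message queue for /proc/sysvipc/msg, matching the
 * column header installed by msg_init().
 */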
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct msg_queue *msq = it;

	return seq_printf(s,
			"%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_perm.id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			msq->q_perm.uid,
			msq->q_perm.gid,
			msq->q_perm.cuid,
			msq->q_perm.cgid,
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif