From d9a605e40b1376eb02b067d7690580255a0df68f Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso <davidlohr.bueso@hp.com>
Date: Wed, 11 Sep 2013 14:26:24 -0700
Subject: ipc: rename ids->rw_mutex

From: Davidlohr Bueso <davidlohr.bueso@hp.com>

commit d9a605e40b1376eb02b067d7690580255a0df68f upstream.

Since in some situations the lock can be shared for readers, we shouldn't
be calling it a mutex, rename it to rwsem.

Signed-off-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Tested-by: Sedat Dilek <sedat.dilek@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 include/linux/ipc_namespace.h |    2 -
 ipc/msg.c                     |   20 +++++++--------
 ipc/namespace.c               |    4 +--
 ipc/sem.c                     |   24 +++++++++---------
 ipc/shm.c                     |   56 +++++++++++++++++++++---------------------
 ipc/util.c                    |   28 ++++++++++-----------
 ipc/util.h                    |    4 +--
 7 files changed, 69 insertions(+), 69 deletions(-)
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -22,7 +22,7 @@ struct ipc_ids {
 	int in_use;
 	unsigned short seq;
 	unsigned short seq_max;
-	struct rw_semaphore rw_mutex;
+	struct rw_semaphore rwsem;
 	struct idr ipcs_idr;
 	int next_id;
 };
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -181,7 +181,7 @@ static void msg_rcu_free(struct rcu_head
  * @ns: namespace
  * @params: ptr to the structure that contains the key and msgflg
  *
- * Called with msg_ids.rw_mutex held (writer)
+ * Called with msg_ids.rwsem held (writer)
  */
 static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 {
@@ -267,8 +267,8 @@ static void expunge_all(struct msg_queue
  * removes the message queue from message queue ID IDR, and cleans up all the
  * messages associated with this queue.
  *
- * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
- * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
+ * msg_ids.rwsem (writer) and the spinlock for this message queue are held
+ * before freeque() is called. msg_ids.rwsem remains locked on exit.
  */
 static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 {
@@ -289,7 +289,7 @@ static void freeque(struct ipc_namespace
 }
 
 /*
- * Called with msg_ids.rw_mutex and ipcp locked.
+ * Called with msg_ids.rwsem and ipcp locked.
  */
 static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
 {
@@ -393,9 +393,9 @@ copy_msqid_from_user(struct msqid64_ds *
 }
 
 /*
- * This function handles some msgctl commands which require the rw_mutex
+ * This function handles some msgctl commands which require the rwsem
  * to be held in write mode.
- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ * NOTE: no locks must be held, the rwsem is taken inside this function.
  */
 static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
 		       struct msqid_ds __user *buf, int version)
@@ -410,7 +410,7 @@ static int msgctl_down(struct ipc_namesp
 			return -EFAULT;
 	}
 
-	down_write(&msg_ids(ns).rw_mutex);
+	down_write(&msg_ids(ns).rwsem);
 	rcu_read_lock();
 
 	ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
@@ -466,7 +466,7 @@ out_unlock0:
 out_unlock1:
 	rcu_read_unlock();
 out_up:
-	up_write(&msg_ids(ns).rw_mutex);
+	up_write(&msg_ids(ns).rwsem);
 	return err;
 }
 
@@ -501,7 +501,7 @@ static int msgctl_nolock(struct ipc_name
 		msginfo.msgmnb = ns->msg_ctlmnb;
 		msginfo.msgssz = MSGSSZ;
 		msginfo.msgseg = MSGSEG;
-		down_read(&msg_ids(ns).rw_mutex);
+		down_read(&msg_ids(ns).rwsem);
 		if (cmd == MSG_INFO) {
 			msginfo.msgpool = msg_ids(ns).in_use;
 			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
@@ -512,7 +512,7 @@ static int msgctl_nolock(struct ipc_name
 			msginfo.msgtql = MSGTQL;
 		}
 		max_id = ipc_get_maxid(&msg_ids(ns));
-		up_read(&msg_ids(ns).rw_mutex);
+		up_read(&msg_ids(ns).rwsem);
 		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
 			return -EFAULT;
 		return (max_id < 0) ? 0 : max_id;
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -81,7 +81,7 @@ void free_ipcs(struct ipc_namespace *ns,
 	int next_id;
 	int total, in_use;
 
-	down_write(&ids->rw_mutex);
+	down_write(&ids->rwsem);
 
 	in_use = ids->in_use;
 
@@ -93,7 +93,7 @@ void free_ipcs(struct ipc_namespace *ns,
 		free(ns, perm);
 		total++;
 	}
-	up_write(&ids->rw_mutex);
+	up_write(&ids->rwsem);
 }
 
 static void free_ipc_ns(struct ipc_namespace *ns)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -365,7 +365,7 @@ static inline void sem_unlock(struct sem
 }
 
 /*
- * sem_lock_(check_) routines are called in the paths where the rw_mutex
+ * sem_lock_(check_) routines are called in the paths where the rwsem
  * is not held.
  *
  * The caller holds the RCU read lock.
@@ -464,7 +464,7 @@ static inline void sem_rmid(struct ipc_n
  * @ns: namespace
  * @params: ptr to the structure that contains key, semflg and nsems
  *
- * Called with sem_ids.rw_mutex held (as a writer)
+ * Called with sem_ids.rwsem held (as a writer)
  */
 
 static int newary(struct ipc_namespace *ns, struct ipc_params *params)
@@ -529,7 +529,7 @@ static int newary(struct ipc_namespace *
 
 
 /*
- * Called with sem_ids.rw_mutex and ipcp locked.
+ * Called with sem_ids.rwsem and ipcp locked.
  */
 static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
 {
@@ -540,7 +540,7 @@ static inline int sem_security(struct ke
 }
 
 /*
- * Called with sem_ids.rw_mutex and ipcp locked.
+ * Called with sem_ids.rwsem and ipcp locked.
 */
 static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
 				  struct ipc_params *params)
@@ -1031,8 +1031,8 @@ static int count_semzcnt (struct sem_arr
 	return semzcnt;
 }
 
-/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
- * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex
+/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
+ * as a writer and the spinlock for this semaphore set hold. sem_ids.rwsem
  * remains locked on exit.
  */
 static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
@@ -1152,7 +1152,7 @@ static int semctl_nolock(struct ipc_name
 		seminfo.semmnu = SEMMNU;
 		seminfo.semmap = SEMMAP;
 		seminfo.semume = SEMUME;
-		down_read(&sem_ids(ns).rw_mutex);
+		down_read(&sem_ids(ns).rwsem);
 		if (cmd == SEM_INFO) {
 			seminfo.semusz = sem_ids(ns).in_use;
 			seminfo.semaem = ns->used_sems;
@@ -1161,7 +1161,7 @@ static int semctl_nolock(struct ipc_name
 			seminfo.semaem = SEMAEM;
 		}
 		max_id = ipc_get_maxid(&sem_ids(ns));
-		up_read(&sem_ids(ns).rw_mutex);
+		up_read(&sem_ids(ns).rwsem);
 		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
 			return -EFAULT;
 		return (max_id < 0) ? 0: max_id;
@@ -1467,9 +1467,9 @@ copy_semid_from_user(struct semid64_ds *
 }
 
 /*
- * This function handles some semctl commands which require the rw_mutex
+ * This function handles some semctl commands which require the rwsem
  * to be held in write mode.
- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
 static int semctl_down(struct ipc_namespace *ns, int semid,
 		       int cmd, int version, void __user *p)
@@ -1484,7 +1484,7 @@ static int semctl_down(struct ipc_namesp
 			return -EFAULT;
 	}
 
-	down_write(&sem_ids(ns).rw_mutex);
+	down_write(&sem_ids(ns).rwsem);
 	rcu_read_lock();
 
 	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
@@ -1523,7 +1523,7 @@ out_unlock0:
 out_unlock1:
 	rcu_read_unlock();
 out_up:
-	up_write(&sem_ids(ns).rw_mutex);
+	up_write(&sem_ids(ns).rwsem);
 	return err;
 }
 
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -83,8 +83,8 @@ void shm_init_ns(struct ipc_namespace *n
 }
 
 /*
- * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
- * Only shm_ids.rw_mutex remains locked on exit.
+ * Called with shm_ids.rwsem (writer) and the shp structure locked.
+ * Only shm_ids.rwsem remains locked on exit.
  */
 static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 {
@@ -148,7 +148,7 @@ static inline struct shmid_kernel *shm_o
 }
 
 /*
- * shm_lock_(check_) routines are called in the paths where the rw_mutex
+ * shm_lock_(check_) routines are called in the paths where the rwsem
  * is not necessarily held.
  */
 static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
@@ -214,7 +214,7 @@ static void shm_open(struct vm_area_stru
  * @ns: namespace
  * @shp: struct to free
  *
- * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
+ * It has to be called with shp and shm_ids.rwsem (writer) locked,
  * but returns with shp unlocked and freed.
  */
 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
@@ -261,7 +261,7 @@ static void shm_close(struct vm_area_str
 	struct shmid_kernel *shp;
 	struct ipc_namespace *ns = sfd->ns;
 
-	down_write(&shm_ids(ns).rw_mutex);
+	down_write(&shm_ids(ns).rwsem);
 	/* remove from the list of attaches of the shm segment */
 	shp = shm_lock(ns, sfd->id);
 	BUG_ON(IS_ERR(shp));
@@ -272,10 +272,10 @@ static void shm_close(struct vm_area_str
 		shm_destroy(ns, shp);
 	else
 		shm_unlock(shp);
-	up_write(&shm_ids(ns).rw_mutex);
+	up_write(&shm_ids(ns).rwsem);
 }
 
-/* Called with ns->shm_ids(ns).rw_mutex locked */
+/* Called with ns->shm_ids(ns).rwsem locked */
 static int shm_try_destroy_current(int id, void *p, void *data)
 {
 	struct ipc_namespace *ns = data;
@@ -306,7 +306,7 @@ static int shm_try_destroy_current(int i
 	return 0;
 }
 
-/* Called with ns->shm_ids(ns).rw_mutex locked */
+/* Called with ns->shm_ids(ns).rwsem locked */
 static int shm_try_destroy_orphaned(int id, void *p, void *data)
 {
 	struct ipc_namespace *ns = data;
@@ -317,7 +317,7 @@ static int shm_try_destroy_orphaned(int
 	 * We want to destroy segments without users and with already
 	 * exit'ed originating process.
 	 *
-	 * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
+	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
 	 */
 	if (shp->shm_creator != NULL)
 		return 0;
@@ -331,10 +331,10 @@ static int shm_try_destroy_orphaned(int
 
 void shm_destroy_orphaned(struct ipc_namespace *ns)
 {
-	down_write(&shm_ids(ns).rw_mutex);
+	down_write(&shm_ids(ns).rwsem);
 	if (shm_ids(ns).in_use)
 		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
-	up_write(&shm_ids(ns).rw_mutex);
+	up_write(&shm_ids(ns).rwsem);
 }
 
 
@@ -346,10 +346,10 @@ void exit_shm(struct task_struct *task)
 		return;
 
 	/* Destroy all already created segments, but not mapped yet */
-	down_write(&shm_ids(ns).rw_mutex);
+	down_write(&shm_ids(ns).rwsem);
 	if (shm_ids(ns).in_use)
 		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
-	up_write(&shm_ids(ns).rw_mutex);
+	up_write(&shm_ids(ns).rwsem);
 }
 
 static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -483,7 +483,7 @@ static const struct vm_operations_struct
  * @ns: namespace
  * @params: ptr to the structure that contains key, size and shmflg
  *
- * Called with shm_ids.rw_mutex held as a writer.
+ * Called with shm_ids.rwsem held as a writer.
  */
 
 static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
@@ -590,7 +590,7 @@ no_file:
 }
 
 /*
- * Called with shm_ids.rw_mutex and ipcp locked.
+ * Called with shm_ids.rwsem and ipcp locked.
 */
 static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
 {
@@ -601,7 +601,7 @@ static inline int shm_security(struct ke
 }
 
 /*
- * Called with shm_ids.rw_mutex and ipcp locked.
+ * Called with shm_ids.rwsem and ipcp locked.
 */
 static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
 				  struct ipc_params *params)
@@ -714,7 +714,7 @@ static inline unsigned long copy_shminfo
 
 /*
  * Calculate and add used RSS and swap pages of a shm.
- * Called with shm_ids.rw_mutex held as a reader
+ * Called with shm_ids.rwsem held as a reader
 */
 static void shm_add_rss_swap(struct shmid_kernel *shp,
 	unsigned long *rss_add, unsigned long *swp_add)
@@ -741,7 +741,7 @@ static void shm_add_rss_swap(struct shmi
 }
 
 /*
- * Called with shm_ids.rw_mutex held as a reader
+ * Called with shm_ids.rwsem held as a reader
 */
 static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
 		unsigned long *swp)
@@ -770,9 +770,9 @@ static void shm_get_stat(struct ipc_name
 }
 
 /*
- * This function handles some shmctl commands which require the rw_mutex
+ * This function handles some shmctl commands which require the rwsem
  * to be held in write mode.
- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
 static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
 		       struct shmid_ds __user *buf, int version)
@@ -787,7 +787,7 @@ static int shmctl_down(struct ipc_namesp
 			return -EFAULT;
 	}
 
-	down_write(&shm_ids(ns).rw_mutex);
+	down_write(&shm_ids(ns).rwsem);
 	rcu_read_lock();
 
 	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
@@ -826,7 +826,7 @@ out_unlock0:
 out_unlock1:
 	rcu_read_unlock();
 out_up:
-	up_write(&shm_ids(ns).rw_mutex);
+	up_write(&shm_ids(ns).rwsem);
 	return err;
 }
 
@@ -857,9 +857,9 @@ static int shmctl_nolock(struct ipc_name
 		if(copy_shminfo_to_user (buf, &shminfo, version))
 			return -EFAULT;
 
-		down_read(&shm_ids(ns).rw_mutex);
+		down_read(&shm_ids(ns).rwsem);
 		err = ipc_get_maxid(&shm_ids(ns));
-		up_read(&shm_ids(ns).rw_mutex);
+		up_read(&shm_ids(ns).rwsem);
 
 		if(err<0)
 			err = 0;
@@ -870,14 +870,14 @@ static int shmctl_nolock(struct ipc_name
 		struct shm_info shm_info;
 
 		memset(&shm_info, 0, sizeof(shm_info));
-		down_read(&shm_ids(ns).rw_mutex);
+		down_read(&shm_ids(ns).rwsem);
 		shm_info.used_ids = shm_ids(ns).in_use;
 		shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
 		shm_info.shm_tot = ns->shm_tot;
 		shm_info.swap_attempts = 0;
 		shm_info.swap_successes = 0;
 		err = ipc_get_maxid(&shm_ids(ns));
-		up_read(&shm_ids(ns).rw_mutex);
+		up_read(&shm_ids(ns).rwsem);
 		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
 			err = -EFAULT;
 			goto out;
@@ -1176,7 +1176,7 @@ out_fput:
 	fput(file);
 
 out_nattch:
-	down_write(&shm_ids(ns).rw_mutex);
+	down_write(&shm_ids(ns).rwsem);
 	shp = shm_lock(ns, shmid);
 	BUG_ON(IS_ERR(shp));
 	shp->shm_nattch--;
@@ -1184,7 +1184,7 @@ out_nattch:
 		shm_destroy(ns, shp);
 	else
 		shm_unlock(shp);
-	up_write(&shm_ids(ns).rw_mutex);
+	up_write(&shm_ids(ns).rwsem);
 	return err;
 
 out_unlock:
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -119,7 +119,7 @@ __initcall(ipc_init);
 
 void ipc_init_ids(struct ipc_ids *ids)
 {
-	init_rwsem(&ids->rw_mutex);
+	init_rwsem(&ids->rwsem);
 
 	ids->in_use = 0;
 	ids->seq = 0;
@@ -174,7 +174,7 @@ void __init ipc_init_proc_interface(cons
  * @ids: Identifier set
  * @key: The key to find
  *
- * Requires ipc_ids.rw_mutex locked.
+ * Requires ipc_ids.rwsem locked.
  * Returns the LOCKED pointer to the ipc structure if found or NULL
  * if not.
  * If key is found ipc points to the owning ipc structure
@@ -208,7 +208,7 @@ static struct kern_ipc_perm *ipc_findkey
  * ipc_get_maxid - get the last assigned id
  * @ids: IPC identifier set
  *
- * Called with ipc_ids.rw_mutex held.
+ * Called with ipc_ids.rwsem held.
 */
 
 int ipc_get_maxid(struct ipc_ids *ids)
@@ -246,7 +246,7 @@ int ipc_get_maxid(struct ipc_ids *ids)
  * is returned. The 'new' entry is returned in a locked state on success.
  * On failure the entry is not locked and a negative err-code is returned.
  *
- * Called with writer ipc_ids.rw_mutex held.
+ * Called with writer ipc_ids.rwsem held.
 */
 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
 {
@@ -312,9 +312,9 @@ static int ipcget_new(struct ipc_namespa
 {
 	int err;
 
-	down_write(&ids->rw_mutex);
+	down_write(&ids->rwsem);
 	err = ops->getnew(ns, params);
-	up_write(&ids->rw_mutex);
+	up_write(&ids->rwsem);
 	return err;
 }
 
@@ -331,7 +331,7 @@ static int ipcget_new(struct ipc_namespa
 *
 * On success, the IPC id is returned.
 *
- * It is called with ipc_ids.rw_mutex and ipcp->lock held.
+ * It is called with ipc_ids.rwsem and ipcp->lock held.
 */
 static int ipc_check_perms(struct ipc_namespace *ns,
 			   struct kern_ipc_perm *ipcp,
@@ -376,7 +376,7 @@ static int ipcget_public(struct ipc_name
 	 * Take the lock as a writer since we are potentially going to add
 	 * a new entry + read locks are not "upgradable"
 	 */
-	down_write(&ids->rw_mutex);
+	down_write(&ids->rwsem);
 	ipcp = ipc_findkey(ids, params->key);
 	if (ipcp == NULL) {
 		/* key not used */
@@ -402,7 +402,7 @@ static int ipcget_public(struct ipc_name
 		}
 		ipc_unlock(ipcp);
 	}
-	up_write(&ids->rw_mutex);
+	up_write(&ids->rwsem);
 
 	return err;
 }
@@ -413,7 +413,7 @@ static int ipcget_public(struct ipc_name
 * @ids: IPC identifier set
 * @ipcp: ipc perm structure containing the identifier to remove
 *
- * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
+ * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
 * before this function is called, and remain locked on the exit.
 */
 
@@ -613,7 +613,7 @@ struct kern_ipc_perm *ipc_obtain_object(
 }
 
 /**
- * ipc_lock - Lock an ipc structure without rw_mutex held
+ * ipc_lock - Lock an ipc structure without rwsem held
 * @ids: IPC identifier set
 * @id: ipc id to look for
 *
@@ -740,7 +740,7 @@ int ipc_update_perm(struct ipc64_perm *i
 * - performs some audit and permission check, depending on the given cmd
 * - returns a pointer to the ipc object or otherwise, the corresponding error.
 *
- * Call holding the both the rw_mutex and the rcu read lock.
+ * Call holding the both the rwsem and the rcu read lock.
 */
 struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
 					struct ipc_ids *ids, int id, int cmd,
@@ -860,7 +860,7 @@ static void *sysvipc_proc_start(struct s
 	 * Take the lock - this will be released by the corresponding
 	 * call to stop().
 	 */
-	down_read(&ids->rw_mutex);
+	down_read(&ids->rwsem);
 
 	/* pos < 0 is invalid */
 	if (*pos < 0)
@@ -887,7 +887,7 @@ static void sysvipc_proc_stop(struct seq
 
 	ids = &iter->ns->ids[iface->ids];
 	/* Release the lock we took in start() */
-	up_read(&ids->rw_mutex);
+	up_read(&ids->rwsem);
 }
 
 static int sysvipc_proc_show(struct seq_file *s, void *it)
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -101,10 +101,10 @@ void __init ipc_init_proc_interface(cons
 #define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER)
 #define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER)
 
-/* must be called with ids->rw_mutex acquired for writing */
+/* must be called with ids->rwsem acquired for writing */
 int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
 
-/* must be called with ids->rw_mutex acquired for reading */
+/* must be called with ids->rwsem acquired for reading */
 int ipc_get_maxid(struct ipc_ids *);
 
 /* must be called with both locks acquired. */