/*
 * linux/fs/locks.c
 *
 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 * Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 * Deadlock detection added.
 * FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 * Converted file_lock_table to a linked list from an array, which eliminates
 * the limits on how many active file locks are open.
 * Chad Page (pageone@netcom.com), November 27, 1994
 *
 * Removed dependency on file descriptors. dup()'ed file descriptors now
 * get the same locks as the original file descriptors, and a close() on
 * any file descriptor removes ALL the locks on the file for the current
 * process. Since locks still depend on the process id, locks are inherited
 * after an exec() but not after a fork(). This agrees with POSIX, and both
 * BSD and SVR4 practice.
 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 * Scrapped free list which is redundant now that we allocate locks
 * dynamically with kmalloc()/kfree().
 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 * FL_POSIX locks are created with calls to fcntl() and lockf() through the
 * fcntl() system call. They have the semantics described above.
 *
 * FL_FLOCK locks are created with calls to flock(), through the flock()
 * system call, which is new. Old C libraries implement flock() via fcntl()
 * and will continue to use the old, broken implementation.
 *
 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 * with a file pointer (filp). As a result they can be shared by a parent
 * process and its children after a fork(). They are removed when the last
 * file descriptor referring to the file pointer is closed (unless explicitly
 * unlocked).
 *
 * FL_FLOCK locks never deadlock, an existing lock is always removed before
 * upgrading from shared to exclusive (or vice versa). When this happens
 * any processes blocked by the current lock are woken up and allowed to
 * run before the new lock is applied.
 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 * Removed some race conditions in flock_lock_file(), marked other possible
 * races. Just grep for FIXME to see them.
 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 * once we've checked for blocking and deadlocking.
 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 * Initial implementation of mandatory locks. SunOS turned out to be
 * a rotten model, so I implemented the "obvious" semantics.
 * See 'Documentation/filesystems/mandatory-locking.txt' for details.
 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 * check if a file has mandatory locks, used by mmap(), open() and creat() to
 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 * Manual, Section 2.
 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 * Tidied up block list handling. Added '/proc/locks' interface.
 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 * Fixed deadlock condition for pathological code that mixes calls to
 * flock() and fcntl().
 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 * guarantee sensible behaviour in the case where file system modules might
 * be compiled with different options than the kernel itself.
 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 * locks. Changed process synchronisation to avoid dereferencing locks that
 * have already been freed.
 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 * Made the block list a circular list to minimise searching in the list.
 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 * Made mandatory locking a mount option. Default is not to allow mandatory
 * locking.
 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 * Some adaptations for NFS support.
 * Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 * Use slab allocator instead of kmalloc/kfree.
 * Use generic list implementation from <linux/list.h>.
 * Sped up posix_locks_deadlock by only considering blocked locks.
 * Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 * Leases and LOCK_MAND
 * Matthew Wilcox <willy@debian.org>, June, 2000.
 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */
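
/*
 * Illustrative userspace sketch (not part of this file) exercising the two
 * lock personalities described above. The path "/tmp/demo" and the byte
 * range are arbitrary; error handling is omitted for brevity.
 *
 *	#include <fcntl.h>
 *	#include <sys/file.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/tmp/demo", O_RDWR | O_CREAT, 0644);
 *
 *		flock(fd, LOCK_EX);		// FL_FLOCK: whole file, tied to the
 *						// open file description, shared
 *						// with children after fork()
 *		flock(fd, LOCK_UN);
 *
 *		struct flock fl = {
 *			.l_type   = F_WRLCK,	// FL_POSIX: byte range, owned by
 *			.l_whence = SEEK_SET,	// the process; any close() by the
 *			.l_start  = 0,		// process drops its locks
 *			.l_len    = 100,
 *		};
 *		fcntl(fd, F_SETLKW, &fl);	// blocks until the range is free
 *		close(fd);			// also releases the POSIX lock
 *		return 0;
 *	}
 */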

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>
#include <linux/lglock.h>

#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG))
#define IS_FILE_PVT(fl)	(fl->fl_flags & FL_FILE_PVT)

static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

int leases_enable = 1;
int lease_break_time = 45;
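
/*
 * Both knobs above are exposed to userspace as sysctls, fs.leases-enable and
 * fs.lease-break-time (/proc/sys/fs/leases-enable and
 * /proc/sys/fs/lease-break-time), so the 45-second default break timeout can
 * be tuned at runtime, e.g.:
 *
 *	# sysctl -w fs.lease-break-time=10
 */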

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock via
 * the file_lock_lglock. Note that alterations to the list also require that
 * the relevant i_lock is held.
 */
DEFINE_STATIC_LGLOCK(file_lock_lglock);
static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
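
/*
 * For orientation, a representative /proc/locks line (illustrative values)
 * looks like:
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5678 0 99
 *
 * i.e. ordinal, personality, mandatory/advisory, type, pid, the
 * major:minor:inode triple, and the byte range (EOF for an open-ended lock).
 */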

/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_block list, and the fl->fl_next
 * pointer for file_lock structures that are acting as lock requests (in
 * contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the i_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * i_lock.
 *
 * In particular, adding an entry to the fl_block list requires that you hold
 * both the i_lock and the blocked_lock_lock (acquired in that order). Deleting
 * an entry from the list however only requires the blocked_lock_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

static struct kmem_cache *filelock_cache __read_mostly;

static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}
	fl->fl_lmops = NULL;
}
EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}

EXPORT_SYMBOL(locks_init_lock);

static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
		new->fl_ops = fl->fl_ops;
	}
	if (fl->fl_lmops)
		new->fl_lmops = fl->fl_lmops;
}

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = NULL;
	new->fl_lmops = NULL;
}
EXPORT_SYMBOL(__locks_copy_lock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	locks_release_private(new);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);

static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
		unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	*lock = fl;
	return 0;
}

static int assign_type(struct file_lock *fl, long type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + l->l_len - 1;

	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	/* Ensure that fl->fl_file has compatible f_mode */
	switch (l->l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			return -EBADF;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			return -EBADF;
		break;
	}

	return assign_type(fl, l->l_type);
}
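
/*
 * Worked example of the range arithmetic above (values are illustrative):
 * with l_whence = SEEK_SET and l_start = 100,
 *
 *	l_len =  10  ->  fl_start = 100, fl_end = 109
 *	l_len = -10  ->  fl_start =  90, fl_end =  99
 *	l_len =   0  ->  fl_start = 100, fl_end = OFFSET_MAX (lock to EOF
 *							      and beyond)
 */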

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}

/* default lease lock manager operations */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Must be called with the i_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
	lg_local_lock(&file_lock_lglock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list));
	lg_local_unlock(&file_lock_lglock);
}

/* Must be called with the i_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the i_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;
	lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
	hlist_del_init(&fl->fl_link);
	lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
}

static unsigned long
posix_owner_key(struct file_lock *fl)
{
	if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
		return fl->fl_lmops->lm_owner_key(fl);
	return (unsigned long)fl->fl_owner;
}

static void locks_insert_global_blocked(struct file_lock *waiter)
{
	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}

static void locks_delete_global_blocked(struct file_lock *waiter)
{
	hash_del(&waiter->fl_link);
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_block);
	waiter->fl_next = NULL;
}

static void locks_delete_block(struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_delete_block(waiter);
	spin_unlock(&blocked_lock_lock);
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the i_lock and blocked_lock_lock held. The fl_block
 * list itself is protected by the blocked_lock_lock, but by ensuring that the
 * i_lock is also held on insertions we can avoid taking the blocked_lock_lock
 * in some cases when we see that the fl_block list is empty.
 */
static void __locks_insert_block(struct file_lock *blocker,
					struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	waiter->fl_next = blocker;
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	if (IS_POSIX(blocker) && !IS_FILE_PVT(blocker))
		locks_insert_global_blocked(waiter);
}

/* Must be called with i_lock held. */
static void locks_insert_block(struct file_lock *blocker,
					struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter);
	spin_unlock(&blocked_lock_lock);
}

/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->i_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the i_lock, and
	 * the i_lock is always held here. Note that removal from the fl_block
	 * list does not require the i_lock, so we must recheck list_empty()
	 * after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_block))
		return;

	spin_lock(&blocked_lock_lock);
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
	spin_unlock(&blocked_lock_lock);
}

/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 *
 * Must be called with the i_lock held!
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	fl->fl_nspid = get_pid(task_tgid(current));

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;

	locks_insert_global_locks(fl);
}

/**
 * locks_unlink_lock - Unlink a lock from the lists it is on
 * @thisfl_p: pointer that points to the fl_next field of the previous
 *	inode->i_flock list entry
 *
 * Unlink a lock from all lists and free the namespace reference, but don't
 * free it yet. Wake up processes that are blocked waiting for this lock and
 * notify the FS that the lock has been cleared.
 *
 * Must be called with the i_lock held!
 */
static void locks_unlink_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	locks_delete_global_locks(fl);

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;

	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}

	locks_wake_up_blocks(fl);
}

/*
 * Unlink a lock from all lists and free it.
 *
 * Must be called with i_lock held!
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	locks_unlink_lock(thisfl_p);
	locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}
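
/*
 * In other words, for two overlapping locks the only compatible combination
 * is read vs. read:
 *
 *			existing F_RDLCK	existing F_WRLCK
 *	F_RDLCK		no conflict		conflict
 *	F_WRLCK		conflict		conflict
 */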

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
		return (0);

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
		return (0);
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct inode *inode = file_inode(filp);

	spin_lock(&inode->i_lock);
	for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(fl, cfl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		if (cfl->fl_nspid)
			fl->fl_pid = pid_vnr(cfl->fl_nspid);
	} else
		fl->fl_type = F_UNLCK;
	spin_unlock(&inode->i_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turn may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
 *
 * For FL_FILE_PVT locks, the owner is the filp, not the files_struct.
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
 * In principle, we could do a more limited deadlock detection on FL_FILE_PVT
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
 */

#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}

/* Must be called with the blocked_lock_lock held! */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_FILE_PVT locks, since they aren't owned by a process, per-se.
	 */
	if (IS_FILE_PVT(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
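
/*
 * Classic scenario the chain walk above catches (illustrative ranges):
 * process A holds a lock on [0,9] and blocks waiting for [10,19]; process B
 * holds [10,19] and now requests [0,9]. Starting from B's conflicting
 * blocker (A's [0,9] lock), what_owner_is_waiting_for() finds that A is
 * itself waiting on a lock owned by B, so posix_locks_deadlock() returns 1
 * and B's request fails with -EDEADLK instead of sleeping forever.
 */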

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode * inode = file_inode(filp);
	int error = 0;
	int found = 0;

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	spin_lock(&inode->i_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	/*
	 * If a higher-priority process was blocked on the old file lock,
	 * give it the opportunity to lock the file.
	 */
	if (found) {
		spin_unlock(&inode->i_lock);
		cond_resched();
		spin_lock(&inode->i_lock);
	}

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock(before, new_fl);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&inode->i_lock);
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}
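
/*
 * Note the consequence for userspace (illustrative): upgrading an flock()
 * lock is not atomic, because the old lock is deleted above before the
 * conflict scan runs. A caller doing
 *
 *	flock(fd, LOCK_SH);
 *	flock(fd, LOCK_EX);	// old shared lock dropped first; another
 *				// process may grab the lock in between
 *
 * can lose the lock briefly during the upgrade, as the header comment at
 * the top of this file warns.
 */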

static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error;
	bool added = false;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure that no new locks will be needed.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	spin_lock(&inode->i_lock);
	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
	 * blocker's list of waiters and the global blocked_hash.
	 */
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				__locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/*
	 * Find the first old lock with the same owner as the new lock.
	 */

	before = &inode->i_flock;

	/* First skip locks owned by other processes. */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner. */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = true;
		}
		else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = true;
			}
		}
		/* Go on to next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	/*
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's still safe to bail out here.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	spin_unlock(&inode->i_lock);
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}
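
/*
 * Worked example of the split path above (illustrative ranges): an owner
 * holds a write lock on [0,99] and then unlocks [40,59]. The existing lock
 * ends up as both "left" and "right", so new_fl2 is consumed: after the
 * copy is inserted and the endpoints are adjusted, the inode holds two
 * locks, [0,39] and [60,99], where there was one. This is why a request
 * that is not a whole-file unlock must be able to allocate two file_lock
 * structures up front, and why -ENOLCK is returned when the second one is
 * missing.
 */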

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return __posix_lock_file(file_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);

/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep ();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);

/**
 * locks_mandatory_locked - Check for an active lock
 * @inode: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	spin_lock(&inode->i_lock);
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	spin_unlock(&inode->i_lock);
	return fl ? -EAGAIN : 0;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
 *	for shared
 * @inode: the file to check
 * @filp: how the file was opened (if it was)
 * @offset: start of area to check
 * @count: length of area to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from rw_verify_area() and
 * locks_verify_truncate().
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);
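
/*
 * For reference, the checks above only bite when mandatory locking is
 * actually enabled: the filesystem must be mounted with the "mand" option
 * and the file must have the set-group-ID bit on with group execute off,
 * per Documentation/filesystems/mandatory-locking.txt. Illustrative shell
 * setup:
 *
 *	# mount -o remount,mand /mnt
 *	$ chmod g+s,g-x /mnt/file	# mark /mnt/file for mandatory locking
 */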

static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through: */
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock(before);
	}
	return 0;
}

EXPORT_SYMBOL(lease_modify);

static bool past_time(unsigned long then)
{
	if (!then)
		/* 0 is a special value meaning "this never expires": */
		return false;
	return time_after(jiffies, then);
}

static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
		if (past_time(fl->fl_downgrade_time))
			lease_modify(before, F_RDLCK);
		if (past_time(fl->fl_break_time))
			lease_modify(before, F_UNLCK);
		if (fl == *before)	/* lease_modify may have freed fl */
			before = &fl->fl_next;
	}
}

static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
		return false;
	return locks_conflict(breaker, lease);
}

/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	break all leases
 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	only delegations
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file. Leases are broken on
 * a call to open() or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
	int error = 0;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	unsigned long break_time;
	int i_have_this_lease = 0;
	bool lease_conflict = false;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);
	new_fl->fl_flags = type;

	spin_lock(&inode->i_lock);

	time_out_leases(inode);

	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (leases_conflict(fl, new_fl)) {
			lease_conflict = true;
			if (fl->fl_owner == current->files)
				i_have_this_lease = 1;
		}
	}
	if (!lease_conflict)
		goto out;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (!leases_conflict(fl, new_fl))
			continue;
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			if (lease_breaking(flock))
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		fl->fl_lmops->lm_break(fl);
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	locks_insert_block(flock, new_fl);
	spin_unlock(&inode->i_lock);
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						!new_fl->fl_next, break_time);
	spin_lock(&inode->i_lock);
	locks_delete_block(new_fl);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);
		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (leases_conflict(new_fl, flock))
				goto restart;
		}
		error = 0;
	}

out:
	spin_unlock(&inode->i_lock);
	locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);

/**
 * lease_get_mtime - get the last modified time of an inode
 * @inode: the inode
 * @time: pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}

EXPORT_SYMBOL(lease_get_mtime);

/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 *	changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = file_inode(filp);
	int type = F_UNLCK;

	spin_lock(&inode->i_lock);
	time_out_leases(file_inode(filp));
	for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = target_leasetype(fl);
			break;
		}
	}
	spin_unlock(&inode->i_lock);
	return type;
}
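
/*
 * Illustrative userspace use of the lease machinery handled above (the path
 * is arbitrary; error handling omitted). The holder receives SIGIO (or the
 * signal chosen with F_SETSIG) when another open() breaks the lease, and
 * then has lease_break_time seconds to finish up and unlock:
 *
 *	#include <fcntl.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static volatile sig_atomic_t lease_broken;
 *	static void on_sigio(int sig) { lease_broken = 1; }
 *
 *	int main(void)
 *	{
 *		int fd = open("/tmp/demo", O_RDONLY);
 *		signal(SIGIO, on_sigio);
 *		fcntl(fd, F_SETLEASE, F_RDLCK);	// take a read lease
 *		// F_GETLEASE now reports F_RDLCK until a break is pending
 *		while (!lease_broken)
 *			pause();
 *		fcntl(fd, F_SETLEASE, F_UNLCK);	// release before the timeout
 *		return 0;
 *	}
 */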

/**
 * check_conflicting_open - see if the given dentry points to a file that has
 *			    an existing open that would conflict with the
 *			    desired lease.
 * @dentry: dentry to check
 * @arg: type of lease that we're trying to acquire
 *
 * Check to see if there's an existing open fd on this file that would
 * conflict with the lease we're trying to set.
 */
static int
check_conflicting_open(const struct dentry *dentry, const long arg)
{
	int ret = 0;
	struct inode *inode = dentry->d_inode;

	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
		return -EAGAIN;

	if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
	    (atomic_read(&inode->i_count) > 1)))
		ret = -EAGAIN;

	return ret;
}

static int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
	int error;

	lease = *flp;
	/*
	 * In the delegation case we need mutual exclusion with
	 * a number of operations that take the i_mutex. We trylock
	 * because delegations are an optional optimization, and if
	 * there's some chance of a conflict--we'd rather not
	 * bother, maybe that's a sign this just isn't a good file to
	 * hand out a delegation on.
	 */
	if (is_deleg && !mutex_trylock(&inode->i_mutex))
		return -EAGAIN;

	if (is_deleg && arg == F_WRLCK) {
		/* Write delegations are not currently supported: */
		mutex_unlock(&inode->i_mutex);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	error = check_conflicting_open(dentry, arg);
	if (error)
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	error = -EAGAIN;
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file == filp) {
			my_before = before;
			continue;
		}
		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
1557 | /* | |
1558 | * Modifying our existing lease is OK, but no getting a | |
1559 | * new lease if someone else is opening for write: | |
1560 | */ | |
1561 | if (fl->fl_flags & FL_UNLOCK_PENDING) | |
1562 | goto out; | |
1563 | } | |
1564 | ||
1565 | if (my_before != NULL) { | |
1566 | error = lease->fl_lmops->lm_change(my_before, arg); | |
1567 | if (!error) | |
1568 | *flp = *my_before; | |
1569 | goto out; | |
1570 | } | |
1571 | ||
1572 | error = -EINVAL; | |
1573 | if (!leases_enable) | |
1574 | goto out; | |
1575 | ||
1576 | locks_insert_lock(before, lease); | |
1577 | /* | |
1578 | * The check in break_lease() is lockless. It's possible for another | |
1579 | * open to race in after we did the earlier check for a conflicting | |
1580 | * open but before the lease was inserted. Check again for a | |
1581 | * conflicting open and cancel the lease if there is one. | |
1582 | * | |
1583 | * We also add a barrier here to ensure that the insertion of the lock | |
1584 | * precedes these checks. | |
1585 | */ | |
1586 | smp_mb(); | |
1587 | error = check_conflicting_open(dentry, arg); | |
1588 | if (error) | |
1589 | locks_unlink_lock(flp); | |
1590 | out: | |
1591 | if (is_deleg) | |
1592 | mutex_unlock(&inode->i_mutex); | |
1593 | return error; | |
1594 | } | |
1595 | ||
static int generic_delete_lease(struct file *filp, struct file_lock **flp)
{
	struct file_lock *fl, **before;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file != filp)
			continue;
		return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
	}
	return -EAGAIN;
}

/**
 * generic_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @flp: input - file_lock to use, output - file_lock inserted
 *
 * The (input) flp->fl_lmops->lm_break function is required
 * by break_lease().
 *
 * Called with inode->i_lock held.
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	time_out_leases(inode);

	BUG_ON(!(*flp)->fl_lmops->lm_break);

	switch (arg) {
	case F_UNLCK:
		return generic_delete_lease(filp, flp);
	case F_RDLCK:
	case F_WRLCK:
		return generic_add_lease(filp, arg, flp);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(generic_setlease);

static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	if (filp->f_op->setlease)
		return filp->f_op->setlease(filp, arg, lease);
	else
		return generic_setlease(filp, arg, lease);
}

/**
 * vfs_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use
 *
 * Call this to establish a lease on the file.
 * The (*lease)->fl_lmops->lm_break operation must be set; if not,
 * break_lease will oops!
 *
 * This will call the filesystem's setlease file method, if
 * defined. Note that there is no getlease method; instead, the
 * filesystem setlease method should call back to setlease() to
 * add a lease to the inode's lease list, where fcntl_getlease() can
 * find it. Since fcntl_getlease() only reports whether the current
 * task holds a lease, a cluster filesystem need only do this for
 * leases held by processes on this node.
 *
 * There is also no break_lease method; filesystems that
 * handle their own leases should break leases themselves from the
 * filesystem's open, create, and (on truncate) setattr methods.
 *
 * Warning: the setlease methods that currently exist serve only to
 * disable leases in certain cases.  More vfs changes may be required
 * to allow a full filesystem lease implementation.
 */

int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	struct inode *inode = file_inode(filp);
	int error;

	spin_lock(&inode->i_lock);
	error = __vfs_setlease(filp, arg, lease);
	spin_unlock(&inode->i_lock);

	return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);

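/*
 * Hedged sketch (hypothetical filesystem, not taken from this tree): as
 * the warning above notes, the setlease methods that exist today serve
 * only to disable leases. Such a method can simply reject every request:
 *
 *	static int example_setlease(struct file *filp, long arg,
 *				    struct file_lock **flp)
 *	{
 *		return -EINVAL;		.. leases unsupported here ..
 *	}
 *
 * and be wired up via the file_operations .setlease member, which
 * __vfs_setlease() above prefers over generic_setlease().
 */
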
static int do_fcntl_delete_lease(struct file *filp)
{
	struct file_lock fl, *flp = &fl;

	lease_init(filp, F_UNLCK, flp);

	return vfs_setlease(filp, F_UNLCK, &flp);
}

static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl, *ret;
	struct inode *inode = file_inode(filp);
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	ret = fl;
	spin_lock(&inode->i_lock);
	error = __vfs_setlease(filp, arg, &ret);
	if (error) {
		spin_unlock(&inode->i_lock);
		locks_free_lock(fl);
		goto out_free_fasync;
	}
	if (ret != fl)
		locks_free_lock(fl);

	/*
	 * fasync_insert_entry() returns the old entry if any.
	 * If there was no old entry, then it used 'new' and
	 * inserted it into the fasync list. Clear new so that
	 * we don't release it here.
	 */
	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
		new = NULL;

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
	spin_unlock(&inode->i_lock);

out_free_fasync:
	if (new)
		fasync_free(new);
	return error;
}

/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	if (arg == F_UNLCK)
		return do_fcntl_delete_lease(filp);
	return do_fcntl_add_lease(fd, filp, arg);
}

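/*
 * Usage sketch (userspace, illustrative): taking a read lease and asking
 * for a real-time signal on lease break, per the note above. The path
 * and signal choice are arbitrary examples.
 *
 *	int fd = open("/tmp/example", O_RDONLY);
 *	fcntl(fd, F_SETSIG, SIGRTMIN);		.. signal sent on break ..
 *	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)
 *		perror("F_SETLEASE");
 *	.. on lease break, finish up and then release:
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 */
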
/**
 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a FLOCK style lock to a file.
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

EXPORT_SYMBOL(flock_lock_file_wait);

/**
 * sys_flock - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of
 *
 * %LOCK_SH -- a shared lock.
 *
 * %LOCK_EX -- an exclusive lock.
 *
 * %LOCK_UN -- remove an existing lock.
 *
 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes.
 *
 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 * processes read and write access respectively.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct fd f = fdget(fd);
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	if (!f.file)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(f.file, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(f.file, lock->fl_type);
	if (error)
		goto out_free;

	if (f.file->f_op->flock)
		error = f.file->f_op->flock(f.file,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(f.file, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fdput(f);
 out:
	return error;
}

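/*
 * Usage sketch (userspace, illustrative; the path is hypothetical):
 * serializing access between cooperating processes with flock().
 * LOCK_NB makes the request non-blocking; without it the call sleeps
 * until the lock is granted.
 *
 *	int fd = open("/tmp/example.lock", O_RDONLY | O_CREAT, 0600);
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
 *		.. EWOULDBLOCK: someone else holds the lock ..
 *	}
 *	.. critical section ..
 *	flock(fd, LOCK_UN);
 */
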
/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
 * setting fl->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);

static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = IS_FILE_PVT(fl) ? -1 : fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}

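/*
 * Worked example of the conversion above (values are illustrative): a
 * kernel lock covering bytes fl_start = 100 .. fl_end = 199 becomes
 * l_start = 100, l_len = 199 - 100 + 1 = 100; a lock extending to
 * OFFSET_MAX becomes l_len = 0, which in struct flock means "to EOF".
 */
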
#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = IS_FILE_PVT(fl) ? -1 : fl->fl_pid;
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif

/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto out;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}

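/*
 * Usage sketch (userspace, illustrative): probing for a conflicting lock
 * with F_GETLK. On return, l_type is either F_UNLCK (the lock could be
 * placed) or describes the first conflicting lock.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		.. 0 means "to EOF" ..
 *	};
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type != F_UNLCK)
 *		printf("conflict with pid %d\n", fl.l_pid);
 */
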
/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return
 * asynchronously, it must return FILE_LOCK_DEFERRED, and call ->lm_grant()
 * when the lock request completes.
 * If the request is for a non-blocking lock, the filesystem should likewise
 * return FILE_LOCK_DEFERRED, then try to get the lock and call the callback
 * routine with the result. If the request timed out, the callback routine
 * will return a nonzero return code and the filesystem should release the
 * lock. The filesystem is also responsible for keeping a corresponding
 * posix lock when it grants a lock, so that the VFS can find out which
 * locks are locally held and do the correct lock cleanup when required.
 * The underlying filesystem must not drop the kernel lock or call
 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);

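/*
 * Hedged sketch (hypothetical filesystem; example_queue_request() is a
 * made-up helper): the asynchronous ->lock() contract described above.
 * The method queues the request, returns FILE_LOCK_DEFERRED without
 * dropping the lock or calling ->lm_grant(), and delivers the result
 * later via fl->fl_lmops->lm_grant() when the request completes.
 *
 *	static int example_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		if (example_queue_request(filp, cmd, fl))
 *			return FILE_LOCK_DEFERRED;	.. lm_grant() called later ..
 *		return posix_lock_file(filp, fl, NULL);	.. local fallback ..
 *	}
 */
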
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}

	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = file_inode(filp);

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	/*
	 * we need that spin_lock here - it prevents reordering between
	 * update of inode->i_flock and check for it done in close().
	 * rcu_read_lock() wouldn't do.
	 */
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
	if (!error && f != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}

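/*
 * Usage sketch (userspace, illustrative): taking a blocking write lock on
 * the first 100 bytes of a file with F_SETLKW, then releasing it. F_SETLK
 * would instead fail with EAGAIN/EACCES if the range were already locked.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 100,
 *	};
 *	fcntl(fd, F_SETLKW, &fl);	.. sleeps until the range is free ..
 *	.. use the locked range ..
 *	fl.l_type = F_UNLCK;
 *	fcntl(fd, F_SETLK, &fl);
 */
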
#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
{
	struct file_lock file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK)
		posix_lock_to_flock64(&flock, &file_lock);

	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

out:
	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		  struct flock64 __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock64 flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = file_inode(filp);

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock64_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW64) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
	if (!error && f != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */

/*
 * This function is called when the file is being removed
 * from the task's fd array. POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	struct file_lock lock;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file(). Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	if (!file_inode(filp)->i_flock)
		return;

	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	vfs_lock_file(filp, F_SETLK, &lock, NULL);

	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
}

EXPORT_SYMBOL(locks_remove_posix);

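/*
 * Pitfall worth illustrating (userspace sketch; the path is hypothetical):
 * because POSIX locks belong to the process, closing *any* descriptor on
 * the file drops them all, which is exactly the cleanup performed above.
 *
 *	int fd1 = open("/tmp/example", O_RDWR);
 *	int fd2 = open("/tmp/example", O_RDWR);	.. same file, second fd ..
 *	.. take a POSIX lock via fd1 ..
 *	close(fd2);	.. silently releases the lock taken via fd1 ..
 */
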
/*
 * This function is called on the last close of an open file.
 */
void locks_remove_file(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct file_lock *fl;
	struct file_lock **before;

	if (!inode->i_flock)
		return;

	if (filp->f_op->flock) {
		struct file_lock fl = {
			.fl_pid = current->tgid,
			.fl_file = filp,
			.fl_flags = FL_FLOCK,
			.fl_type = F_UNLCK,
			.fl_end = OFFSET_MAX,
		};
		filp->f_op->flock(filp, F_SETLKW, &fl);
		if (fl.fl_ops && fl.fl_ops->fl_release_private)
			fl.fl_ops->fl_release_private(&fl);
	}

	spin_lock(&inode->i_lock);
	before = &inode->i_flock;

	while ((fl = *before) != NULL) {
		if (fl->fl_file == filp) {
			if (IS_LEASE(fl)) {
				lease_modify(before, F_UNLCK);
				continue;
			}

			/*
			 * There's a leftover lock on the list of a type that
			 * we didn't expect to see. Most likely a classic
			 * POSIX lock that ended up not getting released
			 * properly, or that raced onto the list somehow. Log
			 * some info about it and then just remove it from
			 * the list.
			 */
			WARN(!IS_FLOCK(fl),
				"leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n",
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino,
				fl->fl_type, fl->fl_flags,
				fl->fl_start, fl->fl_end);

			locks_delete_lock(before);
			continue;
		}
		before = &fl->fl_next;
	}
	spin_unlock(&inode->i_lock);
}

/**
 * posix_unblock_lock - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * lockd needs to block waiting for locks.
 */
int
posix_unblock_lock(struct file_lock *waiter)
{
	int status = 0;

	spin_lock(&blocked_lock_lock);
	if (waiter->fl_next)
		__locks_delete_block(waiter);
	else
		status = -ENOENT;
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(posix_unblock_lock);

/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_CANCELLK, fl);
	return 0;
}

EXPORT_SYMBOL_GPL(vfs_cancel_lock);

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct locks_iterator {
	int	li_cpu;
	loff_t	li_pos;
};

static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			    loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;

	if (fl->fl_nspid)
		fl_pid = pid_vnr(fl->fl_nspid);
	else
		fl_pid = fl->fl_pid;

	if (fl->fl_file != NULL)
		inode = file_inode(fl->fl_file);

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		if (fl->fl_flags & FL_ACCESS)
			seq_printf(f, "ACCESS");
		else if (IS_FILE_PVT(fl))
			seq_printf(f, "FLPVT ");
		else
			seq_printf(f, "POSIX ");

		seq_printf(f, " %s ",
			     (inode == NULL) ? "*NOINODE*" :
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_printf(f, "FLOCK  MSNFS     ");
		} else {
			seq_printf(f, "FLOCK  ADVISORY  ");
		}
	} else if (IS_LEASE(fl)) {
		seq_printf(f, "LEASE  ");
		if (lease_breaking(fl))
			seq_printf(f, "BREAKING  ");
		else if (fl->fl_file)
			seq_printf(f, "ACTIVE    ");
		else
			seq_printf(f, "BREAKER   ");
	} else {
		seq_printf(f, "UNKNOWN UNKNOWN  ");
	}
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		seq_printf(f, "%s ",
			       (lease_breaking(fl))
			       ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
		seq_printf(f, "%d %s:%ld ", fl_pid,
				inode->i_sb->s_id, inode->i_ino);
#else
		/* userspace relies on this representation of dev_t ;-( */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_printf(f, "0 EOF\n");
	}
}

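/*
 * For reference, a hedged sample of the resulting /proc/locks line format
 * (field values are invented): lock class, property, access, pid,
 * major:minor:inode, then the byte range.
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:131090 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1235 08:01:131091 0 EOF
 */
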
static int locks_show(struct seq_file *f, void *v)
{
	struct locks_iterator *iter = f->private;
	struct file_lock *fl, *bfl;

	fl = hlist_entry(v, struct file_lock, fl_link);

	lock_get_status(f, fl, iter->li_pos, "");

	list_for_each_entry(bfl, &fl->fl_block, fl_block)
		lock_get_status(f, bfl, iter->li_pos, " ->");

	return 0;
}

static void *locks_start(struct seq_file *f, loff_t *pos)
	__acquires(&blocked_lock_lock)
{
	struct locks_iterator *iter = f->private;

	iter->li_pos = *pos + 1;
	lg_global_lock(&file_lock_lglock);
	spin_lock(&blocked_lock_lock);
	return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos);
}

static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct locks_iterator *iter = f->private;

	++iter->li_pos;
	return seq_hlist_next_percpu(v, &file_lock_list, &iter->li_cpu, pos);
}

static void locks_stop(struct seq_file *f, void *v)
	__releases(&blocked_lock_lock)
{
	spin_unlock(&blocked_lock_lock);
	lg_global_unlock(&file_lock_lglock);
}

static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};

static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open_private(filp, &locks_seq_operations,
					sizeof(struct locks_iterator));
}

static const struct file_operations proc_locks_operations = {
	.open		= locks_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_locks_init(void)
{
	proc_create("locks", 0, NULL, &proc_locks_operations);
	return 0;
}
module_init(proc_locks_init);
#endif

/**
 * lock_may_read - checks that the region is free of locks
 * @inode: the inode that is being read
 * @start: the first byte to read
 * @len: the number of bytes to read
 *
 * Emulates Windows locking requirements. Whole-file
 * mandatory locks (share modes) can prohibit a read and
 * byte-range POSIX locks can prohibit a read if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;

	spin_lock(&inode->i_lock);
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			if (fl->fl_type == F_RDLCK)
				continue;
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_READ)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	spin_unlock(&inode->i_lock);
	return result;
}

EXPORT_SYMBOL(lock_may_read);

/**
 * lock_may_write - checks that the region is free of locks
 * @inode: the inode that is being written
 * @start: the first byte to write
 * @len: the number of bytes to write
 *
 * Emulates Windows locking requirements. Whole-file
 * mandatory locks (share modes) can prohibit a write and
 * byte-range POSIX locks can prohibit a write if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;

	spin_lock(&inode->i_lock);
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_WRITE)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	spin_unlock(&inode->i_lock);
	return result;
}

EXPORT_SYMBOL(lock_may_write);

static int __init filelock_init(void)
{
	int i;

	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	lg_lock_init(&file_lock_lglock, "file_lock_lglock");

	for_each_possible_cpu(i)
		INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i));

	return 0;
}

core_initcall(filelock_init);