]> git.ipfire.org Git - thirdparty/linux.git/blob - fs/locks.c
Merge tag 'riscv-for-linus-6.8-mw4' of git://git.kernel.org/pub/scm/linux/kernel...
[thirdparty/linux.git] / fs / locks.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/fs/locks.c
4 *
5 * We implement four types of file locks: BSD locks, posix locks, open
6 * file description locks, and leases. For details about BSD locks,
7 * see the flock(2) man page; for details about the other three, see
8 * fcntl(2).
9 *
10 *
11 * Locking conflicts and dependencies:
12 * If multiple threads attempt to lock the same byte (or flock the same file)
13 * only one can be granted the lock, and other must wait their turn.
14 * The first lock has been "applied" or "granted", the others are "waiting"
15 * and are "blocked" by the "applied" lock..
16 *
17 * Waiting and applied locks are all kept in trees whose properties are:
18 *
19 * - the root of a tree may be an applied or waiting lock.
20 * - every other node in the tree is a waiting lock that
21 * conflicts with every ancestor of that node.
22 *
23 * Every such tree begins life as a waiting singleton which obviously
24 * satisfies the above properties.
25 *
26 * The only ways we modify trees preserve these properties:
27 *
28 * 1. We may add a new leaf node, but only after first verifying that it
29 * conflicts with all of its ancestors.
30 * 2. We may remove the root of a tree, creating a new singleton
31 * tree from the root and N new trees rooted in the immediate
32 * children.
33 * 3. If the root of a tree is not currently an applied lock, we may
34 * apply it (if possible).
35 * 4. We may upgrade the root of the tree (either extend its range,
36 * or upgrade its entire range from read to write).
37 *
38 * When an applied lock is modified in a way that reduces or downgrades any
39 * part of its range, we remove all its children (2 above). This particularly
40 * happens when a lock is unlocked.
41 *
42 * For each of those child trees we "wake up" the thread which is
43 * waiting for the lock so it can continue handling as follows: if the
44 * root of the tree applies, we do so (3). If it doesn't, it must
45 * conflict with some applied lock. We remove (wake up) all of its children
46 * (2), and add it is a new leaf to the tree rooted in the applied
47 * lock (1). We then repeat the process recursively with those
48 * children.
49 *
50 */
51
52 #include <linux/capability.h>
53 #include <linux/file.h>
54 #include <linux/fdtable.h>
55 #include <linux/filelock.h>
56 #include <linux/fs.h>
57 #include <linux/init.h>
58 #include <linux/security.h>
59 #include <linux/slab.h>
60 #include <linux/syscalls.h>
61 #include <linux/time.h>
62 #include <linux/rcupdate.h>
63 #include <linux/pid_namespace.h>
64 #include <linux/hashtable.h>
65 #include <linux/percpu.h>
66 #include <linux/sysctl.h>
67
68 #define CREATE_TRACE_POINTS
69 #include <trace/events/filelock.h>
70
71 #include <linux/uaccess.h>
72
73 #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
74 #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
75 #define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
76 #define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK)
77 #define IS_REMOTELCK(fl) (fl->fl_pid <= 0)
78
79 static bool lease_breaking(struct file_lock *fl)
80 {
81 return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
82 }
83
84 static int target_leasetype(struct file_lock *fl)
85 {
86 if (fl->fl_flags & FL_UNLOCK_PENDING)
87 return F_UNLCK;
88 if (fl->fl_flags & FL_DOWNGRADE_PENDING)
89 return F_RDLCK;
90 return fl->fl_type;
91 }
92
93 static int leases_enable = 1;
94 static int lease_break_time = 45;
95
96 #ifdef CONFIG_SYSCTL
97 static struct ctl_table locks_sysctls[] = {
98 {
99 .procname = "leases-enable",
100 .data = &leases_enable,
101 .maxlen = sizeof(int),
102 .mode = 0644,
103 .proc_handler = proc_dointvec,
104 },
105 #ifdef CONFIG_MMU
106 {
107 .procname = "lease-break-time",
108 .data = &lease_break_time,
109 .maxlen = sizeof(int),
110 .mode = 0644,
111 .proc_handler = proc_dointvec,
112 },
113 #endif /* CONFIG_MMU */
114 };
115
116 static int __init init_fs_locks_sysctls(void)
117 {
118 register_sysctl_init("fs", locks_sysctls);
119 return 0;
120 }
121 early_initcall(init_fs_locks_sysctls);
122 #endif /* CONFIG_SYSCTL */
123
124 /*
125 * The global file_lock_list is only used for displaying /proc/locks, so we
126 * keep a list on each CPU, with each list protected by its own spinlock.
127 * Global serialization is done using file_rwsem.
128 *
129 * Note that alterations to the list also require that the relevant flc_lock is
130 * held.
131 */
132 struct file_lock_list_struct {
133 spinlock_t lock;
134 struct hlist_head hlist;
135 };
136 static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
137 DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
138
139
140 /*
141 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
142 * It is protected by blocked_lock_lock.
143 *
144 * We hash locks by lockowner in order to optimize searching for the lock a
145 * particular lockowner is waiting on.
146 *
147 * FIXME: make this value scale via some heuristic? We generally will want more
148 * buckets when we have more lockowners holding locks, but that's a little
149 * difficult to determine without knowing what the workload will look like.
150 */
151 #define BLOCKED_HASH_BITS 7
152 static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
153
154 /*
155 * This lock protects the blocked_hash. Generally, if you're accessing it, you
156 * want to be holding this lock.
157 *
158 * In addition, it also protects the fl->fl_blocked_requests list, and the
159 * fl->fl_blocker pointer for file_lock structures that are acting as lock
160 * requests (in contrast to those that are acting as records of acquired locks).
161 *
162 * Note that when we acquire this lock in order to change the above fields,
163 * we often hold the flc_lock as well. In certain cases, when reading the fields
164 * protected by this lock, we can skip acquiring it iff we already hold the
165 * flc_lock.
166 */
167 static DEFINE_SPINLOCK(blocked_lock_lock);
168
169 static struct kmem_cache *flctx_cache __ro_after_init;
170 static struct kmem_cache *filelock_cache __ro_after_init;
171
172 static struct file_lock_context *
173 locks_get_lock_context(struct inode *inode, int type)
174 {
175 struct file_lock_context *ctx;
176
177 /* paired with cmpxchg() below */
178 ctx = locks_inode_context(inode);
179 if (likely(ctx) || type == F_UNLCK)
180 goto out;
181
182 ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
183 if (!ctx)
184 goto out;
185
186 spin_lock_init(&ctx->flc_lock);
187 INIT_LIST_HEAD(&ctx->flc_flock);
188 INIT_LIST_HEAD(&ctx->flc_posix);
189 INIT_LIST_HEAD(&ctx->flc_lease);
190
191 /*
192 * Assign the pointer if it's not already assigned. If it is, then
193 * free the context we just allocated.
194 */
195 if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
196 kmem_cache_free(flctx_cache, ctx);
197 ctx = locks_inode_context(inode);
198 }
199 out:
200 trace_locks_get_lock_context(inode, type, ctx);
201 return ctx;
202 }
203
204 static void
205 locks_dump_ctx_list(struct list_head *list, char *list_type)
206 {
207 struct file_lock *fl;
208
209 list_for_each_entry(fl, list, fl_list) {
210 pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
211 }
212 }
213
214 static void
215 locks_check_ctx_lists(struct inode *inode)
216 {
217 struct file_lock_context *ctx = inode->i_flctx;
218
219 if (unlikely(!list_empty(&ctx->flc_flock) ||
220 !list_empty(&ctx->flc_posix) ||
221 !list_empty(&ctx->flc_lease))) {
222 pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
223 MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
224 inode->i_ino);
225 locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
226 locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
227 locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
228 }
229 }
230
231 static void
232 locks_check_ctx_file_list(struct file *filp, struct list_head *list,
233 char *list_type)
234 {
235 struct file_lock *fl;
236 struct inode *inode = file_inode(filp);
237
238 list_for_each_entry(fl, list, fl_list)
239 if (fl->fl_file == filp)
240 pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
241 " fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
242 list_type, MAJOR(inode->i_sb->s_dev),
243 MINOR(inode->i_sb->s_dev), inode->i_ino,
244 fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
245 }
246
247 void
248 locks_free_lock_context(struct inode *inode)
249 {
250 struct file_lock_context *ctx = locks_inode_context(inode);
251
252 if (unlikely(ctx)) {
253 locks_check_ctx_lists(inode);
254 kmem_cache_free(flctx_cache, ctx);
255 }
256 }
257
258 static void locks_init_lock_heads(struct file_lock *fl)
259 {
260 INIT_HLIST_NODE(&fl->fl_link);
261 INIT_LIST_HEAD(&fl->fl_list);
262 INIT_LIST_HEAD(&fl->fl_blocked_requests);
263 INIT_LIST_HEAD(&fl->fl_blocked_member);
264 init_waitqueue_head(&fl->fl_wait);
265 }
266
267 /* Allocate an empty lock structure. */
268 struct file_lock *locks_alloc_lock(void)
269 {
270 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
271
272 if (fl)
273 locks_init_lock_heads(fl);
274
275 return fl;
276 }
277 EXPORT_SYMBOL_GPL(locks_alloc_lock);
278
279 void locks_release_private(struct file_lock *fl)
280 {
281 BUG_ON(waitqueue_active(&fl->fl_wait));
282 BUG_ON(!list_empty(&fl->fl_list));
283 BUG_ON(!list_empty(&fl->fl_blocked_requests));
284 BUG_ON(!list_empty(&fl->fl_blocked_member));
285 BUG_ON(!hlist_unhashed(&fl->fl_link));
286
287 if (fl->fl_ops) {
288 if (fl->fl_ops->fl_release_private)
289 fl->fl_ops->fl_release_private(fl);
290 fl->fl_ops = NULL;
291 }
292
293 if (fl->fl_lmops) {
294 if (fl->fl_lmops->lm_put_owner) {
295 fl->fl_lmops->lm_put_owner(fl->fl_owner);
296 fl->fl_owner = NULL;
297 }
298 fl->fl_lmops = NULL;
299 }
300 }
301 EXPORT_SYMBOL_GPL(locks_release_private);
302
303 /**
304 * locks_owner_has_blockers - Check for blocking lock requests
305 * @flctx: file lock context
306 * @owner: lock owner
307 *
308 * Return values:
309 * %true: @owner has at least one blocker
310 * %false: @owner has no blockers
311 */
312 bool locks_owner_has_blockers(struct file_lock_context *flctx,
313 fl_owner_t owner)
314 {
315 struct file_lock *fl;
316
317 spin_lock(&flctx->flc_lock);
318 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
319 if (fl->fl_owner != owner)
320 continue;
321 if (!list_empty(&fl->fl_blocked_requests)) {
322 spin_unlock(&flctx->flc_lock);
323 return true;
324 }
325 }
326 spin_unlock(&flctx->flc_lock);
327 return false;
328 }
329 EXPORT_SYMBOL_GPL(locks_owner_has_blockers);
330
331 /* Free a lock which is not in use. */
332 void locks_free_lock(struct file_lock *fl)
333 {
334 locks_release_private(fl);
335 kmem_cache_free(filelock_cache, fl);
336 }
337 EXPORT_SYMBOL(locks_free_lock);
338
339 static void
340 locks_dispose_list(struct list_head *dispose)
341 {
342 struct file_lock *fl;
343
344 while (!list_empty(dispose)) {
345 fl = list_first_entry(dispose, struct file_lock, fl_list);
346 list_del_init(&fl->fl_list);
347 locks_free_lock(fl);
348 }
349 }
350
351 void locks_init_lock(struct file_lock *fl)
352 {
353 memset(fl, 0, sizeof(struct file_lock));
354 locks_init_lock_heads(fl);
355 }
356 EXPORT_SYMBOL(locks_init_lock);
357
358 /*
359 * Initialize a new lock from an existing file_lock structure.
360 */
361 void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
362 {
363 new->fl_owner = fl->fl_owner;
364 new->fl_pid = fl->fl_pid;
365 new->fl_file = NULL;
366 new->fl_flags = fl->fl_flags;
367 new->fl_type = fl->fl_type;
368 new->fl_start = fl->fl_start;
369 new->fl_end = fl->fl_end;
370 new->fl_lmops = fl->fl_lmops;
371 new->fl_ops = NULL;
372
373 if (fl->fl_lmops) {
374 if (fl->fl_lmops->lm_get_owner)
375 fl->fl_lmops->lm_get_owner(fl->fl_owner);
376 }
377 }
378 EXPORT_SYMBOL(locks_copy_conflock);
379
380 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
381 {
382 /* "new" must be a freshly-initialized lock */
383 WARN_ON_ONCE(new->fl_ops);
384
385 locks_copy_conflock(new, fl);
386
387 new->fl_file = fl->fl_file;
388 new->fl_ops = fl->fl_ops;
389
390 if (fl->fl_ops) {
391 if (fl->fl_ops->fl_copy_lock)
392 fl->fl_ops->fl_copy_lock(new, fl);
393 }
394 }
395 EXPORT_SYMBOL(locks_copy_lock);
396
397 static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
398 {
399 struct file_lock *f;
400
401 /*
402 * As ctx->flc_lock is held, new requests cannot be added to
403 * ->fl_blocked_requests, so we don't need a lock to check if it
404 * is empty.
405 */
406 if (list_empty(&fl->fl_blocked_requests))
407 return;
408 spin_lock(&blocked_lock_lock);
409 list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
410 list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
411 f->fl_blocker = new;
412 spin_unlock(&blocked_lock_lock);
413 }
414
415 static inline int flock_translate_cmd(int cmd) {
416 switch (cmd) {
417 case LOCK_SH:
418 return F_RDLCK;
419 case LOCK_EX:
420 return F_WRLCK;
421 case LOCK_UN:
422 return F_UNLCK;
423 }
424 return -EINVAL;
425 }
426
427 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
428 static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
429 {
430 locks_init_lock(fl);
431
432 fl->fl_file = filp;
433 fl->fl_owner = filp;
434 fl->fl_pid = current->tgid;
435 fl->fl_flags = FL_FLOCK;
436 fl->fl_type = type;
437 fl->fl_end = OFFSET_MAX;
438 }
439
440 static int assign_type(struct file_lock *fl, int type)
441 {
442 switch (type) {
443 case F_RDLCK:
444 case F_WRLCK:
445 case F_UNLCK:
446 fl->fl_type = type;
447 break;
448 default:
449 return -EINVAL;
450 }
451 return 0;
452 }
453
454 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
455 struct flock64 *l)
456 {
457 switch (l->l_whence) {
458 case SEEK_SET:
459 fl->fl_start = 0;
460 break;
461 case SEEK_CUR:
462 fl->fl_start = filp->f_pos;
463 break;
464 case SEEK_END:
465 fl->fl_start = i_size_read(file_inode(filp));
466 break;
467 default:
468 return -EINVAL;
469 }
470 if (l->l_start > OFFSET_MAX - fl->fl_start)
471 return -EOVERFLOW;
472 fl->fl_start += l->l_start;
473 if (fl->fl_start < 0)
474 return -EINVAL;
475
476 /* POSIX-1996 leaves the case l->l_len < 0 undefined;
477 POSIX-2001 defines it. */
478 if (l->l_len > 0) {
479 if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
480 return -EOVERFLOW;
481 fl->fl_end = fl->fl_start + (l->l_len - 1);
482
483 } else if (l->l_len < 0) {
484 if (fl->fl_start + l->l_len < 0)
485 return -EINVAL;
486 fl->fl_end = fl->fl_start - 1;
487 fl->fl_start += l->l_len;
488 } else
489 fl->fl_end = OFFSET_MAX;
490
491 fl->fl_owner = current->files;
492 fl->fl_pid = current->tgid;
493 fl->fl_file = filp;
494 fl->fl_flags = FL_POSIX;
495 fl->fl_ops = NULL;
496 fl->fl_lmops = NULL;
497
498 return assign_type(fl, l->l_type);
499 }
500
501 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
502 * style lock.
503 */
504 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
505 struct flock *l)
506 {
507 struct flock64 ll = {
508 .l_type = l->l_type,
509 .l_whence = l->l_whence,
510 .l_start = l->l_start,
511 .l_len = l->l_len,
512 };
513
514 return flock64_to_posix_lock(filp, fl, &ll);
515 }
516
517 /* default lease lock manager operations */
518 static bool
519 lease_break_callback(struct file_lock *fl)
520 {
521 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
522 return false;
523 }
524
525 static void
526 lease_setup(struct file_lock *fl, void **priv)
527 {
528 struct file *filp = fl->fl_file;
529 struct fasync_struct *fa = *priv;
530
531 /*
532 * fasync_insert_entry() returns the old entry if any. If there was no
533 * old entry, then it used "priv" and inserted it into the fasync list.
534 * Clear the pointer to indicate that it shouldn't be freed.
535 */
536 if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
537 *priv = NULL;
538
539 __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
540 }
541
542 static const struct lock_manager_operations lease_manager_ops = {
543 .lm_break = lease_break_callback,
544 .lm_change = lease_modify,
545 .lm_setup = lease_setup,
546 };
547
548 /*
549 * Initialize a lease, use the default lock manager operations
550 */
551 static int lease_init(struct file *filp, int type, struct file_lock *fl)
552 {
553 if (assign_type(fl, type) != 0)
554 return -EINVAL;
555
556 fl->fl_owner = filp;
557 fl->fl_pid = current->tgid;
558
559 fl->fl_file = filp;
560 fl->fl_flags = FL_LEASE;
561 fl->fl_start = 0;
562 fl->fl_end = OFFSET_MAX;
563 fl->fl_ops = NULL;
564 fl->fl_lmops = &lease_manager_ops;
565 return 0;
566 }
567
568 /* Allocate a file_lock initialised to this type of lease */
569 static struct file_lock *lease_alloc(struct file *filp, int type)
570 {
571 struct file_lock *fl = locks_alloc_lock();
572 int error = -ENOMEM;
573
574 if (fl == NULL)
575 return ERR_PTR(error);
576
577 error = lease_init(filp, type, fl);
578 if (error) {
579 locks_free_lock(fl);
580 return ERR_PTR(error);
581 }
582 return fl;
583 }
584
585 /* Check if two locks overlap each other.
586 */
587 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
588 {
589 return ((fl1->fl_end >= fl2->fl_start) &&
590 (fl2->fl_end >= fl1->fl_start));
591 }
592
593 /*
594 * Check whether two locks have the same owner.
595 */
596 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
597 {
598 return fl1->fl_owner == fl2->fl_owner;
599 }
600
601 /* Must be called with the flc_lock held! */
602 static void locks_insert_global_locks(struct file_lock *fl)
603 {
604 struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
605
606 percpu_rwsem_assert_held(&file_rwsem);
607
608 spin_lock(&fll->lock);
609 fl->fl_link_cpu = smp_processor_id();
610 hlist_add_head(&fl->fl_link, &fll->hlist);
611 spin_unlock(&fll->lock);
612 }
613
614 /* Must be called with the flc_lock held! */
615 static void locks_delete_global_locks(struct file_lock *fl)
616 {
617 struct file_lock_list_struct *fll;
618
619 percpu_rwsem_assert_held(&file_rwsem);
620
621 /*
622 * Avoid taking lock if already unhashed. This is safe since this check
623 * is done while holding the flc_lock, and new insertions into the list
624 * also require that it be held.
625 */
626 if (hlist_unhashed(&fl->fl_link))
627 return;
628
629 fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
630 spin_lock(&fll->lock);
631 hlist_del_init(&fl->fl_link);
632 spin_unlock(&fll->lock);
633 }
634
635 static unsigned long
636 posix_owner_key(struct file_lock *fl)
637 {
638 return (unsigned long)fl->fl_owner;
639 }
640
641 static void locks_insert_global_blocked(struct file_lock *waiter)
642 {
643 lockdep_assert_held(&blocked_lock_lock);
644
645 hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
646 }
647
648 static void locks_delete_global_blocked(struct file_lock *waiter)
649 {
650 lockdep_assert_held(&blocked_lock_lock);
651
652 hash_del(&waiter->fl_link);
653 }
654
655 /* Remove waiter from blocker's block list.
656 * When blocker ends up pointing to itself then the list is empty.
657 *
658 * Must be called with blocked_lock_lock held.
659 */
660 static void __locks_delete_block(struct file_lock *waiter)
661 {
662 locks_delete_global_blocked(waiter);
663 list_del_init(&waiter->fl_blocked_member);
664 }
665
666 static void __locks_wake_up_blocks(struct file_lock *blocker)
667 {
668 while (!list_empty(&blocker->fl_blocked_requests)) {
669 struct file_lock *waiter;
670
671 waiter = list_first_entry(&blocker->fl_blocked_requests,
672 struct file_lock, fl_blocked_member);
673 __locks_delete_block(waiter);
674 if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
675 waiter->fl_lmops->lm_notify(waiter);
676 else
677 wake_up(&waiter->fl_wait);
678
679 /*
680 * The setting of fl_blocker to NULL marks the "done"
681 * point in deleting a block. Paired with acquire at the top
682 * of locks_delete_block().
683 */
684 smp_store_release(&waiter->fl_blocker, NULL);
685 }
686 }
687
688 /**
689 * locks_delete_block - stop waiting for a file lock
690 * @waiter: the lock which was waiting
691 *
692 * lockd/nfsd need to disconnect the lock while working on it.
693 */
694 int locks_delete_block(struct file_lock *waiter)
695 {
696 int status = -ENOENT;
697
698 /*
699 * If fl_blocker is NULL, it won't be set again as this thread "owns"
700 * the lock and is the only one that might try to claim the lock.
701 *
702 * We use acquire/release to manage fl_blocker so that we can
703 * optimize away taking the blocked_lock_lock in many cases.
704 *
705 * The smp_load_acquire guarantees two things:
706 *
707 * 1/ that fl_blocked_requests can be tested locklessly. If something
708 * was recently added to that list it must have been in a locked region
709 * *before* the locked region when fl_blocker was set to NULL.
710 *
711 * 2/ that no other thread is accessing 'waiter', so it is safe to free
712 * it. __locks_wake_up_blocks is careful not to touch waiter after
713 * fl_blocker is released.
714 *
715 * If a lockless check of fl_blocker shows it to be NULL, we know that
716 * no new locks can be inserted into its fl_blocked_requests list, and
717 * can avoid doing anything further if the list is empty.
718 */
719 if (!smp_load_acquire(&waiter->fl_blocker) &&
720 list_empty(&waiter->fl_blocked_requests))
721 return status;
722
723 spin_lock(&blocked_lock_lock);
724 if (waiter->fl_blocker)
725 status = 0;
726 __locks_wake_up_blocks(waiter);
727 __locks_delete_block(waiter);
728
729 /*
730 * The setting of fl_blocker to NULL marks the "done" point in deleting
731 * a block. Paired with acquire at the top of this function.
732 */
733 smp_store_release(&waiter->fl_blocker, NULL);
734 spin_unlock(&blocked_lock_lock);
735 return status;
736 }
737 EXPORT_SYMBOL(locks_delete_block);
738
739 /* Insert waiter into blocker's block list.
740 * We use a circular list so that processes can be easily woken up in
741 * the order they blocked. The documentation doesn't require this but
742 * it seems like the reasonable thing to do.
743 *
744 * Must be called with both the flc_lock and blocked_lock_lock held. The
745 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
746 * but by ensuring that the flc_lock is also held on insertions we can avoid
747 * taking the blocked_lock_lock in some cases when we see that the
748 * fl_blocked_requests list is empty.
749 *
750 * Rather than just adding to the list, we check for conflicts with any existing
751 * waiters, and add beneath any waiter that blocks the new waiter.
752 * Thus wakeups don't happen until needed.
753 */
754 static void __locks_insert_block(struct file_lock *blocker,
755 struct file_lock *waiter,
756 bool conflict(struct file_lock *,
757 struct file_lock *))
758 {
759 struct file_lock *fl;
760 BUG_ON(!list_empty(&waiter->fl_blocked_member));
761
762 new_blocker:
763 list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
764 if (conflict(fl, waiter)) {
765 blocker = fl;
766 goto new_blocker;
767 }
768 waiter->fl_blocker = blocker;
769 list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
770 if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
771 locks_insert_global_blocked(waiter);
772
773 /* The requests in waiter->fl_blocked are known to conflict with
774 * waiter, but might not conflict with blocker, or the requests
775 * and lock which block it. So they all need to be woken.
776 */
777 __locks_wake_up_blocks(waiter);
778 }
779
780 /* Must be called with flc_lock held. */
781 static void locks_insert_block(struct file_lock *blocker,
782 struct file_lock *waiter,
783 bool conflict(struct file_lock *,
784 struct file_lock *))
785 {
786 spin_lock(&blocked_lock_lock);
787 __locks_insert_block(blocker, waiter, conflict);
788 spin_unlock(&blocked_lock_lock);
789 }
790
791 /*
792 * Wake up processes blocked waiting for blocker.
793 *
794 * Must be called with the inode->flc_lock held!
795 */
796 static void locks_wake_up_blocks(struct file_lock *blocker)
797 {
798 /*
799 * Avoid taking global lock if list is empty. This is safe since new
800 * blocked requests are only added to the list under the flc_lock, and
801 * the flc_lock is always held here. Note that removal from the
802 * fl_blocked_requests list does not require the flc_lock, so we must
803 * recheck list_empty() after acquiring the blocked_lock_lock.
804 */
805 if (list_empty(&blocker->fl_blocked_requests))
806 return;
807
808 spin_lock(&blocked_lock_lock);
809 __locks_wake_up_blocks(blocker);
810 spin_unlock(&blocked_lock_lock);
811 }
812
813 static void
814 locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
815 {
816 list_add_tail(&fl->fl_list, before);
817 locks_insert_global_locks(fl);
818 }
819
820 static void
821 locks_unlink_lock_ctx(struct file_lock *fl)
822 {
823 locks_delete_global_locks(fl);
824 list_del_init(&fl->fl_list);
825 locks_wake_up_blocks(fl);
826 }
827
828 static void
829 locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
830 {
831 locks_unlink_lock_ctx(fl);
832 if (dispose)
833 list_add(&fl->fl_list, dispose);
834 else
835 locks_free_lock(fl);
836 }
837
838 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
839 * checks for shared/exclusive status of overlapping locks.
840 */
841 static bool locks_conflict(struct file_lock *caller_fl,
842 struct file_lock *sys_fl)
843 {
844 if (sys_fl->fl_type == F_WRLCK)
845 return true;
846 if (caller_fl->fl_type == F_WRLCK)
847 return true;
848 return false;
849 }
850
851 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
852 * checking before calling the locks_conflict().
853 */
854 static bool posix_locks_conflict(struct file_lock *caller_fl,
855 struct file_lock *sys_fl)
856 {
857 /* POSIX locks owned by the same process do not conflict with
858 * each other.
859 */
860 if (posix_same_owner(caller_fl, sys_fl))
861 return false;
862
863 /* Check whether they overlap */
864 if (!locks_overlap(caller_fl, sys_fl))
865 return false;
866
867 return locks_conflict(caller_fl, sys_fl);
868 }
869
870 /* Determine if lock sys_fl blocks lock caller_fl. Used on xx_GETLK
871 * path so checks for additional GETLK-specific things like F_UNLCK.
872 */
873 static bool posix_test_locks_conflict(struct file_lock *caller_fl,
874 struct file_lock *sys_fl)
875 {
876 /* F_UNLCK checks any locks on the same fd. */
877 if (caller_fl->fl_type == F_UNLCK) {
878 if (!posix_same_owner(caller_fl, sys_fl))
879 return false;
880 return locks_overlap(caller_fl, sys_fl);
881 }
882 return posix_locks_conflict(caller_fl, sys_fl);
883 }
884
885 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
886 * checking before calling the locks_conflict().
887 */
888 static bool flock_locks_conflict(struct file_lock *caller_fl,
889 struct file_lock *sys_fl)
890 {
891 /* FLOCK locks referring to the same filp do not conflict with
892 * each other.
893 */
894 if (caller_fl->fl_file == sys_fl->fl_file)
895 return false;
896
897 return locks_conflict(caller_fl, sys_fl);
898 }
899
900 void
901 posix_test_lock(struct file *filp, struct file_lock *fl)
902 {
903 struct file_lock *cfl;
904 struct file_lock_context *ctx;
905 struct inode *inode = file_inode(filp);
906 void *owner;
907 void (*func)(void);
908
909 ctx = locks_inode_context(inode);
910 if (!ctx || list_empty_careful(&ctx->flc_posix)) {
911 fl->fl_type = F_UNLCK;
912 return;
913 }
914
915 retry:
916 spin_lock(&ctx->flc_lock);
917 list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
918 if (!posix_test_locks_conflict(fl, cfl))
919 continue;
920 if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
921 && (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
922 owner = cfl->fl_lmops->lm_mod_owner;
923 func = cfl->fl_lmops->lm_expire_lock;
924 __module_get(owner);
925 spin_unlock(&ctx->flc_lock);
926 (*func)();
927 module_put(owner);
928 goto retry;
929 }
930 locks_copy_conflock(fl, cfl);
931 goto out;
932 }
933 fl->fl_type = F_UNLCK;
934 out:
935 spin_unlock(&ctx->flc_lock);
936 return;
937 }
938 EXPORT_SYMBOL(posix_test_lock);
939
940 /*
941 * Deadlock detection:
942 *
943 * We attempt to detect deadlocks that are due purely to posix file
944 * locks.
945 *
946 * We assume that a task can be waiting for at most one lock at a time.
947 * So for any acquired lock, the process holding that lock may be
948 * waiting on at most one other lock. That lock in turns may be held by
949 * someone waiting for at most one other lock. Given a requested lock
950 * caller_fl which is about to wait for a conflicting lock block_fl, we
951 * follow this chain of waiters to ensure we are not about to create a
952 * cycle.
953 *
954 * Since we do this before we ever put a process to sleep on a lock, we
955 * are ensured that there is never a cycle; that is what guarantees that
956 * the while() loop in posix_locks_deadlock() eventually completes.
957 *
958 * Note: the above assumption may not be true when handling lock
959 * requests from a broken NFS client. It may also fail in the presence
960 * of tasks (such as posix threads) sharing the same open file table.
961 * To handle those cases, we just bail out after a few iterations.
962 *
963 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
964 * Because the owner is not even nominally tied to a thread of
965 * execution, the deadlock detection below can't reasonably work well. Just
966 * skip it for those.
967 *
968 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
969 * locks that just checks for the case where two tasks are attempting to
970 * upgrade from read to write locks on the same inode.
971 */
972
973 #define MAX_DEADLK_ITERATIONS 10
974
975 /* Find a lock that the owner of the given block_fl is blocking on. */
976 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
977 {
978 struct file_lock *fl;
979
980 hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
981 if (posix_same_owner(fl, block_fl)) {
982 while (fl->fl_blocker)
983 fl = fl->fl_blocker;
984 return fl;
985 }
986 }
987 return NULL;
988 }
989
990 /* Must be called with the blocked_lock_lock held! */
991 static int posix_locks_deadlock(struct file_lock *caller_fl,
992 struct file_lock *block_fl)
993 {
994 int i = 0;
995
996 lockdep_assert_held(&blocked_lock_lock);
997
998 /*
999 * This deadlock detector can't reasonably detect deadlocks with
1000 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
1001 */
1002 if (IS_OFDLCK(caller_fl))
1003 return 0;
1004
1005 while ((block_fl = what_owner_is_waiting_for(block_fl))) {
1006 if (i++ > MAX_DEADLK_ITERATIONS)
1007 return 0;
1008 if (posix_same_owner(caller_fl, block_fl))
1009 return 1;
1010 }
1011 return 0;
1012 }
1013
1014 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
1015 * after any leases, but before any posix locks.
1016 *
1017 * Note that if called with an FL_EXISTS argument, the caller may determine
1018 * whether or not a lock was successfully freed by testing the return
1019 * value for -ENOENT.
1020 */
1021 static int flock_lock_inode(struct inode *inode, struct file_lock *request)
1022 {
1023 struct file_lock *new_fl = NULL;
1024 struct file_lock *fl;
1025 struct file_lock_context *ctx;
1026 int error = 0;
1027 bool found = false;
1028 LIST_HEAD(dispose);
1029
1030 ctx = locks_get_lock_context(inode, request->fl_type);
1031 if (!ctx) {
1032 if (request->fl_type != F_UNLCK)
1033 return -ENOMEM;
1034 return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
1035 }
1036
1037 if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
1038 new_fl = locks_alloc_lock();
1039 if (!new_fl)
1040 return -ENOMEM;
1041 }
1042
1043 percpu_down_read(&file_rwsem);
1044 spin_lock(&ctx->flc_lock);
1045 if (request->fl_flags & FL_ACCESS)
1046 goto find_conflict;
1047
1048 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1049 if (request->fl_file != fl->fl_file)
1050 continue;
1051 if (request->fl_type == fl->fl_type)
1052 goto out;
1053 found = true;
1054 locks_delete_lock_ctx(fl, &dispose);
1055 break;
1056 }
1057
1058 if (request->fl_type == F_UNLCK) {
1059 if ((request->fl_flags & FL_EXISTS) && !found)
1060 error = -ENOENT;
1061 goto out;
1062 }
1063
1064 find_conflict:
1065 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1066 if (!flock_locks_conflict(request, fl))
1067 continue;
1068 error = -EAGAIN;
1069 if (!(request->fl_flags & FL_SLEEP))
1070 goto out;
1071 error = FILE_LOCK_DEFERRED;
1072 locks_insert_block(fl, request, flock_locks_conflict);
1073 goto out;
1074 }
1075 if (request->fl_flags & FL_ACCESS)
1076 goto out;
1077 locks_copy_lock(new_fl, request);
1078 locks_move_blocks(new_fl, request);
1079 locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
1080 new_fl = NULL;
1081 error = 0;
1082
1083 out:
1084 spin_unlock(&ctx->flc_lock);
1085 percpu_up_read(&file_rwsem);
1086 if (new_fl)
1087 locks_free_lock(new_fl);
1088 locks_dispose_list(&dispose);
1089 trace_flock_lock_inode(inode, request, error);
1090 return error;
1091 }
1092
1093 static int posix_lock_inode(struct inode *inode, struct file_lock *request,
1094 struct file_lock *conflock)
1095 {
1096 struct file_lock *fl, *tmp;
1097 struct file_lock *new_fl = NULL;
1098 struct file_lock *new_fl2 = NULL;
1099 struct file_lock *left = NULL;
1100 struct file_lock *right = NULL;
1101 struct file_lock_context *ctx;
1102 int error;
1103 bool added = false;
1104 LIST_HEAD(dispose);
1105 void *owner;
1106 void (*func)(void);
1107
1108 ctx = locks_get_lock_context(inode, request->fl_type);
1109 if (!ctx)
1110 return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
1111
1112 /*
1113 * We may need two file_lock structures for this operation,
1114 * so we get them in advance to avoid races.
1115 *
1116 * In some cases we can be sure, that no new locks will be needed
1117 */
1118 if (!(request->fl_flags & FL_ACCESS) &&
1119 (request->fl_type != F_UNLCK ||
1120 request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
1121 new_fl = locks_alloc_lock();
1122 new_fl2 = locks_alloc_lock();
1123 }
1124
1125 retry:
1126 percpu_down_read(&file_rwsem);
1127 spin_lock(&ctx->flc_lock);
1128 /*
1129 * New lock request. Walk all POSIX locks and look for conflicts. If
1130 * there are any, either return error or put the request on the
1131 * blocker's list of waiters and the global blocked_hash.
1132 */
1133 if (request->fl_type != F_UNLCK) {
1134 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1135 if (!posix_locks_conflict(request, fl))
1136 continue;
1137 if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
1138 && (*fl->fl_lmops->lm_lock_expirable)(fl)) {
1139 owner = fl->fl_lmops->lm_mod_owner;
1140 func = fl->fl_lmops->lm_expire_lock;
1141 __module_get(owner);
1142 spin_unlock(&ctx->flc_lock);
1143 percpu_up_read(&file_rwsem);
1144 (*func)();
1145 module_put(owner);
1146 goto retry;
1147 }
1148 if (conflock)
1149 locks_copy_conflock(conflock, fl);
1150 error = -EAGAIN;
1151 if (!(request->fl_flags & FL_SLEEP))
1152 goto out;
1153 /*
1154 * Deadlock detection and insertion into the blocked
1155 * locks list must be done while holding the same lock!
1156 */
1157 error = -EDEADLK;
1158 spin_lock(&blocked_lock_lock);
1159 /*
1160 * Ensure that we don't find any locks blocked on this
1161 * request during deadlock detection.
1162 */
1163 __locks_wake_up_blocks(request);
1164 if (likely(!posix_locks_deadlock(request, fl))) {
1165 error = FILE_LOCK_DEFERRED;
1166 __locks_insert_block(fl, request,
1167 posix_locks_conflict);
1168 }
1169 spin_unlock(&blocked_lock_lock);
1170 goto out;
1171 }
1172 }
1173
1174 /* If we're just looking for a conflict, we're done. */
1175 error = 0;
1176 if (request->fl_flags & FL_ACCESS)
1177 goto out;
1178
1179 /* Find the first old lock with the same owner as the new lock */
1180 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1181 if (posix_same_owner(request, fl))
1182 break;
1183 }
1184
1185 /* Process locks with this owner. */
1186 list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
1187 if (!posix_same_owner(request, fl))
1188 break;
1189
1190 /* Detect adjacent or overlapping regions (if same lock type) */
1191 if (request->fl_type == fl->fl_type) {
1192 /* In all comparisons of start vs end, use
1193 * "start - 1" rather than "end + 1". If end
1194 * is OFFSET_MAX, end + 1 will become negative.
1195 */
1196 if (fl->fl_end < request->fl_start - 1)
1197 continue;
1198 /* If the next lock in the list has entirely bigger
1199 * addresses than the new one, insert the lock here.
1200 */
1201 if (fl->fl_start - 1 > request->fl_end)
1202 break;
1203
1204 /* If we come here, the new and old lock are of the
1205 * same type and adjacent or overlapping. Make one
1206 * lock yielding from the lower start address of both
1207 * locks to the higher end address.
1208 */
1209 if (fl->fl_start > request->fl_start)
1210 fl->fl_start = request->fl_start;
1211 else
1212 request->fl_start = fl->fl_start;
1213 if (fl->fl_end < request->fl_end)
1214 fl->fl_end = request->fl_end;
1215 else
1216 request->fl_end = fl->fl_end;
1217 if (added) {
1218 locks_delete_lock_ctx(fl, &dispose);
1219 continue;
1220 }
1221 request = fl;
1222 added = true;
1223 } else {
1224 /* Processing for different lock types is a bit
1225 * more complex.
1226 */
1227 if (fl->fl_end < request->fl_start)
1228 continue;
1229 if (fl->fl_start > request->fl_end)
1230 break;
1231 if (request->fl_type == F_UNLCK)
1232 added = true;
1233 if (fl->fl_start < request->fl_start)
1234 left = fl;
1235 /* If the next lock in the list has a higher end
1236 * address than the new one, insert the new one here.
1237 */
1238 if (fl->fl_end > request->fl_end) {
1239 right = fl;
1240 break;
1241 }
1242 if (fl->fl_start >= request->fl_start) {
1243 /* The new lock completely replaces an old
1244 * one (This may happen several times).
1245 */
1246 if (added) {
1247 locks_delete_lock_ctx(fl, &dispose);
1248 continue;
1249 }
1250 /*
1251 * Replace the old lock with new_fl, and
1252 * remove the old one. It's safe to do the
1253 * insert here since we know that we won't be
1254 * using new_fl later, and that the lock is
1255 * just replacing an existing lock.
1256 */
1257 error = -ENOLCK;
1258 if (!new_fl)
1259 goto out;
1260 locks_copy_lock(new_fl, request);
1261 locks_move_blocks(new_fl, request);
1262 request = new_fl;
1263 new_fl = NULL;
1264 locks_insert_lock_ctx(request, &fl->fl_list);
1265 locks_delete_lock_ctx(fl, &dispose);
1266 added = true;
1267 }
1268 }
1269 }
1270
1271 /*
1272 * The above code only modifies existing locks in case of merging or
1273 * replacing. If new lock(s) need to be inserted all modifications are
1274 * done below this, so it's safe yet to bail out.
1275 */
1276 error = -ENOLCK; /* "no luck" */
1277 if (right && left == right && !new_fl2)
1278 goto out;
1279
1280 error = 0;
1281 if (!added) {
1282 if (request->fl_type == F_UNLCK) {
1283 if (request->fl_flags & FL_EXISTS)
1284 error = -ENOENT;
1285 goto out;
1286 }
1287
1288 if (!new_fl) {
1289 error = -ENOLCK;
1290 goto out;
1291 }
1292 locks_copy_lock(new_fl, request);
1293 locks_move_blocks(new_fl, request);
1294 locks_insert_lock_ctx(new_fl, &fl->fl_list);
1295 fl = new_fl;
1296 new_fl = NULL;
1297 }
1298 if (right) {
1299 if (left == right) {
1300 /* The new lock breaks the old one in two pieces,
1301 * so we have to use the second new lock.
1302 */
1303 left = new_fl2;
1304 new_fl2 = NULL;
1305 locks_copy_lock(left, right);
1306 locks_insert_lock_ctx(left, &fl->fl_list);
1307 }
1308 right->fl_start = request->fl_end + 1;
1309 locks_wake_up_blocks(right);
1310 }
1311 if (left) {
1312 left->fl_end = request->fl_start - 1;
1313 locks_wake_up_blocks(left);
1314 }
1315 out:
1316 spin_unlock(&ctx->flc_lock);
1317 percpu_up_read(&file_rwsem);
1318 trace_posix_lock_inode(inode, request, error);
1319 /*
1320 * Free any unused locks.
1321 */
1322 if (new_fl)
1323 locks_free_lock(new_fl);
1324 if (new_fl2)
1325 locks_free_lock(new_fl2);
1326 locks_dispose_list(&dispose);
1327
1328 return error;
1329 }
1330
1331 /**
1332 * posix_lock_file - Apply a POSIX-style lock to a file
1333 * @filp: The file to apply the lock to
1334 * @fl: The lock to be applied
1335 * @conflock: Place to return a copy of the conflicting lock, if found.
1336 *
1337 * Add a POSIX style lock to a file.
1338 * We merge adjacent & overlapping locks whenever possible.
1339 * POSIX locks are sorted by owner task, then by starting address
1340 *
1341 * Note that if called with an FL_EXISTS argument, the caller may determine
1342 * whether or not a lock was successfully freed by testing the return
1343 * value for -ENOENT.
1344 */
1345 int posix_lock_file(struct file *filp, struct file_lock *fl,
1346 struct file_lock *conflock)
1347 {
1348 return posix_lock_inode(file_inode(filp), fl, conflock);
1349 }
1350 EXPORT_SYMBOL(posix_lock_file);
1351
1352 /**
1353 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
1354 * @inode: inode of file to which lock request should be applied
1355 * @fl: The lock to be applied
1356 *
1357 * Apply a POSIX style lock request to an inode.
1358 */
1359 static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1360 {
1361 int error;
1362 might_sleep ();
1363 for (;;) {
1364 error = posix_lock_inode(inode, fl, NULL);
1365 if (error != FILE_LOCK_DEFERRED)
1366 break;
1367 error = wait_event_interruptible(fl->fl_wait,
1368 list_empty(&fl->fl_blocked_member));
1369 if (error)
1370 break;
1371 }
1372 locks_delete_block(fl);
1373 return error;
1374 }
1375
1376 static void lease_clear_pending(struct file_lock *fl, int arg)
1377 {
1378 switch (arg) {
1379 case F_UNLCK:
1380 fl->fl_flags &= ~FL_UNLOCK_PENDING;
1381 fallthrough;
1382 case F_RDLCK:
1383 fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1384 }
1385 }
1386
1387 /* We already had a lease on this file; just change its type */
1388 int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
1389 {
1390 int error = assign_type(fl, arg);
1391
1392 if (error)
1393 return error;
1394 lease_clear_pending(fl, arg);
1395 locks_wake_up_blocks(fl);
1396 if (arg == F_UNLCK) {
1397 struct file *filp = fl->fl_file;
1398
1399 f_delown(filp);
1400 filp->f_owner.signum = 0;
1401 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1402 if (fl->fl_fasync != NULL) {
1403 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1404 fl->fl_fasync = NULL;
1405 }
1406 locks_delete_lock_ctx(fl, dispose);
1407 }
1408 return 0;
1409 }
1410 EXPORT_SYMBOL(lease_modify);
1411
1412 static bool past_time(unsigned long then)
1413 {
1414 if (!then)
1415 /* 0 is a special value meaning "this never expires": */
1416 return false;
1417 return time_after(jiffies, then);
1418 }
1419
1420 static void time_out_leases(struct inode *inode, struct list_head *dispose)
1421 {
1422 struct file_lock_context *ctx = inode->i_flctx;
1423 struct file_lock *fl, *tmp;
1424
1425 lockdep_assert_held(&ctx->flc_lock);
1426
1427 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1428 trace_time_out_leases(inode, fl);
1429 if (past_time(fl->fl_downgrade_time))
1430 lease_modify(fl, F_RDLCK, dispose);
1431 if (past_time(fl->fl_break_time))
1432 lease_modify(fl, F_UNLCK, dispose);
1433 }
1434 }
1435
1436 static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1437 {
1438 bool rc;
1439
1440 if (lease->fl_lmops->lm_breaker_owns_lease
1441 && lease->fl_lmops->lm_breaker_owns_lease(lease))
1442 return false;
1443 if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
1444 rc = false;
1445 goto trace;
1446 }
1447 if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
1448 rc = false;
1449 goto trace;
1450 }
1451
1452 rc = locks_conflict(breaker, lease);
1453 trace:
1454 trace_leases_conflict(rc, lease, breaker);
1455 return rc;
1456 }
1457
1458 static bool
1459 any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1460 {
1461 struct file_lock_context *ctx = inode->i_flctx;
1462 struct file_lock *fl;
1463
1464 lockdep_assert_held(&ctx->flc_lock);
1465
1466 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1467 if (leases_conflict(fl, breaker))
1468 return true;
1469 }
1470 return false;
1471 }
1472
1473 /**
1474 * __break_lease - revoke all outstanding leases on file
1475 * @inode: the inode of the file to return
1476 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
1477 * break all leases
1478 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
1479 * only delegations
1480 *
1481 * break_lease (inlined for speed) has checked there already is at least
1482 * some kind of lock (maybe a lease) on this file. Leases are broken on
1483 * a call to open() or truncate(). This function can sleep unless you
1484 * specified %O_NONBLOCK to your open().
1485 */
1486 int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1487 {
1488 int error = 0;
1489 struct file_lock_context *ctx;
1490 struct file_lock *new_fl, *fl, *tmp;
1491 unsigned long break_time;
1492 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1493 LIST_HEAD(dispose);
1494
1495 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1496 if (IS_ERR(new_fl))
1497 return PTR_ERR(new_fl);
1498 new_fl->fl_flags = type;
1499
1500 /* typically we will check that ctx is non-NULL before calling */
1501 ctx = locks_inode_context(inode);
1502 if (!ctx) {
1503 WARN_ON_ONCE(1);
1504 goto free_lock;
1505 }
1506
1507 percpu_down_read(&file_rwsem);
1508 spin_lock(&ctx->flc_lock);
1509
1510 time_out_leases(inode, &dispose);
1511
1512 if (!any_leases_conflict(inode, new_fl))
1513 goto out;
1514
1515 break_time = 0;
1516 if (lease_break_time > 0) {
1517 break_time = jiffies + lease_break_time * HZ;
1518 if (break_time == 0)
1519 break_time++; /* so that 0 means no break time */
1520 }
1521
1522 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1523 if (!leases_conflict(fl, new_fl))
1524 continue;
1525 if (want_write) {
1526 if (fl->fl_flags & FL_UNLOCK_PENDING)
1527 continue;
1528 fl->fl_flags |= FL_UNLOCK_PENDING;
1529 fl->fl_break_time = break_time;
1530 } else {
1531 if (lease_breaking(fl))
1532 continue;
1533 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1534 fl->fl_downgrade_time = break_time;
1535 }
1536 if (fl->fl_lmops->lm_break(fl))
1537 locks_delete_lock_ctx(fl, &dispose);
1538 }
1539
1540 if (list_empty(&ctx->flc_lease))
1541 goto out;
1542
1543 if (mode & O_NONBLOCK) {
1544 trace_break_lease_noblock(inode, new_fl);
1545 error = -EWOULDBLOCK;
1546 goto out;
1547 }
1548
1549 restart:
1550 fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1551 break_time = fl->fl_break_time;
1552 if (break_time != 0)
1553 break_time -= jiffies;
1554 if (break_time == 0)
1555 break_time++;
1556 locks_insert_block(fl, new_fl, leases_conflict);
1557 trace_break_lease_block(inode, new_fl);
1558 spin_unlock(&ctx->flc_lock);
1559 percpu_up_read(&file_rwsem);
1560
1561 locks_dispose_list(&dispose);
1562 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1563 list_empty(&new_fl->fl_blocked_member),
1564 break_time);
1565
1566 percpu_down_read(&file_rwsem);
1567 spin_lock(&ctx->flc_lock);
1568 trace_break_lease_unblock(inode, new_fl);
1569 locks_delete_block(new_fl);
1570 if (error >= 0) {
1571 /*
1572 * Wait for the next conflicting lease that has not been
1573 * broken yet
1574 */
1575 if (error == 0)
1576 time_out_leases(inode, &dispose);
1577 if (any_leases_conflict(inode, new_fl))
1578 goto restart;
1579 error = 0;
1580 }
1581 out:
1582 spin_unlock(&ctx->flc_lock);
1583 percpu_up_read(&file_rwsem);
1584 locks_dispose_list(&dispose);
1585 free_lock:
1586 locks_free_lock(new_fl);
1587 return error;
1588 }
1589 EXPORT_SYMBOL(__break_lease);
1590
1591 /**
1592 * lease_get_mtime - update modified time of an inode with exclusive lease
1593 * @inode: the inode
1594 * @time: pointer to a timespec which contains the last modified time
1595 *
1596 * This is to force NFS clients to flush their caches for files with
1597 * exclusive leases. The justification is that if someone has an
1598 * exclusive lease, then they could be modifying it.
1599 */
1600 void lease_get_mtime(struct inode *inode, struct timespec64 *time)
1601 {
1602 bool has_lease = false;
1603 struct file_lock_context *ctx;
1604 struct file_lock *fl;
1605
1606 ctx = locks_inode_context(inode);
1607 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1608 spin_lock(&ctx->flc_lock);
1609 fl = list_first_entry_or_null(&ctx->flc_lease,
1610 struct file_lock, fl_list);
1611 if (fl && (fl->fl_type == F_WRLCK))
1612 has_lease = true;
1613 spin_unlock(&ctx->flc_lock);
1614 }
1615
1616 if (has_lease)
1617 *time = current_time(inode);
1618 }
1619 EXPORT_SYMBOL(lease_get_mtime);
1620
1621 /**
1622 * fcntl_getlease - Enquire what lease is currently active
1623 * @filp: the file
1624 *
1625 * The value returned by this function will be one of
1626 * (if no lease break is pending):
1627 *
1628 * %F_RDLCK to indicate a shared lease is held.
1629 *
1630 * %F_WRLCK to indicate an exclusive lease is held.
1631 *
1632 * %F_UNLCK to indicate no lease is held.
1633 *
1634 * (if a lease break is pending):
1635 *
1636 * %F_RDLCK to indicate an exclusive lease needs to be
1637 * changed to a shared lease (or removed).
1638 *
1639 * %F_UNLCK to indicate the lease needs to be removed.
1640 *
1641 * XXX: sfr & willy disagree over whether F_INPROGRESS
1642 * should be returned to userspace.
1643 */
1644 int fcntl_getlease(struct file *filp)
1645 {
1646 struct file_lock *fl;
1647 struct inode *inode = file_inode(filp);
1648 struct file_lock_context *ctx;
1649 int type = F_UNLCK;
1650 LIST_HEAD(dispose);
1651
1652 ctx = locks_inode_context(inode);
1653 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1654 percpu_down_read(&file_rwsem);
1655 spin_lock(&ctx->flc_lock);
1656 time_out_leases(inode, &dispose);
1657 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1658 if (fl->fl_file != filp)
1659 continue;
1660 type = target_leasetype(fl);
1661 break;
1662 }
1663 spin_unlock(&ctx->flc_lock);
1664 percpu_up_read(&file_rwsem);
1665
1666 locks_dispose_list(&dispose);
1667 }
1668 return type;
1669 }
1670
1671 /**
1672 * check_conflicting_open - see if the given file points to an inode that has
1673 * an existing open that would conflict with the
1674 * desired lease.
1675 * @filp: file to check
1676 * @arg: type of lease that we're trying to acquire
1677 * @flags: current lock flags
1678 *
1679 * Check to see if there's an existing open fd on this file that would
1680 * conflict with the lease we're trying to set.
1681 */
1682 static int
1683 check_conflicting_open(struct file *filp, const int arg, int flags)
1684 {
1685 struct inode *inode = file_inode(filp);
1686 int self_wcount = 0, self_rcount = 0;
1687
1688 if (flags & FL_LAYOUT)
1689 return 0;
1690 if (flags & FL_DELEG)
1691 /* We leave these checks to the caller */
1692 return 0;
1693
1694 if (arg == F_RDLCK)
1695 return inode_is_open_for_write(inode) ? -EAGAIN : 0;
1696 else if (arg != F_WRLCK)
1697 return 0;
1698
1699 /*
1700 * Make sure that only read/write count is from lease requestor.
1701 * Note that this will result in denying write leases when i_writecount
1702 * is negative, which is what we want. (We shouldn't grant write leases
1703 * on files open for execution.)
1704 */
1705 if (filp->f_mode & FMODE_WRITE)
1706 self_wcount = 1;
1707 else if (filp->f_mode & FMODE_READ)
1708 self_rcount = 1;
1709
1710 if (atomic_read(&inode->i_writecount) != self_wcount ||
1711 atomic_read(&inode->i_readcount) != self_rcount)
1712 return -EAGAIN;
1713
1714 return 0;
1715 }
1716
1717 static int
1718 generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **priv)
1719 {
1720 struct file_lock *fl, *my_fl = NULL, *lease;
1721 struct inode *inode = file_inode(filp);
1722 struct file_lock_context *ctx;
1723 bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1724 int error;
1725 LIST_HEAD(dispose);
1726
1727 lease = *flp;
1728 trace_generic_add_lease(inode, lease);
1729
1730 /* Note that arg is never F_UNLCK here */
1731 ctx = locks_get_lock_context(inode, arg);
1732 if (!ctx)
1733 return -ENOMEM;
1734
1735 /*
1736 * In the delegation case we need mutual exclusion with
1737 * a number of operations that take the i_mutex. We trylock
1738 * because delegations are an optional optimization, and if
1739 * there's some chance of a conflict--we'd rather not
1740 * bother, maybe that's a sign this just isn't a good file to
1741 * hand out a delegation on.
1742 */
1743 if (is_deleg && !inode_trylock(inode))
1744 return -EAGAIN;
1745
1746 percpu_down_read(&file_rwsem);
1747 spin_lock(&ctx->flc_lock);
1748 time_out_leases(inode, &dispose);
1749 error = check_conflicting_open(filp, arg, lease->fl_flags);
1750 if (error)
1751 goto out;
1752
1753 /*
1754 * At this point, we know that if there is an exclusive
1755 * lease on this file, then we hold it on this filp
1756 * (otherwise our open of this file would have blocked).
1757 * And if we are trying to acquire an exclusive lease,
1758 * then the file is not open by anyone (including us)
1759 * except for this filp.
1760 */
1761 error = -EAGAIN;
1762 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1763 if (fl->fl_file == filp &&
1764 fl->fl_owner == lease->fl_owner) {
1765 my_fl = fl;
1766 continue;
1767 }
1768
1769 /*
1770 * No exclusive leases if someone else has a lease on
1771 * this file:
1772 */
1773 if (arg == F_WRLCK)
1774 goto out;
1775 /*
1776 * Modifying our existing lease is OK, but no getting a
1777 * new lease if someone else is opening for write:
1778 */
1779 if (fl->fl_flags & FL_UNLOCK_PENDING)
1780 goto out;
1781 }
1782
1783 if (my_fl != NULL) {
1784 lease = my_fl;
1785 error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1786 if (error)
1787 goto out;
1788 goto out_setup;
1789 }
1790
1791 error = -EINVAL;
1792 if (!leases_enable)
1793 goto out;
1794
1795 locks_insert_lock_ctx(lease, &ctx->flc_lease);
1796 /*
1797 * The check in break_lease() is lockless. It's possible for another
1798 * open to race in after we did the earlier check for a conflicting
1799 * open but before the lease was inserted. Check again for a
1800 * conflicting open and cancel the lease if there is one.
1801 *
1802 * We also add a barrier here to ensure that the insertion of the lock
1803 * precedes these checks.
1804 */
1805 smp_mb();
1806 error = check_conflicting_open(filp, arg, lease->fl_flags);
1807 if (error) {
1808 locks_unlink_lock_ctx(lease);
1809 goto out;
1810 }
1811
1812 out_setup:
1813 if (lease->fl_lmops->lm_setup)
1814 lease->fl_lmops->lm_setup(lease, priv);
1815 out:
1816 spin_unlock(&ctx->flc_lock);
1817 percpu_up_read(&file_rwsem);
1818 locks_dispose_list(&dispose);
1819 if (is_deleg)
1820 inode_unlock(inode);
1821 if (!error && !my_fl)
1822 *flp = NULL;
1823 return error;
1824 }
1825
1826 static int generic_delete_lease(struct file *filp, void *owner)
1827 {
1828 int error = -EAGAIN;
1829 struct file_lock *fl, *victim = NULL;
1830 struct inode *inode = file_inode(filp);
1831 struct file_lock_context *ctx;
1832 LIST_HEAD(dispose);
1833
1834 ctx = locks_inode_context(inode);
1835 if (!ctx) {
1836 trace_generic_delete_lease(inode, NULL);
1837 return error;
1838 }
1839
1840 percpu_down_read(&file_rwsem);
1841 spin_lock(&ctx->flc_lock);
1842 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1843 if (fl->fl_file == filp &&
1844 fl->fl_owner == owner) {
1845 victim = fl;
1846 break;
1847 }
1848 }
1849 trace_generic_delete_lease(inode, victim);
1850 if (victim)
1851 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1852 spin_unlock(&ctx->flc_lock);
1853 percpu_up_read(&file_rwsem);
1854 locks_dispose_list(&dispose);
1855 return error;
1856 }
1857
1858 /**
1859 * generic_setlease - sets a lease on an open file
1860 * @filp: file pointer
1861 * @arg: type of lease to obtain
1862 * @flp: input - file_lock to use, output - file_lock inserted
1863 * @priv: private data for lm_setup (may be NULL if lm_setup
1864 * doesn't require it)
1865 *
1866 * The (input) flp->fl_lmops->lm_break function is required
1867 * by break_lease().
1868 */
1869 int generic_setlease(struct file *filp, int arg, struct file_lock **flp,
1870 void **priv)
1871 {
1872 struct inode *inode = file_inode(filp);
1873 vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_idmap(filp), inode);
1874 int error;
1875
1876 if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE))
1877 return -EACCES;
1878 if (!S_ISREG(inode->i_mode))
1879 return -EINVAL;
1880 error = security_file_lock(filp, arg);
1881 if (error)
1882 return error;
1883
1884 switch (arg) {
1885 case F_UNLCK:
1886 return generic_delete_lease(filp, *priv);
1887 case F_RDLCK:
1888 case F_WRLCK:
1889 if (!(*flp)->fl_lmops->lm_break) {
1890 WARN_ON_ONCE(1);
1891 return -ENOLCK;
1892 }
1893
1894 return generic_add_lease(filp, arg, flp, priv);
1895 default:
1896 return -EINVAL;
1897 }
1898 }
1899 EXPORT_SYMBOL(generic_setlease);
1900
1901 /*
1902 * Kernel subsystems can register to be notified on any attempt to set
1903 * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
1904 * to close files that it may have cached when there is an attempt to set a
1905 * conflicting lease.
1906 */
1907 static struct srcu_notifier_head lease_notifier_chain;
1908
1909 static inline void
1910 lease_notifier_chain_init(void)
1911 {
1912 srcu_init_notifier_head(&lease_notifier_chain);
1913 }
1914
1915 static inline void
1916 setlease_notifier(int arg, struct file_lock *lease)
1917 {
1918 if (arg != F_UNLCK)
1919 srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
1920 }
1921
1922 int lease_register_notifier(struct notifier_block *nb)
1923 {
1924 return srcu_notifier_chain_register(&lease_notifier_chain, nb);
1925 }
1926 EXPORT_SYMBOL_GPL(lease_register_notifier);
1927
1928 void lease_unregister_notifier(struct notifier_block *nb)
1929 {
1930 srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
1931 }
1932 EXPORT_SYMBOL_GPL(lease_unregister_notifier);
1933
1934 /**
1935 * vfs_setlease - sets a lease on an open file
1936 * @filp: file pointer
1937 * @arg: type of lease to obtain
1938 * @lease: file_lock to use when adding a lease
1939 * @priv: private info for lm_setup when adding a lease (may be
1940 * NULL if lm_setup doesn't require it)
1941 *
1942 * Call this to establish a lease on the file. The "lease" argument is not
1943 * used for F_UNLCK requests and may be NULL. For commands that set or alter
1944 * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
1945 * set; if not, this function will return -ENOLCK (and generate a scary-looking
1946 * stack trace).
1947 *
1948 * The "priv" pointer is passed directly to the lm_setup function as-is. It
1949 * may be NULL if the lm_setup operation doesn't require it.
1950 */
1951 int
1952 vfs_setlease(struct file *filp, int arg, struct file_lock **lease, void **priv)
1953 {
1954 if (lease)
1955 setlease_notifier(arg, *lease);
1956 if (filp->f_op->setlease)
1957 return filp->f_op->setlease(filp, arg, lease, priv);
1958 else
1959 return generic_setlease(filp, arg, lease, priv);
1960 }
1961 EXPORT_SYMBOL_GPL(vfs_setlease);
1962
1963 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, int arg)
1964 {
1965 struct file_lock *fl;
1966 struct fasync_struct *new;
1967 int error;
1968
1969 fl = lease_alloc(filp, arg);
1970 if (IS_ERR(fl))
1971 return PTR_ERR(fl);
1972
1973 new = fasync_alloc();
1974 if (!new) {
1975 locks_free_lock(fl);
1976 return -ENOMEM;
1977 }
1978 new->fa_fd = fd;
1979
1980 error = vfs_setlease(filp, arg, &fl, (void **)&new);
1981 if (fl)
1982 locks_free_lock(fl);
1983 if (new)
1984 fasync_free(new);
1985 return error;
1986 }
1987
1988 /**
1989 * fcntl_setlease - sets a lease on an open file
1990 * @fd: open file descriptor
1991 * @filp: file pointer
1992 * @arg: type of lease to obtain
1993 *
1994 * Call this fcntl to establish a lease on the file.
1995 * Note that you also need to call %F_SETSIG to
1996 * receive a signal when the lease is broken.
1997 */
1998 int fcntl_setlease(unsigned int fd, struct file *filp, int arg)
1999 {
2000 if (arg == F_UNLCK)
2001 return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
2002 return do_fcntl_add_lease(fd, filp, arg);
2003 }
2004
2005 /**
2006 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
2007 * @inode: inode of the file to apply to
2008 * @fl: The lock to be applied
2009 *
2010 * Apply a FLOCK style lock request to an inode.
2011 */
2012 static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2013 {
2014 int error;
2015 might_sleep();
2016 for (;;) {
2017 error = flock_lock_inode(inode, fl);
2018 if (error != FILE_LOCK_DEFERRED)
2019 break;
2020 error = wait_event_interruptible(fl->fl_wait,
2021 list_empty(&fl->fl_blocked_member));
2022 if (error)
2023 break;
2024 }
2025 locks_delete_block(fl);
2026 return error;
2027 }
2028
2029 /**
2030 * locks_lock_inode_wait - Apply a lock to an inode
2031 * @inode: inode of the file to apply to
2032 * @fl: The lock to be applied
2033 *
2034 * Apply a POSIX or FLOCK style lock request to an inode.
2035 */
2036 int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2037 {
2038 int res = 0;
2039 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2040 case FL_POSIX:
2041 res = posix_lock_inode_wait(inode, fl);
2042 break;
2043 case FL_FLOCK:
2044 res = flock_lock_inode_wait(inode, fl);
2045 break;
2046 default:
2047 BUG();
2048 }
2049 return res;
2050 }
2051 EXPORT_SYMBOL(locks_lock_inode_wait);
2052
2053 /**
2054 * sys_flock: - flock() system call.
2055 * @fd: the file descriptor to lock.
2056 * @cmd: the type of lock to apply.
2057 *
2058 * Apply a %FL_FLOCK style lock to an open file descriptor.
2059 * The @cmd can be one of:
2060 *
2061 * - %LOCK_SH -- a shared lock.
2062 * - %LOCK_EX -- an exclusive lock.
2063 * - %LOCK_UN -- remove an existing lock.
2064 * - %LOCK_MAND -- a 'mandatory' flock. (DEPRECATED)
2065 *
2066 * %LOCK_MAND support has been removed from the kernel.
2067 */
2068 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
2069 {
2070 int can_sleep, error, type;
2071 struct file_lock fl;
2072 struct fd f;
2073
2074 /*
2075 * LOCK_MAND locks were broken for a long time in that they never
2076 * conflicted with one another and didn't prevent any sort of open,
2077 * read or write activity.
2078 *
2079 * Just ignore these requests now, to preserve legacy behavior, but
2080 * throw a warning to let people know that they don't actually work.
2081 */
2082 if (cmd & LOCK_MAND) {
2083 pr_warn_once("%s(%d): Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n", current->comm, current->pid);
2084 return 0;
2085 }
2086
2087 type = flock_translate_cmd(cmd & ~LOCK_NB);
2088 if (type < 0)
2089 return type;
2090
2091 error = -EBADF;
2092 f = fdget(fd);
2093 if (!f.file)
2094 return error;
2095
2096 if (type != F_UNLCK && !(f.file->f_mode & (FMODE_READ | FMODE_WRITE)))
2097 goto out_putf;
2098
2099 flock_make_lock(f.file, &fl, type);
2100
2101 error = security_file_lock(f.file, fl.fl_type);
2102 if (error)
2103 goto out_putf;
2104
2105 can_sleep = !(cmd & LOCK_NB);
2106 if (can_sleep)
2107 fl.fl_flags |= FL_SLEEP;
2108
2109 if (f.file->f_op->flock)
2110 error = f.file->f_op->flock(f.file,
2111 (can_sleep) ? F_SETLKW : F_SETLK,
2112 &fl);
2113 else
2114 error = locks_lock_file_wait(f.file, &fl);
2115
2116 locks_release_private(&fl);
2117 out_putf:
2118 fdput(f);
2119
2120 return error;
2121 }
2122
2123 /**
2124 * vfs_test_lock - test file byte range lock
2125 * @filp: The file to test lock for
2126 * @fl: The lock to test; also used to hold result
2127 *
2128 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
2129 * setting conf->fl_type to something other than F_UNLCK.
2130 */
2131 int vfs_test_lock(struct file *filp, struct file_lock *fl)
2132 {
2133 WARN_ON_ONCE(filp != fl->fl_file);
2134 if (filp->f_op->lock)
2135 return filp->f_op->lock(filp, F_GETLK, fl);
2136 posix_test_lock(filp, fl);
2137 return 0;
2138 }
2139 EXPORT_SYMBOL_GPL(vfs_test_lock);
2140
2141 /**
2142 * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
2143 * @fl: The file_lock who's fl_pid should be translated
2144 * @ns: The namespace into which the pid should be translated
2145 *
2146 * Used to translate a fl_pid into a namespace virtual pid number
2147 */
2148 static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2149 {
2150 pid_t vnr;
2151 struct pid *pid;
2152
2153 if (IS_OFDLCK(fl))
2154 return -1;
2155 if (IS_REMOTELCK(fl))
2156 return fl->fl_pid;
2157 /*
2158 * If the flock owner process is dead and its pid has been already
2159 * freed, the translation below won't work, but we still want to show
2160 * flock owner pid number in init pidns.
2161 */
2162 if (ns == &init_pid_ns)
2163 return (pid_t)fl->fl_pid;
2164
2165 rcu_read_lock();
2166 pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
2167 vnr = pid_nr_ns(pid, ns);
2168 rcu_read_unlock();
2169 return vnr;
2170 }
2171
2172 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2173 {
2174 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2175 #if BITS_PER_LONG == 32
2176 /*
2177 * Make sure we can represent the posix lock via
2178 * legacy 32bit flock.
2179 */
2180 if (fl->fl_start > OFFT_OFFSET_MAX)
2181 return -EOVERFLOW;
2182 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2183 return -EOVERFLOW;
2184 #endif
2185 flock->l_start = fl->fl_start;
2186 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2187 fl->fl_end - fl->fl_start + 1;
2188 flock->l_whence = 0;
2189 flock->l_type = fl->fl_type;
2190 return 0;
2191 }
2192
2193 #if BITS_PER_LONG == 32
2194 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2195 {
2196 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2197 flock->l_start = fl->fl_start;
2198 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2199 fl->fl_end - fl->fl_start + 1;
2200 flock->l_whence = 0;
2201 flock->l_type = fl->fl_type;
2202 }
2203 #endif
2204
2205 /* Report the first existing lock that would conflict with l.
2206 * This implements the F_GETLK command of fcntl().
2207 */
2208 int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
2209 {
2210 struct file_lock *fl;
2211 int error;
2212
2213 fl = locks_alloc_lock();
2214 if (fl == NULL)
2215 return -ENOMEM;
2216 error = -EINVAL;
2217 if (cmd != F_OFD_GETLK && flock->l_type != F_RDLCK
2218 && flock->l_type != F_WRLCK)
2219 goto out;
2220
2221 error = flock_to_posix_lock(filp, fl, flock);
2222 if (error)
2223 goto out;
2224
2225 if (cmd == F_OFD_GETLK) {
2226 error = -EINVAL;
2227 if (flock->l_pid != 0)
2228 goto out;
2229
2230 fl->fl_flags |= FL_OFDLCK;
2231 fl->fl_owner = filp;
2232 }
2233
2234 error = vfs_test_lock(filp, fl);
2235 if (error)
2236 goto out;
2237
2238 flock->l_type = fl->fl_type;
2239 if (fl->fl_type != F_UNLCK) {
2240 error = posix_lock_to_flock(flock, fl);
2241 if (error)
2242 goto out;
2243 }
2244 out:
2245 locks_free_lock(fl);
2246 return error;
2247 }
2248
2249 /**
2250 * vfs_lock_file - file byte range lock
2251 * @filp: The file to apply the lock to
2252 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2253 * @fl: The lock to be applied
2254 * @conf: Place to return a copy of the conflicting lock, if found.
2255 *
2256 * A caller that doesn't care about the conflicting lock may pass NULL
2257 * as the final argument.
2258 *
2259 * If the filesystem defines a private ->lock() method, then @conf will
2260 * be left unchanged; so a caller that cares should initialize it to
2261 * some acceptable default.
2262 *
2263 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2264 * locks, the ->lock() interface may return asynchronously, before the lock has
2265 * been granted or denied by the underlying filesystem, if (and only if)
2266 * lm_grant is set. Additionally EXPORT_OP_ASYNC_LOCK in export_operations
2267 * flags need to be set.
2268 *
2269 * Callers expecting ->lock() to return asynchronously will only use F_SETLK,
2270 * not F_SETLKW; they will set FL_SLEEP if (and only if) the request is for a
2271 * blocking lock. When ->lock() does return asynchronously, it must return
2272 * FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock request completes.
2273 * If the request is for non-blocking lock the file system should return
2274 * FILE_LOCK_DEFERRED then try to get the lock and call the callback routine
2275 * with the result. If the request timed out the callback routine will return a
2276 * nonzero return code and the file system should release the lock. The file
2277 * system is also responsible to keep a corresponding posix lock when it
2278 * grants a lock so the VFS can find out which locks are locally held and do
2279 * the correct lock cleanup when required.
2280 * The underlying filesystem must not drop the kernel lock or call
2281 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2282 * return code.
2283 */
2284 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2285 {
2286 WARN_ON_ONCE(filp != fl->fl_file);
2287 if (filp->f_op->lock)
2288 return filp->f_op->lock(filp, cmd, fl);
2289 else
2290 return posix_lock_file(filp, fl, conf);
2291 }
2292 EXPORT_SYMBOL_GPL(vfs_lock_file);
2293
2294 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2295 struct file_lock *fl)
2296 {
2297 int error;
2298
2299 error = security_file_lock(filp, fl->fl_type);
2300 if (error)
2301 return error;
2302
2303 for (;;) {
2304 error = vfs_lock_file(filp, cmd, fl, NULL);
2305 if (error != FILE_LOCK_DEFERRED)
2306 break;
2307 error = wait_event_interruptible(fl->fl_wait,
2308 list_empty(&fl->fl_blocked_member));
2309 if (error)
2310 break;
2311 }
2312 locks_delete_block(fl);
2313
2314 return error;
2315 }
2316
2317 /* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2318 static int
2319 check_fmode_for_setlk(struct file_lock *fl)
2320 {
2321 switch (fl->fl_type) {
2322 case F_RDLCK:
2323 if (!(fl->fl_file->f_mode & FMODE_READ))
2324 return -EBADF;
2325 break;
2326 case F_WRLCK:
2327 if (!(fl->fl_file->f_mode & FMODE_WRITE))
2328 return -EBADF;
2329 }
2330 return 0;
2331 }
2332
2333 /* Apply the lock described by l to an open file descriptor.
2334 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2335 */
2336 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2337 struct flock *flock)
2338 {
2339 struct file_lock *file_lock = locks_alloc_lock();
2340 struct inode *inode = file_inode(filp);
2341 struct file *f;
2342 int error;
2343
2344 if (file_lock == NULL)
2345 return -ENOLCK;
2346
2347 error = flock_to_posix_lock(filp, file_lock, flock);
2348 if (error)
2349 goto out;
2350
2351 error = check_fmode_for_setlk(file_lock);
2352 if (error)
2353 goto out;
2354
2355 /*
2356 * If the cmd is requesting file-private locks, then set the
2357 * FL_OFDLCK flag and override the owner.
2358 */
2359 switch (cmd) {
2360 case F_OFD_SETLK:
2361 error = -EINVAL;
2362 if (flock->l_pid != 0)
2363 goto out;
2364
2365 cmd = F_SETLK;
2366 file_lock->fl_flags |= FL_OFDLCK;
2367 file_lock->fl_owner = filp;
2368 break;
2369 case F_OFD_SETLKW:
2370 error = -EINVAL;
2371 if (flock->l_pid != 0)
2372 goto out;
2373
2374 cmd = F_SETLKW;
2375 file_lock->fl_flags |= FL_OFDLCK;
2376 file_lock->fl_owner = filp;
2377 fallthrough;
2378 case F_SETLKW:
2379 file_lock->fl_flags |= FL_SLEEP;
2380 }
2381
2382 error = do_lock_file_wait(filp, cmd, file_lock);
2383
2384 /*
2385 * Attempt to detect a close/fcntl race and recover by releasing the
2386 * lock that was just acquired. There is no need to do that when we're
2387 * unlocking though, or for OFD locks.
2388 */
2389 if (!error && file_lock->fl_type != F_UNLCK &&
2390 !(file_lock->fl_flags & FL_OFDLCK)) {
2391 struct files_struct *files = current->files;
2392 /*
2393 * We need that spin_lock here - it prevents reordering between
2394 * update of i_flctx->flc_posix and check for it done in
2395 * close(). rcu_read_lock() wouldn't do.
2396 */
2397 spin_lock(&files->file_lock);
2398 f = files_lookup_fd_locked(files, fd);
2399 spin_unlock(&files->file_lock);
2400 if (f != filp) {
2401 file_lock->fl_type = F_UNLCK;
2402 error = do_lock_file_wait(filp, cmd, file_lock);
2403 WARN_ON_ONCE(error);
2404 error = -EBADF;
2405 }
2406 }
2407 out:
2408 trace_fcntl_setlk(inode, file_lock, error);
2409 locks_free_lock(file_lock);
2410 return error;
2411 }
2412
2413 #if BITS_PER_LONG == 32
2414 /* Report the first existing lock that would conflict with l.
2415 * This implements the F_GETLK command of fcntl().
2416 */
2417 int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
2418 {
2419 struct file_lock *fl;
2420 int error;
2421
2422 fl = locks_alloc_lock();
2423 if (fl == NULL)
2424 return -ENOMEM;
2425
2426 error = -EINVAL;
2427 if (cmd != F_OFD_GETLK && flock->l_type != F_RDLCK
2428 && flock->l_type != F_WRLCK)
2429 goto out;
2430
2431 error = flock64_to_posix_lock(filp, fl, flock);
2432 if (error)
2433 goto out;
2434
2435 if (cmd == F_OFD_GETLK) {
2436 error = -EINVAL;
2437 if (flock->l_pid != 0)
2438 goto out;
2439
2440 fl->fl_flags |= FL_OFDLCK;
2441 fl->fl_owner = filp;
2442 }
2443
2444 error = vfs_test_lock(filp, fl);
2445 if (error)
2446 goto out;
2447
2448 flock->l_type = fl->fl_type;
2449 if (fl->fl_type != F_UNLCK)
2450 posix_lock_to_flock64(flock, fl);
2451
2452 out:
2453 locks_free_lock(fl);
2454 return error;
2455 }
2456
2457 /* Apply the lock described by l to an open file descriptor.
2458 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2459 */
2460 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2461 struct flock64 *flock)
2462 {
2463 struct file_lock *file_lock = locks_alloc_lock();
2464 struct file *f;
2465 int error;
2466
2467 if (file_lock == NULL)
2468 return -ENOLCK;
2469
2470 error = flock64_to_posix_lock(filp, file_lock, flock);
2471 if (error)
2472 goto out;
2473
2474 error = check_fmode_for_setlk(file_lock);
2475 if (error)
2476 goto out;
2477
2478 /*
2479 * If the cmd is requesting file-private locks, then set the
2480 * FL_OFDLCK flag and override the owner.
2481 */
2482 switch (cmd) {
2483 case F_OFD_SETLK:
2484 error = -EINVAL;
2485 if (flock->l_pid != 0)
2486 goto out;
2487
2488 cmd = F_SETLK64;
2489 file_lock->fl_flags |= FL_OFDLCK;
2490 file_lock->fl_owner = filp;
2491 break;
2492 case F_OFD_SETLKW:
2493 error = -EINVAL;
2494 if (flock->l_pid != 0)
2495 goto out;
2496
2497 cmd = F_SETLKW64;
2498 file_lock->fl_flags |= FL_OFDLCK;
2499 file_lock->fl_owner = filp;
2500 fallthrough;
2501 case F_SETLKW64:
2502 file_lock->fl_flags |= FL_SLEEP;
2503 }
2504
2505 error = do_lock_file_wait(filp, cmd, file_lock);
2506
2507 /*
2508 * Attempt to detect a close/fcntl race and recover by releasing the
2509 * lock that was just acquired. There is no need to do that when we're
2510 * unlocking though, or for OFD locks.
2511 */
2512 if (!error && file_lock->fl_type != F_UNLCK &&
2513 !(file_lock->fl_flags & FL_OFDLCK)) {
2514 struct files_struct *files = current->files;
2515 /*
2516 * We need that spin_lock here - it prevents reordering between
2517 * update of i_flctx->flc_posix and check for it done in
2518 * close(). rcu_read_lock() wouldn't do.
2519 */
2520 spin_lock(&files->file_lock);
2521 f = files_lookup_fd_locked(files, fd);
2522 spin_unlock(&files->file_lock);
2523 if (f != filp) {
2524 file_lock->fl_type = F_UNLCK;
2525 error = do_lock_file_wait(filp, cmd, file_lock);
2526 WARN_ON_ONCE(error);
2527 error = -EBADF;
2528 }
2529 }
2530 out:
2531 locks_free_lock(file_lock);
2532 return error;
2533 }
2534 #endif /* BITS_PER_LONG == 32 */
2535
2536 /*
2537 * This function is called when the file is being removed
2538 * from the task's fd array. POSIX locks belonging to this task
2539 * are deleted at this time.
2540 */
2541 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2542 {
2543 int error;
2544 struct inode *inode = file_inode(filp);
2545 struct file_lock lock;
2546 struct file_lock_context *ctx;
2547
2548 /*
2549 * If there are no locks held on this file, we don't need to call
2550 * posix_lock_file(). Another process could be setting a lock on this
2551 * file at the same time, but we wouldn't remove that lock anyway.
2552 */
2553 ctx = locks_inode_context(inode);
2554 if (!ctx || list_empty(&ctx->flc_posix))
2555 return;
2556
2557 locks_init_lock(&lock);
2558 lock.fl_type = F_UNLCK;
2559 lock.fl_flags = FL_POSIX | FL_CLOSE;
2560 lock.fl_start = 0;
2561 lock.fl_end = OFFSET_MAX;
2562 lock.fl_owner = owner;
2563 lock.fl_pid = current->tgid;
2564 lock.fl_file = filp;
2565 lock.fl_ops = NULL;
2566 lock.fl_lmops = NULL;
2567
2568 error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
2569
2570 if (lock.fl_ops && lock.fl_ops->fl_release_private)
2571 lock.fl_ops->fl_release_private(&lock);
2572 trace_locks_remove_posix(inode, &lock, error);
2573 }
2574 EXPORT_SYMBOL(locks_remove_posix);
2575
2576 /* The i_flctx must be valid when calling into here */
2577 static void
2578 locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
2579 {
2580 struct file_lock fl;
2581 struct inode *inode = file_inode(filp);
2582
2583 if (list_empty(&flctx->flc_flock))
2584 return;
2585
2586 flock_make_lock(filp, &fl, F_UNLCK);
2587 fl.fl_flags |= FL_CLOSE;
2588
2589 if (filp->f_op->flock)
2590 filp->f_op->flock(filp, F_SETLKW, &fl);
2591 else
2592 flock_lock_inode(inode, &fl);
2593
2594 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2595 fl.fl_ops->fl_release_private(&fl);
2596 }
2597
2598 /* The i_flctx must be valid when calling into here */
2599 static void
2600 locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
2601 {
2602 struct file_lock *fl, *tmp;
2603 LIST_HEAD(dispose);
2604
2605 if (list_empty(&ctx->flc_lease))
2606 return;
2607
2608 percpu_down_read(&file_rwsem);
2609 spin_lock(&ctx->flc_lock);
2610 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2611 if (filp == fl->fl_file)
2612 lease_modify(fl, F_UNLCK, &dispose);
2613 spin_unlock(&ctx->flc_lock);
2614 percpu_up_read(&file_rwsem);
2615
2616 locks_dispose_list(&dispose);
2617 }
2618
2619 /*
2620 * This function is called on the last close of an open file.
2621 */
2622 void locks_remove_file(struct file *filp)
2623 {
2624 struct file_lock_context *ctx;
2625
2626 ctx = locks_inode_context(file_inode(filp));
2627 if (!ctx)
2628 return;
2629
2630 /* remove any OFD locks */
2631 locks_remove_posix(filp, filp);
2632
2633 /* remove flock locks */
2634 locks_remove_flock(filp, ctx);
2635
2636 /* remove any leases */
2637 locks_remove_lease(filp, ctx);
2638
2639 spin_lock(&ctx->flc_lock);
2640 locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
2641 locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
2642 locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
2643 spin_unlock(&ctx->flc_lock);
2644 }
2645
2646 /**
2647 * vfs_cancel_lock - file byte range unblock lock
2648 * @filp: The file to apply the unblock to
2649 * @fl: The lock to be unblocked
2650 *
2651 * Used by lock managers to cancel blocked requests
2652 */
2653 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2654 {
2655 WARN_ON_ONCE(filp != fl->fl_file);
2656 if (filp->f_op->lock)
2657 return filp->f_op->lock(filp, F_CANCELLK, fl);
2658 return 0;
2659 }
2660 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2661
2662 /**
2663 * vfs_inode_has_locks - are any file locks held on @inode?
2664 * @inode: inode to check for locks
2665 *
2666 * Return true if there are any FL_POSIX or FL_FLOCK locks currently
2667 * set on @inode.
2668 */
2669 bool vfs_inode_has_locks(struct inode *inode)
2670 {
2671 struct file_lock_context *ctx;
2672 bool ret;
2673
2674 ctx = locks_inode_context(inode);
2675 if (!ctx)
2676 return false;
2677
2678 spin_lock(&ctx->flc_lock);
2679 ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock);
2680 spin_unlock(&ctx->flc_lock);
2681 return ret;
2682 }
2683 EXPORT_SYMBOL_GPL(vfs_inode_has_locks);
2684
2685 #ifdef CONFIG_PROC_FS
2686 #include <linux/proc_fs.h>
2687 #include <linux/seq_file.h>
2688
2689 struct locks_iterator {
2690 int li_cpu;
2691 loff_t li_pos;
2692 };
2693
2694 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2695 loff_t id, char *pfx, int repeat)
2696 {
2697 struct inode *inode = NULL;
2698 unsigned int fl_pid;
2699 struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2700 int type;
2701
2702 fl_pid = locks_translate_pid(fl, proc_pidns);
2703 /*
2704 * If lock owner is dead (and pid is freed) or not visible in current
2705 * pidns, zero is shown as a pid value. Check lock info from
2706 * init_pid_ns to get saved lock pid value.
2707 */
2708
2709 if (fl->fl_file != NULL)
2710 inode = file_inode(fl->fl_file);
2711
2712 seq_printf(f, "%lld: ", id);
2713
2714 if (repeat)
2715 seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);
2716
2717 if (IS_POSIX(fl)) {
2718 if (fl->fl_flags & FL_ACCESS)
2719 seq_puts(f, "ACCESS");
2720 else if (IS_OFDLCK(fl))
2721 seq_puts(f, "OFDLCK");
2722 else
2723 seq_puts(f, "POSIX ");
2724
2725 seq_printf(f, " %s ",
2726 (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
2727 } else if (IS_FLOCK(fl)) {
2728 seq_puts(f, "FLOCK ADVISORY ");
2729 } else if (IS_LEASE(fl)) {
2730 if (fl->fl_flags & FL_DELEG)
2731 seq_puts(f, "DELEG ");
2732 else
2733 seq_puts(f, "LEASE ");
2734
2735 if (lease_breaking(fl))
2736 seq_puts(f, "BREAKING ");
2737 else if (fl->fl_file)
2738 seq_puts(f, "ACTIVE ");
2739 else
2740 seq_puts(f, "BREAKER ");
2741 } else {
2742 seq_puts(f, "UNKNOWN UNKNOWN ");
2743 }
2744 type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
2745
2746 seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
2747 (type == F_RDLCK) ? "READ" : "UNLCK");
2748 if (inode) {
2749 /* userspace relies on this representation of dev_t */
2750 seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
2751 MAJOR(inode->i_sb->s_dev),
2752 MINOR(inode->i_sb->s_dev), inode->i_ino);
2753 } else {
2754 seq_printf(f, "%d <none>:0 ", fl_pid);
2755 }
2756 if (IS_POSIX(fl)) {
2757 if (fl->fl_end == OFFSET_MAX)
2758 seq_printf(f, "%Ld EOF\n", fl->fl_start);
2759 else
2760 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2761 } else {
2762 seq_puts(f, "0 EOF\n");
2763 }
2764 }
2765
2766 static struct file_lock *get_next_blocked_member(struct file_lock *node)
2767 {
2768 struct file_lock *tmp;
2769
2770 /* NULL node or root node */
2771 if (node == NULL || node->fl_blocker == NULL)
2772 return NULL;
2773
2774 /* Next member in the linked list could be itself */
2775 tmp = list_next_entry(node, fl_blocked_member);
2776 if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member)
2777 || tmp == node) {
2778 return NULL;
2779 }
2780
2781 return tmp;
2782 }
2783
2784 static int locks_show(struct seq_file *f, void *v)
2785 {
2786 struct locks_iterator *iter = f->private;
2787 struct file_lock *cur, *tmp;
2788 struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2789 int level = 0;
2790
2791 cur = hlist_entry(v, struct file_lock, fl_link);
2792
2793 if (locks_translate_pid(cur, proc_pidns) == 0)
2794 return 0;
2795
2796 /* View this crossed linked list as a binary tree, the first member of fl_blocked_requests
2797 * is the left child of current node, the next silibing in fl_blocked_member is the
2798 * right child, we can alse get the parent of current node from fl_blocker, so this
2799 * question becomes traversal of a binary tree
2800 */
2801 while (cur != NULL) {
2802 if (level)
2803 lock_get_status(f, cur, iter->li_pos, "-> ", level);
2804 else
2805 lock_get_status(f, cur, iter->li_pos, "", level);
2806
2807 if (!list_empty(&cur->fl_blocked_requests)) {
2808 /* Turn left */
2809 cur = list_first_entry_or_null(&cur->fl_blocked_requests,
2810 struct file_lock, fl_blocked_member);
2811 level++;
2812 } else {
2813 /* Turn right */
2814 tmp = get_next_blocked_member(cur);
2815 /* Fall back to parent node */
2816 while (tmp == NULL && cur->fl_blocker != NULL) {
2817 cur = cur->fl_blocker;
2818 level--;
2819 tmp = get_next_blocked_member(cur);
2820 }
2821 cur = tmp;
2822 }
2823 }
2824
2825 return 0;
2826 }
2827
2828 static void __show_fd_locks(struct seq_file *f,
2829 struct list_head *head, int *id,
2830 struct file *filp, struct files_struct *files)
2831 {
2832 struct file_lock *fl;
2833
2834 list_for_each_entry(fl, head, fl_list) {
2835
2836 if (filp != fl->fl_file)
2837 continue;
2838 if (fl->fl_owner != files &&
2839 fl->fl_owner != filp)
2840 continue;
2841
2842 (*id)++;
2843 seq_puts(f, "lock:\t");
2844 lock_get_status(f, fl, *id, "", 0);
2845 }
2846 }
2847
2848 void show_fd_locks(struct seq_file *f,
2849 struct file *filp, struct files_struct *files)
2850 {
2851 struct inode *inode = file_inode(filp);
2852 struct file_lock_context *ctx;
2853 int id = 0;
2854
2855 ctx = locks_inode_context(inode);
2856 if (!ctx)
2857 return;
2858
2859 spin_lock(&ctx->flc_lock);
2860 __show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
2861 __show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
2862 __show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
2863 spin_unlock(&ctx->flc_lock);
2864 }
2865
2866 static void *locks_start(struct seq_file *f, loff_t *pos)
2867 __acquires(&blocked_lock_lock)
2868 {
2869 struct locks_iterator *iter = f->private;
2870
2871 iter->li_pos = *pos + 1;
2872 percpu_down_write(&file_rwsem);
2873 spin_lock(&blocked_lock_lock);
2874 return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
2875 }
2876
2877 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2878 {
2879 struct locks_iterator *iter = f->private;
2880
2881 ++iter->li_pos;
2882 return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
2883 }
2884
2885 static void locks_stop(struct seq_file *f, void *v)
2886 __releases(&blocked_lock_lock)
2887 {
2888 spin_unlock(&blocked_lock_lock);
2889 percpu_up_write(&file_rwsem);
2890 }
2891
2892 static const struct seq_operations locks_seq_operations = {
2893 .start = locks_start,
2894 .next = locks_next,
2895 .stop = locks_stop,
2896 .show = locks_show,
2897 };
2898
2899 static int __init proc_locks_init(void)
2900 {
2901 proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
2902 sizeof(struct locks_iterator), NULL);
2903 return 0;
2904 }
2905 fs_initcall(proc_locks_init);
2906 #endif
2907
2908 static int __init filelock_init(void)
2909 {
2910 int i;
2911
2912 flctx_cache = kmem_cache_create("file_lock_ctx",
2913 sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2914
2915 filelock_cache = kmem_cache_create("file_lock_cache",
2916 sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2917
2918 for_each_possible_cpu(i) {
2919 struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
2920
2921 spin_lock_init(&fll->lock);
2922 INIT_HLIST_HEAD(&fll->hlist);
2923 }
2924
2925 lease_notifier_chain_init();
2926 return 0;
2927 }
2928 core_initcall(filelock_init);