// SPDX-License-Identifier: GPL-2.0-only
/*
 * We implement four types of file locks: BSD locks, posix locks, open
 * file description locks, and leases. For details about BSD locks,
 * see the flock(2) man page; for details about the other three, see
 * fcntl(2).
 *
 * Locking conflicts and dependencies:
 * If multiple threads attempt to lock the same byte (or flock the same file)
 * only one can be granted the lock, and the others must wait their turn.
 * The first lock has been "applied" or "granted"; the others are "waiting"
 * and are "blocked" by the "applied" lock.
 *
 * Waiting and applied locks are all kept in trees whose properties are:
 *
 *	- the root of a tree may be an applied or waiting lock.
 *	- every other node in the tree is a waiting lock that
 *	  conflicts with every ancestor of that node.
 *
 * Every such tree begins life as a waiting singleton which obviously
 * satisfies the above properties.
 *
 * The only ways we modify trees preserve these properties:
 *
 *	1. We may add a new leaf node, but only after first verifying that it
 *	   conflicts with all of its ancestors.
 *	2. We may remove the root of a tree, creating a new singleton
 *	   tree from the root and N new trees rooted in the immediate
 *	   children.
 *	3. If the root of a tree is not currently an applied lock, we may
 *	   apply it (if possible).
 *	4. We may upgrade the root of the tree (either extend its range,
 *	   or upgrade its entire range from read to write).
 *
 * When an applied lock is modified in a way that reduces or downgrades any
 * part of its range, we remove all its children (2 above). This particularly
 * happens when a lock is unlocked.
 *
 * For each of those child trees we "wake up" the thread which is
 * waiting for the lock so it can continue handling as follows: if the
 * root of the tree applies, we do so (3). If it doesn't, it must
 * conflict with some applied lock. We remove (wake up) all of its children
 * (2), and add it as a new leaf to the tree rooted in the applied
 * lock (1). We then repeat the process recursively with those
 * children.
 */
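/*
 * Illustrative sketch (editor's addition, not part of the kernel build):
 * three processes contending for overlapping byte ranges build exactly the
 * waiter tree described above. fcntl(2) is the standard userspace API; the
 * helper name is hypothetical.
 */
#if 0 /* example only, never compiled */
#include <fcntl.h>
#include <unistd.h>

static int lock_range(int fd, short type, off_t start, off_t len)
{
	struct flock fl = {
		.l_type   = type,	/* F_RDLCK or F_WRLCK */
		.l_whence = SEEK_SET,
		.l_start  = start,
		.l_len    = len,
	};
	/* F_SETLKW sleeps: the request becomes a "waiting" lock, inserted
	 * beneath whatever applied/waiting lock it conflicts with. */
	return fcntl(fd, F_SETLKW, &fl);
}
/*
 * P1: lock_range(fd, F_WRLCK, 0, 100)  -> applied (tree root)
 * P2: lock_range(fd, F_WRLCK, 0, 50)   -> waits, child of P1's lock
 * P3: lock_range(fd, F_WRLCK, 25, 25)  -> waits, child of P2's lock
 * When P1 unlocks, P2's subtree is detached and woken (rule 2), P2's lock
 * applies (rule 3), and P3 blocks on it again as a new leaf (rule 1).
 */
#endif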
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>
#include <linux/sysctl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <linux/uaccess.h>
#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
#define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)
static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}
static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}
static int leases_enable = 1;
static int lease_break_time = 45;
#ifdef CONFIG_SYSCTL
static struct ctl_table locks_sysctls[] = {
	{
		.procname	= "leases-enable",
		.data		= &leases_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#ifdef CONFIG_MMU
	{
		.procname	= "lease-break-time",
		.data		= &lease_break_time,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif /* CONFIG_MMU */
	{}
};
static int __init init_fs_locks_sysctls(void)
{
	register_sysctl_init("fs", locks_sysctls);
	return 0;
}
early_initcall(init_fs_locks_sysctls);
#endif /* CONFIG_SYSCTL */
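/*
 * Illustrative sketch (editor's addition): the table above surfaces as
 * /proc/sys/fs/leases-enable and /proc/sys/fs/lease-break-time. A
 * privileged process could shorten the lease break timeout like this.
 */
#if 0 /* userspace example, never compiled */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_lease_break_time(const char *secs)	/* e.g. "10" */
{
	int fd = open("/proc/sys/fs/lease-break-time", O_WRONLY);

	if (fd < 0)
		return -1;
	write(fd, secs, strlen(secs));	/* proc_dointvec parses the text */
	return close(fd);
}
#endif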
/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock is
 * held.
 */
struct file_lock_list_struct {
	spinlock_t		lock;
	struct hlist_head	hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_blocked_requests list, and the
 * fl->fl_blocker pointer for file_lock structures that are acting as lock
 * requests (in contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * flc_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);
static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;
static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's not already assigned. If it is, then
	 * free the context we just allocated.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}
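/*
 * The allocate-then-cmpxchg pattern above is a lock-free "publish once"
 * idiom: racing allocators free their copy and adopt the winner's.
 * Minimal sketch of the same idiom (editor's addition; obj/ctx and the
 * alloc_ctx()/free_ctx() helpers are hypothetical names):
 */
#if 0 /* example only, never compiled */
struct ctx *get_ctx(struct obj *o)
{
	struct ctx *c = smp_load_acquire(&o->ctx);	/* pairs with cmpxchg */

	if (c)
		return c;
	c = alloc_ctx();
	if (c && cmpxchg(&o->ctx, NULL, c)) {		/* somebody beat us */
		free_ctx(c);
		c = smp_load_acquire(&o->ctx);
	}
	return c;
}
#endif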
static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
	struct file_lock *fl;

	list_for_each_entry(fl, list, fl_list) {
		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
			list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
	}
}
static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}
static void
locks_check_ctx_file_list(struct file *filp, struct list_head *list,
			  char *list_type)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);

	list_for_each_entry(fl, list, fl_list)
		if (fl->fl_file == filp)
			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
				list_type, MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino,
				fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
}
void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}
static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_list);
	INIT_LIST_HEAD(&fl->fl_blocked_requests);
	INIT_LIST_HEAD(&fl->fl_blocked_member);
	init_waitqueue_head(&fl->fl_wait);
}
/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);
void locks_release_private(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_blocked_requests));
	BUG_ON(!list_empty(&fl->fl_blocked_member));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);
/**
 * locks_owner_has_blockers - Check for blocking lock requests
 * @flctx: file lock context
 * @owner: lock owner
 *
 * Return values:
 *   %true: @owner has at least one blocker
 *   %false: @owner has no blockers
 */
bool locks_owner_has_blockers(struct file_lock_context *flctx,
		fl_owner_t owner)
{
	struct file_lock *fl;

	spin_lock(&flctx->flc_lock);
	list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
		if (fl->fl_owner != owner)
			continue;
		if (!list_empty(&fl->fl_blocked_requests)) {
			spin_unlock(&flctx->flc_lock);
			return true;
		}
	}
	spin_unlock(&flctx->flc_lock);
	return false;
}
EXPORT_SYMBOL_GPL(locks_owner_has_blockers);
/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);
static void
locks_dispose_list(struct list_head *dispose)
{
	struct file_lock *fl;

	while (!list_empty(dispose)) {
		fl = list_first_entry(dispose, struct file_lock, fl_list);
		list_del_init(&fl->fl_list);
		locks_free_lock(fl);
	}
}
void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}
EXPORT_SYMBOL(locks_init_lock);
/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
	}
}
EXPORT_SYMBOL(locks_copy_conflock);
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	locks_copy_conflock(new, fl);

	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}
EXPORT_SYMBOL(locks_copy_lock);
static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
{
	struct file_lock *f;

	/*
	 * As ctx->flc_lock is held, new requests cannot be added to
	 * ->fl_blocked_requests, so we don't need a lock to check if it
	 * is empty.
	 */
	if (list_empty(&fl->fl_blocked_requests))
		return;
	spin_lock(&blocked_lock_lock);
	list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
	list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
		f->fl_blocker = new;
	spin_unlock(&blocked_lock_lock);
}
static inline int flock_translate_cmd(int cmd) {
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}
/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static struct file_lock *
flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
{
	int type = flock_translate_cmd(cmd);

	if (type < 0)
		return ERR_PTR(type);

	if (fl == NULL) {
		fl = locks_alloc_lock();
		if (fl == NULL)
			return ERR_PTR(-ENOMEM);
	} else {
		locks_init_lock(fl);
	}

	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	return fl;
}
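/*
 * Illustrative sketch (editor's addition): flock(2) commands map onto the
 * file_lock built above (LOCK_SH -> F_RDLCK, LOCK_EX -> F_WRLCK,
 * LOCK_UN -> F_UNLCK), always covering the whole file (fl_end = OFFSET_MAX).
 */
#if 0 /* userspace example, never compiled */
#include <sys/file.h>

static int exclusive_whole_file(int fd)
{
	/* becomes an FL_FLOCK file_lock with fl_type == F_WRLCK */
	return flock(fd, LOCK_EX);		/* may sleep */
}

static int try_shared(int fd)
{
	return flock(fd, LOCK_SH | LOCK_NB);	/* -1/EWOULDBLOCK if busy */
}
#endif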
static int assign_type(struct file_lock *fl, long type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + (l->l_len - 1);

	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
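/*
 * Worked example of the conversion above (editor's addition): with
 * l_whence = SEEK_SET, l_start = 100, l_len = 50 we get the inclusive
 * range [fl_start, fl_end] = [100, 149]. A negative length locks the
 * bytes *before* l_start: l_start = 100, l_len = -50 yields [50, 99].
 * l_len = 0 means "to end of file", i.e. fl_end = OFFSET_MAX.
 */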
/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}
/* default lease lock manager operations */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}
static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used "priv" and inserted it into the fasync list.
	 * Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}
static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};
/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}
/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}
/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}
/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	return fl1->fl_owner == fl2->fl_owner;
}
/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

	percpu_rwsem_assert_held(&file_rwsem);

	spin_lock(&fll->lock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, &fll->hlist);
	spin_unlock(&fll->lock);
}
/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll;

	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the flc_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;

	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
	spin_lock(&fll->lock);
	hlist_del_init(&fl->fl_link);
	spin_unlock(&fll->lock);
}
static unsigned long
posix_owner_key(struct file_lock *fl)
{
	return (unsigned long)fl->fl_owner;
}
static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}
static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->fl_link);
}
/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_blocked_member);
}
static void __locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_blocked_requests)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_blocked_requests,
					  struct file_lock, fl_blocked_member);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);

		/*
		 * The setting of fl_blocker to NULL marks the "done"
		 * point in deleting a block. Paired with acquire at the top
		 * of locks_delete_block().
		 */
		smp_store_release(&waiter->fl_blocker, NULL);
	}
}
/**
 * locks_delete_block - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * lockd/nfsd need to disconnect the lock while working on it.
 */
int locks_delete_block(struct file_lock *waiter)
{
	int status = -ENOENT;

	/*
	 * If fl_blocker is NULL, it won't be set again as this thread "owns"
	 * the lock and is the only one that might try to claim the lock.
	 *
	 * We use acquire/release to manage fl_blocker so that we can
	 * optimize away taking the blocked_lock_lock in many cases.
	 *
	 * The smp_load_acquire guarantees two things:
	 *
	 * 1/ that fl_blocked_requests can be tested locklessly. If something
	 * was recently added to that list it must have been in a locked region
	 * *before* the locked region when fl_blocker was set to NULL.
	 *
	 * 2/ that no other thread is accessing 'waiter', so it is safe to free
	 * it.  __locks_wake_up_blocks is careful not to touch waiter after
	 * fl_blocker is released.
	 *
	 * If a lockless check of fl_blocker shows it to be NULL, we know that
	 * no new locks can be inserted into its fl_blocked_requests list, and
	 * can avoid doing anything further if the list is empty.
	 */
	if (!smp_load_acquire(&waiter->fl_blocker) &&
	    list_empty(&waiter->fl_blocked_requests))
		return status;

	spin_lock(&blocked_lock_lock);
	if (waiter->fl_blocker)
		status = 0;
	__locks_wake_up_blocks(waiter);
	__locks_delete_block(waiter);

	/*
	 * The setting of fl_blocker to NULL marks the "done" point in deleting
	 * a block. Paired with acquire at the top of this function.
	 */
	smp_store_release(&waiter->fl_blocker, NULL);
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(locks_delete_block);
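/*
 * Illustrative sketch (editor's addition): the typical caller pattern for
 * the deferred-lock machinery. After a FILE_LOCK_DEFERRED result the
 * waiter sleeps until woken, then always detaches itself with
 * locks_delete_block(); this mirrors posix_lock_inode_wait() below.
 */
#if 0 /* example only, never compiled */
static int wait_for_lock(struct inode *inode, struct file_lock *fl)
{
	int error = posix_lock_inode(inode, fl, NULL);

	if (error == FILE_LOCK_DEFERRED)
		error = wait_event_interruptible(fl->fl_wait,
				list_empty(&fl->fl_blocked_member));
	locks_delete_block(fl);	/* always detach, even after a signal */
	return error;
}
#endif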
/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held. The
 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
 * but by ensuring that the flc_lock is also held on insertions we can avoid
 * taking the blocked_lock_lock in some cases when we see that the
 * fl_blocked_requests list is empty.
 *
 * Rather than just adding to the list, we check for conflicts with any existing
 * waiters, and add beneath any waiter that blocks the new waiter.
 * Thus wakeups don't happen until needed.
 */
static void __locks_insert_block(struct file_lock *blocker,
				 struct file_lock *waiter,
				 bool conflict(struct file_lock *,
					       struct file_lock *))
{
	struct file_lock *fl;
	BUG_ON(!list_empty(&waiter->fl_blocked_member));

new_blocker:
	list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
		if (conflict(fl, waiter)) {
			blocker = fl;
			goto new_blocker;
		}
	waiter->fl_blocker = blocker;
	list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);

	/* The requests in waiter->fl_blocked are known to conflict with
	 * waiter, but might not conflict with blocker, or the requests
	 * and lock which block it. So they all need to be woken.
	 */
	__locks_wake_up_blocks(waiter);
}
/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter,
			       bool conflict(struct file_lock *,
					     struct file_lock *))
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter, conflict);
	spin_unlock(&blocked_lock_lock);
}
/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the flc_lock, and
	 * the flc_lock is always held here. Note that removal from the
	 * fl_blocked_requests list does not require the flc_lock, so we must
	 * recheck list_empty() after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_blocked_requests))
		return;

	spin_lock(&blocked_lock_lock);
	__locks_wake_up_blocks(blocker);
	spin_unlock(&blocked_lock_lock);
}
static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}
static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	locks_wake_up_blocks(fl);
}
static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->fl_list, dispose);
	else
		locks_free_lock(fl);
}
/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static bool locks_conflict(struct file_lock *caller_fl,
			   struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return true;
	if (caller_fl->fl_type == F_WRLCK)
		return true;
	return false;
}
/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static bool posix_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (posix_same_owner(caller_fl, sys_fl))
		return false;

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return false;

	return locks_conflict(caller_fl, sys_fl);
}
/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static bool flock_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (caller_fl->fl_file == sys_fl->fl_file)
		return false;

	return locks_conflict(caller_fl, sys_fl);
}
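/*
 * Conflict matrix implemented by locks_conflict() (editor's addition):
 *
 *			existing F_RDLCK	existing F_WRLCK
 *	new F_RDLCK	no conflict		conflict
 *	new F_WRLCK	conflict		conflict
 *
 * posix_locks_conflict() additionally requires different owners and an
 * overlapping byte range; flock_locks_conflict() requires different filps.
 */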
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = locks_inode(filp);
	void *owner;
	void (*func)(void);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

retry:
	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (!posix_locks_conflict(fl, cfl))
			continue;
		if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
			&& (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
			owner = cfl->fl_lmops->lm_mod_owner;
			func = cfl->fl_lmops->lm_expire_lock;
			__module_get(owner);
			spin_unlock(&ctx->flc_lock);
			(*func)();
			module_put(owner);
			goto retry;
		}
		locks_copy_conflock(fl, cfl);
		goto out;
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);
/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turn may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
 *
 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
 */

#define MAX_DEADLK_ITERATIONS 10
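/*
 * Illustrative deadlock (editor's addition): the classic ABBA cycle the
 * detector catches. Both tasks hold one byte and sleep waiting for the
 * other's byte; posix_locks_deadlock() walks blocked_hash and returns
 * nonzero for the second F_SETLKW, which then fails with -EDEADLK.
 *
 *	task A: lock byte 0 (granted)	task B: lock byte 1 (granted)
 *	task A: lock byte 1 (waits)	task B: lock byte 0 -> -EDEADLK
 */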
/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl)) {
			while (fl->fl_blocker)
				fl = fl->fl_blocker;
			return fl;
		}
	}
	return NULL;
}
/* Must be called with the blocked_lock_lock held! */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (request->fl_file != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request, flock_locks_conflict);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_move_blocks(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	trace_flock_lock_inode(inode, request, error);
	return error;
}
static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);
	void *owner;
	void (*func)(void);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure, that no new locks will be needed
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

retry:
	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
	 * blocker's list of waiters and the global blocked_hash.
	 */
	if (request->fl_type != F_UNLCK) {
		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
			if (!posix_locks_conflict(request, fl))
				continue;
			if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
				&& (*fl->fl_lmops->lm_lock_expirable)(fl)) {
				owner = fl->fl_lmops->lm_mod_owner;
				func = fl->fl_lmops->lm_expire_lock;
				__module_get(owner);
				spin_unlock(&ctx->flc_lock);
				percpu_up_read(&file_rwsem);
				(*func)();
				module_put(owner);
				goto retry;
			}
			if (conflock)
				locks_copy_conflock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			/*
			 * Ensure that we don't find any locks blocked on this
			 * request during deadlock detection.
			 */
			__locks_wake_up_blocks(request);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request,
						     posix_locks_conflict);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/* Find the first old lock with the same owner as the new lock */
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (posix_same_owner(request, fl))
			break;
	}

	/* Process locks with this owner. */
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
		if (!posix_same_owner(request, fl))
			break;

		/* Detect adjacent or overlapping regions (if same lock type) */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				continue;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock_ctx(fl, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				continue;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock_ctx(fl, &dispose);
					continue;
				}
				/*
				 * Replace the old lock with new_fl, and
				 * remove the old one. It's safe to do the
				 * insert here since we know that we won't be
				 * using new_fl later, and that the lock is
				 * just replacing an existing lock.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				locks_move_blocks(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_insert_lock_ctx(request, &fl->fl_list);
				locks_delete_lock_ctx(fl, &dispose);
				added = true;
			}
		}
	}

	/*
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's safe yet to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_move_blocks(new_fl, request);
		locks_insert_lock_ctx(new_fl, &fl->fl_list);
		fl = new_fl;
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock_ctx(left, &fl->fl_list);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);
	trace_posix_lock_inode(inode, request, error);

	return error;
}
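/*
 * Worked example of the splitting logic above (editor's addition): an
 * owner holds a write lock on [0, 99] and unlocks [40, 59]. The old lock
 * becomes "left" (trimmed to [0, 39]), new_fl2 becomes "right" ([60, 99]),
 * and waiters on either remnant are woken to retry. This is why the
 * function may need two preallocated file_locks (new_fl and new_fl2).
 */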
/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return posix_lock_inode(locks_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
/**
 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
 * @inode: inode of file to which lock request should be applied
 * @fl: The lock to be applied
 *
 * Apply a POSIX style lock request to an inode.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_inode(inode, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait,
					list_empty(&fl->fl_blocked_member));
		if (error)
			break;
	}
	locks_delete_block(fl);
	return error;
}
static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		fallthrough;
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}
/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock_ctx(fl, dispose);
	}
	return 0;
}
EXPORT_SYMBOL(lease_modify);
static bool past_time(unsigned long then)
{
	if (!then)
		/* 0 is a special value meaning "this never expires": */
		return false;
	return time_after(jiffies, then);
}
static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl, *tmp;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		trace_time_out_leases(inode, fl);
		if (past_time(fl->fl_downgrade_time))
			lease_modify(fl, F_RDLCK, dispose);
		if (past_time(fl->fl_break_time))
			lease_modify(fl, F_UNLCK, dispose);
	}
}
static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
	bool rc;

	if (lease->fl_lmops->lm_breaker_owns_lease
			&& lease->fl_lmops->lm_breaker_owns_lease(lease))
		return false;
	if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
		rc = false;
		goto trace;
	}
	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
		rc = false;
		goto trace;
	}

	rc = locks_conflict(breaker, lease);
trace:
	trace_leases_conflict(rc, lease, breaker);
	return rc;
}
static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (leases_conflict(fl, breaker))
			return true;
	}
	return false;
}
/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	    break all leases
 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	    only delegations
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file. Leases are broken on
 * a call to open() or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
	int error = 0;
	struct file_lock_context *ctx;
	struct file_lock *new_fl, *fl, *tmp;
	unsigned long break_time;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;
	LIST_HEAD(dispose);

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);
	new_fl->fl_flags = type;

	/* typically we will check that ctx is non-NULL before calling */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx) {
		WARN_ON_ONCE(1);
		goto free_lock;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);

	time_out_leases(inode, &dispose);

	if (!any_leases_conflict(inode, new_fl))
		goto out;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		if (!leases_conflict(fl, new_fl))
			continue;
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			if (lease_breaking(fl))
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		if (fl->fl_lmops->lm_break(fl))
			locks_delete_lock_ctx(fl, &dispose);
	}

	if (list_empty(&ctx->flc_lease))
		goto out;

	if (mode & O_NONBLOCK) {
		trace_break_lease_noblock(inode, new_fl);
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
	break_time = fl->fl_break_time;
	if (break_time != 0)
		break_time -= jiffies;
	if (break_time == 0)
		break_time++;
	locks_insert_block(fl, new_fl, leases_conflict);
	trace_break_lease_block(inode, new_fl);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	locks_dispose_list(&dispose);
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
					list_empty(&new_fl->fl_blocked_member),
					break_time);

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	trace_break_lease_unblock(inode, new_fl);
	locks_delete_block(new_fl);
	if (error >= 0) {
		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
		if (error == 0)
			time_out_leases(inode, &dispose);
		if (any_leases_conflict(inode, new_fl))
			goto restart;
		error = 0;
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
free_lock:
	locks_free_lock(new_fl);
	return error;
}
EXPORT_SYMBOL(__break_lease);
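/*
 * Illustrative sketch (editor's addition): a second open() is what
 * typically drives __break_lease(). With O_NONBLOCK the opener gets
 * -EWOULDBLOCK while the lease holder is signalled; without it the
 * opener sleeps for up to lease-break-time seconds.
 */
#if 0 /* userspace example, never compiled */
#include <fcntl.h>

static int open_breaks_lease(const char *path)	/* path is hypothetical */
{
	/* Returns -1 with errno == EWOULDBLOCK while a lease is mid-break. */
	return open(path, O_WRONLY | O_NONBLOCK);
}
#endif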
/**
 * lease_get_mtime - update modified time of an inode with exclusive lease
 * @inode: the inode
 * @time:  pointer to a timespec which contains the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec64 *time)
{
	bool has_lease = false;
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		spin_lock(&ctx->flc_lock);
		fl = list_first_entry_or_null(&ctx->flc_lease,
					      struct file_lock, fl_list);
		if (fl && (fl->fl_type == F_WRLCK))
			has_lease = true;
		spin_unlock(&ctx->flc_lock);
	}

	if (has_lease)
		*time = current_time(inode);
}
EXPORT_SYMBOL(lease_get_mtime);
/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 *		changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int type = F_UNLCK;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		percpu_down_read(&file_rwsem);
		spin_lock(&ctx->flc_lock);
		time_out_leases(inode, &dispose);
		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
			if (fl->fl_file != filp)
				continue;
			type = target_leasetype(fl);
			break;
		}
		spin_unlock(&ctx->flc_lock);
		percpu_up_read(&file_rwsem);

		locks_dispose_list(&dispose);
	}
	return type;
}
/**
 * check_conflicting_open - see if the given file points to an inode that has
 *			    an existing open that would conflict with the
 *			    desired lease.
 * @filp:	file to check
 * @arg:	type of lease that we're trying to acquire
 * @flags:	current lock flags
 *
 * Check to see if there's an existing open fd on this file that would
 * conflict with the lease we're trying to set.
 */
static int
check_conflicting_open(struct file *filp, const long arg, int flags)
{
	struct inode *inode = locks_inode(filp);
	int self_wcount = 0, self_rcount = 0;

	if (flags & FL_LAYOUT)
		return 0;
	if (flags & FL_DELEG)
		/* We leave these checks to the caller */
		return 0;

	if (arg == F_RDLCK)
		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
	else if (arg != F_WRLCK)
		return 0;

	/*
	 * Make sure that only read/write count is from lease requestor.
	 * Note that this will result in denying write leases when i_writecount
	 * is negative, which is what we want. (We shouldn't grant write leases
	 * on files open for execution.)
	 */
	if (filp->f_mode & FMODE_WRITE)
		self_wcount = 1;
	else if (filp->f_mode & FMODE_READ)
		self_rcount = 1;

	if (atomic_read(&inode->i_writecount) != self_wcount ||
	    atomic_read(&inode->i_readcount) != self_rcount)
		return -EAGAIN;

	return 0;
}
static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
	struct file_lock *fl, *my_fl = NULL, *lease;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
	int error;
	LIST_HEAD(dispose);

	lease = *flp;
	trace_generic_add_lease(inode, lease);

	/* Note that arg is never F_UNLCK here */
	ctx = locks_get_lock_context(inode, arg);
	if (!ctx)
		return -ENOMEM;

	/*
	 * In the delegation case we need mutual exclusion with
	 * a number of operations that take the i_mutex. We trylock
	 * because delegations are an optional optimization, and if
	 * there's some chance of a conflict--we'd rather not
	 * bother, maybe that's a sign this just isn't a good file to
	 * hand out a delegation on.
	 */
	if (is_deleg && !inode_trylock(inode))
		return -EAGAIN;

	if (is_deleg && arg == F_WRLCK) {
		/* Write delegations are not currently supported: */
		inode_unlock(inode);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	time_out_leases(inode, &dispose);
	error = check_conflicting_open(filp, arg, lease->fl_flags);
	if (error)
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	error = -EAGAIN;
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == lease->fl_owner) {
			my_fl = fl;
			continue;
		}

		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but no getting a
		 * new lease if someone else is opening for write:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_fl != NULL) {
		lease = my_fl;
		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
		if (error)
			goto out;
		goto out_setup;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock_ctx(lease, &ctx->flc_lease);
	/*
	 * The check in break_lease() is lockless. It's possible for another
	 * open to race in after we did the earlier check for a conflicting
	 * open but before the lease was inserted. Check again for a
	 * conflicting open and cancel the lease if there is one.
	 *
	 * We also add a barrier here to ensure that the insertion of the lock
	 * precedes these checks.
	 */
	smp_mb();
	error = check_conflicting_open(filp, arg, lease->fl_flags);
	if (error) {
		locks_unlink_lock_ctx(lease);
		goto out;
	}

out_setup:
	if (lease->fl_lmops->lm_setup)
		lease->fl_lmops->lm_setup(lease, priv);
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
	if (is_deleg)
		inode_unlock(inode);
	if (!error && !my_fl)
		*flp = NULL;
	return error;
}
static int generic_delete_lease(struct file *filp, void *owner)
{
	int error = -EAGAIN;
	struct file_lock *fl, *victim = NULL;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx) {
		trace_generic_delete_lease(inode, NULL);
		return error;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == owner) {
			victim = fl;
			break;
		}
	}
	trace_generic_delete_lease(inode, victim);
	if (victim)
		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
	return error;
}
/**
 * generic_setlease - sets a lease on an open file
 * @filp:	file pointer
 * @arg:	type of lease to obtain
 * @flp:	input - file_lock to use, output - file_lock inserted
 * @priv:	private data for lm_setup (may be NULL if lm_setup
 *		doesn't require it)
 *
 * The (input) flp->fl_lmops->lm_break function is required
 * by break_lease().
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
			void **priv)
{
	struct inode *inode = locks_inode(filp);
	int error;

	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	switch (arg) {
	case F_UNLCK:
		return generic_delete_lease(filp, *priv);
	case F_RDLCK:
	case F_WRLCK:
		if (!(*flp)->fl_lmops->lm_break) {
			WARN_ON_ONCE(1);
			return -ENOLCK;
		}

		return generic_add_lease(filp, arg, flp, priv);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(generic_setlease);
#if IS_ENABLED(CONFIG_SRCU)
/*
 * Kernel subsystems can register to be notified on any attempt to set
 * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
 * to close files that it may have cached when there is an attempt to set a
 * conflicting lease.
 */
static struct srcu_notifier_head lease_notifier_chain;

static inline void
lease_notifier_chain_init(void)
{
	srcu_init_notifier_head(&lease_notifier_chain);
}

static inline void
setlease_notifier(long arg, struct file_lock *lease)
{
	if (arg != F_UNLCK)
		srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
}

int lease_register_notifier(struct notifier_block *nb)
{
	return srcu_notifier_chain_register(&lease_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(lease_register_notifier);

void lease_unregister_notifier(struct notifier_block *nb)
{
	srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(lease_unregister_notifier);

#else /* !IS_ENABLED(CONFIG_SRCU) */
static inline void
lease_notifier_chain_init(void)
{
}

static inline void
setlease_notifier(long arg, struct file_lock *lease)
{
}

int lease_register_notifier(struct notifier_block *nb)
{
	return 0;
}
EXPORT_SYMBOL_GPL(lease_register_notifier);

void lease_unregister_notifier(struct notifier_block *nb)
{
}
EXPORT_SYMBOL_GPL(lease_unregister_notifier);

#endif /* IS_ENABLED(CONFIG_SRCU) */
/**
 * vfs_setlease - sets a lease on an open file
 * @filp:	file pointer
 * @arg:	type of lease to obtain
 * @lease:	file_lock to use when adding a lease
 * @priv:	private info for lm_setup when adding a lease (may be
 *		NULL if lm_setup doesn't require it)
 *
 * Call this to establish a lease on the file. The "lease" argument is not
 * used for F_UNLCK requests and may be NULL. For commands that set or alter
 * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
 * set; if not, this function will return -ENOLCK (and generate a scary-looking
 * stack trace).
 *
 * The "priv" pointer is passed directly to the lm_setup function as-is. It
 * may be NULL if the lm_setup operation doesn't require it.
 */
int
vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
{
	if (lease)
		setlease_notifier(arg, *lease);
	if (filp->f_op->setlease)
		return filp->f_op->setlease(filp, arg, lease, priv);
	else
		return generic_setlease(filp, arg, lease, priv);
}
EXPORT_SYMBOL_GPL(vfs_setlease);
static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	new->fa_fd = fd;

	error = vfs_setlease(filp, arg, &fl, (void **)&new);
	if (fl)
		locks_free_lock(fl);
	if (new)
		fasync_free(new);
	return error;
}
/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	if (arg == F_UNLCK)
		return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
	return do_fcntl_add_lease(fd, filp, arg);
}
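/*
 * Illustrative sketch (editor's addition): taking a read lease and getting
 * SIGIO when it is broken. Standard fcntl(2)/signal(2) API; the signal
 * handler body is left out.
 */
#if 0 /* userspace example, never compiled */
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int take_read_lease(int fd, void (*handler)(int))
{
	signal(SIGIO, handler);			/* delivery target */
	if (fcntl(fd, F_SETOWN, getpid()) < 0)
		return -1;
	return fcntl(fd, F_SETLEASE, F_RDLCK);	/* the lease_alloc() path */
}
#endif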
/**
 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a FLOCK style lock request to an inode.
 */
static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_inode(inode, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait,
				list_empty(&fl->fl_blocked_member));
		if (error)
			break;
	}
	locks_delete_block(fl);
	return error;
}
/**
 * locks_lock_inode_wait - Apply a lock to an inode
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a POSIX or FLOCK style lock request to an inode.
 */
int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
		case FL_POSIX:
			res = posix_lock_inode_wait(inode, fl);
			break;
		case FL_FLOCK:
			res = flock_lock_inode_wait(inode, fl);
			break;
		default:
			BUG();
	}
	return res;
}
EXPORT_SYMBOL(locks_lock_inode_wait);
/**
 *	sys_flock: - flock() system call.
 *	@fd: the file descriptor to lock.
 *	@cmd: the type of lock to apply.
 *
 *	Apply a %FL_FLOCK style lock to an open file descriptor.
 *	The @cmd can be one of:
 *
 *	- %LOCK_SH -- a shared lock.
 *	- %LOCK_EX -- an exclusive lock.
 *	- %LOCK_UN -- remove an existing lock.
 *	- %LOCK_MAND -- a 'mandatory' flock. (DEPRECATED)
 *
 *	%LOCK_MAND support has been removed from the kernel.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct fd f = fdget(fd);
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	if (!f.file)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	/*
	 * LOCK_MAND locks were broken for a long time in that they never
	 * conflicted with one another and didn't prevent any sort of open,
	 * read or write activity.
	 *
	 * Just ignore these requests now, to preserve legacy behavior, but
	 * throw a warning to let people know that they don't actually work.
	 */
	if (cmd & LOCK_MAND) {
		pr_warn_once("Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n");
		error = 0;
		goto out_putf;
	}

	lock = flock_make_lock(f.file, cmd, NULL);
	if (IS_ERR(lock)) {
		error = PTR_ERR(lock);
		goto out_putf;
	}

	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(f.file, lock->fl_type);
	if (error)
		goto out_free;

	if (f.file->f_op->flock)
		error = f.file->f_op->flock(f.file,
					    (can_sleep) ? F_SETLKW : F_SETLK,
					    lock);
	else
		error = locks_lock_file_wait(f.file, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fdput(f);
 out:
	return error;
}
/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
 * setting @fl->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);
/**
 * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
 * @fl: The file_lock whose fl_pid should be translated
 * @ns: The namespace into which the pid should be translated
 *
 * Used to translate a fl_pid into a namespace virtual pid number
 */
static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
{
	pid_t vnr;
	struct pid *pid;

	if (IS_OFDLCK(fl))
		return -1;
	if (IS_REMOTELCK(fl))
		return fl->fl_pid;
	/*
	 * If the flock owner process is dead and its pid has been already
	 * freed, the translation below won't work, but we still want to show
	 * flock owner pid number in init pidns.
	 */
	if (ns == &init_pid_ns)
		return (pid_t)fl->fl_pid;

	rcu_read_lock();
	pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
	vnr = pid_nr_ns(pid, ns);
	rcu_read_unlock();
	return vnr;
}
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}
#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;
	error = -EINVAL;
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		fl->fl_flags |= FL_OFDLCK;
		fl->fl_owner = filp;
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK) {
		error = posix_lock_to_flock(flock, fl);
		if (error)
			goto out;
	}
out:
	locks_free_lock(fl);
	return error;
}
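/*
 * Illustrative sketch (editor's addition): probing for a conflicting lock
 * with F_OFD_GETLK. l_pid must be zero on input; on return l_type is
 * F_UNLCK if the range is free, otherwise it describes the blocker.
 */
#if 0 /* userspace example, never compiled */
#include <fcntl.h>

static int range_is_free(int fd, off_t start, off_t len)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = start,
		.l_len    = len,
		.l_pid    = 0,		/* required for OFD commands */
	};

	if (fcntl(fd, F_OFD_GETLK, &fl) < 0)
		return -1;
	return fl.l_type == F_UNLCK;
}
#endif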
/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return asynchronously,
 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
 * request completes.
 * If the request is for a non-blocking lock, the filesystem should return
 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
 * with the result. If the request times out, the callback routine will return
 * a nonzero return code and the filesystem should release the lock. The
 * filesystem is also responsible for keeping a corresponding posix lock when
 * it grants a lock, so the VFS can find out which locks are locally held and
 * do the correct lock cleanup when required.
 * The underlying filesystem must not drop the kernel lock or call
 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);
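/*
 * Illustrative sketch (hypothetical filesystem, not from this file): an
 * asynchronous ->lock() implementation defers the request and completes it
 * later through ->lm_grant(), per the contract documented above:
 *
 *	static int examplefs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
 *			examplefs_queue_request(filp, fl);	// hypothetical helper
 *			return FILE_LOCK_DEFERRED;
 *		}
 *		return posix_lock_file(filp, fl, NULL);	// synchronous fallback
 *	}
 *
 * When the backend answers, the filesystem reports the outcome with
 * fl->fl_lmops->lm_grant(fl, result) and, on success, must also record a
 * matching local posix lock so the VFS can clean up correctly.
 */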
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait,
					list_empty(&fl->fl_blocked_member));
		if (error)
			break;
	}
	locks_delete_block(fl);

	return error;
}
/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
static int
check_fmode_for_setlk(struct file_lock *fl)
{
	switch (fl->fl_type) {
	case F_RDLCK:
		if (!(fl->fl_file->f_mode & FMODE_READ))
			return -EBADF;
		break;
	case F_WRLCK:
		if (!(fl->fl_file->f_mode & FMODE_WRITE))
			return -EBADF;
	}
	return 0;
}
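/*
 * Userspace consequence (illustrative, not from this file): the lock type
 * must match the open mode of the descriptor, otherwise F_SETLK fails:
 *
 *	int fd = open("file", O_WRONLY);
 *	struct flock fl = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *
 *	fcntl(fd, F_SETLK, &fl);	// fails with errno == EBADF:
 *					// a read lock needs a readable fd
 */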
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock *flock)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct inode *inode = locks_inode(filp);
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	error = flock_to_posix_lock(filp, file_lock, flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLK;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLKW;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		fallthrough;
	case F_SETLKW:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		struct files_struct *files = current->files;
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close(). rcu_read_lock() wouldn't do.
		 */
		spin_lock(&files->file_lock);
		f = files_lookup_fd_locked(files, fd);
		spin_unlock(&files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	trace_fcntl_setlk(inode, file_lock, error);
	locks_free_lock(file_lock);
	return error;
}
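/*
 * Userspace view (illustrative, not from this file): OFD locks are
 * requested with l_pid zeroed, and the open file description - not the
 * process - owns the resulting lock:
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK, .l_whence = SEEK_SET,
 *		.l_start = 0, .l_len = 0,	// whole file
 *		.l_pid = 0,			// mandatory for OFD commands
 *	};
 *
 *	if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
 *		perror("F_OFD_SETLK");
 */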
#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	error = -EINVAL;
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock64_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_GETLK64;
		fl->fl_flags |= FL_OFDLCK;
		fl->fl_owner = filp;
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK)
		posix_lock_to_flock64(flock, fl);

out:
	locks_free_lock(fl);
	return error;
}
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock64 *flock)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	error = flock64_to_posix_lock(filp, file_lock, flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLK64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLKW64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		fallthrough;
	case F_SETLKW64:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		struct files_struct *files = current->files;
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close(). rcu_read_lock() wouldn't do.
		 */
		spin_lock(&files->file_lock);
		f = files_lookup_fd_locked(files, fd);
		spin_unlock(&files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */
/*
 * This function is called when the file is being removed
 * from the task's fd array. POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	int error;
	struct inode *inode = locks_inode(filp);
	struct file_lock lock;
	struct file_lock_context *ctx;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file(). Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty(&ctx->flc_posix))
		return;

	locks_init_lock(&lock);
	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);

	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
	trace_locks_remove_posix(inode, &lock, error);
}
EXPORT_SYMBOL(locks_remove_posix);
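/*
 * Userspace consequence (illustrative, not from this file): the owner of a
 * classic POSIX lock is the process, so closing *any* descriptor for a file
 * drops the process's locks on it, even locks taken through another
 * descriptor:
 *
 *	fd1 = open("file", O_RDWR);
 *	fcntl(fd1, F_SETLK, &fl);	// lock taken via fd1
 *	fd2 = open("file", O_RDONLY);
 *	close(fd2);			// fd1's lock is dropped too
 *
 * OFD locks, whose owner is the open file description (fl_owner == filp),
 * do not have this problem.
 */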
/* The i_flctx must be valid when calling into here */
static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
	struct file_lock fl;
	struct inode *inode = locks_inode(filp);

	if (list_empty(&flctx->flc_flock))
		return;

	flock_make_lock(filp, LOCK_UN, &fl);
	fl.fl_flags |= FL_CLOSE;

	if (filp->f_op->flock)
		filp->f_op->flock(filp, F_SETLKW, &fl);
	else
		flock_lock_inode(inode, &fl);

	if (fl.fl_ops && fl.fl_ops->fl_release_private)
		fl.fl_ops->fl_release_private(&fl);
}
/* The i_flctx must be valid when calling into here */
static void
locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
	struct file_lock *fl, *tmp;
	LIST_HEAD(dispose);

	if (list_empty(&ctx->flc_lease))
		return;

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
		if (filp == fl->fl_file)
			lease_modify(fl, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	locks_dispose_list(&dispose);
}
/*
 * This function is called on the last close of an open file.
 */
void locks_remove_file(struct file *filp)
{
	struct file_lock_context *ctx;

	ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
	if (!ctx)
		return;

	/* remove any OFD locks */
	locks_remove_posix(filp, filp);

	/* remove flock locks */
	locks_remove_flock(filp, ctx);

	/* remove any leases */
	locks_remove_lease(filp, ctx);

	spin_lock(&ctx->flc_lock);
	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
	spin_unlock(&ctx->flc_lock);
}
/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_CANCELLK, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_cancel_lock);
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct locks_iterator {
	int	li_cpu;
	loff_t	li_pos;
};
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			    loff_t id, char *pfx, int repeat)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;
	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
	int type;

	fl_pid = locks_translate_pid(fl, proc_pidns);
	/*
	 * If lock owner is dead (and pid is freed) or not visible in current
	 * pidns, zero is shown as a pid value. Check lock info from
	 * init_pid_ns to get saved lock pid value.
	 */

	if (fl->fl_file != NULL)
		inode = locks_inode(fl->fl_file);

	seq_printf(f, "%lld: ", id);

	if (repeat)
		seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);

	if (IS_POSIX(fl)) {
		if (fl->fl_flags & FL_ACCESS)
			seq_puts(f, "ACCESS");
		else if (IS_OFDLCK(fl))
			seq_puts(f, "OFDLCK");
		else
			seq_puts(f, "POSIX ");

		seq_printf(f, " %s ",
			     (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		seq_puts(f, "FLOCK  ADVISORY  ");
	} else if (IS_LEASE(fl)) {
		if (fl->fl_flags & FL_DELEG)
			seq_puts(f, "DELEG  ");
		else
			seq_puts(f, "LEASE  ");

		if (lease_breaking(fl))
			seq_puts(f, "BREAKING  ");
		else if (fl->fl_file)
			seq_puts(f, "ACTIVE    ");
		else
			seq_puts(f, "BREAKER   ");
	} else {
		seq_puts(f, "UNKNOWN UNKNOWN  ");
	}
	type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;

	seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
			     (type == F_RDLCK) ? "READ" : "UNLCK");
	if (inode) {
		/* userspace relies on this representation of dev_t */
		seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_puts(f, "0 EOF\n");
	}
}
static struct file_lock *get_next_blocked_member(struct file_lock *node)
{
	struct file_lock *tmp;

	/* NULL node or root node */
	if (node == NULL || node->fl_blocker == NULL)
		return NULL;

	/* Next member in the linked list could be itself */
	tmp = list_next_entry(node, fl_blocked_member);
	if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member)
		|| tmp == node) {
		return NULL;
	}

	return tmp;
}
static int locks_show(struct seq_file *f, void *v)
{
	struct locks_iterator *iter = f->private;
	struct file_lock *cur, *tmp;
	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
	int level = 0;

	cur = hlist_entry(v, struct file_lock, fl_link);

	if (locks_translate_pid(cur, proc_pidns) == 0)
		return 0;

	/* View this cross-linked list as a binary tree: the first member of
	 * fl_blocked_requests is the left child of the current node, the next
	 * sibling in fl_blocked_member is the right child, and the parent of
	 * the current node is reachable through fl_blocker, so the problem
	 * becomes a traversal of a binary tree.
	 */
	while (cur != NULL) {
		if (level)
			lock_get_status(f, cur, iter->li_pos, "-> ", level);
		else
			lock_get_status(f, cur, iter->li_pos, "", level);

		if (!list_empty(&cur->fl_blocked_requests)) {
			/* Turn left */
			cur = list_first_entry_or_null(&cur->fl_blocked_requests,
					struct file_lock, fl_blocked_member);
			level++;
		} else {
			/* Turn right */
			tmp = get_next_blocked_member(cur);
			/* Fall back to parent node */
			while (tmp == NULL && cur->fl_blocker != NULL) {
				cur = cur->fl_blocker;
				level--;
				tmp = get_next_blocked_member(cur);
			}
			cur = tmp;
		}
	}

	return 0;
}
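/*
 * Illustrative output: a waiter blocked on the lock above it is emitted
 * with the same id and a "-> " prefix, indented one step per tree level:
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:54321 0 EOF
 *	1: -> POSIX  ADVISORY  WRITE 1235 08:01:54321 0 EOF
 */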
static void __show_fd_locks(struct seq_file *f,
			struct list_head *head, int *id,
			struct file *filp, struct files_struct *files)
{
	struct file_lock *fl;

	list_for_each_entry(fl, head, fl_list) {

		if (filp != fl->fl_file)
			continue;
		if (fl->fl_owner != files &&
		    fl->fl_owner != filp)
			continue;

		(*id)++;
		seq_puts(f, "lock:\t");
		lock_get_status(f, fl, *id, "", 0);
	}
}
void show_fd_locks(struct seq_file *f,
		  struct file *filp, struct files_struct *files)
{
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int id = 0;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx)
		return;

	spin_lock(&ctx->flc_lock);
	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
	spin_unlock(&ctx->flc_lock);
}
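/*
 * Illustrative output: each lock held through the descriptor appears in
 * /proc/<pid>/fdinfo/<fd> with a "lock:" prefix and the same layout as
 * /proc/locks:
 *
 *	lock:	1: POSIX  ADVISORY  WRITE 1234 08:01:54321 0 EOF
 */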
static void *locks_start(struct seq_file *f, loff_t *pos)
	__acquires(&blocked_lock_lock)
{
	struct locks_iterator *iter = f->private;

	iter->li_pos = *pos + 1;
	percpu_down_write(&file_rwsem);
	spin_lock(&blocked_lock_lock);
	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
}
static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct locks_iterator *iter = f->private;

	++iter->li_pos;
	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
}
static void locks_stop(struct seq_file *f, void *v)
	__releases(&blocked_lock_lock)
{
	spin_unlock(&blocked_lock_lock);
	percpu_up_write(&file_rwsem);
}
static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};
static int __init proc_locks_init(void)
{
	proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
			sizeof(struct locks_iterator), NULL);
	return 0;
}
fs_initcall(proc_locks_init);
#endif
static int __init filelock_init(void)
{
	int i;

	flctx_cache = kmem_cache_create("file_lock_ctx",
			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);

	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	for_each_possible_cpu(i) {
		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);

		spin_lock_init(&fll->lock);
		INIT_HLIST_HEAD(&fll->hlist);
	}

	lease_notifier_chain_init();
	return 0;
}
core_initcall(filelock_init);