// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et.al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>

#include "pnode.h"
#include "internal.h"

/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
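/*
 * A usage sketch for the two boot parameters parsed above (the values here
 * are purely illustrative, not recommendations). Both are entry counts given
 * on the kernel command line:
 *
 *	mhash_entries=131072 mphash_entries=16384
 *
 * A non-zero value feeds the sizing of mount_hashtable and
 * mountpoint_hashtable at boot (the sizing code lies outside this excerpt).
 */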

static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */

struct mount_kattr {
	unsigned int attr_set;
	unsigned int attr_clr;
	unsigned int propagation;
	unsigned int lookup_flags;
	bool recurse;
	struct user_namespace *mnt_userns;
	struct mnt_idmap *mnt_idmap;
};

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, i.e. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}
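
/*
 * A minimal sketch of the matching read side, assuming nothing beyond the
 * seqlock API (lookup_mnt() below is a real user of this pattern): retry
 * the read section until no writer raced with it.
 *
 *	unsigned seq;
 *
 *	do {
 *		seq = read_seqbegin(&mount_lock);
 *		... read the mount hash / tree ...
 *	} while (read_seqretry(&mount_lock, seq));
 */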

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}

static int mnt_alloc_id(struct mount *mnt)
{
	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_id = res;
	return 0;
}

static void mnt_free_id(struct mount *mnt)
{
	ida_free(&mnt_id_ida, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name,
							 GFP_KERNEL_ACCOUNT);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
		mnt->mnt.mnt_idmap = &nop_mnt_idmap;
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
		return 1;
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure if we don't see s_readonly_remount set yet, we also will
	 * not see any superblock / mount flag changes done by remount.
	 * It also pairs with the barrier in sb_end_ro_state_change()
	 * assuring that if we see s_readonly_remount already cleared, we will
	 * see the values of superblock / mount flags updated by remount.
	 */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (the mount is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, __mnt_drop_write() must
 * be called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store from mnt_inc_writers() must be visible before we enter
	 * the MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	might_lock(&mount_lock.lock);
	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			cpu_relax();
		} else {
			/*
			 * This prevents priority inversion if the task
			 * setting MNT_WRITE_HOLD got preempted on a remote
			 * CPU, and it prevents livelock if the task setting
			 * MNT_WRITE_HOLD has a lower priority and is bound to
			 * the same CPU as the task that is spinning here.
			 */
			preempt_enable();
			lock_mount_hash();
			unlock_mount_hash();
			preempt_disable();
		}
	}
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change(),
	 * making sure that if we see MNT_WRITE_HOLD cleared, we will also see
	 * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
	 * mnt_is_readonly() and bail in case we are racing with remount
	 * read-only.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
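
/*
 * A minimal caller-side sketch (not from this file): per the refcount
 * contract above, a typical write path brackets the operation with
 * mnt_want_write() and the matching mnt_drop_write() defined below:
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	... perform and commit the write ...
 *	mnt_drop_write(path->mnt);
 */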

/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount to take a write on
 *
 * This is like __mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the check for emergency r/o remounts. This must be
 * paired with __mnt_drop_write_file.
 */
int __mnt_want_write_file(struct file *file)
{
	if (file->f_mode & FMODE_WRITER) {
		/*
		 * Superblock may have become readonly while there are still
		 * writable fd's, e.g. due to a fs error with errors=remount-ro
		 */
		if (__mnt_is_readonly(file->f_path.mnt))
			return -EROFS;
		return 0;
	}
	return __mnt_want_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount to take a write on
 *
 * This is like mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the freeze protection and the check for emergency r/o
 * remounts. This must be paired with mnt_drop_write_file.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it. Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows the filesystem to be frozen again. Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);

/**
 * mnt_hold_writers - prevent write access to the given mount
 * @mnt: mnt to prevent write access to
 *
 * Prevents write access to @mnt if there are no active writers for @mnt.
 * This function needs to be called and return successfully before changing
 * properties of @mnt that need to remain stable for callers with write access
 * to @mnt.
 *
 * After this function has been called successfully callers must pair it with
 * a call to mnt_unhold_writers() in order to stop preventing write access to
 * @mnt.
 *
 * Context: This function expects lock_mount_hash() to be held serializing
 *          setting MNT_WRITE_HOLD.
 * Return: On success 0 is returned.
 *         On error, -EBUSY is returned.
 */
static inline int mnt_hold_writers(struct mount *mnt)
{
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		return -EBUSY;

	return 0;
}

/**
 * mnt_unhold_writers - stop preventing write access to the given mount
 * @mnt: mnt to stop preventing write access to
 *
 * Stop preventing write access to @mnt allowing callers to gain write access
 * to @mnt again.
 *
 * This function can only be called after a successful call to
 * mnt_hold_writers().
 *
 * Context: This function expects lock_mount_hash() to be held.
 */
static inline void mnt_unhold_writers(struct mount *mnt)
{
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
}

static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	mnt_unhold_writers(mnt);
	return ret;
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			err = mnt_hold_writers(mnt);
			if (err)
				break;
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err)
		sb_start_ro_state_change(sb);
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	mnt_idmap_put(mnt_idmap(&mnt->mnt));
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();	// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}

/* call under rcu_read_lock */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}

/**
 * __lookup_mnt - find first child mount
 * @mnt: parent mount
 * @dentry: mountpoint
 *
 * If @mnt has a child mount @c mounted at @dentry, find and return it.
 *
 * Note that the child mount @c need not be unique. There are cases
 * where shadow mounts are created. For example, during mount
 * propagation when a source mount @mnt whose root got overmounted by a
 * mount @o after path lookup but before @namespace_sem could be
 * acquired gets copied and propagated. So @mnt gets copied including
 * @o. When @mnt is propagated to a destination mount @d that already
 * has another mount @n mounted at the same mountpoint then the source
 * mount @mnt will be tucked beneath @n, i.e., @n will be mounted on
 * @mnt and @mnt mounted on @d. Now both @n and @o are mounted at @mnt
 * on @dentry.
 *
 * Return: The first child of @mnt mounted at @dentry, or NULL.
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically. If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}

static inline void lock_ns_list(struct mnt_namespace *ns)
{
	spin_lock(&ns->ns_lock);
}

static inline void unlock_ns_list(struct mnt_namespace *ns)
{
	spin_unlock(&ns->ns_lock);
}

static inline bool mnt_is_cursor(struct mount *mnt)
{
	return mnt->mnt.mnt_flags & MNT_CURSOR;
}

/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 * current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline. For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in this context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		if (mnt_is_cursor(mnt))
			continue;
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	unlock_ns_list(ns);
	up_read(&namespace_sem);

	return is_covered;
}

static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}

static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return ERR_PTR(-ENOENT);
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dget(dentry);
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}

/*
 * vfsmount lock must be held. Additionally, the caller is responsible
 * for serializing calls for given disposal list.
 */
static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/* called with namespace_lock and vfsmount lock */
static void put_mountpoint(struct mountpoint *mp)
{
	__put_mountpoint(mp, &ex_mountpoints);
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static struct mountpoint *unhash_mnt(struct mount *mnt)
{
	struct mountpoint *mp;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	mp = mnt->mnt_mp;
	mnt->mnt_mp = NULL;
	return mp;
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	put_mountpoint(unhash_mnt(mnt));
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

/**
 * mnt_set_mountpoint_beneath - mount a mount beneath another one
 *
 * @new_parent: the source mount
 * @top_mnt: the mount beneath which @new_parent is mounted
 * @new_mp: the new mountpoint of @top_mnt on @new_parent
 *
 * Remove @top_mnt from its current mountpoint @top_mnt->mnt_mp and
 * parent @top_mnt->mnt_parent and mount it on top of @new_parent at
 * @new_mp. And mount @new_parent on the old parent and old
 * mountpoint of @top_mnt.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 * to have been acquired in that order.
 */
static void mnt_set_mountpoint_beneath(struct mount *new_parent,
				       struct mount *top_mnt,
				       struct mountpoint *new_mp)
{
	struct mount *old_top_parent = top_mnt->mnt_parent;
	struct mountpoint *old_top_mp = top_mnt->mnt_mp;

	mnt_set_mountpoint(old_top_parent, old_top_mp, new_parent);
	mnt_change_mountpoint(new_parent, new_mp, top_mnt);
}

static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/**
 * attach_mnt - mount a mount, attach to @mount_hashtable and parent's
 * list of child mounts
 * @parent: the parent
 * @mnt: the new mount
 * @mp: the new mountpoint
 * @beneath: whether to mount @mnt beneath or on top of @parent
 *
 * If @beneath is false, mount @mnt at @mp on @parent. Then attach @mnt
 * to @parent's child mount list and to @mount_hashtable.
 *
 * If @beneath is true, remove @mnt from its current parent and
 * mountpoint and mount it on @mp on @parent, and mount @parent on the
 * old parent and old mountpoint of @mnt. Finally, attach @parent to
 * @mnt_hashtable and @parent->mnt_parent->mnt_mounts.
 *
 * Note, when __attach_mnt() is called @mnt->mnt_parent already points
 * to the correct parent.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 * to have been acquired in that order.
 */
static void attach_mnt(struct mount *mnt, struct mount *parent,
		       struct mountpoint *mp, bool beneath)
{
	if (beneath)
		mnt_set_mountpoint_beneath(mnt, parent, mp);
	else
		mnt_set_mountpoint(parent, mp, mnt);
	/*
	 * Note, @mnt->mnt_parent has to be used. If @mnt was mounted
	 * beneath @parent then @mnt will need to be attached to
	 * @parent's old parent, not @parent. IOW, @mnt->mnt_parent
	 * isn't the same mount as @parent.
	 */
	__attach_mnt(mnt, mnt->mnt_parent);
}

void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp, false);

	put_mountpoint(old_mp);
	mnt_add_count(old_parent, -1);
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	n->mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock. If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source ?: "none");
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	atomic_inc(&fc->root->d_sb->s_active);
	mnt->mnt.mnt_sb = fc->root->d_sb;
	mnt->mnt.mnt_root = dget(fc->root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;

	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);

struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);

struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source",
					  name, strlen(name));
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
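
/*
 * A minimal caller-side sketch (assumed code, not from this file): an
 * in-kernel user mounting a filesystem by type, roughly the shape that
 * kern_mount()-style helpers build on. The mount is detached; release it
 * with mntput() (or the kern_unmount() family for long-term mounts).
 *
 *	struct vfsmount *mnt;
 *
 *	mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
 *	if (IS_ERR(mnt))
 *		return PTR_ERR(mnt);
 *	... use mnt ...
 */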

struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
			       int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));

	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void mntput_no_expire(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us. However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL. So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			__put_mountpoint(unhash_mnt(p), &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
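
/*
 * A minimal pairing sketch (not from this file): every mntget() must
 * eventually be balanced by an mntput() once the caller is done with
 * the mount:
 *
 *	struct vfsmount *m = mntget(path->mnt);
 *	... use m ...
 *	mntput(m);
 */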

/*
 * Make a mount point inaccessible to new lookups.
 * Because there may still be current users, the caller MUST WAIT
 * for an RCU grace period before destroying the mount point.
 */
void mnt_make_shortterm(struct vfsmount *mnt)
{
	if (mnt)
		real_mount(mnt)->mnt_ns = NULL;
}

/**
 * path_is_mountpoint() - Check if path is a mount in the current namespace.
 * @path: path to check
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);

struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}

#ifdef CONFIG_PROC_FS
static struct mount *mnt_list_next(struct mnt_namespace *ns,
				   struct list_head *p)
{
	struct mount *mnt, *ret = NULL;

	lock_ns_list(ns);
	list_for_each_continue(p, &ns->list) {
		mnt = list_entry(p, typeof(*mnt), mnt_list);
		if (!mnt_is_cursor(mnt)) {
			ret = mnt;
			break;
		}
	}
	unlock_ns_list(ns);

	return ret;
}

/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;
	struct list_head *prev;

	down_read(&namespace_sem);
	if (!*pos) {
		prev = &p->ns->list;
	} else {
		prev = &p->cursor.mnt_list;

		/* Read after we'd reached the end? */
		if (list_empty(prev))
			return NULL;
	}

	return mnt_list_next(p->ns, prev);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;
	struct mount *mnt = v;

	++*pos;
	return mnt_list_next(p->ns, &mnt->mnt_list);
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *mnt = v;

	lock_ns_list(p->ns);
	if (mnt)
		list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list);
	else
		list_del_init(&p->cursor.mnt_list);
	unlock_ns_list(p->ns);
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = v;
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};

void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor)
{
	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_del(&cursor->mnt_list);
	unlock_ns_list(ns);
	up_read(&namespace_sem);
}
#endif	/* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);

static void namespace_unlock(void)
{
	struct hlist_head head;
	struct hlist_node *p;
	struct mount *m;
	LIST_HEAD(list);

	hlist_move_list(&unmounted, &head);
	list_splice_init(&ex_mountpoints, &list);

	up_write(&namespace_sem);

	shrink_dentry_list(&list);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu_expedited();

	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}

/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
		if (disconnect)
			hlist_add_head(&p->mnt_umount, &unmounted);
	}
}

static void shrink_submounts(struct mount *mnt);

static int do_umount_root(struct super_block *sb)
{
	int ret = 0;

	down_write(&sb->s_umount);
	if (!sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
						SB_RDONLY);
		if (IS_ERR(fc)) {
			ret = PTR_ERR(fc);
		} else {
			ret = parse_monolithic_mount_data(fc, NULL);
			if (!ret)
				ret = reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
	return ret;
}

static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 * (1) the mark is already set (the mark is cleared by mntput())
	 * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
			return -EPERM;
		return do_umount_root(sb);
	}

	namespace_lock();
	lock_mount_hash();

	/* Recheck MNT_LOCKED with the locks held */
	retval = -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out;

	event++;
	if (flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (!list_empty(&mnt->mnt_list))
				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
out:
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}

/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	lock_mount_hash();
	mp = lookup_mountpoint(dentry);
	if (!mp)
		goto out_unlock;

	event++;
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			umount_mnt(mnt);
			hlist_add_head(&mnt->mnt_umount, &unmounted);
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	put_mountpoint(mp);
out_unlock:
	unlock_mount_hash();
	namespace_unlock();
}

/*
 * Is the caller allowed to modify his namespace?
 */
bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

/**
 * path_mounted - check whether path is mounted
 * @path: path to check
 *
 * Determine whether @path refers to the root of a mount.
 *
 * Return: true if @path is the root of a mount, false if not.
 */
static inline bool path_mounted(const struct path *path)
{
	return path->mnt->mnt_root == path->dentry;
}

static void warn_mandlock(void)
{
	pr_warn_once("=======================================================\n"
		     "WARNING: The mand mount option has been deprecated and\n"
		     "         is ignored by this kernel. Remove the mand\n"
		     "         option from the mount to silence this warning.\n"
		     "=======================================================\n");
}

static int can_umount(const struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);

	if (!may_mount())
		return -EPERM;
	if (!path_mounted(path))
		return -EINVAL;
	if (!check_mnt(mnt))
		return -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
		return -EINVAL;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

// caller is responsible for flags being sane
int path_umount(struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);
	int ret;

	ret = can_umount(path, flags);
	if (!ret)
		ret = do_umount(mnt, flags);

	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path->dentry);
	mntput_no_expire(mnt);
	return ret;
}

static int ksys_umount(char __user *name, int flags)
{
	int lookup_flags = LOOKUP_MOUNTPOINT;
	struct path path;
	int ret;

	// basic validity checks done first
	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
	if (ret)
		return ret;
	return path_umount(&path, flags);
}

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	return ksys_umount(name, flags);
}
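
/*
 * A userspace sketch of exercising the syscall above (illustrative, via the
 * glibc umount2(2) wrapper; MNT_DETACH requests the lazy-unmount path handled
 * in do_umount()):
 *
 *	#include <sys/mount.h>
 *
 *	if (umount2("/mnt", MNT_DETACH) < 0)
 *		perror("umount2");
 */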

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return ksys_umount(name, 0);
}

#endif

static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	return dentry->d_op == &ns_dentry_operations &&
	       dentry->d_fsdata == &mntns_operations;
}

static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
	return container_of(ns, struct mnt_namespace, ns);
}

struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
{
	return &mnt->ns;
}

static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}

struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
			int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				if (s->mnt.mnt_flags & MNT_LOCKED) {
					/* Both unbindable and locked. */
					q = ERR_PTR(-EPERM);
					goto out;
				} else {
					s = skip_mnt_tree(s);
					continue;
				}
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, parent, p->mnt_mp, false);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return q;
}

/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(const struct path *path)
{
	struct mount *tree;
	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}

static void free_mnt_ns(struct mnt_namespace *);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);

void dissolve_on_fput(struct vfsmount *mnt)
{
	struct mnt_namespace *ns;
	namespace_lock();
	lock_mount_hash();
	ns = real_mount(mnt)->mnt_ns;
	if (ns) {
		if (is_anon_ns(ns))
			umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
		else
			ns = NULL;
	}
	unlock_mount_hash();
	namespace_unlock();
	if (ns)
		free_mnt_ns(ns);
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), 0);
	unlock_mount_hash();
	namespace_unlock();
}

static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;

	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}

/**
 * clone_private_mount - create a private clone of a path
 * @path: path to clone
 *
 * This creates a new vfsmount, which will be the clone of @path. The new mount
 * will not be attached anywhere in the namespace and will be private (i.e.
 * changes to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(const struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	down_read(&namespace_sem);
	if (IS_MNT_UNBINDABLE(old_mnt))
		goto invalid;

	if (!check_mnt(old_mnt))
		goto invalid;

	if (has_locked_children(old_mnt, path->dentry))
		goto invalid;

	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	up_read(&namespace_sem);

	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	/* Longterm mount to be removed by kern_unmount*() */
2125 new_mnt->mnt_ns = MNT_NS_INTERNAL;
2126
2127 return &new_mnt->mnt;
2128
2129 invalid:
2130 up_read(&namespace_sem);
2131 return ERR_PTR(-EINVAL);
2132 }
2133 EXPORT_SYMBOL_GPL(clone_private_mount);
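/*
 * Minimal in-kernel usage sketch for the export above (hypothetical
 * caller, error handling trimmed): filesystems such as overlayfs clone
 * a private copy of a user-supplied path and, because of the
 * MNT_NS_INTERNAL marking, must drop it with kern_unmount() rather
 * than a bare mntput().
 *
 *	struct vfsmount *m = clone_private_mount(&path);
 *
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	...use m; later changes to the source mount are not seen...
 *	kern_unmount(m);
 */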
2134
2135 int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
2136 struct vfsmount *root)
2137 {
2138 struct mount *mnt;
2139 int res = f(root, arg);
2140 if (res)
2141 return res;
2142 list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
2143 res = f(&mnt->mnt, arg);
2144 if (res)
2145 return res;
2146 }
2147 return 0;
2148 }
2149
2150 static void lock_mnt_tree(struct mount *mnt)
2151 {
2152 struct mount *p;
2153
2154 for (p = mnt; p; p = next_mnt(p, mnt)) {
2155 int flags = p->mnt.mnt_flags;
2156 /* Don't allow unprivileged users to change mount flags */
2157 flags |= MNT_LOCK_ATIME;
2158
2159 if (flags & MNT_READONLY)
2160 flags |= MNT_LOCK_READONLY;
2161
2162 if (flags & MNT_NODEV)
2163 flags |= MNT_LOCK_NODEV;
2164
2165 if (flags & MNT_NOSUID)
2166 flags |= MNT_LOCK_NOSUID;
2167
2168 if (flags & MNT_NOEXEC)
2169 flags |= MNT_LOCK_NOEXEC;
2170 /* Don't allow unprivileged users to reveal what is under a mount */
2171 if (list_empty(&p->mnt_expire))
2172 flags |= MNT_LOCKED;
2173 p->mnt.mnt_flags = flags;
2174 }
2175 }
2176
2177 static void cleanup_group_ids(struct mount *mnt, struct mount *end)
2178 {
2179 struct mount *p;
2180
2181 for (p = mnt; p != end; p = next_mnt(p, mnt)) {
2182 if (p->mnt_group_id && !IS_MNT_SHARED(p))
2183 mnt_release_group_id(p);
2184 }
2185 }
2186
2187 static int invent_group_ids(struct mount *mnt, bool recurse)
2188 {
2189 struct mount *p;
2190
2191 for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
2192 if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
2193 int err = mnt_alloc_group_id(p);
2194 if (err) {
2195 cleanup_group_ids(mnt, p);
2196 return err;
2197 }
2198 }
2199 }
2200
2201 return 0;
2202 }
2203
2204 int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
2205 {
2206 unsigned int max = READ_ONCE(sysctl_mount_max);
2207 unsigned int mounts = 0;
2208 struct mount *p;
2209
2210 if (ns->mounts >= max)
2211 return -ENOSPC;
2212 max -= ns->mounts;
2213 if (ns->pending_mounts >= max)
2214 return -ENOSPC;
2215 max -= ns->pending_mounts;
2216
2217 for (p = mnt; p; p = next_mnt(p, mnt))
2218 mounts++;
2219
2220 if (mounts > max)
2221 return -ENOSPC;
2222
2223 ns->pending_mounts += mounts;
2224 return 0;
2225 }
2226
2227 enum mnt_tree_flags_t {
2228 MNT_TREE_MOVE = BIT(0),
2229 MNT_TREE_BENEATH = BIT(1),
2230 };
2231
2232 /**
2233 * attach_recursive_mnt - attach a source mount tree
2234 * @source_mnt: mount tree to be attached
2235 * @top_mnt: mount that @source_mnt will be mounted on or mounted beneath
2236 * @dest_mp: the mountpoint @source_mnt will be mounted at
2237 * @flags: modify how @source_mnt is supposed to be attached
2238 *
2239 * NOTE: the table below explains the semantics when a source mount
2240 * of a given type is attached to a destination mount of a given type.
2241 * ---------------------------------------------------------------------------
2242 * | BIND MOUNT OPERATION |
2243 * |**************************************************************************
2244 * | source-->| shared | private | slave | unbindable |
2245 * | dest | | | | |
2246 * | | | | | | |
2247 * | v | | | | |
2248 * |**************************************************************************
2249 * | shared | shared (++) | shared (+) | shared(+++)| invalid |
2250 * | | | | | |
2251 * |non-shared| shared (+) | private | slave (*) | invalid |
2252 * ***************************************************************************
2253 * A bind operation clones the source mount and mounts the clone on the
2254 * destination mount.
2255 *
2256 * (++) the cloned mount is propagated to all the mounts in the propagation
2257 * tree of the destination mount and the cloned mount is added to
2258 * the peer group of the source mount.
2259 * (+) the cloned mount is created under the destination mount and is marked
2260 * as shared. The cloned mount is added to the peer group of the source
2261 * mount.
2262 * (+++) the mount is propagated to all the mounts in the propagation tree
2263 * of the destination mount and the cloned mount is made slave
2264 * of the same master as that of the source mount. The cloned mount
2265 * is marked as 'shared and slave'.
2266 * (*) the cloned mount is made a slave of the same master as that of the
2267 * source mount.
2268 *
2269 * ---------------------------------------------------------------------------
2270 * | MOVE MOUNT OPERATION |
2271 * |**************************************************************************
2272 * | source-->| shared | private | slave | unbindable |
2273 * | dest | | | | |
2274 * | | | | | | |
2275 * | v | | | | |
2276 * |**************************************************************************
2277 * | shared | shared (+) | shared (+) | shared(+++) | invalid |
2278 * | | | | | |
2279 * |non-shared| shared (+*) | private | slave (*) | unbindable |
2280 * ***************************************************************************
2281 *
2282 * (+) the mount is moved to the destination. And is then propagated to
2283 * all the mounts in the propagation tree of the destination mount.
2284 * (+*) the mount is moved to the destination.
2285 * (+++) the mount is moved to the destination and is then propagated to
2286 * all the mounts belonging to the destination mount's propagation tree.
2287 * the mount is marked as 'shared and slave'.
2288 * (*) the mount continues to be a slave at the new location.
2289 *
2290 * if the source mount is a tree, the operations explained above are
2291 * applied to each mount in the tree.
2292 * Must be called without spinlocks held, since this function can sleep
2293 * in allocations.
2294 *
2295 * Context: The function expects namespace_lock() to be held.
2296 * Return: If @source_mnt was successfully attached 0 is returned.
2297 * Otherwise a negative error code is returned.
2298 */
2299 static int attach_recursive_mnt(struct mount *source_mnt,
2300 struct mount *top_mnt,
2301 struct mountpoint *dest_mp,
2302 enum mnt_tree_flags_t flags)
2303 {
2304 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2305 HLIST_HEAD(tree_list);
2306 struct mnt_namespace *ns = top_mnt->mnt_ns;
2307 struct mountpoint *smp;
2308 struct mount *child, *dest_mnt, *p;
2309 struct hlist_node *n;
2310 int err = 0;
2311 bool moving = flags & MNT_TREE_MOVE, beneath = flags & MNT_TREE_BENEATH;
2312
2313 /*
2314 * Preallocate a mountpoint in case the new mounts need to be
2315 * mounted beneath mounts on the same mountpoint.
2316 */
2317 smp = get_mountpoint(source_mnt->mnt.mnt_root);
2318 if (IS_ERR(smp))
2319 return PTR_ERR(smp);
2320
2321 /* Is there space to add these mounts to the mount namespace? */
2322 if (!moving) {
2323 err = count_mounts(ns, source_mnt);
2324 if (err)
2325 goto out;
2326 }
2327
2328 if (beneath)
2329 dest_mnt = top_mnt->mnt_parent;
2330 else
2331 dest_mnt = top_mnt;
2332
2333 if (IS_MNT_SHARED(dest_mnt)) {
2334 err = invent_group_ids(source_mnt, true);
2335 if (err)
2336 goto out;
2337 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
2338 }
2339 lock_mount_hash();
2340 if (err)
2341 goto out_cleanup_ids;
2342
2343 if (IS_MNT_SHARED(dest_mnt)) {
2344 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
2345 set_mnt_shared(p);
2346 }
2347
2348 if (moving) {
2349 if (beneath)
2350 dest_mp = smp;
2351 unhash_mnt(source_mnt);
2352 attach_mnt(source_mnt, top_mnt, dest_mp, beneath);
2353 touch_mnt_namespace(source_mnt->mnt_ns);
2354 } else {
2355 if (source_mnt->mnt_ns) {
2356 /* move from anon - the caller will destroy */
2357 list_del_init(&source_mnt->mnt_ns->list);
2358 }
2359 if (beneath)
2360 mnt_set_mountpoint_beneath(source_mnt, top_mnt, smp);
2361 else
2362 mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
2363 commit_tree(source_mnt);
2364 }
2365
2366 hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
2367 struct mount *q;
2368 hlist_del_init(&child->mnt_hash);
2369 q = __lookup_mnt(&child->mnt_parent->mnt,
2370 child->mnt_mountpoint);
2371 if (q)
2372 mnt_change_mountpoint(child, smp, q);
2373 /* Notice when we are propagating across user namespaces */
2374 if (child->mnt_parent->mnt_ns->user_ns != user_ns)
2375 lock_mnt_tree(child);
2376 child->mnt.mnt_flags &= ~MNT_LOCKED;
2377 commit_tree(child);
2378 }
2379 put_mountpoint(smp);
2380 unlock_mount_hash();
2381
2382 return 0;
2383
2384 out_cleanup_ids:
2385 while (!hlist_empty(&tree_list)) {
2386 child = hlist_entry(tree_list.first, struct mount, mnt_hash);
2387 child->mnt_parent->mnt_ns->pending_mounts = 0;
2388 umount_tree(child, UMOUNT_SYNC);
2389 }
2390 unlock_mount_hash();
2391 cleanup_group_ids(source_mnt, NULL);
2392 out:
2393 ns->pending_mounts = 0;
2394
2395 read_seqlock_excl(&mount_lock);
2396 put_mountpoint(smp);
2397 read_sequnlock_excl(&mount_lock);
2398
2399 return err;
2400 }
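/*
 * Userspace sketch of the "shared dest, private source" row of the bind
 * table above (paths illustrative): once the destination is marked
 * shared, a bind mounted on top of it is propagated by this function to
 * every peer of the destination.
 *
 *	#include <sys/mount.h>
 *
 *	mount(NULL, "/mnt/dest", NULL, MS_SHARED, NULL);
 *	// ...peers of /mnt/dest come into being, e.g. when the mount
 *	// namespace is cloned...
 *	mount("/srv/src", "/mnt/dest/sub", NULL, MS_BIND, NULL);
 *	// the bind now also appears at sub/ under every peer
 */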
2401
2402 /**
2403 * do_lock_mount - lock mount and mountpoint
2404 * @path: target path
2405 * @beneath: whether the intention is to mount beneath @path
2406 *
2407 * Follow the mount stack on @path until the top mount @mnt is found. If
2408 * the initial @path->{mnt,dentry} is a mountpoint, look up the first
2409 * mount stacked on top of it. Then simply follow @{mnt,mnt->mnt_root}
2410 * until nothing is stacked on top of it anymore.
2411 *
2412 * Acquire the inode_lock() on the top mount's ->mnt_root to protect
2413 * against concurrent removal of the new mountpoint from another mount
2414 * namespace.
2415 *
2416 * If @beneath is requested, the inode_lock() on @mnt's mountpoint
2417 * @mp on @mnt->mnt_parent must be acquired instead. This protects
2418 * against a concurrent unlink of @mp->m_dentry from another mount
2419 * namespace where @mnt doesn't have a child mount mounted on @mp. A
2420 * concurrent removal of @mnt->mnt_root doesn't matter as nothing will
2421 * be mounted on top of it for @beneath.
2422 *
2423 * In addition, @beneath needs to make sure that @mnt hasn't been
2424 * unmounted or moved from its current mountpoint in between dropping
2425 * @mount_lock and acquiring @namespace_sem. For the !@beneath case @mnt
2426 * being unmounted would be detected later, e.g. by a check_mnt(mnt)
2427 * call in the caller. For the @beneath
2428 * case however, it's useful to detect it directly in do_lock_mount().
2429 * If @mnt hasn't been unmounted then @mnt->mnt_mountpoint still points
2430 * to @mnt->mnt_mp->m_dentry. But if @mnt has been unmounted it will
2431 * point to @mnt->mnt_root and @mnt->mnt_mp will be NULL.
2432 *
2433 * Return: Either the target mountpoint on the top mount or the top
2434 * mount's mountpoint.
2435 */
2436 static struct mountpoint *do_lock_mount(struct path *path, bool beneath)
2437 {
2438 struct vfsmount *mnt = path->mnt;
2439 struct dentry *dentry;
2440 struct mountpoint *mp = ERR_PTR(-ENOENT);
2441
2442 for (;;) {
2443 struct mount *m;
2444
2445 if (beneath) {
2446 m = real_mount(mnt);
2447 read_seqlock_excl(&mount_lock);
2448 dentry = dget(m->mnt_mountpoint);
2449 read_sequnlock_excl(&mount_lock);
2450 } else {
2451 dentry = path->dentry;
2452 }
2453
2454 inode_lock(dentry->d_inode);
2455 if (unlikely(cant_mount(dentry))) {
2456 inode_unlock(dentry->d_inode);
2457 goto out;
2458 }
2459
2460 namespace_lock();
2461
2462 if (beneath && (!is_mounted(mnt) || m->mnt_mountpoint != dentry)) {
2463 namespace_unlock();
2464 inode_unlock(dentry->d_inode);
2465 goto out;
2466 }
2467
2468 mnt = lookup_mnt(path);
2469 if (likely(!mnt))
2470 break;
2471
2472 namespace_unlock();
2473 inode_unlock(dentry->d_inode);
2474 if (beneath)
2475 dput(dentry);
2476 path_put(path);
2477 path->mnt = mnt;
2478 path->dentry = dget(mnt->mnt_root);
2479 }
2480
2481 mp = get_mountpoint(dentry);
2482 if (IS_ERR(mp)) {
2483 namespace_unlock();
2484 inode_unlock(dentry->d_inode);
2485 }
2486
2487 out:
2488 if (beneath)
2489 dput(dentry);
2490
2491 return mp;
2492 }
2493
2494 static inline struct mountpoint *lock_mount(struct path *path)
2495 {
2496 return do_lock_mount(path, false);
2497 }
2498
2499 static void unlock_mount(struct mountpoint *where)
2500 {
2501 struct dentry *dentry = where->m_dentry;
2502
2503 read_seqlock_excl(&mount_lock);
2504 put_mountpoint(where);
2505 read_sequnlock_excl(&mount_lock);
2506
2507 namespace_unlock();
2508 inode_unlock(dentry->d_inode);
2509 }
2510
2511 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
2512 {
2513 if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
2514 return -EINVAL;
2515
2516 if (d_is_dir(mp->m_dentry) !=
2517 d_is_dir(mnt->mnt.mnt_root))
2518 return -ENOTDIR;
2519
2520 return attach_recursive_mnt(mnt, p, mp, 0);
2521 }
2522
2523 /*
2524 * Sanity check the flags to change_mnt_propagation.
2525 */
2526
2527 static int flags_to_propagation_type(int ms_flags)
2528 {
2529 int type = ms_flags & ~(MS_REC | MS_SILENT);
2530
2531 /* Fail if any non-propagation flags are set */
2532 if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2533 return 0;
2534 /* Only one propagation flag should be set */
2535 if (!is_power_of_2(type))
2536 return 0;
2537 return type;
2538 }
2539
2540 /*
2541 * recursively change the propagation type of a mount.
2542 */
2543 static int do_change_type(struct path *path, int ms_flags)
2544 {
2545 struct mount *m;
2546 struct mount *mnt = real_mount(path->mnt);
2547 int recurse = ms_flags & MS_REC;
2548 int type;
2549 int err = 0;
2550
2551 if (!path_mounted(path))
2552 return -EINVAL;
2553
2554 type = flags_to_propagation_type(ms_flags);
2555 if (!type)
2556 return -EINVAL;
2557
2558 namespace_lock();
2559 if (type == MS_SHARED) {
2560 err = invent_group_ids(mnt, recurse);
2561 if (err)
2562 goto out_unlock;
2563 }
2564
2565 lock_mount_hash();
2566 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
2567 change_mnt_propagation(m, type);
2568 unlock_mount_hash();
2569
2570 out_unlock:
2571 namespace_unlock();
2572 return err;
2573 }
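/*
 * Userspace sketch reaching do_change_type() above (target path is an
 * example): the equivalent of "mount --make-rshared /", i.e. MS_SHARED
 * applied recursively, as container runtimes typically do before
 * setting up propagated binds.
 *
 *	#include <sys/mount.h>
 *
 *	mount(NULL, "/", NULL, MS_SHARED | MS_REC, NULL);
 */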
2574
2575 static struct mount *__do_loopback(struct path *old_path, int recurse)
2576 {
2577 struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
2578
2579 if (IS_MNT_UNBINDABLE(old))
2580 return mnt;
2581
2582 if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations)
2583 return mnt;
2584
2585 if (!recurse && has_locked_children(old, old_path->dentry))
2586 return mnt;
2587
2588 if (recurse)
2589 mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
2590 else
2591 mnt = clone_mnt(old, old_path->dentry, 0);
2592
2593 if (!IS_ERR(mnt))
2594 mnt->mnt.mnt_flags &= ~MNT_LOCKED;
2595
2596 return mnt;
2597 }
2598
2599 /*
2600 * do loopback mount.
2601 */
2602 static int do_loopback(struct path *path, const char *old_name,
2603 int recurse)
2604 {
2605 struct path old_path;
2606 struct mount *mnt = NULL, *parent;
2607 struct mountpoint *mp;
2608 int err;
2609 if (!old_name || !*old_name)
2610 return -EINVAL;
2611 err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
2612 if (err)
2613 return err;
2614
2615 err = -EINVAL;
2616 if (mnt_ns_loop(old_path.dentry))
2617 goto out;
2618
2619 mp = lock_mount(path);
2620 if (IS_ERR(mp)) {
2621 err = PTR_ERR(mp);
2622 goto out;
2623 }
2624
2625 parent = real_mount(path->mnt);
2626 if (!check_mnt(parent))
2627 goto out2;
2628
2629 mnt = __do_loopback(&old_path, recurse);
2630 if (IS_ERR(mnt)) {
2631 err = PTR_ERR(mnt);
2632 goto out2;
2633 }
2634
2635 err = graft_tree(mnt, parent, mp);
2636 if (err) {
2637 lock_mount_hash();
2638 umount_tree(mnt, UMOUNT_SYNC);
2639 unlock_mount_hash();
2640 }
2641 out2:
2642 unlock_mount(mp);
2643 out:
2644 path_put(&old_path);
2645 return err;
2646 }
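/*
 * Userspace sketch of a recursive bind reaching do_loopback() with
 * recurse set (paths illustrative): MS_REC makes __do_loopback() take
 * the copy_tree() branch, so submounts of the source come along.
 *
 *	#include <sys/mount.h>
 *
 *	mount("/home", "/mnt/home-view", NULL, MS_BIND | MS_REC, NULL);
 */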
2647
2648 static struct file *open_detached_copy(struct path *path, bool recursive)
2649 {
2650 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2651 struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true);
2652 struct mount *mnt, *p;
2653 struct file *file;
2654
2655 if (IS_ERR(ns))
2656 return ERR_CAST(ns);
2657
2658 namespace_lock();
2659 mnt = __do_loopback(path, recursive);
2660 if (IS_ERR(mnt)) {
2661 namespace_unlock();
2662 free_mnt_ns(ns);
2663 return ERR_CAST(mnt);
2664 }
2665
2666 lock_mount_hash();
2667 for (p = mnt; p; p = next_mnt(p, mnt)) {
2668 p->mnt_ns = ns;
2669 ns->mounts++;
2670 }
2671 ns->root = mnt;
2672 list_add_tail(&ns->list, &mnt->mnt_list);
2673 mntget(&mnt->mnt);
2674 unlock_mount_hash();
2675 namespace_unlock();
2676
2677 mntput(path->mnt);
2678 path->mnt = &mnt->mnt;
2679 file = dentry_open(path, O_PATH, current_cred());
2680 if (IS_ERR(file))
2681 dissolve_on_fput(path->mnt);
2682 else
2683 file->f_mode |= FMODE_NEED_UNMOUNT;
2684 return file;
2685 }
2686
2687 SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
2688 {
2689 struct file *file;
2690 struct path path;
2691 int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
2692 bool detached = flags & OPEN_TREE_CLONE;
2693 int error;
2694 int fd;
2695
2696 BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC);
2697
2698 if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE |
2699 AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE |
2700 OPEN_TREE_CLOEXEC))
2701 return -EINVAL;
2702
2703 if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE)
2704 return -EINVAL;
2705
2706 if (flags & AT_NO_AUTOMOUNT)
2707 lookup_flags &= ~LOOKUP_AUTOMOUNT;
2708 if (flags & AT_SYMLINK_NOFOLLOW)
2709 lookup_flags &= ~LOOKUP_FOLLOW;
2710 if (flags & AT_EMPTY_PATH)
2711 lookup_flags |= LOOKUP_EMPTY;
2712
2713 if (detached && !may_mount())
2714 return -EPERM;
2715
2716 fd = get_unused_fd_flags(flags & O_CLOEXEC);
2717 if (fd < 0)
2718 return fd;
2719
2720 error = user_path_at(dfd, filename, lookup_flags, &path);
2721 if (unlikely(error)) {
2722 file = ERR_PTR(error);
2723 } else {
2724 if (detached)
2725 file = open_detached_copy(&path, flags & AT_RECURSIVE);
2726 else
2727 file = dentry_open(&path, O_PATH, current_cred());
2728 path_put(&path);
2729 }
2730 if (IS_ERR(file)) {
2731 put_unused_fd(fd);
2732 return PTR_ERR(file);
2733 }
2734 fd_install(fd, file);
2735 return fd;
2736 }
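/*
 * Userspace sketch of the syscall above (illustrative; assumes raw
 * syscall numbers and that AT_RECURSIVE plus the OPEN_TREE_* and
 * MOVE_MOUNT_* constants are visible via the uapi headers, as libc
 * wrappers may be missing): take a detached recursive copy of a
 * subtree, then attach it elsewhere with move_mount(2).
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/mount.h>
 *
 *	int fd = syscall(SYS_open_tree, AT_FDCWD, "/srv/tree",
 *			 OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_RECURSIVE);
 *	syscall(SYS_move_mount, fd, "", AT_FDCWD, "/mnt/copy",
 *		MOVE_MOUNT_F_EMPTY_PATH);
 *	close(fd);
 */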
2737
2738 /*
2739 * Don't allow locked mount flags to be cleared.
2740 *
2741 * No locks need to be held here while testing the various MNT_LOCK
2742 * flags because those flags can never be cleared once they are set.
2743 */
2744 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
2745 {
2746 unsigned int fl = mnt->mnt.mnt_flags;
2747
2748 if ((fl & MNT_LOCK_READONLY) &&
2749 !(mnt_flags & MNT_READONLY))
2750 return false;
2751
2752 if ((fl & MNT_LOCK_NODEV) &&
2753 !(mnt_flags & MNT_NODEV))
2754 return false;
2755
2756 if ((fl & MNT_LOCK_NOSUID) &&
2757 !(mnt_flags & MNT_NOSUID))
2758 return false;
2759
2760 if ((fl & MNT_LOCK_NOEXEC) &&
2761 !(mnt_flags & MNT_NOEXEC))
2762 return false;
2763
2764 if ((fl & MNT_LOCK_ATIME) &&
2765 ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
2766 return false;
2767
2768 return true;
2769 }
2770
2771 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
2772 {
2773 bool readonly_request = (mnt_flags & MNT_READONLY);
2774
2775 if (readonly_request == __mnt_is_readonly(&mnt->mnt))
2776 return 0;
2777
2778 if (readonly_request)
2779 return mnt_make_readonly(mnt);
2780
2781 mnt->mnt.mnt_flags &= ~MNT_READONLY;
2782 return 0;
2783 }
2784
2785 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
2786 {
2787 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
2788 mnt->mnt.mnt_flags = mnt_flags;
2789 touch_mnt_namespace(mnt->mnt_ns);
2790 }
2791
2792 static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
2793 {
2794 struct super_block *sb = mnt->mnt_sb;
2795
2796 if (!__mnt_is_readonly(mnt) &&
2797 (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) &&
2798 (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
2799 char *buf = (char *)__get_free_page(GFP_KERNEL);
2800 char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM);
2801
2802 pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n",
2803 sb->s_type->name,
2804 is_mounted(mnt) ? "remounted" : "mounted",
2805 mntpath, &sb->s_time_max,
2806 (unsigned long long)sb->s_time_max);
2807
2808 free_page((unsigned long)buf);
2809 sb->s_iflags |= SB_I_TS_EXPIRY_WARNED;
2810 }
2811 }
2812
2813 /*
2814 * Handle reconfiguration of the mountpoint only without alteration of the
2815 * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND
2816 * to mount(2).
2817 */
2818 static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
2819 {
2820 struct super_block *sb = path->mnt->mnt_sb;
2821 struct mount *mnt = real_mount(path->mnt);
2822 int ret;
2823
2824 if (!check_mnt(mnt))
2825 return -EINVAL;
2826
2827 if (!path_mounted(path))
2828 return -EINVAL;
2829
2830 if (!can_change_locked_flags(mnt, mnt_flags))
2831 return -EPERM;
2832
2833 /*
2834 * We're only checking whether the superblock is read-only not
2835 * changing it, so only take down_read(&sb->s_umount).
2836 */
2837 down_read(&sb->s_umount);
2838 lock_mount_hash();
2839 ret = change_mount_ro_state(mnt, mnt_flags);
2840 if (ret == 0)
2841 set_mount_attributes(mnt, mnt_flags);
2842 unlock_mount_hash();
2843 up_read(&sb->s_umount);
2844
2845 mnt_warn_timestamp_expiry(path, &mnt->mnt);
2846
2847 return ret;
2848 }
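/*
 * Userspace sketch triggering the path above (target illustrative):
 * MS_REMOUNT|MS_BIND reconfigures only the per-mountpoint flags, here
 * making a single bind mount read-only without touching the shared
 * superblock.
 *
 *	#include <sys/mount.h>
 *
 *	mount(NULL, "/mnt/ro-view", NULL,
 *	      MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
 */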
2849
2850 /*
2851 * change filesystem flags. dir should be a physical root of filesystem.
2852 * If you've mounted a non-root directory somewhere and want to do remount
2853 * on it - tough luck.
2854 */
2855 static int do_remount(struct path *path, int ms_flags, int sb_flags,
2856 int mnt_flags, void *data)
2857 {
2858 int err;
2859 struct super_block *sb = path->mnt->mnt_sb;
2860 struct mount *mnt = real_mount(path->mnt);
2861 struct fs_context *fc;
2862
2863 if (!check_mnt(mnt))
2864 return -EINVAL;
2865
2866 if (!path_mounted(path))
2867 return -EINVAL;
2868
2869 if (!can_change_locked_flags(mnt, mnt_flags))
2870 return -EPERM;
2871
2872 fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK);
2873 if (IS_ERR(fc))
2874 return PTR_ERR(fc);
2875
2876 fc->oldapi = true;
2877 err = parse_monolithic_mount_data(fc, data);
2878 if (!err) {
2879 down_write(&sb->s_umount);
2880 err = -EPERM;
2881 if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
2882 err = reconfigure_super(fc);
2883 if (!err) {
2884 lock_mount_hash();
2885 set_mount_attributes(mnt, mnt_flags);
2886 unlock_mount_hash();
2887 }
2888 }
2889 up_write(&sb->s_umount);
2890 }
2891
2892 mnt_warn_timestamp_expiry(path, &mnt->mnt);
2893
2894 put_fs_context(fc);
2895 return err;
2896 }
2897
2898 static inline int tree_contains_unbindable(struct mount *mnt)
2899 {
2900 struct mount *p;
2901 for (p = mnt; p; p = next_mnt(p, mnt)) {
2902 if (IS_MNT_UNBINDABLE(p))
2903 return 1;
2904 }
2905 return 0;
2906 }
2907
2908 /*
2909 * Check that there aren't references to earlier/same mount namespaces in the
2910 * specified subtree. Such references can act as pins for mount namespaces
2911 * that aren't checked by the mount-cycle checking code, thereby allowing
2912 * cycles to be made.
2913 */
2914 static bool check_for_nsfs_mounts(struct mount *subtree)
2915 {
2916 struct mount *p;
2917 bool ret = false;
2918
2919 lock_mount_hash();
2920 for (p = subtree; p; p = next_mnt(p, subtree))
2921 if (mnt_ns_loop(p->mnt.mnt_root))
2922 goto out;
2923
2924 ret = true;
2925 out:
2926 unlock_mount_hash();
2927 return ret;
2928 }
2929
2930 static int do_set_group(struct path *from_path, struct path *to_path)
2931 {
2932 struct mount *from, *to;
2933 int err;
2934
2935 from = real_mount(from_path->mnt);
2936 to = real_mount(to_path->mnt);
2937
2938 namespace_lock();
2939
2940 err = -EINVAL;
2941 /* To and From must be mounted */
2942 if (!is_mounted(&from->mnt))
2943 goto out;
2944 if (!is_mounted(&to->mnt))
2945 goto out;
2946
2947 err = -EPERM;
2948 /* We should be allowed to modify mount namespaces of both mounts */
2949 if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN))
2950 goto out;
2951 if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN))
2952 goto out;
2953
2954 err = -EINVAL;
2955 /* To and From paths should be mount roots */
2956 if (!path_mounted(from_path))
2957 goto out;
2958 if (!path_mounted(to_path))
2959 goto out;
2960
2961 /* Setting sharing groups is only allowed across same superblock */
2962 if (from->mnt.mnt_sb != to->mnt.mnt_sb)
2963 goto out;
2964
2965 /* From mount root should be wider than To mount root */
2966 if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root))
2967 goto out;
2968
2969 /* From mount should not have locked children in place of To's root */
2970 if (has_locked_children(from, to->mnt.mnt_root))
2971 goto out;
2972
2973 /* Setting sharing groups is only allowed on private mounts */
2974 if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to))
2975 goto out;
2976
2977 /* From should not be private */
2978 if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from))
2979 goto out;
2980
2981 if (IS_MNT_SLAVE(from)) {
2982 struct mount *m = from->mnt_master;
2983
2984 list_add(&to->mnt_slave, &m->mnt_slave_list);
2985 to->mnt_master = m;
2986 }
2987
2988 if (IS_MNT_SHARED(from)) {
2989 to->mnt_group_id = from->mnt_group_id;
2990 list_add(&to->mnt_share, &from->mnt_share);
2991 lock_mount_hash();
2992 set_mnt_shared(to);
2993 unlock_mount_hash();
2994 }
2995
2996 err = 0;
2997 out:
2998 namespace_unlock();
2999 return err;
3000 }
3001
3002 /**
3003 * path_overmounted - check if path is overmounted
3004 * @path: path to check
3005 *
3006 * Check if path is overmounted, i.e., if there's a mount on top of
3007 * @path->mnt with @path->dentry as mountpoint.
3008 *
3009 * Context: This function expects namespace_lock() to be held.
3010 * Return: If path is overmounted true is returned, false if not.
3011 */
3012 static inline bool path_overmounted(const struct path *path)
3013 {
3014 rcu_read_lock();
3015 if (unlikely(__lookup_mnt(path->mnt, path->dentry))) {
3016 rcu_read_unlock();
3017 return true;
3018 }
3019 rcu_read_unlock();
3020 return false;
3021 }
3022
3023 /**
3024 * can_move_mount_beneath - check that we can mount beneath the top mount
3025 * @from: mount to mount beneath
3026 * @to: mount under which to mount
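 * @mp: mountpoint of @to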
3027 *
3028 * - Make sure that @to->dentry is actually the root of a mount under
3029 * which we can mount another mount.
3030 * - Make sure that nothing can be mounted beneath the caller's current
3031 * root or the rootfs of the namespace.
3032 * - Make sure that the caller can unmount the topmost mount ensuring
3033 * that the caller could reveal the underlying mountpoint.
3034 * - Ensure that nothing has been mounted on top of @from before we
3035 * grabbed @namespace_sem to avoid creating pointless shadow mounts.
3036 * - Prevent mounting beneath a mount if the propagation relationship
3037 * between the source mount, parent mount, and top mount would lead to
3038 * nonsensical mount trees.
3039 *
3040 * Context: This function expects namespace_lock() to be held.
3041 * Return: On success 0, and on error a negative error code is returned.
3042 */
3043 static int can_move_mount_beneath(const struct path *from,
3044 const struct path *to,
3045 const struct mountpoint *mp)
3046 {
3047 struct mount *mnt_from = real_mount(from->mnt),
3048 *mnt_to = real_mount(to->mnt),
3049 *parent_mnt_to = mnt_to->mnt_parent;
3050
3051 if (!mnt_has_parent(mnt_to))
3052 return -EINVAL;
3053
3054 if (!path_mounted(to))
3055 return -EINVAL;
3056
3057 if (IS_MNT_LOCKED(mnt_to))
3058 return -EINVAL;
3059
3060 /* Avoid creating shadow mounts during mount propagation. */
3061 if (path_overmounted(from))
3062 return -EINVAL;
3063
3064 /*
3065 * Mounting beneath the rootfs only makes sense when the
3066 * semantics of pivot_root(".", ".") are used.
3067 */
3068 if (&mnt_to->mnt == current->fs->root.mnt)
3069 return -EINVAL;
3070 if (parent_mnt_to == current->nsproxy->mnt_ns->root)
3071 return -EINVAL;
3072
3073 for (struct mount *p = mnt_from; mnt_has_parent(p); p = p->mnt_parent)
3074 if (p == mnt_to)
3075 return -EINVAL;
3076
3077 /*
3078 * If the parent mount propagates to the child mount this would
3079 * mean mounting @mnt_from on @mnt_to->mnt_parent and then
3080 * propagating a copy @c of @mnt_from on top of @mnt_to. This
3081 * defeats the whole purpose of mounting beneath another mount.
3082 */
3083 if (propagation_would_overmount(parent_mnt_to, mnt_to, mp))
3084 return -EINVAL;
3085
3086 /*
3087 * If @mnt_to->mnt_parent propagates to @mnt_from this would
3088 * mean propagating a copy @c of @mnt_from on top of @mnt_from.
3089 * Afterwards @mnt_from would be mounted on top of
3090 * @mnt_to->mnt_parent and @mnt_to would be unmounted from
3091 * @mnt->mnt_parent and remounted on @mnt_from. But since @c is
3092 * already mounted on @mnt_from, @mnt_to would ultimately be
3093 * remounted on top of @c. Afterwards, @mnt_from would be
3094 * covered by a copy @c of @mnt_from and @c would be covered by
3095 * @mnt_from itself. This defeats the whole purpose of mounting
3096 * @mnt_from beneath @mnt_to.
3097 */
3098 if (propagation_would_overmount(parent_mnt_to, mnt_from, mp))
3099 return -EINVAL;
3100
3101 return 0;
3102 }
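/*
 * Userspace sketch of moving a mount beneath a top mount (illustrative;
 * assumes MOVE_MOUNT_BENEATH from <linux/mount.h> and a detached mount
 * in fd, e.g. from fsmount() or open_tree(OPEN_TREE_CLONE)): the mount
 * is tucked under whatever is mounted at "/mnt/top", so unmounting the
 * top mount later reveals it atomically.
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/mount.h>
 *
 *	syscall(SYS_move_mount, fd, "", AT_FDCWD, "/mnt/top",
 *		MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_BENEATH);
 */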
3103
3104 static int do_move_mount(struct path *old_path, struct path *new_path,
3105 bool beneath)
3106 {
3107 struct mnt_namespace *ns;
3108 struct mount *p;
3109 struct mount *old;
3110 struct mount *parent;
3111 struct mountpoint *mp, *old_mp;
3112 int err;
3113 bool attached;
3114 enum mnt_tree_flags_t flags = 0;
3115
3116 mp = do_lock_mount(new_path, beneath);
3117 if (IS_ERR(mp))
3118 return PTR_ERR(mp);
3119
3120 old = real_mount(old_path->mnt);
3121 p = real_mount(new_path->mnt);
3122 parent = old->mnt_parent;
3123 attached = mnt_has_parent(old);
3124 if (attached)
3125 flags |= MNT_TREE_MOVE;
3126 old_mp = old->mnt_mp;
3127 ns = old->mnt_ns;
3128
3129 err = -EINVAL;
3130 /* The mountpoint must be in our namespace. */
3131 if (!check_mnt(p))
3132 goto out;
3133
3134 /* The thing moved must be mounted... */
3135 if (!is_mounted(&old->mnt))
3136 goto out;
3137
3138 /* ... and either ours or the root of anon namespace */
3139 if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
3140 goto out;
3141
3142 if (old->mnt.mnt_flags & MNT_LOCKED)
3143 goto out;
3144
3145 if (!path_mounted(old_path))
3146 goto out;
3147
3148 if (d_is_dir(new_path->dentry) !=
3149 d_is_dir(old_path->dentry))
3150 goto out;
3151 /*
3152 * Don't move a mount residing in a shared parent.
3153 */
3154 if (attached && IS_MNT_SHARED(parent))
3155 goto out;
3156
3157 if (beneath) {
3158 err = can_move_mount_beneath(old_path, new_path, mp);
3159 if (err)
3160 goto out;
3161
3162 err = -EINVAL;
3163 p = p->mnt_parent;
3164 flags |= MNT_TREE_BENEATH;
3165 }
3166
3167 /*
3168 * Don't move a mount tree containing unbindable mounts to a destination
3169 * mount which is shared.
3170 */
3171 if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
3172 goto out;
3173 err = -ELOOP;
3174 if (!check_for_nsfs_mounts(old))
3175 goto out;
3176 for (; mnt_has_parent(p); p = p->mnt_parent)
3177 if (p == old)
3178 goto out;
3179
3180 err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp, flags);
3181 if (err)
3182 goto out;
3183
3184 /* if the mount is moved, it should no longer expire
3185 * automatically */
3186 list_del_init(&old->mnt_expire);
3187 if (attached)
3188 put_mountpoint(old_mp);
3189 out:
3190 unlock_mount(mp);
3191 if (!err) {
3192 if (attached)
3193 mntput_no_expire(parent);
3194 else
3195 free_mnt_ns(ns);
3196 }
3197 return err;
3198 }
3199
3200 static int do_move_mount_old(struct path *path, const char *old_name)
3201 {
3202 struct path old_path;
3203 int err;
3204
3205 if (!old_name || !*old_name)
3206 return -EINVAL;
3207
3208 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
3209 if (err)
3210 return err;
3211
3212 err = do_move_mount(&old_path, path, false);
3213 path_put(&old_path);
3214 return err;
3215 }
3216
3217 /*
3218 * add a mount into a namespace's mount tree
3219 */
3220 static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
3221 const struct path *path, int mnt_flags)
3222 {
3223 struct mount *parent = real_mount(path->mnt);
3224
3225 mnt_flags &= ~MNT_INTERNAL_FLAGS;
3226
3227 if (unlikely(!check_mnt(parent))) {
3228 /* that's acceptable only for automounts done in private ns */
3229 if (!(mnt_flags & MNT_SHRINKABLE))
3230 return -EINVAL;
3231 /* ... and for those we'd better have mountpoint still alive */
3232 if (!parent->mnt_ns)
3233 return -EINVAL;
3234 }
3235
3236 /* Refuse the same filesystem on the same mount point */
3237 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && path_mounted(path))
3238 return -EBUSY;
3239
3240 if (d_is_symlink(newmnt->mnt.mnt_root))
3241 return -EINVAL;
3242
3243 newmnt->mnt.mnt_flags = mnt_flags;
3244 return graft_tree(newmnt, parent, mp);
3245 }
3246
3247 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
3248
3249 /*
3250 * Create a new mount using a superblock configuration and request it
3251 * be added to the namespace tree.
3252 */
3253 static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
3254 unsigned int mnt_flags)
3255 {
3256 struct vfsmount *mnt;
3257 struct mountpoint *mp;
3258 struct super_block *sb = fc->root->d_sb;
3259 int error;
3260
3261 error = security_sb_kern_mount(sb);
3262 if (!error && mount_too_revealing(sb, &mnt_flags))
3263 error = -EPERM;
3264
3265 if (unlikely(error)) {
3266 fc_drop_locked(fc);
3267 return error;
3268 }
3269
3270 up_write(&sb->s_umount);
3271
3272 mnt = vfs_create_mount(fc);
3273 if (IS_ERR(mnt))
3274 return PTR_ERR(mnt);
3275
3276 mnt_warn_timestamp_expiry(mountpoint, mnt);
3277
3278 mp = lock_mount(mountpoint);
3279 if (IS_ERR(mp)) {
3280 mntput(mnt);
3281 return PTR_ERR(mp);
3282 }
3283 error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags);
3284 unlock_mount(mp);
3285 if (error < 0)
3286 mntput(mnt);
3287 return error;
3288 }
3289
3290 /*
3291 * create a new mount for userspace and request it to be added into the
3292 * namespace's tree
3293 */
3294 static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
3295 int mnt_flags, const char *name, void *data)
3296 {
3297 struct file_system_type *type;
3298 struct fs_context *fc;
3299 const char *subtype = NULL;
3300 int err = 0;
3301
3302 if (!fstype)
3303 return -EINVAL;
3304
3305 type = get_fs_type(fstype);
3306 if (!type)
3307 return -ENODEV;
3308
3309 if (type->fs_flags & FS_HAS_SUBTYPE) {
3310 subtype = strchr(fstype, '.');
3311 if (subtype) {
3312 subtype++;
3313 if (!*subtype) {
3314 put_filesystem(type);
3315 return -EINVAL;
3316 }
3317 }
3318 }
3319
3320 fc = fs_context_for_mount(type, sb_flags);
3321 put_filesystem(type);
3322 if (IS_ERR(fc))
3323 return PTR_ERR(fc);
3324
3325 if (subtype)
3326 err = vfs_parse_fs_string(fc, "subtype",
3327 subtype, strlen(subtype));
3328 if (!err && name)
3329 err = vfs_parse_fs_string(fc, "source", name, strlen(name));
3330 if (!err)
3331 err = parse_monolithic_mount_data(fc, data);
3332 if (!err && !mount_capable(fc))
3333 err = -EPERM;
3334 if (!err)
3335 err = vfs_get_tree(fc);
3336 if (!err)
3337 err = do_new_mount_fc(fc, path, mnt_flags);
3338
3339 put_fs_context(fc);
3340 return err;
3341 }
3342
3343 int finish_automount(struct vfsmount *m, const struct path *path)
3344 {
3345 struct dentry *dentry = path->dentry;
3346 struct mountpoint *mp;
3347 struct mount *mnt;
3348 int err;
3349
3350 if (!m)
3351 return 0;
3352 if (IS_ERR(m))
3353 return PTR_ERR(m);
3354
3355 mnt = real_mount(m);
3356 /* The new mount record should have at least 2 refs to prevent it from
3357 * being expired before we get a chance to add it
3358 */
3359 BUG_ON(mnt_get_count(mnt) < 2);
3360
3361 if (m->mnt_sb == path->mnt->mnt_sb &&
3362 m->mnt_root == dentry) {
3363 err = -ELOOP;
3364 goto discard;
3365 }
3366
3367 /*
3368 * we don't want to use lock_mount() - in this case finding something
3369 * that overmounts our mountpoint means "quietly drop what we've
3370 * got", not "try to mount it on top".
3371 */
3372 inode_lock(dentry->d_inode);
3373 namespace_lock();
3374 if (unlikely(cant_mount(dentry))) {
3375 err = -ENOENT;
3376 goto discard_locked;
3377 }
3378 if (path_overmounted(path)) {
3379 err = 0;
3380 goto discard_locked;
3381 }
3382 mp = get_mountpoint(dentry);
3383 if (IS_ERR(mp)) {
3384 err = PTR_ERR(mp);
3385 goto discard_locked;
3386 }
3387
3388 err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
3389 unlock_mount(mp);
3390 if (unlikely(err))
3391 goto discard;
3392 mntput(m);
3393 return 0;
3394
3395 discard_locked:
3396 namespace_unlock();
3397 inode_unlock(dentry->d_inode);
3398 discard:
3399 /* remove m from any expiration list it may be on */
3400 if (!list_empty(&mnt->mnt_expire)) {
3401 namespace_lock();
3402 list_del_init(&mnt->mnt_expire);
3403 namespace_unlock();
3404 }
3405 mntput(m);
3406 mntput(m);
3407 return err;
3408 }
3409
3410 /**
3411 * mnt_set_expiry - Put a mount on an expiration list
3412 * @mnt: The mount to list.
3413 * @expiry_list: The list to add the mount to.
3414 */
3415 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
3416 {
3417 namespace_lock();
3418
3419 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
3420
3421 namespace_unlock();
3422 }
3423 EXPORT_SYMBOL(mnt_set_expiry);
3424
3425 /*
3426 * process a list of expirable mountpoints with the intent of discarding any
3427 * mountpoints that aren't in use and haven't been touched since last we came
3428 * here
3429 */
3430 void mark_mounts_for_expiry(struct list_head *mounts)
3431 {
3432 struct mount *mnt, *next;
3433 LIST_HEAD(graveyard);
3434
3435 if (list_empty(mounts))
3436 return;
3437
3438 namespace_lock();
3439 lock_mount_hash();
3440
3441 /* extract from the expiration list every vfsmount that matches the
3442 * following criteria:
3443 * - only referenced by its parent vfsmount
3444 * - still marked for expiry (marked on the last call here; marks are
3445 * cleared by mntput())
3446 */
3447 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
3448 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
3449 propagate_mount_busy(mnt, 1))
3450 continue;
3451 list_move(&mnt->mnt_expire, &graveyard);
3452 }
3453 while (!list_empty(&graveyard)) {
3454 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
3455 touch_mnt_namespace(mnt->mnt_ns);
3456 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3457 }
3458 unlock_mount_hash();
3459 namespace_unlock();
3460 }
3461
3462 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
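/*
 * In-kernel usage sketch for the export above (hypothetical list name;
 * NFS uses this pattern for automounted submounts): each automount is
 * queued with mnt_set_expiry(), and periodic work then calls
 * mark_mounts_for_expiry() so that mounts untouched since the previous
 * pass get unmounted.
 *
 *	static LIST_HEAD(example_expiry_list);
 *
 *	// at automount time:
 *	mnt_set_expiry(mnt, &example_expiry_list);
 *
 *	// from delayed work, e.g. every few minutes:
 *	mark_mounts_for_expiry(&example_expiry_list);
 */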
3463
3464 /*
3465 * Ripoff of 'select_parent()'
3466 *
3467 * search the list of submounts for a given mountpoint, and move any
3468 * shrinkable submounts to the 'graveyard' list.
3469 */
3470 static int select_submounts(struct mount *parent, struct list_head *graveyard)
3471 {
3472 struct mount *this_parent = parent;
3473 struct list_head *next;
3474 int found = 0;
3475
3476 repeat:
3477 next = this_parent->mnt_mounts.next;
3478 resume:
3479 while (next != &this_parent->mnt_mounts) {
3480 struct list_head *tmp = next;
3481 struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
3482
3483 next = tmp->next;
3484 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
3485 continue;
3486 /*
3487 * Descend a level if the mnt_mounts list is non-empty.
3488 */
3489 if (!list_empty(&mnt->mnt_mounts)) {
3490 this_parent = mnt;
3491 goto repeat;
3492 }
3493
3494 if (!propagate_mount_busy(mnt, 1)) {
3495 list_move_tail(&mnt->mnt_expire, graveyard);
3496 found++;
3497 }
3498 }
3499 /*
3500 * All done at this level ... ascend and resume the search
3501 */
3502 if (this_parent != parent) {
3503 next = this_parent->mnt_child.next;
3504 this_parent = this_parent->mnt_parent;
3505 goto resume;
3506 }
3507 return found;
3508 }
3509
3510 /*
3511 * process a list of expirable mountpoints with the intent of discarding any
3512 * submounts of a specific parent mountpoint
3513 *
3514 * mount_lock must be held for write
3515 */
3516 static void shrink_submounts(struct mount *mnt)
3517 {
3518 LIST_HEAD(graveyard);
3519 struct mount *m;
3520
3521 /* extract submounts of 'mountpoint' from the expiration list */
3522 while (select_submounts(mnt, &graveyard)) {
3523 while (!list_empty(&graveyard)) {
3524 m = list_first_entry(&graveyard, struct mount,
3525 mnt_expire);
3526 touch_mnt_namespace(m->mnt_ns);
3527 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3528 }
3529 }
3530 }
3531
3532 static void *copy_mount_options(const void __user *data)
3533 {
3534 char *copy;
3535 unsigned left, offset;
3536
3537 if (!data)
3538 return NULL;
3539
3540 copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
3541 if (!copy)
3542 return ERR_PTR(-ENOMEM);
3543
3544 left = copy_from_user(copy, data, PAGE_SIZE);
3545
3546 /*
3547 * Not all architectures have an exact copy_from_user(). Resort to
3548 * copying one byte at a time.
3549 */
3550 offset = PAGE_SIZE - left;
3551 while (left) {
3552 char c;
3553 if (get_user(c, (const char __user *)data + offset))
3554 break;
3555 copy[offset] = c;
3556 left--;
3557 offset++;
3558 }
3559
3560 if (left == PAGE_SIZE) {
3561 kfree(copy);
3562 return ERR_PTR(-EFAULT);
3563 }
3564
3565 return copy;
3566 }
3567
3568 static char *copy_mount_string(const void __user *data)
3569 {
3570 return data ? strndup_user(data, PATH_MAX) : NULL;
3571 }
3572
3573 /*
3574 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
3575 * be given to the mount() call (i.e. read-only, nodev, nosuid, etc.).
3576 *
3577 * data is a (void *) that can point to any structure up to
3578 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
3579 * information (or be NULL).
3580 *
3581 * Pre-0.97 versions of mount() didn't have a flags word.
3582 * When the flags word was introduced its top half was required
3583 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
3584 * Therefore, if this magic number is present, it carries no information
3585 * and must be discarded.
3586 */
3587 int path_mount(const char *dev_name, struct path *path,
3588 const char *type_page, unsigned long flags, void *data_page)
3589 {
3590 unsigned int mnt_flags = 0, sb_flags;
3591 int ret;
3592
3593 /* Discard magic */
3594 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
3595 flags &= ~MS_MGC_MSK;
3596
3597 /* Basic sanity checks */
3598 if (data_page)
3599 ((char *)data_page)[PAGE_SIZE - 1] = 0;
3600
3601 if (flags & MS_NOUSER)
3602 return -EINVAL;
3603
3604 ret = security_sb_mount(dev_name, path, type_page, flags, data_page);
3605 if (ret)
3606 return ret;
3607 if (!may_mount())
3608 return -EPERM;
3609 if (flags & SB_MANDLOCK)
3610 warn_mandlock();
3611
3612 /* Default to relatime unless overridden */
3613 if (!(flags & MS_NOATIME))
3614 mnt_flags |= MNT_RELATIME;
3615
3616 /* Separate the per-mountpoint flags */
3617 if (flags & MS_NOSUID)
3618 mnt_flags |= MNT_NOSUID;
3619 if (flags & MS_NODEV)
3620 mnt_flags |= MNT_NODEV;
3621 if (flags & MS_NOEXEC)
3622 mnt_flags |= MNT_NOEXEC;
3623 if (flags & MS_NOATIME)
3624 mnt_flags |= MNT_NOATIME;
3625 if (flags & MS_NODIRATIME)
3626 mnt_flags |= MNT_NODIRATIME;
3627 if (flags & MS_STRICTATIME)
3628 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
3629 if (flags & MS_RDONLY)
3630 mnt_flags |= MNT_READONLY;
3631 if (flags & MS_NOSYMFOLLOW)
3632 mnt_flags |= MNT_NOSYMFOLLOW;
3633
3634 /* The default atime for remount is preservation */
3635 if ((flags & MS_REMOUNT) &&
3636 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
3637 MS_STRICTATIME)) == 0)) {
3638 mnt_flags &= ~MNT_ATIME_MASK;
3639 mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK;
3640 }
3641
3642 sb_flags = flags & (SB_RDONLY |
3643 SB_SYNCHRONOUS |
3644 SB_MANDLOCK |
3645 SB_DIRSYNC |
3646 SB_SILENT |
3647 SB_POSIXACL |
3648 SB_LAZYTIME |
3649 SB_I_VERSION);
3650
3651 if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
3652 return do_reconfigure_mnt(path, mnt_flags);
3653 if (flags & MS_REMOUNT)
3654 return do_remount(path, flags, sb_flags, mnt_flags, data_page);
3655 if (flags & MS_BIND)
3656 return do_loopback(path, dev_name, flags & MS_REC);
3657 if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
3658 return do_change_type(path, flags);
3659 if (flags & MS_MOVE)
3660 return do_move_mount_old(path, dev_name);
3661
3662 return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name,
3663 data_page);
3664 }
3665
3666 long do_mount(const char *dev_name, const char __user *dir_name,
3667 const char *type_page, unsigned long flags, void *data_page)
3668 {
3669 struct path path;
3670 int ret;
3671
3672 ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
3673 if (ret)
3674 return ret;
3675 ret = path_mount(dev_name, &path, type_page, flags, data_page);
3676 path_put(&path);
3677 return ret;
3678 }
3679
3680 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
3681 {
3682 return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
3683 }
3684
3685 static void dec_mnt_namespaces(struct ucounts *ucounts)
3686 {
3687 dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
3688 }
3689
3690 static void free_mnt_ns(struct mnt_namespace *ns)
3691 {
3692 if (!is_anon_ns(ns))
3693 ns_free_inum(&ns->ns);
3694 dec_mnt_namespaces(ns->ucounts);
3695 put_user_ns(ns->user_ns);
3696 kfree(ns);
3697 }
3698
3699 /*
3700 * Assign a sequence number so we can detect when we attempt to bind
3701 * mount a reference to an older mount namespace into the current
3702 * mount namespace, preventing reference counting loops. A 64-bit
3703 * counter incremented even once per nanosecond would take ~584 years
3704 * to wrap, which is effectively never, so we can ignore the possibility.
3705 */
3706 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
3707
3708 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
3709 {
3710 struct mnt_namespace *new_ns;
3711 struct ucounts *ucounts;
3712 int ret;
3713
3714 ucounts = inc_mnt_namespaces(user_ns);
3715 if (!ucounts)
3716 return ERR_PTR(-ENOSPC);
3717
3718 new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL_ACCOUNT);
3719 if (!new_ns) {
3720 dec_mnt_namespaces(ucounts);
3721 return ERR_PTR(-ENOMEM);
3722 }
3723 if (!anon) {
3724 ret = ns_alloc_inum(&new_ns->ns);
3725 if (ret) {
3726 kfree(new_ns);
3727 dec_mnt_namespaces(ucounts);
3728 return ERR_PTR(ret);
3729 }
3730 }
3731 new_ns->ns.ops = &mntns_operations;
3732 if (!anon)
3733 new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
3734 refcount_set(&new_ns->ns.count, 1);
3735 INIT_LIST_HEAD(&new_ns->list);
3736 init_waitqueue_head(&new_ns->poll);
3737 spin_lock_init(&new_ns->ns_lock);
3738 new_ns->user_ns = get_user_ns(user_ns);
3739 new_ns->ucounts = ucounts;
3740 return new_ns;
3741 }
3742
3743 __latent_entropy
3744 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
3745 struct user_namespace *user_ns, struct fs_struct *new_fs)
3746 {
3747 struct mnt_namespace *new_ns;
3748 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
3749 struct mount *p, *q;
3750 struct mount *old;
3751 struct mount *new;
3752 int copy_flags;
3753
3754 BUG_ON(!ns);
3755
3756 if (likely(!(flags & CLONE_NEWNS))) {
3757 get_mnt_ns(ns);
3758 return ns;
3759 }
3760
3761 old = ns->root;
3762
3763 new_ns = alloc_mnt_ns(user_ns, false);
3764 if (IS_ERR(new_ns))
3765 return new_ns;
3766
3767 namespace_lock();
3768 /* First pass: copy the tree topology */
3769 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
3770 if (user_ns != ns->user_ns)
3771 copy_flags |= CL_SHARED_TO_SLAVE;
3772 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
3773 if (IS_ERR(new)) {
3774 namespace_unlock();
3775 free_mnt_ns(new_ns);
3776 return ERR_CAST(new);
3777 }
3778 if (user_ns != ns->user_ns) {
3779 lock_mount_hash();
3780 lock_mnt_tree(new);
3781 unlock_mount_hash();
3782 }
3783 new_ns->root = new;
3784 list_add_tail(&new_ns->list, &new->mnt_list);
3785
3786 /*
3787 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
3788 * as belonging to new namespace. We have already acquired a private
3789 * fs_struct, so tsk->fs->lock is not needed.
3790 */
3791 p = old;
3792 q = new;
3793 while (p) {
3794 q->mnt_ns = new_ns;
3795 new_ns->mounts++;
3796 if (new_fs) {
3797 if (&p->mnt == new_fs->root.mnt) {
3798 new_fs->root.mnt = mntget(&q->mnt);
3799 rootmnt = &p->mnt;
3800 }
3801 if (&p->mnt == new_fs->pwd.mnt) {
3802 new_fs->pwd.mnt = mntget(&q->mnt);
3803 pwdmnt = &p->mnt;
3804 }
3805 }
3806 p = next_mnt(p, old);
3807 q = next_mnt(q, new);
3808 if (!q)
3809 break;
3810 // an mntns binding we'd skipped?
3811 while (p->mnt.mnt_root != q->mnt.mnt_root)
3812 p = next_mnt(skip_mnt_tree(p), old);
3813 }
3814 namespace_unlock();
3815
3816 if (rootmnt)
3817 mntput(rootmnt);
3818 if (pwdmnt)
3819 mntput(pwdmnt);
3820
3821 return new_ns;
3822 }
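/*
 * Userspace sketch that ends up in copy_mnt_ns() above (illustrative):
 * unshare(CLONE_NEWNS) copies the caller's mount tree into a fresh
 * namespace; making everything recursively private afterwards stops
 * mount events from propagating back to the parent namespace.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <sys/mount.h>
 *
 *	unshare(CLONE_NEWNS);
 *	mount(NULL, "/", NULL, MS_PRIVATE | MS_REC, NULL);
 */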
3823
3824 struct dentry *mount_subtree(struct vfsmount *m, const char *name)
3825 {
3826 struct mount *mnt = real_mount(m);
3827 struct mnt_namespace *ns;
3828 struct super_block *s;
3829 struct path path;
3830 int err;
3831
3832 ns = alloc_mnt_ns(&init_user_ns, true);
3833 if (IS_ERR(ns)) {
3834 mntput(m);
3835 return ERR_CAST(ns);
3836 }
3837 mnt->mnt_ns = ns;
3838 ns->root = mnt;
3839 ns->mounts++;
3840 list_add(&mnt->mnt_list, &ns->list);
3841
3842 err = vfs_path_lookup(m->mnt_root, m,
3843 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
3844
3845 put_mnt_ns(ns);
3846
3847 if (err)
3848 return ERR_PTR(err);
3849
3850 /* trade a vfsmount reference for active sb one */
3851 s = path.mnt->mnt_sb;
3852 atomic_inc(&s->s_active);
3853 mntput(path.mnt);
3854 /* lock the sucker */
3855 down_write(&s->s_umount);
3856 /* ... and return the root of (sub)tree on it */
3857 return path.dentry;
3858 }
3859 EXPORT_SYMBOL(mount_subtree);
3860
3861 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
3862 char __user *, type, unsigned long, flags, void __user *, data)
3863 {
3864 int ret;
3865 char *kernel_type;
3866 char *kernel_dev;
3867 void *options;
3868
3869 kernel_type = copy_mount_string(type);
3870 ret = PTR_ERR(kernel_type);
3871 if (IS_ERR(kernel_type))
3872 goto out_type;
3873
3874 kernel_dev = copy_mount_string(dev_name);
3875 ret = PTR_ERR(kernel_dev);
3876 if (IS_ERR(kernel_dev))
3877 goto out_dev;
3878
3879 options = copy_mount_options(data);
3880 ret = PTR_ERR(options);
3881 if (IS_ERR(options))
3882 goto out_data;
3883
3884 ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);
3885
3886 kfree(options);
3887 out_data:
3888 kfree(kernel_dev);
3889 out_dev:
3890 kfree(kernel_type);
3891 out_type:
3892 return ret;
3893 }
3894
3895 #define FSMOUNT_VALID_FLAGS \
3896 (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \
3897 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \
3898 MOUNT_ATTR_NOSYMFOLLOW)
3899
3900 #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP)
3901
3902 #define MOUNT_SETATTR_PROPAGATION_FLAGS \
3903 (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED)
3904
3905 static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
3906 {
3907 unsigned int mnt_flags = 0;
3908
3909 if (attr_flags & MOUNT_ATTR_RDONLY)
3910 mnt_flags |= MNT_READONLY;
3911 if (attr_flags & MOUNT_ATTR_NOSUID)
3912 mnt_flags |= MNT_NOSUID;
3913 if (attr_flags & MOUNT_ATTR_NODEV)
3914 mnt_flags |= MNT_NODEV;
3915 if (attr_flags & MOUNT_ATTR_NOEXEC)
3916 mnt_flags |= MNT_NOEXEC;
3917 if (attr_flags & MOUNT_ATTR_NODIRATIME)
3918 mnt_flags |= MNT_NODIRATIME;
3919 if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW)
3920 mnt_flags |= MNT_NOSYMFOLLOW;
3921
3922 return mnt_flags;
3923 }
3924
3925 /*
3926 * Create a kernel mount representation for a new, prepared superblock
3927 * (specified by fs_fd) and attach to an open_tree-like file descriptor.
3928 */
3929 SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
3930 unsigned int, attr_flags)
3931 {
3932 struct mnt_namespace *ns;
3933 struct fs_context *fc;
3934 struct file *file;
3935 struct path newmount;
3936 struct mount *mnt;
3937 struct fd f;
3938 unsigned int mnt_flags = 0;
3939 long ret;
3940
3941 if (!may_mount())
3942 return -EPERM;
3943
3944 if ((flags & ~(FSMOUNT_CLOEXEC)) != 0)
3945 return -EINVAL;
3946
3947 if (attr_flags & ~FSMOUNT_VALID_FLAGS)
3948 return -EINVAL;
3949
3950 mnt_flags = attr_flags_to_mnt_flags(attr_flags);
3951
3952 switch (attr_flags & MOUNT_ATTR__ATIME) {
3953 case MOUNT_ATTR_STRICTATIME:
3954 break;
3955 case MOUNT_ATTR_NOATIME:
3956 mnt_flags |= MNT_NOATIME;
3957 break;
3958 case MOUNT_ATTR_RELATIME:
3959 mnt_flags |= MNT_RELATIME;
3960 break;
3961 default:
3962 return -EINVAL;
3963 }
3964
3965 f = fdget(fs_fd);
3966 if (!f.file)
3967 return -EBADF;
3968
3969 ret = -EINVAL;
3970 if (f.file->f_op != &fscontext_fops)
3971 goto err_fsfd;
3972
3973 fc = f.file->private_data;
3974
3975 ret = mutex_lock_interruptible(&fc->uapi_mutex);
3976 if (ret < 0)
3977 goto err_fsfd;
3978
3979 /* There must be a valid superblock or we can't mount it */
3980 ret = -EINVAL;
3981 if (!fc->root)
3982 goto err_unlock;
3983
3984 ret = -EPERM;
3985 if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
3986 pr_warn("VFS: Mount too revealing\n");
3987 goto err_unlock;
3988 }
3989
3990 ret = -EBUSY;
3991 if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
3992 goto err_unlock;
3993
3994 if (fc->sb_flags & SB_MANDLOCK)
3995 warn_mandlock();
3996
3997 newmount.mnt = vfs_create_mount(fc);
3998 if (IS_ERR(newmount.mnt)) {
3999 ret = PTR_ERR(newmount.mnt);
4000 goto err_unlock;
4001 }
4002 newmount.dentry = dget(fc->root);
4003 newmount.mnt->mnt_flags = mnt_flags;
4004
4005 /* We've done the mount bit - now move the file context into more or
4006 * less the same state as if we'd done an fspick(). We don't want to
4007 * do any memory allocation or anything like that at this point as we
4008 * don't want to have to handle any errors incurred.
4009 */
4010 vfs_clean_context(fc);
4011
4012 ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
4013 if (IS_ERR(ns)) {
4014 ret = PTR_ERR(ns);
4015 goto err_path;
4016 }
4017 mnt = real_mount(newmount.mnt);
4018 mnt->mnt_ns = ns;
4019 ns->root = mnt;
4020 ns->mounts = 1;
4021 list_add(&mnt->mnt_list, &ns->list);
4022 mntget(newmount.mnt);
4023
4024 /* Attach to an apparent O_PATH fd with a note that we need to unmount
4025 * it, not simply put it.
4026 */
4027 file = dentry_open(&newmount, O_PATH, fc->cred);
4028 if (IS_ERR(file)) {
4029 dissolve_on_fput(newmount.mnt);
4030 ret = PTR_ERR(file);
4031 goto err_path;
4032 }
4033 file->f_mode |= FMODE_NEED_UNMOUNT;
4034
4035 ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0);
4036 if (ret >= 0)
4037 fd_install(ret, file);
4038 else
4039 fput(file);
4040
4041 err_path:
4042 path_put(&newmount);
4043 err_unlock:
4044 mutex_unlock(&fc->uapi_mutex);
4045 err_fsfd:
4046 fdput(f);
4047 return ret;
4048 }
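
/*
 * Illustrative userspace sequence (not part of this file): fsmount()
 * consumes an fs_context fd from fsopen()/fsconfig(). A hedged sketch
 * using raw syscalls (glibc >= 2.36 also ships wrappers); "ext4" and
 * "/dev/sda1" are made-up examples:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/mount.h>
 *
 *	int fsfd = syscall(SYS_fsopen, "ext4", FSOPEN_CLOEXEC);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
 *		"/dev/sda1", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC,
 *			  MOUNT_ATTR_RDONLY);
 *
 * mfd is the O_PATH file created above, with FMODE_NEED_UNMOUNT set
 * and the mount parked in an anonymous namespace until move_mount()
 * attaches it somewhere.
 */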
4049
4050 /*
4051 * Move a mount from one place to another. In combination with
4052 * fsopen()/fsmount() this is used to install a new mount and in combination
4053 * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy
4054 * a mount subtree.
4055 *
4056 * Note the flags value is a combination of MOVE_MOUNT_* flags.
4057 */
4058 SYSCALL_DEFINE5(move_mount,
4059 int, from_dfd, const char __user *, from_pathname,
4060 int, to_dfd, const char __user *, to_pathname,
4061 unsigned int, flags)
4062 {
4063 struct path from_path, to_path;
4064 unsigned int lflags;
4065 int ret = 0;
4066
4067 if (!may_mount())
4068 return -EPERM;
4069
4070 if (flags & ~MOVE_MOUNT__MASK)
4071 return -EINVAL;
4072
4073 if ((flags & (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) ==
4074 (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP))
4075 return -EINVAL;
4076
4077 /* If someone gives a pathname, they aren't permitted to move
4078 * from an fd that requires unmount as we can't get at the flag
4079 * to clear it afterwards.
4080 */
4081 lflags = 0;
4082 if (flags & MOVE_MOUNT_F_SYMLINKS) lflags |= LOOKUP_FOLLOW;
4083 if (flags & MOVE_MOUNT_F_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
4084 if (flags & MOVE_MOUNT_F_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
4085
4086 ret = user_path_at(from_dfd, from_pathname, lflags, &from_path);
4087 if (ret < 0)
4088 return ret;
4089
4090 lflags = 0;
4091 if (flags & MOVE_MOUNT_T_SYMLINKS) lflags |= LOOKUP_FOLLOW;
4092 if (flags & MOVE_MOUNT_T_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
4093 if (flags & MOVE_MOUNT_T_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
4094
4095 ret = user_path_at(to_dfd, to_pathname, lflags, &to_path);
4096 if (ret < 0)
4097 goto out_from;
4098
4099 ret = security_move_mount(&from_path, &to_path);
4100 if (ret < 0)
4101 goto out_to;
4102
4103 if (flags & MOVE_MOUNT_SET_GROUP)
4104 ret = do_set_group(&from_path, &to_path);
4105 else
4106 ret = do_move_mount(&from_path, &to_path,
4107 (flags & MOVE_MOUNT_BENEATH));
4108
4109 out_to:
4110 path_put(&to_path);
4111 out_from:
4112 path_put(&from_path);
4113 return ret;
4114 }
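
/*
 * Illustrative userspace call (not part of this file), continuing the
 * fsmount() sketch above: attach the detached mount at a real path.
 * A hedged example; "/mnt" is made up (AT_FDCWD needs <fcntl.h>):
 *
 *	syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt",
 *		MOVE_MOUNT_F_EMPTY_PATH);
 *
 * MOVE_MOUNT_F_EMPTY_PATH turns into LOOKUP_EMPTY above, so the empty
 * from-path string means "the mount mfd itself".
 */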
4115
4116 /*
4117 * Return true if path is reachable from root
4118 *
4119 * namespace_sem or mount_lock is held
4120 */
4121 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
4122 const struct path *root)
4123 {
4124 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
4125 dentry = mnt->mnt_mountpoint;
4126 mnt = mnt->mnt_parent;
4127 }
4128 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
4129 }
4130
4131 bool path_is_under(const struct path *path1, const struct path *path2)
4132 {
4133 bool res;
4134 read_seqlock_excl(&mount_lock);
4135 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
4136 read_sequnlock_excl(&mount_lock);
4137 return res;
4138 }
4139 EXPORT_SYMBOL(path_is_under);
4140
4141 /*
4142 * pivot_root Semantics:
4143 * Moves the root file system of the current process to the directory put_old,
4144 * makes new_root as the new root file system of the current process, and sets
4145 * root/cwd of all processes which had them on the current root to new_root.
4146 *
4147 * Restrictions:
4148 * The new_root and put_old must be directories, and must not be on the
4149 * same file system as the current process root. The put_old must be
4150 * underneath new_root, i.e. adding a non-zero number of /.. to the string
4151 * pointed to by put_old must yield the same directory as new_root. No other
4152 * file system may be mounted on put_old. After all, new_root is a mountpoint.
4153 *
4154 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
4155 * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives
4156 * in this situation.
4157 *
4158 * Notes:
4159 * - we don't move root/cwd if they are not at the root (reason: if something
4160 * cared enough to change them, it's probably wrong to force them elsewhere)
4161 * - it's okay to pick a root that isn't the root of a file system, e.g.
4162 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
4163 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
4164 * first.
4165 */
4166 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
4167 const char __user *, put_old)
4168 {
4169 struct path new, old, root;
4170 struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
4171 struct mountpoint *old_mp, *root_mp;
4172 int error;
4173
4174 if (!may_mount())
4175 return -EPERM;
4176
4177 error = user_path_at(AT_FDCWD, new_root,
4178 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
4179 if (error)
4180 goto out0;
4181
4182 error = user_path_at(AT_FDCWD, put_old,
4183 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
4184 if (error)
4185 goto out1;
4186
4187 error = security_sb_pivotroot(&old, &new);
4188 if (error)
4189 goto out2;
4190
4191 get_fs_root(current->fs, &root);
4192 old_mp = lock_mount(&old);
4193 error = PTR_ERR(old_mp);
4194 if (IS_ERR(old_mp))
4195 goto out3;
4196
4197 error = -EINVAL;
4198 new_mnt = real_mount(new.mnt);
4199 root_mnt = real_mount(root.mnt);
4200 old_mnt = real_mount(old.mnt);
4201 ex_parent = new_mnt->mnt_parent;
4202 root_parent = root_mnt->mnt_parent;
4203 if (IS_MNT_SHARED(old_mnt) ||
4204 IS_MNT_SHARED(ex_parent) ||
4205 IS_MNT_SHARED(root_parent))
4206 goto out4;
4207 if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
4208 goto out4;
4209 if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
4210 goto out4;
4211 error = -ENOENT;
4212 if (d_unlinked(new.dentry))
4213 goto out4;
4214 error = -EBUSY;
4215 if (new_mnt == root_mnt || old_mnt == root_mnt)
4216 goto out4; /* loop, on the same file system */
4217 error = -EINVAL;
4218 if (!path_mounted(&root))
4219 goto out4; /* not a mountpoint */
4220 if (!mnt_has_parent(root_mnt))
4221 goto out4; /* not attached */
4222 if (!path_mounted(&new))
4223 goto out4; /* not a mountpoint */
4224 if (!mnt_has_parent(new_mnt))
4225 goto out4; /* not attached */
4226 /* make sure we can reach put_old from new_root */
4227 if (!is_path_reachable(old_mnt, old.dentry, &new))
4228 goto out4;
4229 /* make certain new is below the root */
4230 if (!is_path_reachable(new_mnt, new.dentry, &root))
4231 goto out4;
4232 lock_mount_hash();
4233 umount_mnt(new_mnt);
4234 root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */
4235 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
4236 new_mnt->mnt.mnt_flags |= MNT_LOCKED;
4237 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
4238 }
4239 /* mount old root on put_old */
4240 attach_mnt(root_mnt, old_mnt, old_mp, false);
4241 /* mount new_root on / */
4242 attach_mnt(new_mnt, root_parent, root_mp, false);
4243 mnt_add_count(root_parent, -1);
4244 touch_mnt_namespace(current->nsproxy->mnt_ns);
4245 /* A moved mount should not expire automatically */
4246 list_del_init(&new_mnt->mnt_expire);
4247 put_mountpoint(root_mp);
4248 unlock_mount_hash();
4249 chroot_fs_refs(&root, &new);
4250 error = 0;
4251 out4:
4252 unlock_mount(old_mp);
4253 if (!error)
4254 mntput_no_expire(ex_parent);
4255 out3:
4256 path_put(&root);
4257 out2:
4258 path_put(&old);
4259 out1:
4260 path_put(&new);
4261 out0:
4262 return error;
4263 }
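
/*
 * Illustrative userspace sequence (not part of this file): the
 * container-style use of pivot_root(2) from a fresh mount namespace,
 * following the stacking trick in the pivot_root(2) man page. A
 * hedged sketch; "/newroot" is a made-up path:
 *
 *	#include <sched.h>
 *	#include <sys/mount.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unshare(CLONE_NEWNS);
 *	mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
 *	mount("/newroot", "/newroot", NULL, MS_BIND, NULL);
 *	chdir("/newroot");
 *	syscall(SYS_pivot_root, ".", ".");
 *	umount2(".", MNT_DETACH);
 *	chdir("/");
 *
 * MS_PRIVATE avoids the IS_MNT_SHARED() rejections above; the self
 * bind-mount satisfies the "new_root must be a mountpoint" rule; and
 * pivoting "." onto "." stacks the old root on the new one so a lazy
 * umount can drop it.
 */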
4264
4265 static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
4266 {
4267 unsigned int flags = mnt->mnt.mnt_flags;
4268
4269 /* flags to clear */
4270 flags &= ~kattr->attr_clr;
4271 /* flags to raise */
4272 flags |= kattr->attr_set;
4273
4274 return flags;
4275 }
4276
4277 static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4278 {
4279 struct vfsmount *m = &mnt->mnt;
4280 struct user_namespace *fs_userns = m->mnt_sb->s_user_ns;
4281
4282 if (!kattr->mnt_idmap)
4283 return 0;
4284
4285 /*
4286 * Creating an idmapped mount with the filesystem-wide idmapping
4287 * doesn't make sense, so block that. We don't allow mushy semantics.
4288 */
4289 if (!check_fsmapping(kattr->mnt_idmap, m->mnt_sb))
4290 return -EINVAL;
4291
4292 /*
4293 * Once a mount has been idmapped we don't allow it to change its
4294 * mapping. It makes things simpler and callers can just create
4295 * another bind-mount they can idmap if they want to.
4296 */
4297 if (is_idmapped_mnt(m))
4298 return -EPERM;
4299
4300 /* The underlying filesystem doesn't support idmapped mounts yet. */
4301 if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
4302 return -EINVAL;
4303
4304 /* We're not controlling the superblock. */
4305 if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
4306 return -EPERM;
4307
4308 /* Mount has already been visible in the filesystem hierarchy. */
4309 if (!is_anon_ns(mnt->mnt_ns))
4310 return -EINVAL;
4311
4312 return 0;
4313 }
4314
4315 /**
4316 * mnt_allow_writers() - check whether the attribute change allows writers
4317 * @kattr: the new mount attributes
4318 * @mnt: the mount to which @kattr will be applied
4319 *
4320 * Check whether the new mount attributes in @kattr allow concurrent writers.
4321 *
4322 * Return: true if concurrent writers are allowed, false if they must be held off
4323 */
4324 static inline bool mnt_allow_writers(const struct mount_kattr *kattr,
4325 const struct mount *mnt)
4326 {
4327 return (!(kattr->attr_set & MNT_READONLY) ||
4328 (mnt->mnt.mnt_flags & MNT_READONLY)) &&
4329 !kattr->mnt_idmap;
4330 }
4331
4332 static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
4333 {
4334 struct mount *m;
4335 int err;
4336
4337 for (m = mnt; m; m = next_mnt(m, mnt)) {
4338 if (!can_change_locked_flags(m, recalc_flags(kattr, m))) {
4339 err = -EPERM;
4340 break;
4341 }
4342
4343 err = can_idmap_mount(kattr, m);
4344 if (err)
4345 break;
4346
4347 if (!mnt_allow_writers(kattr, m)) {
4348 err = mnt_hold_writers(m);
4349 if (err)
4350 break;
4351 }
4352
4353 if (!kattr->recurse)
4354 return 0;
4355 }
4356
4357 if (err) {
4358 struct mount *p;
4359
4360 /*
4361 * If we had to call mnt_hold_writers(), MNT_WRITE_HOLD will be set
4362 * in the affected mounts' flags. The loop unsets MNT_WRITE_HOLD for all
4363 * mounts and needs to take care to include the first mount.
4364 */
4365 for (p = mnt; p; p = next_mnt(p, mnt)) {
4366 /* If we had to hold writers unblock them. */
4367 if (p->mnt.mnt_flags & MNT_WRITE_HOLD)
4368 mnt_unhold_writers(p);
4369
4370 /*
4371 * We're done once the first mount we changed got
4372 * MNT_WRITE_HOLD unset.
4373 */
4374 if (p == m)
4375 break;
4376 }
4377 }
4378 return err;
4379 }
4380
4381 static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4382 {
4383 if (!kattr->mnt_idmap)
4384 return;
4385
4386 /*
4387 * Pairs with smp_load_acquire() in mnt_idmap().
4388 *
4389 * Since we only allow a mount to change the idmapping once and
4390 * verified this in can_idmap_mount() we know that the mount has
4391 * @nop_mnt_idmap attached to it. So there's no need to drop any
4392 * references.
4393 */
4394 smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap));
4395 }
4396
4397 static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
4398 {
4399 struct mount *m;
4400
4401 for (m = mnt; m; m = next_mnt(m, mnt)) {
4402 unsigned int flags;
4403
4404 do_idmap_mount(kattr, m);
4405 flags = recalc_flags(kattr, m);
4406 WRITE_ONCE(m->mnt.mnt_flags, flags);
4407
4408 /* If we had to hold writers unblock them. */
4409 if (m->mnt.mnt_flags & MNT_WRITE_HOLD)
4410 mnt_unhold_writers(m);
4411
4412 if (kattr->propagation)
4413 change_mnt_propagation(m, kattr->propagation);
4414 if (!kattr->recurse)
4415 break;
4416 }
4417 touch_mnt_namespace(mnt->mnt_ns);
4418 }
4419
4420 static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
4421 {
4422 struct mount *mnt = real_mount(path->mnt);
4423 int err = 0;
4424
4425 if (!path_mounted(path))
4426 return -EINVAL;
4427
4428 if (kattr->mnt_userns) {
4429 struct mnt_idmap *mnt_idmap;
4430
4431 mnt_idmap = alloc_mnt_idmap(kattr->mnt_userns);
4432 if (IS_ERR(mnt_idmap))
4433 return PTR_ERR(mnt_idmap);
4434 kattr->mnt_idmap = mnt_idmap;
4435 }
4436
4437 if (kattr->propagation) {
4438 /*
4439 * Only take namespace_lock() if we're actually changing
4440 * propagation.
4441 */
4442 namespace_lock();
4443 if (kattr->propagation == MS_SHARED) {
4444 err = invent_group_ids(mnt, kattr->recurse);
4445 if (err) {
4446 namespace_unlock();
4447 return err;
4448 }
4449 }
4450 }
4451
4452 err = -EINVAL;
4453 lock_mount_hash();
4454
4455 /* Ensure that this isn't anything purely vfs internal. */
4456 if (!is_mounted(&mnt->mnt))
4457 goto out;
4458
4459 /*
4460 * If this is an attached mount make sure it's located in the caller's
4461 * mount namespace. If it's not, don't let the caller interact with it.
4462 * If this is a detached mount make sure it has an anonymous mount
4463 * namespace attached to it, i.e. we've created it via OPEN_TREE_CLONE.
4464 */
4465 if (!(mnt_has_parent(mnt) ? check_mnt(mnt) : is_anon_ns(mnt->mnt_ns)))
4466 goto out;
4467
4468 /*
4469 * First, we get the mount tree in a shape where we can change mount
4470 * properties without failure. If we succeed, we commit all changes;
4471 * if we fail, we clean up.
4472 */
4473 err = mount_setattr_prepare(kattr, mnt);
4474 if (!err)
4475 mount_setattr_commit(kattr, mnt);
4476
4477 out:
4478 unlock_mount_hash();
4479
4480 if (kattr->propagation) {
4481 if (err)
4482 cleanup_group_ids(mnt, NULL);
4483 namespace_unlock();
4484 }
4485
4486 return err;
4487 }
4488
4489 static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
4490 struct mount_kattr *kattr, unsigned int flags)
4491 {
4492 int err = 0;
4493 struct ns_common *ns;
4494 struct user_namespace *mnt_userns;
4495 struct fd f;
4496
4497 if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
4498 return 0;
4499
4500 /*
4501 * We currently do not support clearing an idmapped mount. If this ever
4502 * becomes a use-case we can revisit this, but for now let's keep it
4503 * simple and not allow it.
4504 */
4505 if (attr->attr_clr & MOUNT_ATTR_IDMAP)
4506 return -EINVAL;
4507
4508 if (attr->userns_fd > INT_MAX)
4509 return -EINVAL;
4510
4511 f = fdget(attr->userns_fd);
4512 if (!f.file)
4513 return -EBADF;
4514
4515 if (!proc_ns_file(f.file)) {
4516 err = -EINVAL;
4517 goto out_fput;
4518 }
4519
4520 ns = get_proc_ns(file_inode(f.file));
4521 if (ns->ops->type != CLONE_NEWUSER) {
4522 err = -EINVAL;
4523 goto out_fput;
4524 }
4525
4526 /*
4527 * The initial idmapping cannot be used to create an idmapped
4528 * mount. We use the initial idmapping as an indicator of a mount
4529 * that is not idmapped. It can simply be passed into helpers that
4530 * are aware of idmapped mounts as a convenient shortcut. A user
4531 * can just create a dedicated identity mapping to achieve the same
4532 * result.
4533 */
4534 mnt_userns = container_of(ns, struct user_namespace, ns);
4535 if (mnt_userns == &init_user_ns) {
4536 err = -EPERM;
4537 goto out_fput;
4538 }
4539
4540 /* We're not controlling the target namespace. */
4541 if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) {
4542 err = -EPERM;
4543 goto out_fput;
4544 }
4545
4546 kattr->mnt_userns = get_user_ns(mnt_userns);
4547
4548 out_fput:
4549 fdput(f);
4550 return err;
4551 }
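
/*
 * Illustrative userspace counterpart (not part of this file): the
 * userns_fd consumed above is typically an open user-namespace file.
 * A hedged sketch; the pid and path are made-up examples:
 *
 *	#include <fcntl.h>
 *	#include <linux/mount.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct mount_attr attr = {
 *		.attr_set  = MOUNT_ATTR_IDMAP,
 *		.userns_fd = open("/proc/1234/ns/user", O_RDONLY),
 *	};
 *	int mfd = syscall(SYS_open_tree, AT_FDCWD, "/some/dir",
 *			  OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
 *	syscall(SYS_mount_setattr, mfd, "", AT_EMPTY_PATH,
 *		&attr, sizeof(attr));
 *
 * The detached OPEN_TREE_CLONE copy matters: can_idmap_mount() above
 * only accepts mounts in an anonymous namespace, and an fd for
 * init_user_ns would be rejected with -EPERM above.
 */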
4552
4553 static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
4554 struct mount_kattr *kattr, unsigned int flags)
4555 {
4556 unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
4557
4558 if (flags & AT_NO_AUTOMOUNT)
4559 lookup_flags &= ~LOOKUP_AUTOMOUNT;
4560 if (flags & AT_SYMLINK_NOFOLLOW)
4561 lookup_flags &= ~LOOKUP_FOLLOW;
4562 if (flags & AT_EMPTY_PATH)
4563 lookup_flags |= LOOKUP_EMPTY;
4564
4565 *kattr = (struct mount_kattr) {
4566 .lookup_flags = lookup_flags,
4567 .recurse = !!(flags & AT_RECURSIVE),
4568 };
4569
4570 if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
4571 return -EINVAL;
4572 if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
4573 return -EINVAL;
4574 kattr->propagation = attr->propagation;
4575
4576 if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS)
4577 return -EINVAL;
4578
4579 kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set);
4580 kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr);
4581
4582 /*
4583 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap,
4584 * users wanting to transition to a different atime setting cannot
4585 * simply specify the atime setting in @attr_set, but must also
4586 * specify MOUNT_ATTR__ATIME in the @attr_clr field.
4587 * So ensure that MOUNT_ATTR__ATIME can't be partially set in
4588 * @attr_clr and that @attr_set can't have any atime bits set if
4589 * MOUNT_ATTR__ATIME isn't set in @attr_clr.
4590 */
4591 if (attr->attr_clr & MOUNT_ATTR__ATIME) {
4592 if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME)
4593 return -EINVAL;
4594
4595 /*
4596 * Clear all previous time settings as they are mutually
4597 * exclusive.
4598 */
4599 kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME;
4600 switch (attr->attr_set & MOUNT_ATTR__ATIME) {
4601 case MOUNT_ATTR_RELATIME:
4602 kattr->attr_set |= MNT_RELATIME;
4603 break;
4604 case MOUNT_ATTR_NOATIME:
4605 kattr->attr_set |= MNT_NOATIME;
4606 break;
4607 case MOUNT_ATTR_STRICTATIME:
4608 break;
4609 default:
4610 return -EINVAL;
4611 }
4612 } else {
4613 if (attr->attr_set & MOUNT_ATTR__ATIME)
4614 return -EINVAL;
4615 }
4616
4617 return build_mount_idmapped(attr, usize, kattr, flags);
4618 }
4619
4620 static void finish_mount_kattr(struct mount_kattr *kattr)
4621 {
4622 put_user_ns(kattr->mnt_userns);
4623 kattr->mnt_userns = NULL;
4624
4625 if (kattr->mnt_idmap)
4626 mnt_idmap_put(kattr->mnt_idmap);
4627 }
4628
4629 SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
4630 unsigned int, flags, struct mount_attr __user *, uattr,
4631 size_t, usize)
4632 {
4633 int err;
4634 struct path target;
4635 struct mount_attr attr;
4636 struct mount_kattr kattr;
4637
4638 BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);
4639
4640 if (flags & ~(AT_EMPTY_PATH |
4641 AT_RECURSIVE |
4642 AT_SYMLINK_NOFOLLOW |
4643 AT_NO_AUTOMOUNT))
4644 return -EINVAL;
4645
4646 if (unlikely(usize > PAGE_SIZE))
4647 return -E2BIG;
4648 if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
4649 return -EINVAL;
4650
4651 if (!may_mount())
4652 return -EPERM;
4653
4654 err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
4655 if (err)
4656 return err;
4657
4658 /* Don't bother walking through the mounts if this is a nop. */
4659 if (attr.attr_set == 0 &&
4660 attr.attr_clr == 0 &&
4661 attr.propagation == 0)
4662 return 0;
4663
4664 err = build_mount_kattr(&attr, usize, &kattr, flags);
4665 if (err)
4666 return err;
4667
4668 err = user_path_at(dfd, path, kattr.lookup_flags, &target);
4669 if (!err) {
4670 err = do_mount_setattr(&target, &kattr);
4671 path_put(&target);
4672 }
4673 finish_mount_kattr(&kattr);
4674 return err;
4675 }
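
/*
 * Illustrative userspace call (not part of this file): the common
 * recursive read-only case. A hedged sketch; "/mnt" is a made-up path
 * and AT_RECURSIVE may need <linux/fcntl.h> on older userspace:
 *
 *	struct mount_attr attr = {
 *		.attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID,
 *	};
 *
 *	if (syscall(SYS_mount_setattr, AT_FDCWD, "/mnt", AT_RECURSIVE,
 *		    &attr, sizeof(attr)) < 0)
 *		perror("mount_setattr");
 *
 * AT_RECURSIVE becomes kattr.recurse, so mount_setattr_prepare() and
 * mount_setattr_commit() above walk the whole subtree: either every
 * mount changes or none does.
 */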
4676
4677 static void __init init_mount_tree(void)
4678 {
4679 struct vfsmount *mnt;
4680 struct mount *m;
4681 struct mnt_namespace *ns;
4682 struct path root;
4683
4684 mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
4685 if (IS_ERR(mnt))
4686 panic("Can't create rootfs");
4687
4688 ns = alloc_mnt_ns(&init_user_ns, false);
4689 if (IS_ERR(ns))
4690 panic("Can't allocate initial namespace");
4691 m = real_mount(mnt);
4692 m->mnt_ns = ns;
4693 ns->root = m;
4694 ns->mounts = 1;
4695 list_add(&m->mnt_list, &ns->list);
4696 init_task.nsproxy->mnt_ns = ns;
4697 get_mnt_ns(ns);
4698
4699 root.mnt = mnt;
4700 root.dentry = mnt->mnt_root;
4701 mnt->mnt_flags |= MNT_LOCKED;
4702
4703 set_fs_pwd(current->fs, &root);
4704 set_fs_root(current->fs, &root);
4705 }
4706
4707 void __init mnt_init(void)
4708 {
4709 int err;
4710
4711 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
4712 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
4713
4714 mount_hashtable = alloc_large_system_hash("Mount-cache",
4715 sizeof(struct hlist_head),
4716 mhash_entries, 19,
4717 HASH_ZERO,
4718 &m_hash_shift, &m_hash_mask, 0, 0);
4719 mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
4720 sizeof(struct hlist_head),
4721 mphash_entries, 19,
4722 HASH_ZERO,
4723 &mp_hash_shift, &mp_hash_mask, 0, 0);
4724
4725 if (!mount_hashtable || !mountpoint_hashtable)
4726 panic("Failed to allocate mount hash table\n");
4727
4728 kernfs_init();
4729
4730 err = sysfs_init();
4731 if (err)
4732 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
4733 __func__, err);
4734 fs_kobj = kobject_create_and_add("fs", NULL);
4735 if (!fs_kobj)
4736 printk(KERN_WARNING "%s: kobj create error\n", __func__);
4737 shmem_init();
4738 init_rootfs();
4739 init_mount_tree();
4740 }
4741
4742 void put_mnt_ns(struct mnt_namespace *ns)
4743 {
4744 if (!refcount_dec_and_test(&ns->ns.count))
4745 return;
4746 drop_collected_mounts(&ns->root->mnt);
4747 free_mnt_ns(ns);
4748 }
4749
4750 struct vfsmount *kern_mount(struct file_system_type *type)
4751 {
4752 struct vfsmount *mnt;
4753 mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
4754 if (!IS_ERR(mnt)) {
4755 /*
4756 * it is a long-term mount; don't release mnt until
4757 * we unmount it, just before the filesystem is unregistered
4758 */
4759 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
4760 }
4761 return mnt;
4762 }
4763 EXPORT_SYMBOL_GPL(kern_mount);
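
/*
 * Illustrative in-kernel usage (not part of this file): subsystems pin
 * an internal superblock this way. A hedged sketch modelled on users
 * like pipefs; "example_fs_type" is a made-up name:
 *
 *	static struct vfsmount *example_mnt;
 *
 *	static int __init example_init(void)
 *	{
 *		example_mnt = kern_mount(&example_fs_type);
 *		return PTR_ERR_OR_ZERO(example_mnt);
 *	}
 *
 * The matching kern_unmount() below must run before the filesystem
 * type is unregistered, as the comment above notes.
 */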
4764
4765 void kern_unmount(struct vfsmount *mnt)
4766 {
4767 /* release long term mount so mount point can be released */
4768 if (!IS_ERR(mnt)) {
4769 mnt_make_shortterm(mnt);
4770 synchronize_rcu(); /* yecchhh... */
4771 mntput(mnt);
4772 }
4773 }
4774 EXPORT_SYMBOL(kern_unmount);
4775
4776 void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
4777 {
4778 unsigned int i;
4779
4780 for (i = 0; i < num; i++)
4781 mnt_make_shortterm(mnt[i]);
4782 synchronize_rcu_expedited();
4783 for (i = 0; i < num; i++)
4784 mntput(mnt[i]);
4785 }
4786 EXPORT_SYMBOL(kern_unmount_array);
4787
4788 bool our_mnt(struct vfsmount *mnt)
4789 {
4790 return check_mnt(real_mount(mnt));
4791 }
4792
4793 bool current_chrooted(void)
4794 {
4795 /* Does the current process have a non-standard root? */
4796 struct path ns_root;
4797 struct path fs_root;
4798 bool chrooted;
4799
4800 /* Find the namespace root */
4801 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
4802 ns_root.dentry = ns_root.mnt->mnt_root;
4803 path_get(&ns_root);
4804 while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
4805 ;
4806
4807 get_fs_root(current->fs, &fs_root);
4808
4809 chrooted = !path_equal(&fs_root, &ns_root);
4810
4811 path_put(&fs_root);
4812 path_put(&ns_root);
4813
4814 return chrooted;
4815 }
4816
4817 static bool mnt_already_visible(struct mnt_namespace *ns,
4818 const struct super_block *sb,
4819 int *new_mnt_flags)
4820 {
4821 int new_flags = *new_mnt_flags;
4822 struct mount *mnt;
4823 bool visible = false;
4824
4825 down_read(&namespace_sem);
4826 lock_ns_list(ns);
4827 list_for_each_entry(mnt, &ns->list, mnt_list) {
4828 struct mount *child;
4829 int mnt_flags;
4830
4831 if (mnt_is_cursor(mnt))
4832 continue;
4833
4834 if (mnt->mnt.mnt_sb->s_type != sb->s_type)
4835 continue;
4836
4837 /* This mount is not fully visible if its root directory
4838 * is not the root directory of the filesystem.
4839 */
4840 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
4841 continue;
4842
4843 /* A local view of the mount flags */
4844 mnt_flags = mnt->mnt.mnt_flags;
4845
4846 /* Don't miss readonly hidden in the superblock flags */
4847 if (sb_rdonly(mnt->mnt.mnt_sb))
4848 mnt_flags |= MNT_LOCK_READONLY;
4849
4850 /* Verify the mount flags are equal to or more permissive
4851 * than the proposed new mount.
4852 */
4853 if ((mnt_flags & MNT_LOCK_READONLY) &&
4854 !(new_flags & MNT_READONLY))
4855 continue;
4856 if ((mnt_flags & MNT_LOCK_ATIME) &&
4857 ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
4858 continue;
4859
4860 /* This mount is not fully visible if there are any
4861 * locked child mounts that cover anything except for
4862 * empty directories.
4863 */
4864 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
4865 struct inode *inode = child->mnt_mountpoint->d_inode;
4866 /* Only worry about locked mounts */
4867 if (!(child->mnt.mnt_flags & MNT_LOCKED))
4868 continue;
4869 /* Is the directory permanently empty? */
4870 if (!is_empty_dir_inode(inode))
4871 goto next;
4872 }
4873 /* Preserve the locked attributes */
4874 *new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
4875 MNT_LOCK_ATIME);
4876 visible = true;
4877 goto found;
4878 next: ;
4879 }
4880 found:
4881 unlock_ns_list(ns);
4882 up_read(&namespace_sem);
4883 return visible;
4884 }
4885
4886 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
4887 {
4888 const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
4889 struct mnt_namespace *ns = current->nsproxy->mnt_ns;
4890 unsigned long s_iflags;
4891
4892 if (ns->user_ns == &init_user_ns)
4893 return false;
4894
4895 /* Can this filesystem be too revealing? */
4896 s_iflags = sb->s_iflags;
4897 if (!(s_iflags & SB_I_USERNS_VISIBLE))
4898 return false;
4899
4900 if ((s_iflags & required_iflags) != required_iflags) {
4901 WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
4902 required_iflags);
4903 return true;
4904 }
4905
4906 return !mnt_already_visible(ns, sb, new_mnt_flags);
4907 }
4908
4909 bool mnt_may_suid(struct vfsmount *mnt)
4910 {
4911 /*
4912 * Foreign mounts (accessed via fchdir or through /proc
4913 * symlinks) are always treated as if they are nosuid. This
4914 * prevents namespaces from trusting potentially unsafe
4915 * suid/sgid bits, file caps, or security labels that originate
4916 * in other namespaces.
4917 */
4918 return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
4919 current_in_userns(mnt->mnt_sb->s_user_ns);
4920 }
4921
4922 static struct ns_common *mntns_get(struct task_struct *task)
4923 {
4924 struct ns_common *ns = NULL;
4925 struct nsproxy *nsproxy;
4926
4927 task_lock(task);
4928 nsproxy = task->nsproxy;
4929 if (nsproxy) {
4930 ns = &nsproxy->mnt_ns->ns;
4931 get_mnt_ns(to_mnt_ns(ns));
4932 }
4933 task_unlock(task);
4934
4935 return ns;
4936 }
4937
4938 static void mntns_put(struct ns_common *ns)
4939 {
4940 put_mnt_ns(to_mnt_ns(ns));
4941 }
4942
4943 static int mntns_install(struct nsset *nsset, struct ns_common *ns)
4944 {
4945 struct nsproxy *nsproxy = nsset->nsproxy;
4946 struct fs_struct *fs = nsset->fs;
4947 struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
4948 struct user_namespace *user_ns = nsset->cred->user_ns;
4949 struct path root;
4950 int err;
4951
4952 if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
4953 !ns_capable(user_ns, CAP_SYS_CHROOT) ||
4954 !ns_capable(user_ns, CAP_SYS_ADMIN))
4955 return -EPERM;
4956
4957 if (is_anon_ns(mnt_ns))
4958 return -EINVAL;
4959
4960 if (fs->users != 1)
4961 return -EINVAL;
4962
4963 get_mnt_ns(mnt_ns);
4964 old_mnt_ns = nsproxy->mnt_ns;
4965 nsproxy->mnt_ns = mnt_ns;
4966
4967 /* Find the root */
4968 err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
4969 "/", LOOKUP_DOWN, &root);
4970 if (err) {
4971 /* revert to old namespace */
4972 nsproxy->mnt_ns = old_mnt_ns;
4973 put_mnt_ns(mnt_ns);
4974 return err;
4975 }
4976
4977 put_mnt_ns(old_mnt_ns);
4978
4979 /* Update the pwd and root */
4980 set_fs_pwd(fs, &root);
4981 set_fs_root(fs, &root);
4982
4983 path_put(&root);
4984 return 0;
4985 }
4986
4987 static struct user_namespace *mntns_owner(struct ns_common *ns)
4988 {
4989 return to_mnt_ns(ns)->user_ns;
4990 }
4991
4992 const struct proc_ns_operations mntns_operations = {
4993 .name = "mnt",
4994 .type = CLONE_NEWNS,
4995 .get = mntns_get,
4996 .put = mntns_put,
4997 .install = mntns_install,
4998 .owner = mntns_owner,
4999 };
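
/*
 * Illustrative userspace path into mntns_install() (not part of this
 * file): setns(2) on a mount-namespace fd. A hedged sketch; the pid is
 * a made-up example:
 *
 *	#include <fcntl.h>
 *	#include <sched.h>
 *
 *	int nsfd = open("/proc/1234/ns/mnt", O_RDONLY | O_CLOEXEC);
 *	if (setns(nsfd, CLONE_NEWNS) < 0)
 *		perror("setns");
 *
 * The fs->users != 1 check above is why a multithreaded caller (whose
 * threads share one fs_struct) gets -EINVAL from this.
 */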
5000
5001 #ifdef CONFIG_SYSCTL
5002 static struct ctl_table fs_namespace_sysctls[] = {
5003 {
5004 .procname = "mount-max",
5005 .data = &sysctl_mount_max,
5006 .maxlen = sizeof(unsigned int),
5007 .mode = 0644,
5008 .proc_handler = proc_dointvec_minmax,
5009 .extra1 = SYSCTL_ONE,
5010 },
5011 { }
5012 };
5013
5014 static int __init init_fs_namespace_sysctls(void)
5015 {
5016 register_sysctl_init("fs", fs_namespace_sysctls);
5017 return 0;
5018 }
5019 fs_initcall(init_fs_namespace_sysctls);
5020
5021 #endif /* CONFIG_SYSCTL */
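
/*
 * Illustrative userspace view of the table above (not part of this
 * file): the knob is exposed as /proc/sys/fs/mount-max. A hedged
 * sketch:
 *
 *	#include <stdio.h>
 *
 *	unsigned int max;
 *	FILE *f = fopen("/proc/sys/fs/mount-max", "r");
 *	if (f && fscanf(f, "%u", &max) == 1)
 *		printf("mount-max = %u\n", max);
 *
 * Once a namespace reaches the limit, new mounts fail with -ENOSPC
 * (see count_mounts() earlier in this file).
 */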