1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/fs/super.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * super.c contains code to handle: - mount structures
8 * - super-block tables
9 * - filesystem drivers list
10 * - mount system call
11 * - umount system call
12 * - ustat system call
13 *
14 * GK 2/5/95 - Changed to support mounting the root fs via NFS
15 *
16 * Added kerneld support: Jacques Gelinas and Bjorn Ekwall
17 * Added change_root: Werner Almesberger & Hans Lermen, Feb '96
18 * Added options to /proc/mounts:
19 * Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
20 * Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
21 * Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
22 */
23
24 #include <linux/export.h>
25 #include <linux/slab.h>
26 #include <linux/blkdev.h>
27 #include <linux/mount.h>
28 #include <linux/security.h>
29 #include <linux/writeback.h> /* for the emergency remount stuff */
30 #include <linux/idr.h>
31 #include <linux/mutex.h>
32 #include <linux/backing-dev.h>
33 #include <linux/rculist_bl.h>
34 #include <linux/fscrypt.h>
35 #include <linux/fsnotify.h>
36 #include <linux/lockdep.h>
37 #include <linux/user_namespace.h>
38 #include <linux/fs_context.h>
39 #include <uapi/linux/mount.h>
40 #include "internal.h"
41
42 static int thaw_super_locked(struct super_block *sb);
43
44 static LIST_HEAD(super_blocks);
45 static DEFINE_SPINLOCK(sb_lock);
46
47 static char *sb_writers_name[SB_FREEZE_LEVELS] = {
48 "sb_writers",
49 "sb_pagefaults",
50 "sb_internal",
51 };
52
53 /*
54 * One thing we have to be careful of with a per-sb shrinker is that we don't
55 * drop the last active reference to the superblock from within the shrinker.
56 * If that happens we could trigger unregistering the shrinker from within the
57 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
58 * take a passive reference to the superblock to prevent this from occurring.
59 */
60 static unsigned long super_cache_scan(struct shrinker *shrink,
61 struct shrink_control *sc)
62 {
63 struct super_block *sb;
64 long fs_objects = 0;
65 long total_objects;
66 long freed = 0;
67 long dentries;
68 long inodes;
69
70 sb = container_of(shrink, struct super_block, s_shrink);
71
72 /*
73 * Deadlock avoidance. We may hold various FS locks, and we don't want
74 * to recurse into the FS that called us in clear_inode() and friends..
75 */
76 if (!(sc->gfp_mask & __GFP_FS))
77 return SHRINK_STOP;
78
79 if (!trylock_super(sb))
80 return SHRINK_STOP;
81
82 if (sb->s_op->nr_cached_objects)
83 fs_objects = sb->s_op->nr_cached_objects(sb, sc);
84
85 inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
86 dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
87 total_objects = dentries + inodes + fs_objects + 1;
88 if (!total_objects)
89 total_objects = 1;
90
91 /* proportion the scan between the caches */
92 dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
93 inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
94 fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
95
96 /*
97 * prune the dcache first as the icache is pinned by it, then
98 * prune the icache, followed by the filesystem specific caches
99 *
100 * Ensure that we always scan at least one object - memcg kmem
101 * accounting uses this to fully empty the caches.
102 */
103 sc->nr_to_scan = dentries + 1;
104 freed = prune_dcache_sb(sb, sc);
105 sc->nr_to_scan = inodes + 1;
106 freed += prune_icache_sb(sb, sc);
107
108 if (fs_objects) {
109 sc->nr_to_scan = fs_objects + 1;
110 freed += sb->s_op->free_cached_objects(sb, sc);
111 }
112
113 up_read(&sb->s_umount);
114 return freed;
115 }
116
117 static unsigned long super_cache_count(struct shrinker *shrink,
118 struct shrink_control *sc)
119 {
120 struct super_block *sb;
121 long total_objects = 0;
122
123 sb = container_of(shrink, struct super_block, s_shrink);
124
125 /*
126 * We don't call trylock_super() here as it is a scalability bottleneck,
127 * so we're exposed to partial setup state. The shrinker rwsem does not
128 * protect filesystem operations backing list_lru_shrink_count() or
129 * s_op->nr_cached_objects(). Counts can change between
130 * super_cache_count and super_cache_scan, so we really don't need locks
131 * here.
132 *
133 * However, if we are currently mounting the superblock, the underlying
134 * filesystem might be in a state of partial construction and hence it
135 * is dangerous to access it. trylock_super() uses a SB_BORN check to
136 * avoid this situation, so do the same here. The memory barrier is
137 * matched with the one in mount_fs() as we don't hold locks here.
138 */
139 if (!(sb->s_flags & SB_BORN))
140 return 0;
141 smp_rmb();
142
143 if (sb->s_op && sb->s_op->nr_cached_objects)
144 total_objects = sb->s_op->nr_cached_objects(sb, sc);
145
146 total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
147 total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
148
149 if (!total_objects)
150 return SHRINK_EMPTY;
151
152 total_objects = vfs_pressure_ratio(total_objects);
153 return total_objects;
154 }
155
156 static void destroy_super_work(struct work_struct *work)
157 {
158 struct super_block *s = container_of(work, struct super_block,
159 destroy_work);
160 int i;
161
162 for (i = 0; i < SB_FREEZE_LEVELS; i++)
163 percpu_free_rwsem(&s->s_writers.rw_sem[i]);
164 kfree(s);
165 }
166
167 static void destroy_super_rcu(struct rcu_head *head)
168 {
169 struct super_block *s = container_of(head, struct super_block, rcu);
170 INIT_WORK(&s->destroy_work, destroy_super_work);
171 schedule_work(&s->destroy_work);
172 }
173
174 /* Free a superblock that has never been seen by anyone */
175 static void destroy_unused_super(struct super_block *s)
176 {
177 if (!s)
178 return;
179 up_write(&s->s_umount);
180 list_lru_destroy(&s->s_dentry_lru);
181 list_lru_destroy(&s->s_inode_lru);
182 security_sb_free(s);
183 put_user_ns(s->s_user_ns);
184 kfree(s->s_subtype);
185 free_prealloced_shrinker(&s->s_shrink);
186 /* no delays needed */
187 destroy_super_work(&s->destroy_work);
188 }
189
190 /**
191 * alloc_super - create new superblock
192 * @type: filesystem type superblock should belong to
193 * @flags: the mount flags
194 * @user_ns: User namespace for the super_block
195 *
196 * Allocates and initializes a new &struct super_block. alloc_super()
197 * returns a pointer to the new superblock or %NULL if allocation fails.
198 */
199 static struct super_block *alloc_super(struct file_system_type *type, int flags,
200 struct user_namespace *user_ns)
201 {
202 struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
203 static const struct super_operations default_op;
204 int i;
205
206 if (!s)
207 return NULL;
208
209 INIT_LIST_HEAD(&s->s_mounts);
210 s->s_user_ns = get_user_ns(user_ns);
211 init_rwsem(&s->s_umount);
212 lockdep_set_class(&s->s_umount, &type->s_umount_key);
213 /*
214 * sget() can have s_umount recursion.
215 *
216 * When it cannot find a suitable sb, it allocates a new
217 * one (this one), and tries again to find a suitable old
218 * one.
219 *
220 * In case that succeeds, it will acquire the s_umount
221 * lock of the old one. Since these are clearly distinct
222 * locks, and this object isn't exposed yet, there's no
223 * risk of deadlocks.
224 *
225 * Annotate this by putting this lock in a different
226 * subclass.
227 */
228 down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
229
230 if (security_sb_alloc(s))
231 goto fail;
232
233 for (i = 0; i < SB_FREEZE_LEVELS; i++) {
234 if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
235 sb_writers_name[i],
236 &type->s_writers_key[i]))
237 goto fail;
238 }
239 init_waitqueue_head(&s->s_writers.wait_unfrozen);
240 s->s_bdi = &noop_backing_dev_info;
241 s->s_flags = flags;
242 if (s->s_user_ns != &init_user_ns)
243 s->s_iflags |= SB_I_NODEV;
244 INIT_HLIST_NODE(&s->s_instances);
245 INIT_HLIST_BL_HEAD(&s->s_roots);
246 mutex_init(&s->s_sync_lock);
247 INIT_LIST_HEAD(&s->s_inodes);
248 spin_lock_init(&s->s_inode_list_lock);
249 INIT_LIST_HEAD(&s->s_inodes_wb);
250 spin_lock_init(&s->s_inode_wblist_lock);
251
252 s->s_count = 1;
253 atomic_set(&s->s_active, 1);
254 mutex_init(&s->s_vfs_rename_mutex);
255 lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
256 init_rwsem(&s->s_dquot.dqio_sem);
257 s->s_maxbytes = MAX_NON_LFS;
258 s->s_op = &default_op;
259 s->s_time_gran = 1000000000;
260 s->s_time_min = TIME64_MIN;
261 s->s_time_max = TIME64_MAX;
262
263 s->s_shrink.seeks = DEFAULT_SEEKS;
264 s->s_shrink.scan_objects = super_cache_scan;
265 s->s_shrink.count_objects = super_cache_count;
266 s->s_shrink.batch = 1024;
267 s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
268 if (prealloc_shrinker(&s->s_shrink, "sb-%s", type->name))
269 goto fail;
270 if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
271 goto fail;
272 if (list_lru_init_memcg(&s->s_inode_lru, &s->s_shrink))
273 goto fail;
274 return s;
275
276 fail:
277 destroy_unused_super(s);
278 return NULL;
279 }
280
281 /* Superblock refcounting */
282
283 /*
284 * Drop a superblock's refcount. The caller must hold sb_lock.
285 */
286 static void __put_super(struct super_block *s)
287 {
288 if (!--s->s_count) {
289 list_del_init(&s->s_list);
290 WARN_ON(s->s_dentry_lru.node);
291 WARN_ON(s->s_inode_lru.node);
292 WARN_ON(!list_empty(&s->s_mounts));
293 security_sb_free(s);
294 fscrypt_sb_free(s);
295 put_user_ns(s->s_user_ns);
296 kfree(s->s_subtype);
297 call_rcu(&s->rcu, destroy_super_rcu);
298 }
299 }
300
301 /**
302 * put_super - drop a temporary reference to superblock
303 * @sb: superblock in question
304 *
305 * Drops a temporary reference, frees the superblock if there are no
306 * references left.
307 */
308 void put_super(struct super_block *sb)
309 {
310 spin_lock(&sb_lock);
311 __put_super(sb);
312 spin_unlock(&sb_lock);
313 }
314
315
316 /**
317 * deactivate_locked_super - drop an active reference to superblock
318 * @s: superblock to deactivate
319 *
320 * Drops an active reference to superblock, converting it into a temporary
321 * one if there are no other active references left. In that case we
322 * tell fs driver to shut it down and drop the temporary reference we
323 * had just acquired.
324 *
325 * Caller holds exclusive lock on superblock; that lock is released.
326 */
327 void deactivate_locked_super(struct super_block *s)
328 {
329 struct file_system_type *fs = s->s_type;
330 if (atomic_dec_and_test(&s->s_active)) {
331 unregister_shrinker(&s->s_shrink);
332 fs->kill_sb(s);
333
334 /*
335 * Since list_lru_destroy() may sleep, we cannot call it from
336 * put_super(), where we hold the sb_lock. Therefore we destroy
337 * the lru lists right now.
338 */
339 list_lru_destroy(&s->s_dentry_lru);
340 list_lru_destroy(&s->s_inode_lru);
341
342 put_filesystem(fs);
343 put_super(s);
344 } else {
345 up_write(&s->s_umount);
346 }
347 }
348
349 EXPORT_SYMBOL(deactivate_locked_super);
350
351 /**
352 * deactivate_super - drop an active reference to superblock
353 * @s: superblock to deactivate
354 *
355 * Variant of deactivate_locked_super(), except that superblock is *not*
356 * locked by caller. If we are going to drop the final active reference,
357 * lock will be acquired prior to that.
358 */
359 void deactivate_super(struct super_block *s)
360 {
361 if (!atomic_add_unless(&s->s_active, -1, 1)) {
362 down_write(&s->s_umount);
363 deactivate_locked_super(s);
364 }
365 }
366
367 EXPORT_SYMBOL(deactivate_super);
368
369 /**
370 * grab_super - acquire an active reference
371 * @s: reference we are trying to make active
372 *
373 * Tries to acquire an active reference. grab_super() is used when we
374 * had just found a superblock in super_blocks or fs_type->fs_supers
375 * and want to turn it into a full-blown active reference. grab_super()
376 * is called with sb_lock held and drops it. Returns 1 in case of
377 * success, 0 on failure (the superblock was already dead or
378 * dying when grab_super() was called). Note that this is only
379 * called for superblocks not in rundown mode (== ones still on ->fs_supers
380 * of their type), so increment of ->s_count is OK here.
381 */
382 static int grab_super(struct super_block *s) __releases(sb_lock)
383 {
384 s->s_count++;
385 spin_unlock(&sb_lock);
386 down_write(&s->s_umount);
387 if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
388 put_super(s);
389 return 1;
390 }
391 up_write(&s->s_umount);
392 put_super(s);
393 return 0;
394 }
395
396 /*
397 * trylock_super - try to grab ->s_umount shared
398 * @sb: reference we are trying to grab
399 *
400 * Try to prevent fs shutdown. This is used in places where we
401 * cannot take an active reference but we need to ensure that the
402 * filesystem is not shut down while we are working on it. It returns
403 * false if we cannot acquire s_umount or if we lose the race and
404 * filesystem already got into shutdown, and returns true with the s_umount
405 * lock held in read mode in case of success. On successful return,
406 * the caller must drop the s_umount lock when done.
407 *
408 * Note that unlike get_super() et al. this one does *not* bump ->s_count.
409 * The reason why it's safe is that we are OK with doing trylock instead
410 * of down_read(). There are a couple of places that are OK with that, but
411 * it's very much not a general-purpose interface.
412 */
413 bool trylock_super(struct super_block *sb)
414 {
415 if (down_read_trylock(&sb->s_umount)) {
416 if (!hlist_unhashed(&sb->s_instances) &&
417 sb->s_root && (sb->s_flags & SB_BORN))
418 return true;
419 up_read(&sb->s_umount);
420 }
421
422 return false;
423 }
424
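/*
 * Illustrative sketch (editor's example, not part of fs/super.c): the
 * typical trylock_super() pattern, as used by super_cache_scan() above.
 * A caller that must pin the filesystem against shutdown, but cannot
 * take an active reference, would do:
 *
 *	if (!trylock_super(sb))
 *		return false;	(lost the race with umount, or partial setup)
 *	...work on the superblock: s_root, the LRU lists, etc...
 *	up_read(&sb->s_umount);
 */
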
425 /**
426 * retire_super - prevents superblock from being reused
427 * @sb: superblock to retire
428 *
429 * The function marks the superblock to be ignored in the superblock test,
430 * which prevents it from being reused for any new mounts. If the superblock
431 * has a private bdi, it also unregisters it, but doesn't reduce the refcount
432 * of the superblock to prevent potential races. The refcount is reduced by
433 * generic_shutdown_super(). The function cannot be called concurrently with
434 * generic_shutdown_super(). It is safe to call the function multiple times;
435 * subsequent calls have no effect.
436 *
437 * The marker will affect the re-use only for block-device-based
438 * superblocks. Other superblocks will still get marked if this function
439 * is used, but that will not affect their reusability.
440 */
441 void retire_super(struct super_block *sb)
442 {
443 WARN_ON(!sb->s_bdev);
444 down_write(&sb->s_umount);
445 if (sb->s_iflags & SB_I_PERSB_BDI) {
446 bdi_unregister(sb->s_bdi);
447 sb->s_iflags &= ~SB_I_PERSB_BDI;
448 }
449 sb->s_iflags |= SB_I_RETIRED;
450 up_write(&sb->s_umount);
451 }
452 EXPORT_SYMBOL(retire_super);
453
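/*
 * Illustrative sketch (editor's example, not part of fs/super.c): a
 * block-based filesystem that concludes its device is gone for good can
 * retire the superblock so that the next mount of the same bdev gets a
 * fresh one rather than reusing this instance (the function name here
 * is hypothetical):
 *
 *	static void examplefs_force_shutdown(struct super_block *sb)
 *	{
 *		retire_super(sb);
 *		...fail all further I/O against sb...
 *	}
 */
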
454 /**
455 * generic_shutdown_super - common helper for ->kill_sb()
456 * @sb: superblock to kill
457 *
458 * generic_shutdown_super() does all fs-independent work on superblock
459 * shutdown. Typical ->kill_sb() should pick all fs-specific objects
460 * that need destruction out of superblock, call generic_shutdown_super()
461 * and release aforementioned objects. Note: dentries and inodes _are_
462 * taken care of and do not need specific handling.
463 *
464 * Upon calling this function, the filesystem may no longer alter or
465 * rearrange the set of dentries belonging to this super_block, nor may it
466 * change the attachments of dentries to inodes.
467 */
468 void generic_shutdown_super(struct super_block *sb)
469 {
470 const struct super_operations *sop = sb->s_op;
471
472 if (sb->s_root) {
473 shrink_dcache_for_umount(sb);
474 sync_filesystem(sb);
475 sb->s_flags &= ~SB_ACTIVE;
476
477 cgroup_writeback_umount();
478
479 /* evict all inodes with zero refcount */
480 evict_inodes(sb);
481 /* only nonzero refcount inodes can have marks */
482 fsnotify_sb_delete(sb);
483 security_sb_delete(sb);
484
485 if (sb->s_dio_done_wq) {
486 destroy_workqueue(sb->s_dio_done_wq);
487 sb->s_dio_done_wq = NULL;
488 }
489
490 if (sop->put_super)
491 sop->put_super(sb);
492
493 if (!list_empty(&sb->s_inodes)) {
494 printk("VFS: Busy inodes after unmount of %s. "
495 "Self-destruct in 5 seconds. Have a nice day...\n",
496 sb->s_id);
497 }
498 }
499 spin_lock(&sb_lock);
500 /* should be initialized for __put_super_and_need_restart() */
501 hlist_del_init(&sb->s_instances);
502 spin_unlock(&sb_lock);
503 up_write(&sb->s_umount);
504 if (sb->s_bdi != &noop_backing_dev_info) {
505 if (sb->s_iflags & SB_I_PERSB_BDI)
506 bdi_unregister(sb->s_bdi);
507 bdi_put(sb->s_bdi);
508 sb->s_bdi = &noop_backing_dev_info;
509 }
510 }
511
512 EXPORT_SYMBOL(generic_shutdown_super);
513
514 bool mount_capable(struct fs_context *fc)
515 {
516 if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
517 return capable(CAP_SYS_ADMIN);
518 else
519 return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
520 }
521
522 /**
523 * sget_fc - Find or create a superblock
524 * @fc: Filesystem context.
525 * @test: Comparison callback
526 * @set: Setup callback
527 *
528 * Find or create a superblock using the parameters stored in the filesystem
529 * context and the two callback functions.
530 *
531 * If an extant superblock is matched, then that will be returned with an
532 * elevated reference count that the caller must transfer or discard.
533 *
534 * If no match is made, a new superblock will be allocated and basic
535 * initialisation will be performed (s_type, s_fs_info and s_id will be set and
536 * the set() callback will be invoked), the superblock will be published and it
537 * will be returned in a partially constructed state with SB_BORN and SB_ACTIVE
538 * as yet unset.
539 */
540 struct super_block *sget_fc(struct fs_context *fc,
541 int (*test)(struct super_block *, struct fs_context *),
542 int (*set)(struct super_block *, struct fs_context *))
543 {
544 struct super_block *s = NULL;
545 struct super_block *old;
546 struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
547 int err;
548
549 retry:
550 spin_lock(&sb_lock);
551 if (test) {
552 hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
553 if (test(old, fc))
554 goto share_extant_sb;
555 }
556 }
557 if (!s) {
558 spin_unlock(&sb_lock);
559 s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
560 if (!s)
561 return ERR_PTR(-ENOMEM);
562 goto retry;
563 }
564
565 s->s_fs_info = fc->s_fs_info;
566 err = set(s, fc);
567 if (err) {
568 s->s_fs_info = NULL;
569 spin_unlock(&sb_lock);
570 destroy_unused_super(s);
571 return ERR_PTR(err);
572 }
573 fc->s_fs_info = NULL;
574 s->s_type = fc->fs_type;
575 s->s_iflags |= fc->s_iflags;
576 strlcpy(s->s_id, s->s_type->name, sizeof(s->s_id));
577 list_add_tail(&s->s_list, &super_blocks);
578 hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
579 spin_unlock(&sb_lock);
580 get_filesystem(s->s_type);
581 register_shrinker_prepared(&s->s_shrink);
582 return s;
583
584 share_extant_sb:
585 if (user_ns != old->s_user_ns) {
586 spin_unlock(&sb_lock);
587 destroy_unused_super(s);
588 return ERR_PTR(-EBUSY);
589 }
590 if (!grab_super(old))
591 goto retry;
592 destroy_unused_super(s);
593 return old;
594 }
595 EXPORT_SYMBOL(sget_fc);
596
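/*
 * Illustrative sketch (editor's example, not part of fs/super.c): a
 * typical ->get_tree() supplies test/set callbacks keyed on something
 * in the fs_context; here keyed on s_fs_info, with set_anon_super_fc()
 * from below as the setup callback (examplefs names are hypothetical):
 *
 *	static int examplefs_test_super(struct super_block *sb,
 *					struct fs_context *fc)
 *	{
 *		return sb->s_fs_info == fc->s_fs_info;
 *	}
 *
 *	sb = sget_fc(fc, examplefs_test_super, set_anon_super_fc);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *
 * The caller then checks sb->s_root: NULL means a freshly allocated
 * superblock that still has to be filled in; non-NULL means an extant
 * one was shared.
 */
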
597 /**
598 * sget - find or create a superblock
599 * @type: filesystem type superblock should belong to
600 * @test: comparison callback
601 * @set: setup callback
602 * @flags: mount flags
603 * @data: argument to each of them
604 */
605 struct super_block *sget(struct file_system_type *type,
606 int (*test)(struct super_block *,void *),
607 int (*set)(struct super_block *,void *),
608 int flags,
609 void *data)
610 {
611 struct user_namespace *user_ns = current_user_ns();
612 struct super_block *s = NULL;
613 struct super_block *old;
614 int err;
615
616 /* We don't yet pass the user namespace of the parent
617 * mount through to here so always use &init_user_ns
618 * until that changes.
619 */
620 if (flags & SB_SUBMOUNT)
621 user_ns = &init_user_ns;
622
623 retry:
624 spin_lock(&sb_lock);
625 if (test) {
626 hlist_for_each_entry(old, &type->fs_supers, s_instances) {
627 if (!test(old, data))
628 continue;
629 if (user_ns != old->s_user_ns) {
630 spin_unlock(&sb_lock);
631 destroy_unused_super(s);
632 return ERR_PTR(-EBUSY);
633 }
634 if (!grab_super(old))
635 goto retry;
636 destroy_unused_super(s);
637 return old;
638 }
639 }
640 if (!s) {
641 spin_unlock(&sb_lock);
642 s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
643 if (!s)
644 return ERR_PTR(-ENOMEM);
645 goto retry;
646 }
647
648 err = set(s, data);
649 if (err) {
650 spin_unlock(&sb_lock);
651 destroy_unused_super(s);
652 return ERR_PTR(err);
653 }
654 s->s_type = type;
655 strlcpy(s->s_id, type->name, sizeof(s->s_id));
656 list_add_tail(&s->s_list, &super_blocks);
657 hlist_add_head(&s->s_instances, &type->fs_supers);
658 spin_unlock(&sb_lock);
659 get_filesystem(type);
660 register_shrinker_prepared(&s->s_shrink);
661 return s;
662 }
663 EXPORT_SYMBOL(sget);
664
665 void drop_super(struct super_block *sb)
666 {
667 up_read(&sb->s_umount);
668 put_super(sb);
669 }
670
671 EXPORT_SYMBOL(drop_super);
672
673 void drop_super_exclusive(struct super_block *sb)
674 {
675 up_write(&sb->s_umount);
676 put_super(sb);
677 }
678 EXPORT_SYMBOL(drop_super_exclusive);
679
680 static void __iterate_supers(void (*f)(struct super_block *))
681 {
682 struct super_block *sb, *p = NULL;
683
684 spin_lock(&sb_lock);
685 list_for_each_entry(sb, &super_blocks, s_list) {
686 if (hlist_unhashed(&sb->s_instances))
687 continue;
688 sb->s_count++;
689 spin_unlock(&sb_lock);
690
691 f(sb);
692
693 spin_lock(&sb_lock);
694 if (p)
695 __put_super(p);
696 p = sb;
697 }
698 if (p)
699 __put_super(p);
700 spin_unlock(&sb_lock);
701 }
702 /**
703 * iterate_supers - call function for all active superblocks
704 * @f: function to call
705 * @arg: argument to pass to it
706 *
707 * Scans the superblock list and calls given function, passing it
708 * locked superblock and given argument.
709 */
710 void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
711 {
712 struct super_block *sb, *p = NULL;
713
714 spin_lock(&sb_lock);
715 list_for_each_entry(sb, &super_blocks, s_list) {
716 if (hlist_unhashed(&sb->s_instances))
717 continue;
718 sb->s_count++;
719 spin_unlock(&sb_lock);
720
721 down_read(&sb->s_umount);
722 if (sb->s_root && (sb->s_flags & SB_BORN))
723 f(sb, arg);
724 up_read(&sb->s_umount);
725
726 spin_lock(&sb_lock);
727 if (p)
728 __put_super(p);
729 p = sb;
730 }
731 if (p)
732 __put_super(p);
733 spin_unlock(&sb_lock);
734 }
735
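/*
 * Illustrative sketch (editor's example, not part of fs/super.c): a
 * sys_sync()-style user passes a small callback; each invocation runs
 * with s_umount held shared and the superblock pinned and born:
 *
 *	static void sync_one_sb(struct super_block *sb, void *arg)
 *	{
 *		if (!sb_rdonly(sb))
 *			sync_filesystem(sb);
 *	}
 *
 *	iterate_supers(sync_one_sb, NULL);
 *
 * (sync_filesystem() is a stand-in here; the real sync path is split
 * into finer-grained passes.)
 */
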
736 /**
737 * iterate_supers_type - call function for superblocks of given type
738 * @type: fs type
739 * @f: function to call
740 * @arg: argument to pass to it
741 *
742 * Scans the superblock list and calls given function, passing it
743 * locked superblock and given argument.
744 */
745 void iterate_supers_type(struct file_system_type *type,
746 void (*f)(struct super_block *, void *), void *arg)
747 {
748 struct super_block *sb, *p = NULL;
749
750 spin_lock(&sb_lock);
751 hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
752 sb->s_count++;
753 spin_unlock(&sb_lock);
754
755 down_read(&sb->s_umount);
756 if (sb->s_root && (sb->s_flags & SB_BORN))
757 f(sb, arg);
758 up_read(&sb->s_umount);
759
760 spin_lock(&sb_lock);
761 if (p)
762 __put_super(p);
763 p = sb;
764 }
765 if (p)
766 __put_super(p);
767 spin_unlock(&sb_lock);
768 }
769
770 EXPORT_SYMBOL(iterate_supers_type);
771
772 /**
773 * get_super - get the superblock of a device
774 * @bdev: device to get the superblock for
775 *
776 * Scans the superblock list and finds the superblock of the file system
777 * mounted on the device given. %NULL is returned if no match is found.
778 */
779 struct super_block *get_super(struct block_device *bdev)
780 {
781 struct super_block *sb;
782
783 if (!bdev)
784 return NULL;
785
786 spin_lock(&sb_lock);
787 rescan:
788 list_for_each_entry(sb, &super_blocks, s_list) {
789 if (hlist_unhashed(&sb->s_instances))
790 continue;
791 if (sb->s_bdev == bdev) {
792 sb->s_count++;
793 spin_unlock(&sb_lock);
794 down_read(&sb->s_umount);
795 /* still alive? */
796 if (sb->s_root && (sb->s_flags & SB_BORN))
797 return sb;
798 up_read(&sb->s_umount);
799 /* nope, got unmounted */
800 spin_lock(&sb_lock);
801 __put_super(sb);
802 goto rescan;
803 }
804 }
805 spin_unlock(&sb_lock);
806 return NULL;
807 }
808
809 /**
810 * get_active_super - get an active reference to the superblock of a device
811 * @bdev: device to get the superblock for
812 *
813 * Scans the superblock list and finds the superblock of the file system
814 * mounted on the device given. Returns the superblock with an active
815 * reference or %NULL if none was found.
816 */
817 struct super_block *get_active_super(struct block_device *bdev)
818 {
819 struct super_block *sb;
820
821 if (!bdev)
822 return NULL;
823
824 restart:
825 spin_lock(&sb_lock);
826 list_for_each_entry(sb, &super_blocks, s_list) {
827 if (hlist_unhashed(&sb->s_instances))
828 continue;
829 if (sb->s_bdev == bdev) {
830 if (!grab_super(sb))
831 goto restart;
832 up_write(&sb->s_umount);
833 return sb;
834 }
835 }
836 spin_unlock(&sb_lock);
837 return NULL;
838 }
839
840 struct super_block *user_get_super(dev_t dev, bool excl)
841 {
842 struct super_block *sb;
843
844 spin_lock(&sb_lock);
845 rescan:
846 list_for_each_entry(sb, &super_blocks, s_list) {
847 if (hlist_unhashed(&sb->s_instances))
848 continue;
849 if (sb->s_dev == dev) {
850 sb->s_count++;
851 spin_unlock(&sb_lock);
852 if (excl)
853 down_write(&sb->s_umount);
854 else
855 down_read(&sb->s_umount);
856 /* still alive? */
857 if (sb->s_root && (sb->s_flags & SB_BORN))
858 return sb;
859 if (excl)
860 up_write(&sb->s_umount);
861 else
862 up_read(&sb->s_umount);
863 /* nope, got unmounted */
864 spin_lock(&sb_lock);
865 __put_super(sb);
866 goto rescan;
867 }
868 }
869 spin_unlock(&sb_lock);
870 return NULL;
871 }
872
873 /**
874 * reconfigure_super - asks filesystem to change superblock parameters
875 * @fc: The superblock and configuration
876 *
877 * Alters the configuration parameters of a live superblock.
878 */
879 int reconfigure_super(struct fs_context *fc)
880 {
881 struct super_block *sb = fc->root->d_sb;
882 int retval;
883 bool remount_ro = false;
884 bool force = fc->sb_flags & SB_FORCE;
885
886 if (fc->sb_flags_mask & ~MS_RMT_MASK)
887 return -EINVAL;
888 if (sb->s_writers.frozen != SB_UNFROZEN)
889 return -EBUSY;
890
891 retval = security_sb_remount(sb, fc->security);
892 if (retval)
893 return retval;
894
895 if (fc->sb_flags_mask & SB_RDONLY) {
896 #ifdef CONFIG_BLOCK
897 if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
898 bdev_read_only(sb->s_bdev))
899 return -EACCES;
900 #endif
901
902 remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
903 }
904
905 if (remount_ro) {
906 if (!hlist_empty(&sb->s_pins)) {
907 up_write(&sb->s_umount);
908 group_pin_kill(&sb->s_pins);
909 down_write(&sb->s_umount);
910 if (!sb->s_root)
911 return 0;
912 if (sb->s_writers.frozen != SB_UNFROZEN)
913 return -EBUSY;
914 remount_ro = !sb_rdonly(sb);
915 }
916 }
917 shrink_dcache_sb(sb);
918
919 /* If we are reconfiguring to RDONLY and current sb is read/write,
920 * make sure there are no files open for writing.
921 */
922 if (remount_ro) {
923 if (force) {
924 sb->s_readonly_remount = 1;
925 smp_wmb();
926 } else {
927 retval = sb_prepare_remount_readonly(sb);
928 if (retval)
929 return retval;
930 }
931 }
932
933 if (fc->ops->reconfigure) {
934 retval = fc->ops->reconfigure(fc);
935 if (retval) {
936 if (!force)
937 goto cancel_readonly;
938 /* If forced remount, go ahead despite any errors */
939 WARN(1, "forced remount of a %s fs returned %i\n",
940 sb->s_type->name, retval);
941 }
942 }
943
944 WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
945 (fc->sb_flags & fc->sb_flags_mask)));
946 /* Needs to be ordered wrt mnt_is_readonly() */
947 smp_wmb();
948 sb->s_readonly_remount = 0;
949
950 /*
951 * Some filesystems modify their metadata via some other path than the
952 * bdev buffer cache (eg. use a private mapping, or directories in
953 * pagecache, etc). Also file data modifications go via their own
954 * mappings. So if we try to mount readonly then copy the filesystem
955 * from bdev, we could get stale data, so invalidate it to give a best
956 * effort at coherency.
957 */
958 if (remount_ro && sb->s_bdev)
959 invalidate_bdev(sb->s_bdev);
960 return 0;
961
962 cancel_readonly:
963 sb->s_readonly_remount = 0;
964 return retval;
965 }
966
967 static void do_emergency_remount_callback(struct super_block *sb)
968 {
969 down_write(&sb->s_umount);
970 if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
971 !sb_rdonly(sb)) {
972 struct fs_context *fc;
973
974 fc = fs_context_for_reconfigure(sb->s_root,
975 SB_RDONLY | SB_FORCE, SB_RDONLY);
976 if (!IS_ERR(fc)) {
977 if (parse_monolithic_mount_data(fc, NULL) == 0)
978 (void)reconfigure_super(fc);
979 put_fs_context(fc);
980 }
981 }
982 up_write(&sb->s_umount);
983 }
984
985 static void do_emergency_remount(struct work_struct *work)
986 {
987 __iterate_supers(do_emergency_remount_callback);
988 kfree(work);
989 printk("Emergency Remount complete\n");
990 }
991
992 void emergency_remount(void)
993 {
994 struct work_struct *work;
995
996 work = kmalloc(sizeof(*work), GFP_ATOMIC);
997 if (work) {
998 INIT_WORK(work, do_emergency_remount);
999 schedule_work(work);
1000 }
1001 }
1002
1003 static void do_thaw_all_callback(struct super_block *sb)
1004 {
1005 down_write(&sb->s_umount);
1006 if (sb->s_root && sb->s_flags & SB_BORN) {
1007 emergency_thaw_bdev(sb);
1008 thaw_super_locked(sb);
1009 } else {
1010 up_write(&sb->s_umount);
1011 }
1012 }
1013
1014 static void do_thaw_all(struct work_struct *work)
1015 {
1016 __iterate_supers(do_thaw_all_callback);
1017 kfree(work);
1018 printk(KERN_WARNING "Emergency Thaw complete\n");
1019 }
1020
1021 /**
1022 * emergency_thaw_all -- forcibly thaw every frozen filesystem
1023 *
1024 * Used for emergency unfreeze of all filesystems via SysRq
1025 */
1026 void emergency_thaw_all(void)
1027 {
1028 struct work_struct *work;
1029
1030 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1031 if (work) {
1032 INIT_WORK(work, do_thaw_all);
1033 schedule_work(work);
1034 }
1035 }
1036
1037 static DEFINE_IDA(unnamed_dev_ida);
1038
1039 /**
1040 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
1041 * @p: Pointer to a dev_t.
1042 *
1043 * Filesystems which don't use real block devices can call this function
1044 * to allocate a virtual block device.
1045 *
1046 * Context: Any context. Frequently called while holding sb_lock.
1047 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
1048 * or -ENOMEM if memory allocation failed.
1049 */
1050 int get_anon_bdev(dev_t *p)
1051 {
1052 int dev;
1053
1054 /*
1055 * Many userspace utilities consider an FSID of 0 invalid.
1056 * Always return at least 1 from get_anon_bdev.
1057 */
1058 dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
1059 GFP_ATOMIC);
1060 if (dev == -ENOSPC)
1061 dev = -EMFILE;
1062 if (dev < 0)
1063 return dev;
1064
1065 *p = MKDEV(0, dev);
1066 return 0;
1067 }
1068 EXPORT_SYMBOL(get_anon_bdev);
1069
1070 void free_anon_bdev(dev_t dev)
1071 {
1072 ida_free(&unnamed_dev_ida, MINOR(dev));
1073 }
1074 EXPORT_SYMBOL(free_anon_bdev);
1075
1076 int set_anon_super(struct super_block *s, void *data)
1077 {
1078 return get_anon_bdev(&s->s_dev);
1079 }
1080 EXPORT_SYMBOL(set_anon_super);
1081
1082 void kill_anon_super(struct super_block *sb)
1083 {
1084 dev_t dev = sb->s_dev;
1085 generic_shutdown_super(sb);
1086 free_anon_bdev(dev);
1087 }
1088 EXPORT_SYMBOL(kill_anon_super);
1089
1090 void kill_litter_super(struct super_block *sb)
1091 {
1092 if (sb->s_root)
1093 d_genocide(sb->s_root);
1094 kill_anon_super(sb);
1095 }
1096 EXPORT_SYMBOL(kill_litter_super);
1097
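/*
 * Illustrative sketch (editor's example, not part of fs/super.c): small
 * in-memory filesystems whose dentries are kept pinned ("litter")
 * typically just plug these helpers into their file_system_type
 * (examplefs names are hypothetical):
 *
 *	static struct file_system_type examplefs_fs_type = {
 *		.name		= "examplefs",
 *		.init_fs_context = examplefs_init_fs_context,
 *		.kill_sb	= kill_litter_super,
 *	};
 */
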
1098 int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
1099 {
1100 return set_anon_super(sb, NULL);
1101 }
1102 EXPORT_SYMBOL(set_anon_super_fc);
1103
1104 static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
1105 {
1106 return sb->s_fs_info == fc->s_fs_info;
1107 }
1108
1109 static int test_single_super(struct super_block *s, struct fs_context *fc)
1110 {
1111 return 1;
1112 }
1113
1114 /**
1115 * vfs_get_super - Get a superblock with a search key set in s_fs_info.
1116 * @fc: The filesystem context holding the parameters
1117 * @keying: How to distinguish superblocks
1118 * @fill_super: Helper to initialise a new superblock
1119 *
1120 * Search for a superblock and create a new one if not found. The search
1121 * criterion is controlled by @keying. If the search fails, a new superblock
1122 * is created and @fill_super() is called to initialise it.
1123 *
1124 * @keying can take one of a number of values:
1125 *
1126 * (1) vfs_get_single_super - Only one superblock of this type may exist on the
1127 * system. This is typically used for special system filesystems.
1128 *
1129 * (2) vfs_get_keyed_super - Multiple superblocks may exist, but they must have
1130 * distinct keys (where the key is in s_fs_info). Searching for the same
1131 * key again will turn up the superblock for that key.
1132 *
1133 * (3) vfs_get_independent_super - Multiple superblocks may exist and are
1134 * unkeyed. Each call will get a new superblock.
1135 *
1136 * A permissions check is made by sget_fc() unless we're getting a superblock
1137 * for a kernel-internal mount or a submount.
1138 */
1139 int vfs_get_super(struct fs_context *fc,
1140 enum vfs_get_super_keying keying,
1141 int (*fill_super)(struct super_block *sb,
1142 struct fs_context *fc))
1143 {
1144 int (*test)(struct super_block *, struct fs_context *);
1145 struct super_block *sb;
1146 int err;
1147
1148 switch (keying) {
1149 case vfs_get_single_super:
1150 case vfs_get_single_reconf_super:
1151 test = test_single_super;
1152 break;
1153 case vfs_get_keyed_super:
1154 test = test_keyed_super;
1155 break;
1156 case vfs_get_independent_super:
1157 test = NULL;
1158 break;
1159 default:
1160 BUG();
1161 }
1162
1163 sb = sget_fc(fc, test, set_anon_super_fc);
1164 if (IS_ERR(sb))
1165 return PTR_ERR(sb);
1166
1167 if (!sb->s_root) {
1168 err = fill_super(sb, fc);
1169 if (err)
1170 goto error;
1171
1172 sb->s_flags |= SB_ACTIVE;
1173 fc->root = dget(sb->s_root);
1174 } else {
1175 fc->root = dget(sb->s_root);
1176 if (keying == vfs_get_single_reconf_super) {
1177 err = reconfigure_super(fc);
1178 if (err < 0) {
1179 dput(fc->root);
1180 fc->root = NULL;
1181 goto error;
1182 }
1183 }
1184 }
1185
1186 return 0;
1187
1188 error:
1189 deactivate_locked_super(sb);
1190 return err;
1191 }
1192 EXPORT_SYMBOL(vfs_get_super);
1193
1194 int get_tree_nodev(struct fs_context *fc,
1195 int (*fill_super)(struct super_block *sb,
1196 struct fs_context *fc))
1197 {
1198 return vfs_get_super(fc, vfs_get_independent_super, fill_super);
1199 }
1200 EXPORT_SYMBOL(get_tree_nodev);
1201
1202 int get_tree_single(struct fs_context *fc,
1203 int (*fill_super)(struct super_block *sb,
1204 struct fs_context *fc))
1205 {
1206 return vfs_get_super(fc, vfs_get_single_super, fill_super);
1207 }
1208 EXPORT_SYMBOL(get_tree_single);
1209
1210 int get_tree_single_reconf(struct fs_context *fc,
1211 int (*fill_super)(struct super_block *sb,
1212 struct fs_context *fc))
1213 {
1214 return vfs_get_super(fc, vfs_get_single_reconf_super, fill_super);
1215 }
1216 EXPORT_SYMBOL(get_tree_single_reconf);
1217
1218 int get_tree_keyed(struct fs_context *fc,
1219 int (*fill_super)(struct super_block *sb,
1220 struct fs_context *fc),
1221 void *key)
1222 {
1223 fc->s_fs_info = key;
1224 return vfs_get_super(fc, vfs_get_keyed_super, fill_super);
1225 }
1226 EXPORT_SYMBOL(get_tree_keyed);
1227
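/*
 * Illustrative sketch (editor's example, not part of fs/super.c): a
 * filesystem with one superblock per namespace-like object can pass the
 * object itself as @key (the examplefs helpers are hypothetical):
 *
 *	static int examplefs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_keyed(fc, examplefs_fill_super,
 *				      examplefs_ns(fc->user_ns));
 *	}
 *
 * Two mounts resolving to the same key share one superblock; a new key
 * gets a superblock of its own.
 */
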
1228 #ifdef CONFIG_BLOCK
1229
1230 static int set_bdev_super(struct super_block *s, void *data)
1231 {
1232 s->s_bdev = data;
1233 s->s_dev = s->s_bdev->bd_dev;
1234 s->s_bdi = bdi_get(s->s_bdev->bd_disk->bdi);
1235
1236 if (bdev_stable_writes(s->s_bdev))
1237 s->s_iflags |= SB_I_STABLE_WRITES;
1238 return 0;
1239 }
1240
1241 static int set_bdev_super_fc(struct super_block *s, struct fs_context *fc)
1242 {
1243 return set_bdev_super(s, fc->sget_key);
1244 }
1245
1246 static int test_bdev_super_fc(struct super_block *s, struct fs_context *fc)
1247 {
1248 return !(s->s_iflags & SB_I_RETIRED) && s->s_bdev == fc->sget_key;
1249 }
1250
1251 /**
1252 * get_tree_bdev - Get a superblock based on a single block device
1253 * @fc: The filesystem context holding the parameters
1254 * @fill_super: Helper to initialise a new superblock
1255 */
1256 int get_tree_bdev(struct fs_context *fc,
1257 int (*fill_super)(struct super_block *,
1258 struct fs_context *))
1259 {
1260 struct block_device *bdev;
1261 struct super_block *s;
1262 fmode_t mode = FMODE_READ | FMODE_EXCL;
1263 int error = 0;
1264
1265 if (!(fc->sb_flags & SB_RDONLY))
1266 mode |= FMODE_WRITE;
1267
1268 if (!fc->source)
1269 return invalf(fc, "No source specified");
1270
1271 bdev = blkdev_get_by_path(fc->source, mode, fc->fs_type);
1272 if (IS_ERR(bdev)) {
1273 errorf(fc, "%s: Can't open blockdev", fc->source);
1274 return PTR_ERR(bdev);
1275 }
1276
1277 /* Once the superblock is inserted into the list by sget_fc(), s_umount
1278 * will protect the lockfs code from trying to start a snapshot while
1279 * we are mounting
1280 */
1281 mutex_lock(&bdev->bd_fsfreeze_mutex);
1282 if (bdev->bd_fsfreeze_count > 0) {
1283 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1284 warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
1285 blkdev_put(bdev, mode);
1286 return -EBUSY;
1287 }
1288
1289 fc->sb_flags |= SB_NOSEC;
1290 fc->sget_key = bdev;
1291 s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
1292 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1293 if (IS_ERR(s)) {
1294 blkdev_put(bdev, mode);
1295 return PTR_ERR(s);
1296 }
1297
1298 if (s->s_root) {
1299 /* Don't summarily change the RO/RW state. */
1300 if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
1301 warnf(fc, "%pg: Can't mount, would change RO state", bdev);
1302 deactivate_locked_super(s);
1303 blkdev_put(bdev, mode);
1304 return -EBUSY;
1305 }
1306
1307 /*
1308 * s_umount nests inside open_mutex during
1309 * __invalidate_device(). blkdev_put() acquires
1310 * open_mutex and can't be called under s_umount. Drop
1311 * s_umount temporarily. This is safe as we're
1312 * holding an active reference.
1313 */
1314 up_write(&s->s_umount);
1315 blkdev_put(bdev, mode);
1316 down_write(&s->s_umount);
1317 } else {
1318 s->s_mode = mode;
1319 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1320 shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s",
1321 fc->fs_type->name, s->s_id);
1322 sb_set_blocksize(s, block_size(bdev));
1323 error = fill_super(s, fc);
1324 if (error) {
1325 deactivate_locked_super(s);
1326 return error;
1327 }
1328
1329 s->s_flags |= SB_ACTIVE;
1330 bdev->bd_super = s;
1331 }
1332
1333 BUG_ON(fc->root);
1334 fc->root = dget(s->s_root);
1335 return 0;
1336 }
1337 EXPORT_SYMBOL(get_tree_bdev);
1338
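/*
 * Illustrative sketch (editor's example, not part of fs/super.c): a
 * block-device filesystem converted to the fs_context API wires this up
 * from its ->get_tree() hook (examplefs names are hypothetical):
 *
 *	static int examplefs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_bdev(fc, examplefs_fill_super);
 *	}
 */
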
1339 static int test_bdev_super(struct super_block *s, void *data)
1340 {
1341 return !(s->s_iflags & SB_I_RETIRED) && (void *)s->s_bdev == data;
1342 }
1343
1344 struct dentry *mount_bdev(struct file_system_type *fs_type,
1345 int flags, const char *dev_name, void *data,
1346 int (*fill_super)(struct super_block *, void *, int))
1347 {
1348 struct block_device *bdev;
1349 struct super_block *s;
1350 fmode_t mode = FMODE_READ | FMODE_EXCL;
1351 int error = 0;
1352
1353 if (!(flags & SB_RDONLY))
1354 mode |= FMODE_WRITE;
1355
1356 bdev = blkdev_get_by_path(dev_name, mode, fs_type);
1357 if (IS_ERR(bdev))
1358 return ERR_CAST(bdev);
1359
1360 /*
1361 * once the super is inserted into the list by sget, s_umount
1362 * will protect the lockfs code from trying to start a snapshot
1363 * while we are mounting
1364 */
1365 mutex_lock(&bdev->bd_fsfreeze_mutex);
1366 if (bdev->bd_fsfreeze_count > 0) {
1367 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1368 error = -EBUSY;
1369 goto error_bdev;
1370 }
1371 s = sget(fs_type, test_bdev_super, set_bdev_super, flags | SB_NOSEC,
1372 bdev);
1373 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1374 if (IS_ERR(s))
1375 goto error_s;
1376
1377 if (s->s_root) {
1378 if ((flags ^ s->s_flags) & SB_RDONLY) {
1379 deactivate_locked_super(s);
1380 error = -EBUSY;
1381 goto error_bdev;
1382 }
1383
1384 /*
1385 * s_umount nests inside open_mutex during
1386 * __invalidate_device(). blkdev_put() acquires
1387 * open_mutex and can't be called under s_umount. Drop
1388 * s_umount temporarily. This is safe as we're
1389 * holding an active reference.
1390 */
1391 up_write(&s->s_umount);
1392 blkdev_put(bdev, mode);
1393 down_write(&s->s_umount);
1394 } else {
1395 s->s_mode = mode;
1396 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1397 shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s",
1398 fs_type->name, s->s_id);
1399 sb_set_blocksize(s, block_size(bdev));
1400 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1401 if (error) {
1402 deactivate_locked_super(s);
1403 goto error;
1404 }
1405
1406 s->s_flags |= SB_ACTIVE;
1407 bdev->bd_super = s;
1408 }
1409
1410 return dget(s->s_root);
1411
1412 error_s:
1413 error = PTR_ERR(s);
1414 error_bdev:
1415 blkdev_put(bdev, mode);
1416 error:
1417 return ERR_PTR(error);
1418 }
1419 EXPORT_SYMBOL(mount_bdev);
1420
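/*
 * Illustrative sketch (editor's example, not part of fs/super.c): the
 * legacy ->mount() entry point of a block-device filesystem is usually
 * a thin wrapper around mount_bdev(), paired with
 * .kill_sb = kill_block_super below (examplefs names are hypothetical):
 *
 *	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *		int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  examplefs_fill_super);
 *	}
 */
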
1421 void kill_block_super(struct super_block *sb)
1422 {
1423 struct block_device *bdev = sb->s_bdev;
1424 fmode_t mode = sb->s_mode;
1425
1426 bdev->bd_super = NULL;
1427 generic_shutdown_super(sb);
1428 sync_blockdev(bdev);
1429 WARN_ON_ONCE(!(mode & FMODE_EXCL));
1430 blkdev_put(bdev, mode | FMODE_EXCL);
1431 }
1432
1433 EXPORT_SYMBOL(kill_block_super);
1434 #endif
1435
1436 struct dentry *mount_nodev(struct file_system_type *fs_type,
1437 int flags, void *data,
1438 int (*fill_super)(struct super_block *, void *, int))
1439 {
1440 int error;
1441 struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
1442
1443 if (IS_ERR(s))
1444 return ERR_CAST(s);
1445
1446 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1447 if (error) {
1448 deactivate_locked_super(s);
1449 return ERR_PTR(error);
1450 }
1451 s->s_flags |= SB_ACTIVE;
1452 return dget(s->s_root);
1453 }
1454 EXPORT_SYMBOL(mount_nodev);
1455
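/*
 * Illustrative sketch (editor's example, not part of fs/super.c): the
 * no-device analogue of the mount_bdev() wrapper above; note that every
 * such mount gets a superblock of its own, unlike mount_single() below
 * (examplefs names are hypothetical):
 *
 *	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *		int flags, const char *dev_name, void *data)
 *	{
 *		return mount_nodev(fs_type, flags, data, examplefs_fill_super);
 *	}
 */
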
1456 int reconfigure_single(struct super_block *s,
1457 int flags, void *data)
1458 {
1459 struct fs_context *fc;
1460 int ret;
1461
1462 /* The caller really needs to be passing fc down into mount_single(),
1463 * then a chunk of this can be removed. [Bollocks -- AV]
1464 * Better yet, reconfiguration shouldn't happen, but rather the second
1465 * mount should be rejected if the parameters are not compatible.
1466 */
1467 fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
1468 if (IS_ERR(fc))
1469 return PTR_ERR(fc);
1470
1471 ret = parse_monolithic_mount_data(fc, data);
1472 if (ret < 0)
1473 goto out;
1474
1475 ret = reconfigure_super(fc);
1476 out:
1477 put_fs_context(fc);
1478 return ret;
1479 }
1480
1481 static int compare_single(struct super_block *s, void *p)
1482 {
1483 return 1;
1484 }
1485
1486 struct dentry *mount_single(struct file_system_type *fs_type,
1487 int flags, void *data,
1488 int (*fill_super)(struct super_block *, void *, int))
1489 {
1490 struct super_block *s;
1491 int error;
1492
1493 s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
1494 if (IS_ERR(s))
1495 return ERR_CAST(s);
1496 if (!s->s_root) {
1497 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1498 if (!error)
1499 s->s_flags |= SB_ACTIVE;
1500 } else {
1501 error = reconfigure_single(s, flags, data);
1502 }
1503 if (unlikely(error)) {
1504 deactivate_locked_super(s);
1505 return ERR_PTR(error);
1506 }
1507 return dget(s->s_root);
1508 }
1509 EXPORT_SYMBOL(mount_single);
1510
1511 /**
1512 * vfs_get_tree - Get the mountable root
1513 * @fc: The superblock configuration context.
1514 *
1515 * The filesystem is invoked to get or create a superblock which can then later
1516 * be used for mounting. The filesystem places a pointer to the root to be
1517 * used for mounting in @fc->root.
1518 */
1519 int vfs_get_tree(struct fs_context *fc)
1520 {
1521 struct super_block *sb;
1522 int error;
1523
1524 if (fc->root)
1525 return -EBUSY;
1526
1527 /* Get the mountable root in fc->root, with a ref on the root and a ref
1528 * on the superblock.
1529 */
1530 error = fc->ops->get_tree(fc);
1531 if (error < 0)
1532 return error;
1533
1534 if (!fc->root) {
1535 pr_err("Filesystem %s get_tree() didn't set fc->root\n",
1536 fc->fs_type->name);
1537 /* We don't know what the locking state of the superblock is -
1538 * if there is a superblock.
1539 */
1540 BUG();
1541 }
1542
1543 sb = fc->root->d_sb;
1544 WARN_ON(!sb->s_bdi);
1545
1546 /*
1547 * Write barrier is for super_cache_count(). We place it before setting
1548 * SB_BORN as the data dependency between the two functions is the
1549 * superblock structure contents that we just set up, not the SB_BORN
1550 * flag.
1551 */
1552 smp_wmb();
1553 sb->s_flags |= SB_BORN;
1554
1555 error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
1556 if (unlikely(error)) {
1557 fc_drop_locked(fc);
1558 return error;
1559 }
1560
1561 /*
1562 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
1563 * but s_maxbytes was an unsigned long long for many releases. Throw
1564 * this warning for a little while to try and catch filesystems that
1565 * violate this rule.
1566 */
1567 WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
1568 "negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
1569
1570 return 0;
1571 }
1572 EXPORT_SYMBOL(vfs_get_tree);
1573
1574 /*
1575 * Setup private BDI for given superblock. It gets automatically cleaned up
1576 * in generic_shutdown_super().
1577 */
1578 int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1579 {
1580 struct backing_dev_info *bdi;
1581 int err;
1582 va_list args;
1583
1584 bdi = bdi_alloc(NUMA_NO_NODE);
1585 if (!bdi)
1586 return -ENOMEM;
1587
1588 va_start(args, fmt);
1589 err = bdi_register_va(bdi, fmt, args);
1590 va_end(args);
1591 if (err) {
1592 bdi_put(bdi);
1593 return err;
1594 }
1595 WARN_ON(sb->s_bdi != &noop_backing_dev_info);
1596 sb->s_bdi = bdi;
1597 sb->s_iflags |= SB_I_PERSB_BDI;
1598
1599 return 0;
1600 }
1601 EXPORT_SYMBOL(super_setup_bdi_name);
1602
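/*
 * Illustrative sketch (editor's example, not part of fs/super.c): a
 * fill_super() implementation that wants per-sb writeback registers a
 * private BDI early, before dirtying anything ("conn_id" is a
 * hypothetical per-connection identifier):
 *
 *	err = super_setup_bdi_name(sb, "examplefs-%u", conn_id);
 *	if (err)
 *		return err;
 *	sb->s_bdi->ra_pages = 0;	(e.g. to disable readahead)
 *
 * The BDI is then torn down automatically in generic_shutdown_super().
 */
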
1603 /*
1604 * Setup private BDI for given superblock. It gets automatically cleaned up
1605 * in generic_shutdown_super().
1606 */
1607 int super_setup_bdi(struct super_block *sb)
1608 {
1609 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
1610
1611 return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
1612 atomic_long_inc_return(&bdi_seq));
1613 }
1614 EXPORT_SYMBOL(super_setup_bdi);
1615
1616 /**
1617 * sb_wait_write - wait until all writers to given file system finish
1618 * @sb: the super for which we wait
1619 * @level: type of writers we wait for (normal vs page fault)
1620 *
1621 * This function waits until there are no writers of given type to given file
1622 * system.
1623 */
1624 static void sb_wait_write(struct super_block *sb, int level)
1625 {
1626 percpu_down_write(sb->s_writers.rw_sem + level-1);
1627 }
1628
1629 /*
1630 * We are going to return to userspace and forget about these locks, the
1631 * ownership goes to the caller of thaw_super() which does unlock().
1632 */
1633 static void lockdep_sb_freeze_release(struct super_block *sb)
1634 {
1635 int level;
1636
1637 for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
1638 percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1639 }
1640
1641 /*
1642 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
1643 */
1644 static void lockdep_sb_freeze_acquire(struct super_block *sb)
1645 {
1646 int level;
1647
1648 for (level = 0; level < SB_FREEZE_LEVELS; ++level)
1649 percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1650 }
1651
1652 static void sb_freeze_unlock(struct super_block *sb, int level)
1653 {
1654 for (level--; level >= 0; level--)
1655 percpu_up_write(sb->s_writers.rw_sem + level);
1656 }
1657
1658 /**
1659 * freeze_super - lock the filesystem and force it into a consistent state
1660 * @sb: the super to lock
1661 *
1662 * Syncs the super to make sure the filesystem is consistent and calls the fs's
1663 * freeze_fs. Subsequent calls to this without first thawing the fs will return
1664 * -EBUSY.
1665 *
1666 * During this function, sb->s_writers.frozen goes through these values:
1667 *
1668 * SB_UNFROZEN: File system is normal, all writes progress as usual.
1669 *
1670 * SB_FREEZE_WRITE: The file system is in the process of being frozen. New
1671 * writes should be blocked, though page faults are still allowed. We wait for
1672 * all writes to complete and then proceed to the next stage.
1673 *
1674 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
1675 * but internal fs threads can still modify the filesystem (although they
1676 * should not dirty new pages or inodes), writeback can run etc. After waiting
1677 * for all running page faults we sync the filesystem which will clean all
1678 * dirty pages and inodes (no new dirty pages or inodes can be created when
1679 * sync is running).
1680 *
1681 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
1682 * modification are blocked (e.g. XFS preallocation truncation on inode
1683 * reclaim). This is usually implemented by blocking new transactions for
1684 * filesystems that have them and need this additional guard. After all
1685 * internal writers are finished we call ->freeze_fs() to finish filesystem
1686 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
1687 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
1688 *
1689 * sb->s_writers.frozen is protected by sb->s_umount.
1690 */
1691 int freeze_super(struct super_block *sb)
1692 {
1693 int ret;
1694
1695 atomic_inc(&sb->s_active);
1696 down_write(&sb->s_umount);
1697 if (sb->s_writers.frozen != SB_UNFROZEN) {
1698 deactivate_locked_super(sb);
1699 return -EBUSY;
1700 }
1701
1702 if (!(sb->s_flags & SB_BORN)) {
1703 up_write(&sb->s_umount);
1704 return 0; /* sic - it's "nothing to do" */
1705 }
1706
1707 if (sb_rdonly(sb)) {
1708 /* Nothing to do really... */
1709 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
1710 up_write(&sb->s_umount);
1711 return 0;
1712 }
1713
1714 sb->s_writers.frozen = SB_FREEZE_WRITE;
1715 /* Release s_umount to preserve sb_start_write -> s_umount ordering */
1716 up_write(&sb->s_umount);
1717 sb_wait_write(sb, SB_FREEZE_WRITE);
1718 down_write(&sb->s_umount);
1719
1720 /* Now we go and block page faults... */
1721 sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
1722 sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
1723
1724 /* All writers are done so after syncing there won't be dirty data */
1725 ret = sync_filesystem(sb);
1726 if (ret) {
1727 sb->s_writers.frozen = SB_UNFROZEN;
1728 sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
1729 wake_up(&sb->s_writers.wait_unfrozen);
1730 deactivate_locked_super(sb);
1731 return ret;
1732 }
1733
1734 /* Now wait for internal filesystem counter */
1735 sb->s_writers.frozen = SB_FREEZE_FS;
1736 sb_wait_write(sb, SB_FREEZE_FS);
1737
1738 if (sb->s_op->freeze_fs) {
1739 ret = sb->s_op->freeze_fs(sb);
1740 if (ret) {
1741 printk(KERN_ERR
1742 "VFS:Filesystem freeze failed\n");
1743 sb->s_writers.frozen = SB_UNFROZEN;
1744 sb_freeze_unlock(sb, SB_FREEZE_FS);
1745 wake_up(&sb->s_writers.wait_unfrozen);
1746 deactivate_locked_super(sb);
1747 return ret;
1748 }
1749 }
1750 /*
1751 * For debugging purposes so that fs can warn if it sees write activity
1752 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
1753 */
1754 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
1755 lockdep_sb_freeze_release(sb);
1756 up_write(&sb->s_umount);
1757 return 0;
1758 }
1759 EXPORT_SYMBOL(freeze_super);
1760
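/*
 * Illustrative sketch (editor's example, not part of fs/super.c): the
 * canonical pairing, as in the FIFREEZE/FITHAW ioctls: freeze, take a
 * stable device-level snapshot or backup, then thaw:
 *
 *	error = freeze_super(sb);
 *	if (error)
 *		return error;
 *	...snapshot the device while the fs is quiescent...
 *	error = thaw_super(sb);
 *
 * A second freeze_super() before the thaw fails with -EBUSY.
 */
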
1761 static int thaw_super_locked(struct super_block *sb)
1762 {
1763 int error;
1764
1765 if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
1766 up_write(&sb->s_umount);
1767 return -EINVAL;
1768 }
1769
1770 if (sb_rdonly(sb)) {
1771 sb->s_writers.frozen = SB_UNFROZEN;
1772 goto out;
1773 }
1774
1775 lockdep_sb_freeze_acquire(sb);
1776
1777 if (sb->s_op->unfreeze_fs) {
1778 error = sb->s_op->unfreeze_fs(sb);
1779 if (error) {
1780 printk(KERN_ERR
1781 "VFS:Filesystem thaw failed\n");
1782 lockdep_sb_freeze_release(sb);
1783 up_write(&sb->s_umount);
1784 return error;
1785 }
1786 }
1787
1788 sb->s_writers.frozen = SB_UNFROZEN;
1789 sb_freeze_unlock(sb, SB_FREEZE_FS);
1790 out:
1791 wake_up(&sb->s_writers.wait_unfrozen);
1792 deactivate_locked_super(sb);
1793 return 0;
1794 }
1795
1796 /**
1797 * thaw_super -- unlock filesystem
1798 * @sb: the super to thaw
1799 *
1800 * Unlocks the filesystem and marks it writeable again after freeze_super().
1801 */
1802 int thaw_super(struct super_block *sb)
1803 {
1804 down_write(&sb->s_umount);
1805 return thaw_super_locked(sb);
1806 }
1807 EXPORT_SYMBOL(thaw_super);