// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include <linux/stat.h>
#include "../fs/internal.h"
#include "blk.h"
struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);
static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret)
			pr_warn_ratelimited(
	"VFS: Dirty inode writeback failed for block device %pg (err=%d).\n",
				bdev, ret);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}
/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping_empty(mapping))
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}
/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
}
EXPORT_SYMBOL(invalidate_bdev);
/*
 * Drop all buffers & page cache for a given bdev range. This function bails
 * with an error if the bdev has another exclusive owner (such as a filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
			loff_t lstart, loff_t lend)
{
	/*
	 * If we don't hold an exclusive handle for the device, upgrade to it
	 * while we discard the buffer cache to avoid discarding buffers
	 * under a live filesystem.
	 */
	if (!(mode & BLK_OPEN_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL);

		if (err)
			goto invalidate;
	}

	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (!(mode & BLK_OPEN_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;

invalidate:
	/*
	 * Someone else has the handle exclusively open. Try invalidating
	 * instead. The 'end' argument is inclusive so the rounding is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}
static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(bdev->bd_inode);

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is the same as the current size */
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}
EXPORT_SYMBOL(set_blocksize);
int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is a power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}
EXPORT_SYMBOL(sb_set_blocksize);
int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);

	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}
EXPORT_SYMBOL(sb_min_blocksize);
int sync_blockdev_nowait(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_flush(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);
/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping. Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL(sync_blockdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
	return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
			lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);
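
/*
 * Usage sketch (illustrative only): flushing and waiting on a sub-range is
 * cheaper than syncing the whole device when only part of it was written,
 * e.g. a metadata area at a known offset. Note that @lend is inclusive:
 *
 *	int err = sync_blockdev_range(bdev, 0, SZ_1M - 1);
 *
 *	if (err)
 *		return err;
 */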
/**
 * freeze_bdev - lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
 * counts down in thaw_bdev(). When it becomes 0, thaw_bdev() actually
 * unfreezes the filesystem.
 */
int freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1)
		goto done;

	sb = get_active_super(bdev);
	if (!sb)
		goto sync;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE);
	else
		error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
	deactivate_super(sb);

	if (error) {
		bdev->bd_fsfreeze_count--;
		goto done;
	}
	bdev->bd_fsfreeze_sb = sb;

sync:
	sync_blockdev(bdev);
done:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(freeze_bdev);
/**
 * thaw_bdev - unlock filesystem
 * @bdev:	blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	sb = bdev->bd_fsfreeze_sb;
	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE);
	else
		error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
	if (error)
		bdev->bd_fsfreeze_count++;
	else
		bdev->bd_fsfreeze_sb = NULL;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);
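
/*
 * Usage sketch (illustrative only): a snapshot implementation would bracket
 * its work with the two helpers above. Because bd_fsfreeze_count nests,
 * concurrent freezers are safe and only the last thaw_bdev() call actually
 * unfreezes the filesystem; take_snapshot() is a hypothetical helper:
 *
 *	int err = freeze_bdev(bdev);
 *
 *	if (err)
 *		return err;
 *	take_snapshot(bdev);
 *	return thaw_bdev(bdev);
 */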
/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_MUTEX(bdev_lock);
static struct kmem_cache *bdev_cachep __ro_after_init;
static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = alloc_inode_sb(sb, bdev_cachep, GFP_KERNEL);

	if (!ei)
		return NULL;
	memset(&ei->bdev, 0, sizeof(ei->bdev));
	return &ei->vfs_inode;
}
static void bdev_free_inode(struct inode *inode)
{
	struct block_device *bdev = I_BDEV(inode);

	free_percpu(bdev->bd_stats);
	kfree(bdev->bd_meta_info);

	if (!bdev_is_partition(bdev)) {
		if (bdev->bd_disk && bdev->bd_disk->bdi)
			bdi_put(bdev->bd_disk->bdi);
		kfree(bdev->bd_disk);
	}

	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		blk_free_ext_minor(MINOR(bdev->bd_dev));

	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}
static void init_once(void *data)
{
	struct bdev_inode *ei = data;

	inode_init_once(&ei->vfs_inode);
}
static void bdev_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
}
static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};
static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);

	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}
static struct file_system_type bd_type = {
	.name = "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb = kill_anon_super,
};
struct super_block *blockdev_superblock __ro_after_init;
EXPORT_SYMBOL_GPL(blockdev_superblock);
void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt __ro_after_init;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = new_inode(blockdev_superblock);
	if (!inode)
		return NULL;
	inode->i_mode = S_IFBLK;
	inode->i_rdev = 0;
	inode->i_data.a_ops = &def_blk_aops;
	mapping_set_gfp_mask(&inode->i_data, GFP_USER);

	bdev = I_BDEV(inode);
	mutex_init(&bdev->bd_fsfreeze_mutex);
	spin_lock_init(&bdev->bd_size_lock);
	mutex_init(&bdev->bd_holder_lock);
	bdev->bd_partno = partno;
	bdev->bd_inode = inode;
	bdev->bd_queue = disk->queue;
	if (partno)
		bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
	else
		bdev->bd_has_submit_bio = false;
	bdev->bd_stats = alloc_percpu(struct disk_stats);
	if (!bdev->bd_stats) {
		iput(inode);
		return NULL;
	}
	bdev->bd_disk = disk;
	return bdev;
}
void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
	spin_lock(&bdev->bd_size_lock);
	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
	bdev->bd_nr_sectors = sectors;
	spin_unlock(&bdev->bd_size_lock);
}
void bdev_add(struct block_device *bdev, dev_t dev)
{
	bdev->bd_dev = dev;
	bdev->bd_inode->i_rdev = dev;
	bdev->bd_inode->i_ino = dev;
	insert_inode_hash(bdev->bd_inode);
}
long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}
/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	lockdep_assert_held(&bdev_lock);

	if (bdev->bd_holder) {
		/*
		 * The same holder can always re-claim.
		 */
		if (bdev->bd_holder == holder) {
			if (WARN_ON_ONCE(bdev->bd_holder_ops != hops))
				return false;
			return true;
		}
		return false;
	}

	/*
	 * If the whole device's holder is set to bd_may_claim, a partition on
	 * the device is claimed, but not the whole device.
	 */
	if (whole != bdev &&
	    whole->bd_holder && whole->bd_holder != bd_may_claim)
		return false;
	return true;
}
/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops
 *
 * Claim @bdev. This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress. On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	if (WARN_ON_ONCE(!holder))
		return -EINVAL;
retry:
	mutex_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, holder, hops)) {
		mutex_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	mutex_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
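
/*
 * Usage sketch (illustrative only): a caller that only needs to keep other
 * exclusive openers away for a while, without opening the device itself,
 * pairs bd_prepare_to_claim() with bd_abort_claiming() (below), exactly as
 * truncate_bdev_range() above does. Any unique address works as the holder;
 * example_holder is hypothetical:
 *
 *	static int example_holder;
 *
 *	if (bd_prepare_to_claim(bdev, &example_holder, NULL))
 *		return -EBUSY;
 *	... work that must not race with an exclusive open ...
 *	bd_abort_claiming(bdev, &example_holder);
 */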
static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_bit(&whole->bd_claiming, 0);
}
/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 * @hops: block device holder operations
 *
 * Finish exclusive open of a block device. Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	mutex_lock(&bdev_lock);
	BUG_ON(!bd_may_claim(bdev, holder, hops));
	/*
	 * Note that for a whole device bd_holders will be incremented twice,
	 * and bd_holder will be set to bd_may_claim before being set to holder
	 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	mutex_lock(&bdev->bd_holder_lock);
	bdev->bd_holder = holder;
	bdev->bd_holder_ops = hops;
	mutex_unlock(&bdev->bd_holder_lock);
	bd_clear_claiming(whole, holder);
	mutex_unlock(&bdev_lock);
}
/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can be
 * also used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
	mutex_lock(&bdev_lock);
	bd_clear_claiming(bdev_whole(bdev), holder);
	mutex_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);
static void bd_end_claim(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);
	bool unblock = false;

	/*
	 * Release a claim on the device. The holder fields are protected with
	 * bdev_lock. open_mutex is used to synchronize disk_holder unlinking.
	 */
	mutex_lock(&bdev_lock);
	WARN_ON_ONCE(bdev->bd_holder != holder);
	WARN_ON_ONCE(--bdev->bd_holders < 0);
	WARN_ON_ONCE(--whole->bd_holders < 0);
	if (!bdev->bd_holders) {
		mutex_lock(&bdev->bd_holder_lock);
		bdev->bd_holder = NULL;
		bdev->bd_holder_ops = NULL;
		mutex_unlock(&bdev->bd_holder_lock);
		if (bdev->bd_write_holder)
			unblock = true;
	}
	if (!whole->bd_holders)
		whole->bd_holder = NULL;
	mutex_unlock(&bdev_lock);

	/*
	 * If this was the last claim, remove holder link and unblock event
	 * polling if it was a write holder.
	 */
	if (unblock) {
		disk_unblock_events(bdev->bd_disk);
		bdev->bd_write_holder = false;
	}
}
static void blkdev_flush_mapping(struct block_device *bdev)
{
	WARN_ON_ONCE(bdev->bd_holders);
	sync_blockdev(bdev);
	kill_bdev(bdev);
	bdev_write_inode(bdev);
}
static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret;

	if (disk->fops->open) {
		ret = disk->fops->open(disk, mode);
		if (ret) {
			/* avoid ghost partitions on a removed medium */
			if (ret == -ENOMEDIUM &&
			    test_bit(GD_NEED_PART_SCAN, &disk->state))
				bdev_disk_changed(disk, true);
			return ret;
		}
	}

	if (!atomic_read(&bdev->bd_openers))
		set_init_blocksize(bdev);
	if (test_bit(GD_NEED_PART_SCAN, &disk->state))
		bdev_disk_changed(disk, false);
	atomic_inc(&bdev->bd_openers);
	return 0;
}
static void blkdev_put_whole(struct block_device *bdev)
{
	if (atomic_dec_and_test(&bdev->bd_openers))
		blkdev_flush_mapping(bdev);
	if (bdev->bd_disk->fops->release)
		bdev->bd_disk->fops->release(bdev->bd_disk);
}
static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
{
	struct gendisk *disk = part->bd_disk;
	int ret;

	ret = blkdev_get_whole(bdev_whole(part), mode);
	if (ret)
		return ret;

	ret = -ENXIO;
	if (!bdev_nr_sectors(part))
		goto out_blkdev_put;

	if (!atomic_read(&part->bd_openers)) {
		disk->open_partitions++;
		set_init_blocksize(part);
	}
	atomic_inc(&part->bd_openers);
	return 0;

out_blkdev_put:
	blkdev_put_whole(bdev_whole(part));
	return ret;
}
static void blkdev_put_part(struct block_device *part)
{
	struct block_device *whole = bdev_whole(part);

	if (atomic_dec_and_test(&part->bd_openers)) {
		blkdev_flush_mapping(part);
		whole->bd_disk->open_partitions--;
	}
	blkdev_put_whole(whole);
}
struct block_device *blkdev_get_no_open(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = ilookup(blockdev_superblock, dev);
	if (!inode && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
		blk_request_module(dev);
		inode = ilookup(blockdev_superblock, dev);
		if (inode)
			pr_warn_ratelimited(
"block device autoloading is deprecated and will be removed.\n");
	}
	if (!inode)
		return NULL;

	/* switch from the inode reference to a device mode one: */
	bdev = &BDEV_I(inode)->bdev;
	if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
		bdev = NULL;
	iput(inode);
	return bdev;
}
void blkdev_put_no_open(struct block_device *bdev)
{
	put_device(&bdev->bd_device);
}
/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: open mode (BLK_OPEN_*)
 * @holder: exclusive holder identifier
 * @hops: holder operations
 *
 * Open the block device described by device number @dev. If @holder is not
 * %NULL, the block device is opened with exclusive access. Exclusive opens may
 * nest for the same @holder.
 *
 * Use this interface ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a device
 * number. Everything else should use blkdev_get_by_path().
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops)
{
	bool unblock_events = true;
	struct block_device *bdev;
	struct gendisk *disk;
	int ret;

	ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
			MAJOR(dev), MINOR(dev),
			((mode & BLK_OPEN_READ) ? DEVCG_ACC_READ : 0) |
			((mode & BLK_OPEN_WRITE) ? DEVCG_ACC_WRITE : 0));
	if (ret)
		return ERR_PTR(ret);

	bdev = blkdev_get_no_open(dev);
	if (!bdev)
		return ERR_PTR(-ENXIO);
	disk = bdev->bd_disk;

	if (holder) {
		mode |= BLK_OPEN_EXCL;
		ret = bd_prepare_to_claim(bdev, holder, hops);
		if (ret)
			goto put_blkdev;
	} else {
		if (WARN_ON_ONCE(mode & BLK_OPEN_EXCL)) {
			ret = -EIO;
			goto put_blkdev;
		}
	}

	disk_block_events(disk);

	mutex_lock(&disk->open_mutex);
	ret = -ENXIO;
	if (!disk_live(disk))
		goto abort_claiming;
	if (!try_module_get(disk->fops->owner))
		goto abort_claiming;
	if (bdev_is_partition(bdev))
		ret = blkdev_get_part(bdev, mode);
	else
		ret = blkdev_get_whole(bdev, mode);
	if (ret)
		goto put_module;
	if (holder) {
		bd_finish_claiming(bdev, holder, hops);

		/*
		 * Block event polling for write claims if requested. Any write
		 * holder makes the write_holder state stick until all are
		 * released. This is good enough and tracking individual
		 * writeable reference is too fragile given the way @mode is
		 * used in blkdev_get/put().
		 */
		if ((mode & BLK_OPEN_WRITE) && !bdev->bd_write_holder &&
		    (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			unblock_events = false;
		}
	}
	mutex_unlock(&disk->open_mutex);

	if (unblock_events)
		disk_unblock_events(disk);
	return bdev;

put_module:
	module_put(disk->fops->owner);
abort_claiming:
	if (holder)
		bd_abort_claiming(bdev, holder);
	mutex_unlock(&disk->open_mutex);
	disk_unblock_events(disk);
put_blkdev:
	blkdev_put_no_open(bdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(blkdev_get_by_dev);
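
/*
 * Usage sketch (illustrative only): a non-exclusive, read-only open when all
 * that is known is the device number; the major/minor pair here is made up:
 *
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_dev(MKDEV(8, 0), BLK_OPEN_READ, NULL, NULL);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	... read from the device ...
 *	blkdev_put(bdev, NULL);
 */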
struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
				     const struct blk_holder_ops *hops)
{
	struct bdev_handle *handle = kmalloc(sizeof(*handle), GFP_KERNEL);
	struct block_device *bdev;

	if (!handle)
		return ERR_PTR(-ENOMEM);
	bdev = blkdev_get_by_dev(dev, mode, holder, hops);
	if (IS_ERR(bdev)) {
		kfree(handle);
		return ERR_CAST(bdev);
	}
	handle->bdev = bdev;
	handle->holder = holder;
	if (holder)
		mode |= BLK_OPEN_EXCL;
	handle->mode = mode;
	return handle;
}
EXPORT_SYMBOL(bdev_open_by_dev);
/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: open mode (BLK_OPEN_*)
 * @holder: exclusive holder identifier
 * @hops: holder operations
 *
 * Open the block device described by the device file at @path. If @holder is
 * not %NULL, the block device is opened with exclusive access. Exclusive opens
 * may nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
		void *holder, const struct blk_holder_ops *hops)
{
	struct block_device *bdev;
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	bdev = blkdev_get_by_dev(dev, mode, holder, hops);
	if (!IS_ERR(bdev) && (mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, holder);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);
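
/*
 * Usage sketch (illustrative only): an exclusive read-write open by path, as
 * a stacking driver would do; example_holder stands in for whatever unique
 * address the caller uses as @holder, and the path is made up:
 *
 *	static int example_holder;
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_path("/dev/vdb", BLK_OPEN_READ | BLK_OPEN_WRITE,
 *				  &example_holder, NULL);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	... submit I/O ...
 *	blkdev_put(bdev, &example_holder);
 */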
struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
		void *holder, const struct blk_holder_ops *hops)
{
	struct bdev_handle *handle;
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	handle = bdev_open_by_dev(dev, mode, holder, hops);
	if (!IS_ERR(handle) && (mode & BLK_OPEN_WRITE) &&
	    bdev_read_only(handle->bdev)) {
		bdev_release(handle);
		return ERR_PTR(-EACCES);
	}

	return handle;
}
EXPORT_SYMBOL(bdev_open_by_path);
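
/*
 * Usage sketch (illustrative only): the handle-based variant pairs with
 * bdev_release() and saves the caller from carrying @holder around;
 * example_holder is hypothetical as above:
 *
 *	struct bdev_handle *handle;
 *
 *	handle = bdev_open_by_path("/dev/vdc", BLK_OPEN_READ,
 *				   &example_holder, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	... use handle->bdev ...
 *	bdev_release(handle);
 */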
void blkdev_put(struct block_device *bdev, void *holder)
{
	struct gendisk *disk = bdev->bd_disk;

	/*
	 * Sync early if it looks like we're the last one. If someone else
	 * opens the block device between now and the decrement of bd_openers
	 * then we did a sync that we didn't need to, but that's not the end
	 * of the world and we want to avoid long (could be several minute)
	 * syncs while holding the mutex.
	 */
	if (atomic_read(&bdev->bd_openers) == 1)
		sync_blockdev(bdev);

	mutex_lock(&disk->open_mutex);
	if (holder)
		bd_end_claim(bdev, holder);

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event. This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);

	if (bdev_is_partition(bdev))
		blkdev_put_part(bdev);
	else
		blkdev_put_whole(bdev);
	mutex_unlock(&disk->open_mutex);

	module_put(disk->fops->owner);
	blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);
void bdev_release(struct bdev_handle *handle)
{
	blkdev_put(handle->bdev, handle->holder);
	kfree(handle);
}
EXPORT_SYMBOL(bdev_release);
/**
 * lookup_bdev() - Look up a struct block_device by name.
 * @pathname: Name of the block device in the filesystem.
 * @dev: Pointer to the block device's dev_t, if found.
 *
 * Lookup the block device's dev_t at @pathname in the current
 * namespace if possible and return it in @dev.
 *
 * Context: May sleep.
 * Return: 0 if succeeded, negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return -EINVAL;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return error;

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto out_path_put;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto out_path_put;

	*dev = inode->i_rdev;
	error = 0;
out_path_put:
	path_put(&path);
	return error;
}
EXPORT_SYMBOL(lookup_bdev);
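
/*
 * Usage sketch (illustrative only): resolving a path to a device number
 * without opening the device; the path is made up:
 *
 *	dev_t dev;
 *	int err = lookup_bdev("/dev/sda1", &dev);
 *
 *	if (err)
 *		return err;
 *	pr_info("resolved to %u:%u\n", MAJOR(dev), MINOR(dev));
 */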
/**
 * bdev_mark_dead - mark a block device as dead
 * @bdev: block device to operate on
 * @surprise: indicate a surprise removal
 *
 * Tell the file system that this device or media is dead. If @surprise is set
 * to %true the device or media is already gone, if not we are preparing for an
 * orderly removal.
 *
 * This calls into the file system, which then typically syncs out all dirty
 * data and writes back inodes and then invalidates any cached data in the
 * inodes on the file system. In addition we also invalidate the block device
 * mapping.
 */
void bdev_mark_dead(struct block_device *bdev, bool surprise)
{
	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
		bdev->bd_holder_ops->mark_dead(bdev, surprise);
	else {
		mutex_unlock(&bdev->bd_holder_lock);
		sync_blockdev(bdev);
	}

	invalidate_bdev(bdev);
}
/*
 * New drivers should not use this directly. There are some drivers however
 * that need this for historical reasons. For example, the DASD driver has
 * historically had a shutdown to offline mode that doesn't actually remove the
 * gendisk that otherwise looks a lot like a safe device removal.
 */
EXPORT_SYMBOL_GPL(bdev_mark_dead);
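
/*
 * Usage sketch (illustrative only): a holder that wants to be told about
 * device death wires up blk_holder_ops::mark_dead when opening the device.
 * The callback is invoked under bd_holder_lock and, as the else branch in
 * bdev_mark_dead() above suggests, is expected to drop that lock itself;
 * example_mark_dead(), example_hops and example_holder are hypothetical:
 *
 *	static void example_mark_dead(struct block_device *bdev, bool surprise)
 *	{
 *		... shut down whatever is stacked on top of @bdev ...
 *		mutex_unlock(&bdev->bd_holder_lock);
 *	}
 *
 *	static const struct blk_holder_ops example_hops = {
 *		.mark_dead	= example_mark_dead,
 *	};
 *
 *	bdev = blkdev_get_by_path(path, mode, &example_holder, &example_hops);
 */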
void sync_bdevs(bool wait)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		struct block_device *bdev;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from the s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
		bdev = I_BDEV(inode);

		mutex_lock(&bdev->bd_disk->open_mutex);
		if (!atomic_read(&bdev->bd_openers)) {
			; /* skip */
		} else if (wait) {
			/*
			 * We keep the error status of individual mapping so
			 * that applications can catch the writeback error using
			 * fsync(2). See filemap_fdatawait_keep_errors() for
			 * details.
			 */
			filemap_fdatawait_keep_errors(inode->i_mapping);
		} else {
			filemap_fdatawrite(inode->i_mapping);
		}
		mutex_unlock(&bdev->bd_disk->open_mutex);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}
/*
 * Handle STATX_DIOALIGN for block devices.
 *
 * Note that the inode passed to this is the inode of a block device node file,
 * not the block device's internal inode. Therefore it is *not* valid to use
 * I_BDEV() here; the block device has to be looked up by i_rdev instead.
 */
void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
	struct block_device *bdev;

	bdev = blkdev_get_no_open(inode->i_rdev);
	if (!bdev)
		return;

	stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
	stat->dio_offset_align = bdev_logical_block_size(bdev);
	stat->result_mask |= STATX_DIOALIGN;

	blkdev_put_no_open(bdev);
}
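
/*
 * Userspace counterpart (illustrative only): the alignment published here is
 * what statx(2) reports when asked for STATX_DIOALIGN on a block device node;
 * the device path is made up:
 *
 *	struct statx stx;
 *
 *	if (statx(AT_FDCWD, "/dev/nvme0n1", 0, STATX_DIOALIGN, &stx) == 0 &&
 *	    (stx.stx_mask & STATX_DIOALIGN))
 *		printf("DIO alignment: mem %u, offset %u\n",
 *		       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
 */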