// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/cleancache.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include "../fs/internal.h"
#include "blk.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret) {
			char name[BDEVNAME_SIZE];
			pr_warn_ratelimited("VFS: Dirty inode writeback failed "
					    "for block device %s (err=%d).\n",
					    bdevname(bdev, name), ret);
		}
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping_empty(mapping))
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
	/*
	 * 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But, for the strange corners, let's be cautious.
	 */
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Drop all buffers & page cache for given bdev range. This function bails
 * with error if bdev has other exclusive owner (such as filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
			loff_t lstart, loff_t lend)
{
	/*
	 * If we don't hold exclusive handle for the device, upgrade to it
	 * while we discard the buffer cache to avoid discarding buffers
	 * under live filesystem.
	 */
	if (!(mode & FMODE_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range);
		if (err)
			goto invalidate;
	}

	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (!(mode & FMODE_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;

invalidate:
	/*
	 * Someone else has the handle exclusively open. Try invalidating
	 * instead. The 'end' argument is inclusive so the rounding is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}

static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(bdev->bd_inode);

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is same as current */
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);

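/*
 * Usage sketch (illustrative, not part of this file): a filesystem's
 * fill_super typically picks its block size with sb_min_blocksize() so it
 * never ends up below the device's logical block size. The function name
 * example_fill_super() and the 1024-byte preference are made up.
 *
 *	static int example_fill_super(struct super_block *sb, void *data,
 *				      int silent)
 *	{
 *		// Ask for 1024 bytes; may be rounded up to the logical
 *		// block size. Returns 0 if the size cannot be set.
 *		if (!sb_min_blocksize(sb, 1024))
 *			return -EINVAL;
 *		...
 *	}
 */
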
int sync_blockdev_nowait(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_flush(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

/**
 * freeze_bdev - lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
 * down in thaw_bdev(). When it becomes 0, thaw_bdev() actually unfreezes.
 */
int freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1)
		goto done;

	sb = get_active_super(bdev);
	if (!sb)
		goto sync;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	deactivate_super(sb);

	if (error) {
		bdev->bd_fsfreeze_count--;
		goto done;
	}
	bdev->bd_fsfreeze_sb = sb;

sync:
	sync_blockdev(bdev);
done:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev - unlock filesystem
 * @bdev:	blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	sb = bdev->bd_fsfreeze_sb;
	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error)
		bdev->bd_fsfreeze_count++;
	else
		bdev->bd_fsfreeze_sb = NULL;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);

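/*
 * Usage sketch (illustrative, not part of this file): a snapshot driver
 * brackets its work with freeze_bdev()/thaw_bdev() so the filesystem on
 * @bdev stays consistent while the snapshot is taken. The names
 * example_take_snapshot() and do_snapshot() are made up.
 *
 *	static int example_take_snapshot(struct block_device *bdev)
 *	{
 *		int error = freeze_bdev(bdev);
 *
 *		if (error)
 *			return error;
 *		error = do_snapshot(bdev);
 *		thaw_bdev(bdev);	// drops our bd_fsfreeze_count reference
 *		return error;
 *	}
 */
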
/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page to (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to read this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
			struct page *page)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	int result = -EOPNOTSUPP;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return result;

	result = blk_queue_enter(bdev_get_queue(bdev), 0);
	if (result)
		return result;
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_READ);
	blk_queue_exit(bdev_get_queue(bdev));
	return result;
}

/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (eg the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to write this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
			struct page *page, struct writeback_control *wbc)
{
	int result;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return -EOPNOTSUPP;
	result = blk_queue_enter(bdev_get_queue(bdev), 0);
	if (result)
		return result;

	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_WRITE);
	if (result) {
		end_page_writeback(page);
	} else {
		clean_page_buffers(page);
		unlock_page(page);
	}
	blk_queue_exit(bdev_get_queue(bdev));
	return result;
}

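/*
 * Usage sketch (illustrative, not part of this file): a caller can try the
 * rw_page fast path first and fall back to a regular bio-based path when it
 * gets a "soft" error back. The names example_write() and
 * example_bio_write() are made up.
 *
 *	static int example_write(struct block_device *bdev, sector_t sector,
 *				 struct page *page,
 *				 struct writeback_control *wbc)
 *	{
 *		int ret = bdev_write_page(bdev, sector, page, wbc);
 *
 *		if (ret)	// soft failure: the page is still locked
 *			ret = example_bio_write(bdev, sector, page, wbc);
 *		return ret;
 *	}
 */
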
/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);

	if (!ei)
		return NULL;
	memset(&ei->bdev, 0, sizeof(ei->bdev));
	return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
	struct block_device *bdev = I_BDEV(inode);

	free_percpu(bdev->bd_stats);
	kfree(bdev->bd_meta_info);

	if (!bdev_is_partition(bdev)) {
		if (bdev->bd_disk && bdev->bd_disk->bdi)
			bdi_put(bdev->bd_disk->bdi);
		kfree(bdev->bd_disk);
	}

	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		blk_free_ext_minor(MINOR(bdev->bd_dev));

	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *data)
{
	struct bdev_inode *ei = data;

	inode_init_once(&ei->vfs_inode);
}

static void bdev_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}

static struct file_system_type bd_type = {
	.name = "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb = kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = new_inode(blockdev_superblock);
	if (!inode)
		return NULL;
	inode->i_mode = S_IFBLK;
	inode->i_rdev = 0;
	inode->i_data.a_ops = &def_blk_aops;
	mapping_set_gfp_mask(&inode->i_data, GFP_USER);

	bdev = I_BDEV(inode);
	mutex_init(&bdev->bd_fsfreeze_mutex);
	spin_lock_init(&bdev->bd_size_lock);
	bdev->bd_partno = partno;
	bdev->bd_inode = inode;
	bdev->bd_queue = disk->queue;
	bdev->bd_stats = alloc_percpu(struct disk_stats);
	if (!bdev->bd_stats) {
		iput(inode);
		return NULL;
	}
	bdev->bd_disk = disk;
	return bdev;
}

void bdev_add(struct block_device *bdev, dev_t dev)
{
	bdev->bd_dev = dev;
	bdev->bd_inode->i_rdev = dev;
	bdev->bd_inode->i_ino = dev;
	insert_inode_hash(bdev->bd_inode);
}

long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
			 void *holder)
{
	if (bdev->bd_holder == holder)
		return true;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	 /* held by someone else */
	else if (whole == bdev)
		return true;	 /* is a whole device which isn't held */

	else if (whole->bd_holder == bd_may_claim)
		return true;	 /* is a partition of a device that is being partitioned */
	else if (whole->bd_holder != NULL)
		return false;	 /* is a partition of a held device */
	else
		return true;	 /* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * Claim @bdev.  This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress. On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);

	if (WARN_ON_ONCE(!holder))
		return -EINVAL;
retry:
	spin_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, whole, holder)) {
		spin_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	spin_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_bit(&whole->bd_claiming, 0);
}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Finish exclusive open of a block device. Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);

	spin_lock(&bdev_lock);
	BUG_ON(!bd_may_claim(bdev, whole, holder));
	/*
	 * Note that for a whole device bd_holders will be incremented twice,
	 * and bd_holder will be set to bd_may_claim before being set to holder
	 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	bdev->bd_holder = holder;
	bd_clear_claiming(whole, holder);
	spin_unlock(&bdev_lock);
}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can be
 * also used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
	spin_lock(&bdev_lock);
	bd_clear_claiming(bdev_whole(bdev), holder);
	spin_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);

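/*
 * Usage sketch (illustrative, not part of this file): temporarily block
 * other exclusive openers without performing a full exclusive open, the
 * same pattern truncate_bdev_range() above uses. example_flush() is a
 * made-up stand-in for the actual work.
 *
 *	static int example_exclusive_section(struct block_device *bdev)
 *	{
 *		int err = bd_prepare_to_claim(bdev, example_exclusive_section);
 *
 *		if (err)
 *			return err;	// someone else holds the device
 *		example_flush(bdev);
 *		bd_abort_claiming(bdev, example_exclusive_section);
 *		return 0;
 *	}
 */
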
static void blkdev_flush_mapping(struct block_device *bdev)
{
	WARN_ON_ONCE(bdev->bd_holders);
	sync_blockdev(bdev);
	kill_bdev(bdev);
	bdev_write_inode(bdev);
}

static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret = 0;

	if (disk->fops->open) {
		ret = disk->fops->open(bdev, mode);
		if (ret) {
			/* avoid ghost partitions on a removed medium */
			if (ret == -ENOMEDIUM &&
			    test_bit(GD_NEED_PART_SCAN, &disk->state))
				bdev_disk_changed(disk, true);
			return ret;
		}
	}

	if (!bdev->bd_openers)
		set_init_blocksize(bdev);
	if (test_bit(GD_NEED_PART_SCAN, &disk->state))
		bdev_disk_changed(disk, false);
	bdev->bd_openers++;
	return 0;
}

static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
{
	if (!--bdev->bd_openers)
		blkdev_flush_mapping(bdev);
	if (bdev->bd_disk->fops->release)
		bdev->bd_disk->fops->release(bdev->bd_disk, mode);
}

static int blkdev_get_part(struct block_device *part, fmode_t mode)
{
	struct gendisk *disk = part->bd_disk;
	int ret;

	if (part->bd_openers)
		goto done;

	ret = blkdev_get_whole(bdev_whole(part), mode);
	if (ret)
		return ret;

	ret = -ENXIO;
	if (!bdev_nr_sectors(part))
		goto out_blkdev_put;

	disk->open_partitions++;
	set_init_blocksize(part);
done:
	part->bd_openers++;
	return 0;

out_blkdev_put:
	blkdev_put_whole(bdev_whole(part), mode);
	return ret;
}

static void blkdev_put_part(struct block_device *part, fmode_t mode)
{
	struct block_device *whole = bdev_whole(part);

	if (--part->bd_openers)
		return;
	blkdev_flush_mapping(part);
	whole->bd_disk->open_partitions--;
	blkdev_put_whole(whole, mode);
}

struct block_device *blkdev_get_no_open(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = ilookup(blockdev_superblock, dev);
	if (!inode) {
		blk_request_module(dev);
		inode = ilookup(blockdev_superblock, dev);
		if (!inode)
			return NULL;
	}

	/* switch from the inode reference to a device model one: */
	bdev = &BDEV_I(inode)->bdev;
	if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
		bdev = NULL;
	iput(inode);

	if (!bdev)
		return NULL;
	if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) ||
	    !try_module_get(bdev->bd_disk->fops->owner)) {
		put_device(&bdev->bd_device);
		return NULL;
	}

	return bdev;
}

void blkdev_put_no_open(struct block_device *bdev)
{
	module_put(bdev->bd_disk->fops->owner);
	put_device(&bdev->bd_device);
}

/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by device number @dev. If @mode includes
 * %FMODE_EXCL, the block device is opened with exclusive access.  Specifying
 * %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may nest for
 * the same @holder.
 *
 * Use this interface ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a device
 * number.  Everything else should use blkdev_get_by_path().
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
	bool unblock_events = true;
	struct block_device *bdev;
	struct gendisk *disk;
	int ret;

	ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
			MAJOR(dev), MINOR(dev),
			((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) |
			((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0));
	if (ret)
		return ERR_PTR(ret);

	bdev = blkdev_get_no_open(dev);
	if (!bdev)
		return ERR_PTR(-ENXIO);
	disk = bdev->bd_disk;

	if (mode & FMODE_EXCL) {
		ret = bd_prepare_to_claim(bdev, holder);
		if (ret)
			goto put_blkdev;
	}

	disk_block_events(disk);

	mutex_lock(&disk->open_mutex);
	ret = -ENXIO;
	if (!disk_live(disk))
		goto abort_claiming;
	if (bdev_is_partition(bdev))
		ret = blkdev_get_part(bdev, mode);
	else
		ret = blkdev_get_whole(bdev, mode);
	if (ret)
		goto abort_claiming;
	if (mode & FMODE_EXCL) {
		bd_finish_claiming(bdev, holder);

		/*
		 * Block event polling for write claims if requested.  Any write
		 * holder makes the write_holder state stick until all are
		 * released.  This is good enough and tracking individual
		 * writeable reference is too fragile given the way @mode is
		 * used in blkdev_get/put().
		 */
		if ((mode & FMODE_WRITE) && !bdev->bd_write_holder &&
		    (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			unblock_events = false;
		}
	}
	mutex_unlock(&disk->open_mutex);

	if (unblock_events)
		disk_unblock_events(disk);
	return bdev;

abort_claiming:
	if (mode & FMODE_EXCL)
		bd_abort_claiming(bdev, holder);
	mutex_unlock(&disk->open_mutex);
	disk_unblock_events(disk);
put_blkdev:
	blkdev_put_no_open(bdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(blkdev_get_by_dev);

/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by the device file at @path.  If @mode
 * includes %FMODE_EXCL, the block device is opened with exclusive access.
 * Specifying %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may
 * nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
					void *holder)
{
	struct block_device *bdev;
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	bdev = blkdev_get_by_dev(dev, mode, holder);
	if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, mode);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);

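/*
 * Usage sketch (illustrative, not part of this file): a filesystem or
 * stacking driver typically opens its backing device exclusively by path
 * and releases it with the same @mode. The holder just needs to be a
 * unique, stable pointer; struct example_ctx is made up for illustration.
 *
 *	static struct block_device *example_open(struct example_ctx *ctx,
 *						 const char *path)
 *	{
 *		struct block_device *bdev;
 *
 *		bdev = blkdev_get_by_path(path,
 *				FMODE_READ | FMODE_WRITE | FMODE_EXCL, ctx);
 *		if (IS_ERR(bdev))
 *			return bdev;	// e.g. -EACCES for a read-only bdev
 *		// later, drop the reference with a matching mode:
 *		// blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 *		return bdev;
 *	}
 */
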
void blkdev_put(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;

	/*
	 * Sync early if it looks like we're the last one.  If someone else
	 * opens the block device between now and the decrement of bd_openers
	 * then we did a sync that we didn't need to, but that's not the end
	 * of the world and we want to avoid long (could be several minute)
	 * syncs while holding the mutex.
	 */
	if (bdev->bd_openers == 1)
		sync_blockdev(bdev);

	mutex_lock(&disk->open_mutex);
	if (mode & FMODE_EXCL) {
		struct block_device *whole = bdev_whole(bdev);
		bool bdev_free;

		/*
		 * Release a claim on the device.  The holder fields
		 * are protected with bdev_lock.  open_mutex is to
		 * synchronize disk_holder unlinking.
		 */
		spin_lock(&bdev_lock);

		WARN_ON_ONCE(--bdev->bd_holders < 0);
		WARN_ON_ONCE(--whole->bd_holders < 0);

		if ((bdev_free = !bdev->bd_holders))
			bdev->bd_holder = NULL;
		if (!whole->bd_holders)
			whole->bd_holder = NULL;

		spin_unlock(&bdev_lock);

		/*
		 * If this was the last claim, remove holder link and
		 * unblock event polling if it was a write holder.
		 */
		if (bdev_free && bdev->bd_write_holder) {
			disk_unblock_events(disk);
			bdev->bd_write_holder = false;
		}
	}

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event.  This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);

	if (bdev_is_partition(bdev))
		blkdev_put_part(bdev, mode);
	else
		blkdev_put_whole(bdev, mode);
	mutex_unlock(&disk->open_mutex);

	blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);

/**
 * lookup_bdev - lookup a struct block_device by name
 * @pathname: special file representing the block device
 * @dev: return value of the block device's dev_t
 *
 * Lookup the block device's dev_t at @pathname in the current
 * namespace if possible and return it by @dev.
 *
 * RETURNS:
 * 0 if succeeded, errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return -EINVAL;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return error;

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto out_path_put;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto out_path_put;

	*dev = inode->i_rdev;
	error = 0;
out_path_put:
	path_put(&path);
	return error;
}
EXPORT_SYMBOL(lookup_bdev);

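/*
 * Usage sketch (illustrative, not part of this file): resolve a device
 * path to a dev_t without opening the device, e.g. to compare it against
 * an already-open device. The name example_same_bdev() is made up.
 *
 *	static bool example_same_bdev(const char *pathname,
 *				      struct block_device *bdev)
 *	{
 *		dev_t dev;
 *
 *		if (lookup_bdev(pathname, &dev))
 *			return false;	// not a block device or no access
 *		return dev == bdev->bd_dev;
 *	}
 */
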
int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb, kill_dirty);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);

void sync_bdevs(bool wait)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		struct block_device *bdev;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock.  So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
		bdev = I_BDEV(inode);

		mutex_lock(&bdev->bd_disk->open_mutex);
		if (!bdev->bd_openers) {
			; /* skip */
		} else if (wait) {
			/*
			 * We keep the error status of individual mapping so
			 * that applications can catch the writeback error using
			 * fsync(2). See filemap_fdatawait_keep_errors() for
			 * details.
			 */
			filemap_fdatawait_keep_errors(inode->i_mapping);
		} else {
			filemap_fdatawrite(inode->i_mapping);
		}
		mutex_unlock(&bdev->bd_disk->open_mutex);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}