// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/module.h>
#include "blk.h"
static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}
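/*
 * On a block special file there is no filesystem mapping to resolve:
 * logical block N of the inode simply is block N of the device.  This
 * trivial get_block callback backs all of the buffer_head based
 * buffered I/O paths below.
 */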
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}
static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
	unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb->ki_flags & IOCB_DSYNC)
		op |= REQ_FUA;
	return op;
}
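/*
 * The "simple" direct I/O path: for small synchronous requests we can
 * build a single bio on the stack, with up to DIO_INLINE_BIO_VECS
 * inline bio_vecs, and wait for it inline.  No allocation from
 * blkdev_dio_pool and no completion handler are needed.
 */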
#define DIO_INLINE_BIO_VECS 4

static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, unsigned int nr_pages)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (iter_is_iovec(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}
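/*
 * State for the larger direct I/O paths.  The structure lives in the
 * front_pad of the first bio allocated from blkdev_dio_pool, so it is
 * freed together with that bio.  The union reflects the two completion
 * models: async callers are completed through ->iocb, while sync
 * callers park in ->waiter until the last bio finishes.
 */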
enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;
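/*
 * Completion for the multi-bio path.  Errors are sticky: the first
 * failing bio's status is recorded in the parent dio and never
 * overwritten.  Only the completion that drops the final reference
 * finishes the request, either by calling ->ki_complete() or by
 * waking the sleeping sync submitter.
 */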
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		if (!(dio->flags & DIO_IS_SYNC)) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
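/*
 * The multi-bio direct I/O path, used when the request needs more than
 * BIO_MAX_VECS segments.  The iterator is consumed in BIO_MAX_VECS
 * sized chunks, one bio per chunk, all submitted under a single plug.
 * dio->ref counts the bios in flight (plus the extra reference held on
 * the first, dio-carrying bio) so that completion runs exactly once.
 */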
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		unsigned int nr_pages)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure which is embedded
	 * into the first bio stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && iter_is_iovec(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		if (iocb->ki_flags & IOCB_NOWAIT)
			bio->bi_opf |= REQ_NOWAIT;

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}
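/*
 * Completion for the single-bio async path below.  The dio is embedded
 * in the bio itself, so no reference counting is needed: when this one
 * bio completes, the whole request is done.
 */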
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
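/*
 * Fast path for async direct I/O that fits in a single bio.  Compared
 * to __blkdev_direct_IO() this avoids the plug, the atomic reference
 * count and the extra bio_get()/bio_put() pair, and it is the only
 * path that supports polled (IOCB_HIPRI) completions.
 */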
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					unsigned int nr_pages)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	bool is_read = iov_iter_rw(iter) == READ;
	unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (iter_is_iovec(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED | REQ_NOWAIT;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		if (iocb->ki_flags & IOCB_NOWAIT)
			bio->bi_opf |= REQ_NOWAIT;
		submit_bio(bio);
	}
	return -EIOCBQUEUED;
}
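/*
 * Dispatcher for ->direct_IO.  Note that the iterator is sized against
 * a cap of BIO_MAX_VECS + 1 purely to detect whether the request
 * exceeds a single bio; any larger value is clamped again with
 * bio_max_segs() before the multi-bio path allocates its first bio.
 */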
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, nr_pages);
	}
	return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}
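/*
 * Buffered I/O address_space operations.  These are thin wrappers
 * around the generic buffer_head and mpage helpers, all parameterised
 * with the trivial blkdev_get_block() above.
 */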
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}
static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}
static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
}
static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}
static int blkdev_writepages(struct address_space *mapping,
			     struct writeback_control *wbc)
{
	return generic_writepages(mapping, wbc);
}
const struct address_space_operations def_blk_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= blkdev_read_folio,
	.readahead	= blkdev_readahead,
	.writepage	= blkdev_writepage,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.writepages	= blkdev_writepages,
	.direct_IO	= blkdev_direct_IO,
	.migratepage	= buffer_migrate_page_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
/*
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}
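/*
 * Flush dirty pagecache and then the device's volatile write cache.
 * -EOPNOTSUPP from blkdev_issue_flush() just means the device has no
 * write cache to flush, so it is deliberately not reported as an
 * error to userspace.
 */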
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = filp->private_data;
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binary needs it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;
	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

	if (filp->f_flags & O_NDELAY)
		filp->f_mode |= FMODE_NDELAY;
	if (filp->f_flags & O_EXCL)
		filp->f_mode |= FMODE_EXCL;
	if ((filp->f_flags & O_ACCMODE) == 3)
		filp->f_mode |= FMODE_WRITE_IOCTL;

	bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	filp->private_data = bdev;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	return 0;
}
static int blkdev_close(struct inode *inode, struct file *filp)
{
	struct block_device *bdev = filp->private_data;

	blkdev_put(bdev, filp->f_mode);
	return 0;
}
/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	struct inode *bd_inode = bdev->bd_inode;
	loff_t size = bdev_nr_bytes(bdev);
	struct blk_plug plug;
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	blk_start_plug(&plug);
	ret = __generic_file_write_iter(iocb, from);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	blk_finish_plug(&plug);
	return ret;
}
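/*
 * Reads, like writes, are clipped to the device size: the iterator is
 * temporarily truncated so generic code never sees a range past the
 * end of the device, and reexpanded before returning so the caller's
 * iov_iter is left in a consistent state.
 */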
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = iocb->ki_filp->f_mapping;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_needs_writeback(mapping, pos,
							  pos + count - 1)) {
				ret = -EAGAIN;
				goto reexpand;
			}
		} else {
			ret = filemap_write_and_wait_range(mapping, pos,
							   pos + count - 1);
			if (ret < 0)
				goto reexpand;
		}

		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret >= 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	ret = filemap_read(iocb, to, ret);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}
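/*
 * fallocate() on a block device maps the supported mode combinations
 * onto block layer primitives: FALLOC_FL_ZERO_RANGE zeroes without
 * unmapping, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE zeroes only if
 * the device can do so natively (no write fallback), and adding
 * FALLOC_FL_NO_HIDE_STALE turns the punch into a plain discard.
 */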
#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);

	/* Invalidate the page cache, including dirty pages. */
	error = truncate_bdev_range(bdev, file->f_mode, start, end);
	if (error)
		goto fail;

	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOFALLBACK);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
		error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL);
		break;
	default:
		error = -EOPNOTSUPP;
	}

 fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}
const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_close,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= generic_file_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
};
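/*
 * blkdev_dio_pool provides the bios for the two larger direct I/O
 * paths.  The front_pad of offsetof(struct blkdev_dio, bio) makes each
 * allocated bio arrive with a struct blkdev_dio in front of it, which
 * is what the container_of() calls above rely on.  BIOSET_PERCPU_CACHE
 * enables the per-cpu bio cache used for REQ_ALLOC_CACHE allocations.
 */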
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
				offsetof(struct blkdev_dio, bio),
				BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);