// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
        return file->f_mapping->host;
}

static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
        blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

        /* avoid the need for an I/O completion work item */
        if (iocb_is_dsync(iocb))
                opf |= REQ_FUA;
        return opf;
}

static bool blkdev_dio_unaligned(struct block_device *bdev, loff_t pos,
                              struct iov_iter *iter)
{
        return pos & (bdev_logical_block_size(bdev) - 1) ||
                !bdev_iter_is_aligned(bdev, iter);
}
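
/*
 * Worked example (illustrative values, not from the original source): with a
 * 512-byte logical block size the mask is 511, so pos = 4096 passes
 * (4096 & 511 == 0) while pos = 4100 is rejected (4100 & 511 == 4).
 * bdev_iter_is_aligned() applies the corresponding alignment checks to the
 * iterator's segment addresses and lengths.
 */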

#define DIO_INLINE_BIO_VECS 4

static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
                struct iov_iter *iter, unsigned int nr_pages)
{
        struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
        struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
        loff_t pos = iocb->ki_pos;
        bool should_dirty = false;
        struct bio bio;
        ssize_t ret;

        if (blkdev_dio_unaligned(bdev, pos, iter))
                return -EINVAL;

        if (nr_pages <= DIO_INLINE_BIO_VECS)
                vecs = inline_vecs;
        else {
                vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
                                     GFP_KERNEL);
                if (!vecs)
                        return -ENOMEM;
        }

        if (iov_iter_rw(iter) == READ) {
                bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
                if (user_backed_iter(iter))
                        should_dirty = true;
        } else {
                bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
        }
        bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
        bio.bi_ioprio = iocb->ki_ioprio;

        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
                goto out;
        ret = bio.bi_iter.bi_size;

        if (iov_iter_rw(iter) == WRITE)
                task_io_account_write(ret);

        if (iocb->ki_flags & IOCB_NOWAIT)
                bio.bi_opf |= REQ_NOWAIT;

        submit_bio_wait(&bio);

        bio_release_pages(&bio, should_dirty);
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);

out:
        if (vecs != inline_vecs)
                kfree(vecs);
        bio_uninit(&bio);

        return ret;
}

enum {
        DIO_SHOULD_DIRTY        = 1,
        DIO_IS_SYNC             = 2,
};

struct blkdev_dio {
        union {
                struct kiocb            *iocb;
                struct task_struct      *waiter;
        };
        size_t                  size;
        atomic_t                ref;
        unsigned int            flags;
        struct bio              bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;
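
/*
 * Descriptive note (added for clarity): blkdev_init() at the bottom of this
 * file initialises blkdev_dio_pool with offsetof(struct blkdev_dio, bio)
 * bytes of front padding, so bio_alloc_bioset() hands back a bio embedded at
 * the tail of a struct blkdev_dio; the completion handlers below recover the
 * containing dio with container_of().
 */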

static void blkdev_bio_end_io(struct bio *bio)
{
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

        if (bio->bi_status && !dio->bio.bi_status)
                dio->bio.bi_status = bio->bi_status;

        if (atomic_dec_and_test(&dio->ref)) {
                if (!(dio->flags & DIO_IS_SYNC)) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;

                        WRITE_ONCE(iocb->private, NULL);

                        if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
                        } else {
                                ret = blk_status_to_errno(dio->bio.bi_status);
                        }

                        dio->iocb->ki_complete(iocb, ret);
                        bio_put(&dio->bio);
                } else {
                        struct task_struct *waiter = dio->waiter;

                        WRITE_ONCE(dio->waiter, NULL);
                        blk_wake_io_task(waiter);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}

static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                unsigned int nr_pages)
{
        struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if (blkdev_dio_unaligned(bdev, pos, iter))
                return -EINVAL;

        if (iocb->ki_flags & IOCB_ALLOC_CACHE)
                opf |= REQ_ALLOC_CACHE;
        bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
                               &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        atomic_set(&dio->ref, 1);
        /*
         * Grab an extra reference to ensure the dio structure which is embedded
         * into the first bio stays around.
         */
        bio_get(bio);

        is_sync = is_sync_kiocb(iocb);
        if (is_sync) {
                dio->flags = DIO_IS_SYNC;
                dio->waiter = current;
        } else {
                dio->flags = 0;
                dio->iocb = iocb;
        }

        dio->size = 0;
        if (is_read && user_backed_iter(iter))
                dio->flags |= DIO_SHOULD_DIRTY;

        blk_start_plug(&plug);

        for (;;) {
                bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
                bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
                bio->bi_private = dio;
                bio->bi_end_io = blkdev_bio_end_io;
                bio->bi_ioprio = iocb->ki_ioprio;

                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                        break;
                }
                if (iocb->ki_flags & IOCB_NOWAIT) {
                        /*
                         * This is nonblocking IO, and we need to allocate
                         * another bio if we have data left to map. As we
                         * cannot guarantee that one of the sub bios will not
                         * fail getting issued for NOWAIT and as error results
                         * are coalesced across all of them, be safe and ask for
                         * a retry of this from blocking context.
                         */
                        if (unlikely(iov_iter_count(iter))) {
                                bio_release_pages(bio, false);
                                bio_clear_flag(bio, BIO_REFFED);
                                bio_put(bio);
                                blk_finish_plug(&plug);
                                return -EAGAIN;
                        }
                        bio->bi_opf |= REQ_NOWAIT;
                }

                if (is_read) {
                        if (dio->flags & DIO_SHOULD_DIRTY)
                                bio_set_pages_dirty(bio);
                } else {
                        task_io_account_write(bio->bi_iter.bi_size);
                }
                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;

                nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
                if (!nr_pages) {
                        submit_bio(bio);
                        break;
                }
                atomic_inc(&dio->ref);
                submit_bio(bio);
                bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
        }

        blk_finish_plug(&plug);

        if (!is_sync)
                return -EIOCBQUEUED;

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(dio->waiter))
                        break;
                blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (!ret)
                ret = blk_status_to_errno(dio->bio.bi_status);
        if (likely(!ret))
                ret = dio->size;

        bio_put(&dio->bio);
        return ret;
}

static void blkdev_bio_end_io_async(struct bio *bio)
{
        struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
        struct kiocb *iocb = dio->iocb;
        ssize_t ret;

        WRITE_ONCE(iocb->private, NULL);

        if (likely(!bio->bi_status)) {
                ret = dio->size;
                iocb->ki_pos += ret;
        } else {
                ret = blk_status_to_errno(bio->bi_status);
        }

        iocb->ki_complete(iocb, ret);

        if (dio->flags & DIO_SHOULD_DIRTY) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}

static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
                                        struct iov_iter *iter,
                                        unsigned int nr_pages)
{
        struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
        bool is_read = iov_iter_rw(iter) == READ;
        blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
        struct blkdev_dio *dio;
        struct bio *bio;
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if (blkdev_dio_unaligned(bdev, pos, iter))
                return -EINVAL;

        if (iocb->ki_flags & IOCB_ALLOC_CACHE)
                opf |= REQ_ALLOC_CACHE;
        bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
                               &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        dio->flags = 0;
        dio->iocb = iocb;
        bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
        bio->bi_end_io = blkdev_bio_end_io_async;
        bio->bi_ioprio = iocb->ki_ioprio;

        if (iov_iter_is_bvec(iter)) {
                /*
                 * Users don't rely on the iterator being in any particular
                 * state for async I/O returning -EIOCBQUEUED, hence we can
                 * avoid expensive iov_iter_advance(). Bypass
                 * bio_iov_iter_get_pages() and set the bvec directly.
                 */
                bio_iov_bvec_set(bio, iter);
        } else {
                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio_put(bio);
                        return ret;
                }
        }
        dio->size = bio->bi_iter.bi_size;

        if (is_read) {
                if (user_backed_iter(iter)) {
                        dio->flags |= DIO_SHOULD_DIRTY;
                        bio_set_pages_dirty(bio);
                }
        } else {
                task_io_account_write(bio->bi_iter.bi_size);
        }

        if (iocb->ki_flags & IOCB_NOWAIT)
                bio->bi_opf |= REQ_NOWAIT;

        if (iocb->ki_flags & IOCB_HIPRI) {
                bio->bi_opf |= REQ_POLLED;
                submit_bio(bio);
                WRITE_ONCE(iocb->private, bio);
        } else {
                submit_bio(bio);
        }
        return -EIOCBQUEUED;
}

static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        unsigned int nr_pages;

        if (!iov_iter_count(iter))
                return 0;

        nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
        if (likely(nr_pages <= BIO_MAX_VECS)) {
                if (is_sync_kiocb(iocb))
                        return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
                return __blkdev_direct_IO_async(iocb, iter, nr_pages);
        }
        return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}
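
/*
 * Illustrative user-space sketch (device name, sizes and the 4096-byte
 * logical block size are assumed example values): an O_DIRECT read that
 * satisfies the alignment rules enforced by blkdev_dio_unaligned() above,
 * since both the file offset and the buffer address/length are aligned to
 * the logical block size:
 *
 *	int fd = open("/dev/sda", O_RDONLY | O_DIRECT);
 *	void *buf;
 *
 *	if (posix_memalign(&buf, 4096, 65536) == 0)
 *		pread(fd, buf, 65536, 0);
 */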

static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
                unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
        struct block_device *bdev = I_BDEV(inode);
        loff_t isize = i_size_read(inode);

        iomap->bdev = bdev;
        iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
        if (iomap->offset >= isize)
                return -EIO;
        iomap->type = IOMAP_MAPPED;
        iomap->addr = iomap->offset;
        iomap->length = isize - iomap->offset;
        iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
        return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
        .iomap_begin            = blkdev_iomap_begin,
};
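
/*
 * Worked example (illustrative values): with a 512-byte logical block size
 * and offset = 5000, blkdev_iomap_begin() reports iomap->offset =
 * ALIGN_DOWN(5000, 512) = 4608, iomap->addr = 4608 and iomap->length =
 * isize - 4608, i.e. the rest of the device as a single IOMAP_MAPPED extent.
 */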

#ifdef CONFIG_BUFFER_HEAD
static int blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}

/*
 * We cannot call mpage_writepages() as it does not take the buffer lock.
 * We must use block_write_full_folio() directly, which holds the buffer
 * lock.  The buffer lock provides the synchronisation with writeback
 * that filesystems rely on when they use the blockdev's mapping.
 */
static int blkdev_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        struct blk_plug plug;
        int err;

        blk_start_plug(&plug);
        err = write_cache_pages(mapping, wbc, block_write_full_folio,
                        blkdev_get_block);
        blk_finish_plug(&plug);

        return err;
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
        return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
        return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned copied, struct page *page,
                void *fsdata)
{
        int ret;

        ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        unlock_page(page);
        put_page(page);

        return ret;
}

const struct address_space_operations def_blk_aops = {
        .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .read_folio     = blkdev_read_folio,
        .readahead      = blkdev_readahead,
        .writepages     = blkdev_writepages,
        .write_begin    = blkdev_write_begin,
        .write_end      = blkdev_write_end,
        .migrate_folio  = buffer_migrate_folio_norefs,
        .is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
        return iomap_read_folio(folio, &blkdev_iomap_ops);
}

static void blkdev_readahead(struct readahead_control *rac)
{
        iomap_readahead(rac, &blkdev_iomap_ops);
}

static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
                struct inode *inode, loff_t offset, unsigned int len)
{
        loff_t isize = i_size_read(inode);

        if (WARN_ON_ONCE(offset >= isize))
                return -EIO;
        if (offset >= wpc->iomap.offset &&
            offset < wpc->iomap.offset + wpc->iomap.length)
                return 0;
        return blkdev_iomap_begin(inode, offset, isize - offset,
                                  IOMAP_WRITE, &wpc->iomap, NULL);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
        .map_blocks             = blkdev_map_blocks,
};

static int blkdev_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        struct iomap_writepage_ctx wpc = { };

        return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
}

const struct address_space_operations def_blk_aops = {
        .dirty_folio            = filemap_dirty_folio,
        .release_folio          = iomap_release_folio,
        .invalidate_folio       = iomap_invalidate_folio,
        .read_folio             = blkdev_read_folio,
        .readahead              = blkdev_readahead,
        .writepages             = blkdev_writepages,
        .is_partially_uptodate  = iomap_is_partially_uptodate,
        .error_remove_folio     = generic_error_remove_folio,
        .migrate_folio          = filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t retval;

        inode_lock(bd_inode);
        retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
        inode_unlock(bd_inode);
        return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
                int datasync)
{
        struct block_device *bdev = I_BDEV(filp->f_mapping->host);
        int error;

        error = file_write_and_wait_range(filp, start, end);
        if (error)
                return error;

        /*
         * There is no need to serialise calls to blkdev_issue_flush with
         * i_mutex and doing so causes performance issues with concurrent
         * O_SYNC writers to a block device.
         */
        error = blkdev_issue_flush(bdev);
        if (error == -EOPNOTSUPP)
                error = 0;

        return error;
}

/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at file open flags and generate corresponding block open flags from
 * them. The function works both for files that are just being opened (e.g.
 * during the ->open callback) and for files that are already open. This is
 * actually non-trivial (see comment in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
        blk_mode_t mode = 0;

        if (file->f_mode & FMODE_READ)
                mode |= BLK_OPEN_READ;
        if (file->f_mode & FMODE_WRITE)
                mode |= BLK_OPEN_WRITE;
        /*
         * do_dentry_open() clears O_EXCL from f_flags, use file->private_data
         * to determine whether the open was exclusive for already open files.
         */
        if (file->private_data)
                mode |= BLK_OPEN_EXCL;
        else if (file->f_flags & O_EXCL)
                mode |= BLK_OPEN_EXCL;
        if (file->f_flags & O_NDELAY)
                mode |= BLK_OPEN_NDELAY;

        /*
         * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the floppy
         * driver has historically allowed ioctls as if the file was opened for
         * writing, but does not allow any actual reads or writes.
         */
        if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
                mode |= BLK_OPEN_WRITE_IOCTL;

        return mode;
}
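
/*
 * Illustrative mapping (assumed example flags): a file opened O_RDWR with an
 * exclusive open recorded in ->private_data converts to BLK_OPEN_READ |
 * BLK_OPEN_WRITE | BLK_OPEN_EXCL, while the O_RDWR | O_WRONLY access mode
 * additionally sets BLK_OPEN_WRITE_IOCTL for the historical floppy ioctl
 * behaviour described above.
 */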

static int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;
        blk_mode_t mode;
        int ret;

        mode = file_to_blk_mode(filp);
        /* Use the file as the holder. */
        if (mode & BLK_OPEN_EXCL)
                filp->private_data = filp;
        ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
        if (ret)
                return ret;

        bdev = blkdev_get_no_open(inode->i_rdev);
        if (!bdev)
                return -ENXIO;

        ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
        if (ret)
                blkdev_put_no_open(bdev);
        return ret;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
        bdev_release(filp);
        return 0;
}

static ssize_t blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        ssize_t written;

        written = kiocb_invalidate_pages(iocb, count);
        if (written) {
                if (written == -EBUSY)
                        return 0;
                return written;
        }

        written = blkdev_direct_IO(iocb, from);
        if (written > 0) {
                kiocb_invalidate_post_direct_write(iocb, count);
                iocb->ki_pos += written;
                count -= written;
        }
        if (written != -EIOCBQUEUED)
                iov_iter_revert(from, count - iov_iter_count(from));
        return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
        return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops);
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct block_device *bdev = I_BDEV(file->f_mapping->host);
        struct inode *bd_inode = bdev->bd_inode;
        loff_t size = bdev_nr_bytes(bdev);
        size_t shorted = 0;
        ssize_t ret;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
                return -ETXTBSY;

        if (!iov_iter_count(from))
                return 0;

        if (iocb->ki_pos >= size)
                return -ENOSPC;

        if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
                return -EOPNOTSUPP;

        size -= iocb->ki_pos;
        if (iov_iter_count(from) > size) {
                shorted = iov_iter_count(from) - size;
                iov_iter_truncate(from, size);
        }

        ret = file_update_time(file);
        if (ret)
                return ret;

        if (iocb->ki_flags & IOCB_DIRECT) {
                ret = blkdev_direct_write(iocb, from);
                if (ret >= 0 && iov_iter_count(from))
                        ret = direct_write_fallback(iocb, from, ret,
                                        blkdev_buffered_write(iocb, from));
        } else {
                ret = blkdev_buffered_write(iocb, from);
        }

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        iov_iter_reexpand(from, iov_iter_count(from) + shorted);
        return ret;
}

static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
        loff_t size = bdev_nr_bytes(bdev);
        loff_t pos = iocb->ki_pos;
        size_t shorted = 0;
        ssize_t ret = 0;
        size_t count;

        if (unlikely(pos + iov_iter_count(to) > size)) {
                if (pos >= size)
                        return 0;
                size -= pos;
                shorted = iov_iter_count(to) - size;
                iov_iter_truncate(to, size);
        }

        count = iov_iter_count(to);
        if (!count)
                goto reexpand; /* skip atime */

        if (iocb->ki_flags & IOCB_DIRECT) {
                ret = kiocb_write_and_wait(iocb, count);
                if (ret < 0)
                        goto reexpand;
                file_accessed(iocb->ki_filp);

                ret = blkdev_direct_IO(iocb, to);
                if (ret >= 0) {
                        iocb->ki_pos += ret;
                        count -= ret;
                }
                iov_iter_revert(to, count - iov_iter_count(to));
                if (ret < 0 || !count)
                        goto reexpand;
        }

        ret = filemap_read(iocb, to, ret);

reexpand:
        if (unlikely(shorted))
                iov_iter_reexpand(to, iov_iter_count(to) + shorted);
        return ret;
}

#define BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                             loff_t len)
{
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
        loff_t end = start + len - 1;
        loff_t isize;
        int error;

        /* Fail if we don't recognize the flags. */
        if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
                return -EOPNOTSUPP;

        /* Don't go off the end of the device. */
        isize = bdev_nr_bytes(bdev);
        if (start >= isize)
                return -EINVAL;
        if (end >= isize) {
                if (mode & FALLOC_FL_KEEP_SIZE) {
                        len = isize - start;
                        end = start + len - 1;
                } else
                        return -EINVAL;
        }

        /*
         * Don't allow IO that isn't aligned to logical block size.
         */
        if ((start | len) & (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        filemap_invalidate_lock(inode->i_mapping);

        /*
         * Invalidate the page cache, including dirty pages, for valid
         * de-allocate mode calls to fallocate().
         */
        switch (mode) {
        case FALLOC_FL_ZERO_RANGE:
        case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
                error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
                if (error)
                        goto fail;

                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOUNMAP);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
                error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
                if (error)
                        goto fail;

                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOFALLBACK);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
                error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
                if (error)
                        goto fail;

                error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL);
                break;
        default:
                error = -EOPNOTSUPP;
        }

 fail:
        filemap_invalidate_unlock(inode->i_mapping);
        return error;
}
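
/*
 * Illustrative user-space sketch (device name and length are placeholder
 * values): discarding the first 1 MiB of a block device maps to the
 * blkdev_issue_discard() case above; start and len must be logical-block
 * aligned:
 *
 *	int fd = open("/dev/sdb", O_RDWR);
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE |
 *		      FALLOC_FL_NO_HIDE_STALE, 0, 1 << 20);
 */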

static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *bd_inode = bdev_file_inode(file);

        if (bdev_read_only(I_BDEV(bd_inode)))
                return generic_file_readonly_mmap(file, vma);

        return generic_file_mmap(file, vma);
}

const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_release,
        .llseek         = blkdev_llseek,
        .read_iter      = blkdev_read_iter,
        .write_iter     = blkdev_write_iter,
        .iopoll         = iocb_bio_iopoll,
        .mmap           = blkdev_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = blkdev_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
#endif
        .splice_read    = filemap_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = blkdev_fallocate,
};

static __init int blkdev_init(void)
{
        return bioset_init(&blkdev_dio_pool, 4,
                                offsetof(struct blkdev_dio, bio),
                                BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);