// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include "blk.h"
static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}
static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}
static bool blkdev_dio_unaligned(struct block_device *bdev, loff_t pos,
				struct iov_iter *iter)
{
	return pos & (bdev_logical_block_size(bdev) - 1) ||
		!bdev_iter_is_aligned(bdev, iter);
}
#define DIO_INLINE_BIO_VECS 4
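/*
 * Fast path for direct I/O that fits in a single bio: build it on the stack
 * with up to DIO_INLINE_BIO_VECS inline vectors (falling back to a kmalloc'ed
 * array) and wait for it synchronously with submit_bio_wait().
 */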
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}
enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;
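/*
 * Completion handler for the multi-bio path.  The first error seen is
 * recorded in the parent dio; once the last reference is dropped the iocb is
 * completed (async callers) or the sleeping submitter is woken (sync callers).
 */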
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		if (!(dio->flags & DIO_IS_SYNC)) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
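/*
 * Multi-bio path for requests needing more than BIO_MAX_VECS segments: bios
 * are built and submitted in a loop under a plug, all sharing the refcounted
 * blkdev_dio embedded in the first bio.  Sync callers sleep until
 * blkdev_bio_end_io() drops the last reference; async callers get
 * -EIOCBQUEUED and are completed from the end_io handler.
 */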
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure which is
	 * embedded into the first bio stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}
		if (iocb->ki_flags & IOCB_NOWAIT) {
			/*
			 * This is nonblocking IO, and we need to allocate
			 * another bio if we have data left to map. As we
			 * cannot guarantee that one of the sub bios will not
			 * fail getting issued for NOWAIT, and as error results
			 * are coalesced across all of them, be safe and ask
			 * for a retry of this from blocking context.
			 */
			if (unlikely(iov_iter_count(iter))) {
				bio_release_pages(bio, false);
				bio_clear_flag(bio, BIO_REFFED);
				bio_put(bio);
				blk_finish_plug(&plug);
				return -EAGAIN;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}
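/*
 * Completion handler for the single-bio async path: the iocb is completed
 * directly from bio completion context.
 */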
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
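/*
 * Async fast path for direct I/O that fits in a single bio.  The blkdev_dio
 * carrying the iocb is embedded in front of the bio allocated from
 * blkdev_dio_pool, so no separate completion state needs to be allocated.
 */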
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		submit_bio(bio);
	}
	return -EIOCBQUEUED;
}
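/*
 * Dispatch direct I/O: requests that fit in a single bio take the simple
 * (sync) or async fast path above, anything larger goes through the
 * multi-bio __blkdev_direct_IO() loop.
 */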
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, nr_pages);
	}
	return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}
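/*
 * The block device is mapped linearly, so iomap_begin simply reports one
 * mapped extent at the same disk address as the file offset, clamped to the
 * device size.
 */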
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = I_BDEV(inode);
	loff_t isize = i_size_read(inode);

	iomap->bdev = bdev;
	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
	if (iomap->offset >= isize)
		return -EIO;
	iomap->type = IOMAP_MAPPED;
	iomap->addr = iomap->offset;
	iomap->length = isize - iomap->offset;
	iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
	return 0;
}
static const struct iomap_ops blkdev_iomap_ops = {
	.iomap_begin		= blkdev_iomap_begin,
};
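/*
 * Buffered I/O on the block device node uses buffer_heads when
 * CONFIG_BUFFER_HEAD is enabled and iomap otherwise; each variant supplies
 * its own def_blk_aops below.
 */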
#ifdef CONFIG_BUFFER_HEAD
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}
static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}
static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
}
static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	int ret;

	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}
const struct address_space_operations def_blk_aops = {
	.dirty_folio		= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
	.read_folio		= blkdev_read_folio,
	.readahead		= blkdev_readahead,
	.writepage		= blkdev_writepage,
	.write_begin		= blkdev_write_begin,
	.write_end		= blkdev_write_end,
	.migrate_folio		= buffer_migrate_folio_norefs,
	.is_dirty_writeback	= buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &blkdev_iomap_ops);
}
static void blkdev_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &blkdev_iomap_ops);
}
static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset)
{
	loff_t isize = i_size_read(inode);

	if (WARN_ON_ONCE(offset >= isize))
		return -EIO;
	if (offset >= wpc->iomap.offset &&
	    offset < wpc->iomap.offset + wpc->iomap.length)
		return 0;
	return blkdev_iomap_begin(inode, offset, isize - offset,
				  IOMAP_WRITE, &wpc->iomap, NULL);
}
static const struct iomap_writeback_ops blkdev_writeback_ops = {
	.map_blocks		= blkdev_map_blocks,
};
static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
}
const struct address_space_operations def_blk_aops = {
	.dirty_folio		= filemap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.read_folio		= blkdev_read_folio,
	.readahead		= blkdev_readahead,
	.writepages		= blkdev_writepages,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.migrate_folio		= filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */
/*
 * For a block special file, file_inode(file)->i_size is zero, so we compute
 * the size by hand (just as in block_read/write above).
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}
/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at the file open flags and generate corresponding block open flags
 * from them. The function works both for a file that is just being opened
 * (e.g. during the ->open callback) and for a file that is already open.
 * This is actually non-trivial (see the comment in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
	blk_mode_t mode = 0;
	struct bdev_handle *handle = file->private_data;

	if (file->f_mode & FMODE_READ)
		mode |= BLK_OPEN_READ;
	if (file->f_mode & FMODE_WRITE)
		mode |= BLK_OPEN_WRITE;
	/*
	 * do_dentry_open() clears O_EXCL from f_flags, use handle->mode to
	 * determine whether the open was exclusive for already open files.
	 */
	if (handle)
		mode |= handle->mode & BLK_OPEN_EXCL;
	else if (file->f_flags & O_EXCL)
		mode |= BLK_OPEN_EXCL;
	if (file->f_flags & O_NDELAY)
		mode |= BLK_OPEN_NDELAY;

	/*
	 * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the floppy
	 * driver has historically allowed ioctls as if the file was opened for
	 * writing, but does not allow any actual reads or writes.
	 */
	if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
		mode |= BLK_OPEN_WRITE_IOCTL;

	return mode;
}
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct bdev_handle *handle;
	blk_mode_t mode;

	/*
	 * Preserve backwards compatibility and allow large file access even if
	 * userspace doesn't ask for it explicitly. Some mkfs binaries need it.
	 * We might want to drop this workaround during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;
	filp->f_mode |= FMODE_BUF_RASYNC | FMODE_CAN_ODIRECT;

	mode = file_to_blk_mode(filp);
	handle = bdev_open_by_dev(inode->i_rdev, mode,
			mode & BLK_OPEN_EXCL ? filp : NULL, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (bdev_nowait(handle->bdev))
		filp->f_mode |= FMODE_NOWAIT;

	filp->f_mapping = handle->bdev->bd_inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->private_data = handle;
	return 0;
}
static int blkdev_release(struct inode *inode, struct file *filp)
{
	bdev_release(filp->private_data);
	return 0;
}
static ssize_t blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	ssize_t written;

	written = kiocb_invalidate_pages(iocb, count);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = blkdev_direct_IO(iocb, from);
	if (written > 0) {
		kiocb_invalidate_post_direct_write(iocb, count);
		iocb->ki_pos += written;
		count -= written;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, count - iov_iter_count(from));
	return written;
}
static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops);
}
/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver, which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * writes.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	struct inode *bd_inode = bdev->bd_inode;
	loff_t size = bdev_nr_bytes(bdev);
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	ret = file_update_time(file);
	if (ret)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = blkdev_direct_write(iocb, from);
		if (ret >= 0 && iov_iter_count(from))
			ret = direct_write_fallback(iocb, from, ret,
					blkdev_buffered_write(iocb, from));
	} else {
		ret = blkdev_buffered_write(iocb, from);
	}

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	return ret;
}
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = kiocb_write_and_wait(iocb, count);
		if (ret < 0)
			goto reexpand;
		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret >= 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	ret = filemap_read(iocb, to, ret);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}
#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
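/*
 * fallocate() on a block device: zero-range maps to blkdev_issue_zeroout(),
 * punch-hole to zeroout without the write fallback, and punch-hole with
 * NO_HIDE_STALE to a discard.  The affected page cache range is truncated
 * first under the invalidate lock.
 */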
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (isize == 0 || start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else {
			return -EINVAL;
		}
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);

	/*
	 * Invalidate the page cache, including dirty pages, for valid
	 * de-allocate mode calls to fallocate().
	 */
	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOFALLBACK);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL);
		break;
	default:
		error = -EOPNOTSUPP;
	}

fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}
static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(file);

	if (bdev_read_only(I_BDEV(bd_inode)))
		return generic_file_readonly_mmap(file, vma);

	return generic_file_mmap(file, vma);
}
const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_release,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= blkdev_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
};
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
			offsetof(struct blkdev_dio, bio),
			BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);