// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

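/*
 * Illustrative userspace sketch (hypothetical device name, not part of
 * this file): the REQ_FUA fast path above is taken for sync direct
 * writes, e.g. a device opened with O_DIRECT | O_DSYNC:
 *
 *	int fd = open("/dev/sdX", O_WRONLY | O_DIRECT | O_DSYNC);
 *	void *buf;
 *
 *	posix_memalign(&buf, 4096, 4096);	// logical-block aligned
 *	pwrite(fd, buf, 4096, 0);	// persisted via FUA, no completion work item
 */
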
static bool blkdev_dio_unaligned(struct block_device *bdev, loff_t pos,
				 struct iov_iter *iter)
{
	return pos & (bdev_logical_block_size(bdev) - 1) ||
		!bdev_iter_is_aligned(bdev, iter);
}

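/*
 * Worked example (assumption: a 512-byte logical block size): pos = 4096
 * passes the check above, while pos = 4100 fails it because
 * 4100 & 511 == 4, so the caller sees -EINVAL.  The same applies to a
 * user buffer or length that bdev_iter_is_aligned() rejects.
 */
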
#define DIO_INLINE_BIO_VECS 4

static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

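/*
 * State shared by the multi-bio and async direct I/O paths below.  The
 * iocb/waiter union is safe because DIO_IS_SYNC in ->flags records which
 * member is live: a sync caller parks itself in ->waiter and is woken
 * from the end_io handler, while an async caller stores its ->iocb and
 * is completed through ->ki_complete().
 */
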
enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		if (!(dio->flags & DIO_IS_SYNC)) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure which is embedded
	 * into the first bio stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}
		if (iocb->ki_flags & IOCB_NOWAIT) {
			/*
			 * This is nonblocking IO, and we need to allocate
			 * another bio if we have data left to map. As we
			 * cannot guarantee that one of the sub bios will not
			 * fail getting issued FOR NOWAIT and as error results
			 * are coalesced across all of them, be safe and ask for
			 * a retry of this from blocking context.
			 */
			if (unlikely(iov_iter_count(iter))) {
				bio_release_pages(bio, false);
				bio_clear_flag(bio, BIO_REFFED);
				bio_put(bio);
				blk_finish_plug(&plug);
				return -EAGAIN;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}

static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

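/*
 * Fast path for async I/O that fits in a single bio: exactly one bio
 * completes the kiocb from blkdev_bio_end_io_async() above, so none of
 * the dio reference counting of __blkdev_direct_IO() is needed.
 */
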
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		submit_bio(bio);
	}
	return -EIOCBQUEUED;
}

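/*
 * Dispatch between the three implementations above: I/O that fits in a
 * single bio takes the on-stack (sync) or bioset-backed (async) fast
 * path; anything larger goes through the multi-bio loop.
 */
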
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, nr_pages);
	}
	return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}

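/*
 * The mapping for a block device is trivial: every byte maps 1:1 to the
 * same offset on the device, so a single IOMAP_MAPPED extent from the
 * aligned offset to the end of the device describes it completely.
 */
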
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = I_BDEV(inode);
	loff_t isize = i_size_read(inode);

	iomap->bdev = bdev;
	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
	if (iomap->offset >= isize)
		return -EIO;
	iomap->type = IOMAP_MAPPED;
	iomap->addr = iomap->offset;
	iomap->length = isize - iomap->offset;
	iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
	return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
	.iomap_begin		= blkdev_iomap_begin,
};

#ifdef CONFIG_BUFFER_HEAD
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
	.read_folio	= blkdev_read_folio,
	.readahead	= blkdev_readahead,
	.writepage	= blkdev_writepage,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.migrate_folio	= buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &blkdev_iomap_ops);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &blkdev_iomap_ops);
}

static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset)
{
	loff_t isize = i_size_read(inode);

	if (WARN_ON_ONCE(offset >= isize))
		return -EIO;
	if (offset >= wpc->iomap.offset &&
	    offset < wpc->iomap.offset + wpc->iomap.length)
		return 0;
	return blkdev_iomap_begin(inode, offset, isize - offset,
				  IOMAP_WRITE, &wpc->iomap, NULL);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
	.map_blocks		= blkdev_map_blocks,
};

static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= filemap_dirty_folio,
	.release_folio	= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.read_folio	= blkdev_read_folio,
	.readahead	= blkdev_readahead,
	.writepages	= blkdev_writepages,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.migrate_folio	= filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

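/*
 * Illustrative userspace sketch (hypothetical device name): the
 * fixed-size llseek above is what makes the classic idiom for sizing a
 * block device work:
 *
 *	int fd = open("/dev/sdX", O_RDONLY);
 *	off_t size = lseek(fd, 0, SEEK_END);	// capacity in bytes
 */
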
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

blk_mode_t file_to_blk_mode(struct file *file)
{
	blk_mode_t mode = 0;

	if (file->f_mode & FMODE_READ)
		mode |= BLK_OPEN_READ;
	if (file->f_mode & FMODE_WRITE)
		mode |= BLK_OPEN_WRITE;
	if (file->private_data)
		mode |= BLK_OPEN_EXCL;
	if (file->f_flags & O_NDELAY)
		mode |= BLK_OPEN_NDELAY;

	/*
	 * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the
	 * floppy driver has historically allowed ioctls as if the file was
	 * opened for writing, but does not allow any actual reads or writes.
	 */
	if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
		mode |= BLK_OPEN_WRITE_IOCTL;

	return mode;
}

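/*
 * Worked example (not from the original source): an open(2) with
 * O_RDWR | O_EXCL arrives here with FMODE_READ | FMODE_WRITE set and a
 * holder stored in ->private_data by blkdev_open(), and thus maps to
 * BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_EXCL.
 */
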
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binaries need it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;
	filp->f_mode |= FMODE_BUF_RASYNC | FMODE_CAN_ODIRECT;

	/*
	 * Use the file private data to store the holder for exclusive opens.
	 * file_to_blk_mode relies on it being present to set BLK_OPEN_EXCL.
	 */
	if (filp->f_flags & O_EXCL)
		filp->private_data = filp;

	bdev = blkdev_get_by_dev(inode->i_rdev, file_to_blk_mode(filp),
				 filp->private_data, NULL);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (bdev_nowait(bdev))
		filp->f_mode |= FMODE_NOWAIT;

	filp->f_mapping = bdev->bd_inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	return 0;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
	blkdev_put(I_BDEV(filp->f_mapping->host), filp->private_data);
	return 0;
}

static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	ssize_t written;

	written = kiocb_invalidate_pages(iocb, count);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = blkdev_direct_IO(iocb, from);
	if (written > 0) {
		kiocb_invalidate_post_direct_write(iocb, count);
		iocb->ki_pos += written;
		count -= written;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, count - iov_iter_count(from));
	return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops);
}

/*
 * Write data to the block device. Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	struct inode *bd_inode = bdev->bd_inode;
	loff_t size = bdev_nr_bytes(bdev);
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	ret = file_update_time(file);
	if (ret)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = blkdev_direct_write(iocb, from);
		if (ret >= 0 && iov_iter_count(from))
			ret = direct_write_fallback(iocb, from, ret,
					blkdev_buffered_write(iocb, from));
	} else {
		ret = blkdev_buffered_write(iocb, from);
	}

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	return ret;
}

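/*
 * Illustrative userspace sketch (assumption: a writable fd without
 * O_DIRECT): the IOCB_NOWAIT-without-IOCB_DIRECT rejection above is what
 * a pwritev2(2) caller using RWF_NOWAIT observes on a buffered write:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *
 *	pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);	// -EOPNOTSUPP unless O_DIRECT
 */
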
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = kiocb_write_and_wait(iocb, count);
		if (ret < 0)
			goto reexpand;
		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret >= 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	ret = filemap_read(iocb, to, ret);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);

	/*
	 * Invalidate the page cache, including dirty pages, for valid
	 * de-allocate mode calls to fallocate().
	 */
	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOFALLBACK);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL);
		break;
	default:
		error = -EOPNOTSUPP;
	}

 fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}

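/*
 * Illustrative userspace sketch (hypothetical device name): discarding
 * the first 1 MiB of a device through the fallocate path above; both
 * offset and length satisfy the logical-block-size alignment check:
 *
 *	int fd = open("/dev/sdX", O_RDWR);
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE |
 *		      FALLOC_FL_NO_HIDE_STALE, 0, 1 << 20);
 */
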
static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(file);

	if (bdev_read_only(I_BDEV(bd_inode)))
		return generic_file_readonly_mmap(file, vma);

	return generic_file_mmap(file, vma);
}

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_release,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= blkdev_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
};

static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
			offsetof(struct blkdev_dio, bio),
			BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);