// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include <linux/io_uring/cmd.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

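/*
 * Direct I/O to a block device must be aligned to the logical block
 * size: both the starting position and the layout of the iovec
 * segments are checked, and misaligned requests fail with -EINVAL.
 */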
static bool blkdev_dio_invalid(struct block_device *bdev, struct kiocb *iocb,
			       struct iov_iter *iter)
{
	return iocb->ki_pos & (bdev_logical_block_size(bdev) - 1) ||
		!bdev_iter_is_aligned(bdev, iter);
}

#define DIO_INLINE_BIO_VECS 4

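/*
 * Fast path for synchronous I/O that fits in a single bio: the bio
 * lives on the stack, small requests also use an on-stack bio_vec
 * array, and the caller blocks in submit_bio_wait() until completion.
 */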
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, struct block_device *bdev,
		unsigned int nr_pages)
{
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	WARN_ON_ONCE(iocb->ki_flags & IOCB_HAS_METADATA);
	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio.bi_write_stream = iocb->ki_write_stream;
	bio.bi_ioprio = iocb->ki_ioprio;
	if (iocb->ki_flags & IOCB_ATOMIC)
		bio.bi_opf |= REQ_ATOMIC;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

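/*
 * State shared between submission and completion of a multi-bio direct
 * I/O request.  The first bio is embedded in the structure itself, and
 * dio->ref counts the bios still in flight.
 */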
enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

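/*
 * Completion handler for __blkdev_direct_IO(): the first error status
 * seen is recorded, and the kiocb is completed (or the synchronous
 * waiter woken) only when the last bio of the request finishes.
 */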
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;
	bool is_sync = dio->flags & DIO_IS_SYNC;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (!is_sync && (dio->iocb->ki_flags & IOCB_HAS_METADATA))
		bio_integrity_unmap_user(bio);

	if (atomic_dec_and_test(&dio->ref)) {
		if (!is_sync) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

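/*
 * Multi-bio path, used when the request needs more segments than fit
 * in a single bio: the iterator is mapped into as many bios as needed,
 * all sharing one blkdev_dio, with submission batched under a plug.
 */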
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		struct block_device *bdev, unsigned int nr_pages)
{
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure which is embedded
	 * into the first bio stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
		bio->bi_write_stream = iocb->ki_write_stream;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}
		if (iocb->ki_flags & IOCB_NOWAIT) {
			/*
			 * This is nonblocking IO, and we need to allocate
			 * another bio if we have data left to map. As we
			 * cannot guarantee that one of the sub bios will not
			 * fail getting issued with NOWAIT set, and as error
			 * results are coalesced across all of them, be safe
			 * and ask for a retry of this from blocking context.
			 */
			if (unlikely(iov_iter_count(iter))) {
				ret = -EAGAIN;
				goto fail;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}
		if (!is_sync && (iocb->ki_flags & IOCB_HAS_METADATA)) {
			ret = bio_integrity_map_iter(bio, iocb->private);
			if (unlikely(ret))
				goto fail;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
fail:
	bio_release_pages(bio, false);
	bio_clear_flag(bio, BIO_REFFED);
	bio_put(bio);
	blk_finish_plug(&plug);
	return ret;
}

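/*
 * Completion handler for the single-bio async path below.  Unlike
 * blkdev_bio_end_io() there is no reference count to drop, as exactly
 * one bio is in flight per request.
 */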
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	if (iocb->ki_flags & IOCB_HAS_METADATA)
		bio_integrity_unmap_user(bio);

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

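/*
 * Async path for requests that fit in a single bio.  Completion runs
 * through blkdev_bio_end_io_async(); for IOCB_HIPRI the bio is marked
 * REQ_POLLED and stashed in iocb->private so that iocb_bio_iopoll()
 * can find it.
 */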
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					struct block_device *bdev,
					unsigned int nr_pages)
{
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio->bi_write_stream = iocb->ki_write_stream;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret))
			goto out_bio_put;
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_HAS_METADATA) {
		ret = bio_integrity_map_iter(bio, iocb->private);
		WRITE_ONCE(iocb->private, NULL);
		if (unlikely(ret))
			goto out_bio_put;
	}

	if (iocb->ki_flags & IOCB_ATOMIC)
		bio->bi_opf |= REQ_ATOMIC;

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		submit_bio(bio);
	}
	return -EIOCBQUEUED;

out_bio_put:
	bio_put(bio);
	return ret;
}

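/*
 * Entry point for block device direct I/O: validate alignment, map the
 * inode write hint to a write stream where the device supports them,
 * and dispatch to one of the three submission paths above based on the
 * request size and whether the kiocb is synchronous.
 */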
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	if (blkdev_dio_invalid(bdev, iocb, iter))
		return -EINVAL;

	if (iov_iter_rw(iter) == WRITE) {
		u16 max_write_streams = bdev_max_write_streams(bdev);

		if (iocb->ki_write_stream) {
			if (iocb->ki_write_stream > max_write_streams)
				return -EINVAL;
		} else if (max_write_streams) {
			enum rw_hint write_hint =
				file_inode(iocb->ki_filp)->i_write_hint;

			/*
			 * Just use the write hint as write stream for block
			 * device writes. This assumes no file system is
			 * mounted that would use the streams differently.
			 */
			if (write_hint <= max_write_streams)
				iocb->ki_write_stream = write_hint;
		}
	}

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, bdev,
							nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
	} else if (iocb->ki_flags & IOCB_ATOMIC) {
		return -EINVAL;
	}
	return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
}

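/*
 * Trivial iomap implementation for a block device: every in-range
 * offset maps 1:1 to the same offset on the device, so a single
 * mapping covering everything up to EOF is returned.
 */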
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = I_BDEV(inode);
	loff_t isize = i_size_read(inode);

	if (offset >= isize)
		return -EIO;

	iomap->bdev = bdev;
	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
	iomap->type = IOMAP_MAPPED;
	iomap->addr = iomap->offset;
	iomap->length = isize - iomap->offset;
	iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
	return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
	.iomap_begin		= blkdev_iomap_begin,
};

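/*
 * The buffered I/O address_space operations come in two flavours: the
 * historical buffer_head based implementation, and an iomap based one
 * used when CONFIG_BUFFER_HEAD is disabled.
 */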
#ifdef CONFIG_BUFFER_HEAD
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

/*
 * We cannot call mpage_writepages() as it does not take the buffer lock.
 * We must use block_write_full_folio() directly which holds the buffer
 * lock. The buffer lock provides the synchronisation with writeback
 * that filesystems rely on when they use the blockdev's mapping.
 */
static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	struct blk_plug plug;
	int err;

	blk_start_plug(&plug);
	while ((folio = writeback_iter(mapping, wbc, folio, &err)))
		err = block_write_full_folio(folio, wbc, blkdev_get_block);
	blk_finish_plug(&plug);

	return err;
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
	return block_write_begin(mapping, pos, len, foliop, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct folio *folio,
		void *fsdata)
{
	int ret;

	ret = block_write_end(file, mapping, pos, len, copied, folio, fsdata);

	folio_unlock(folio);
	folio_put(folio);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= blkdev_read_folio,
	.readahead	= blkdev_readahead,
	.writepages	= blkdev_writepages,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.migrate_folio	= buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &blkdev_iomap_ops);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &blkdev_iomap_ops);
}

static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset, unsigned int len)
{
	loff_t isize = i_size_read(inode);

	if (WARN_ON_ONCE(offset >= isize))
		return -EIO;
	if (offset >= wpc->iomap.offset &&
	    offset < wpc->iomap.offset + wpc->iomap.length)
		return 0;
	return blkdev_iomap_begin(inode, offset, isize - offset,
				  IOMAP_WRITE, &wpc->iomap, NULL);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
	.map_blocks		= blkdev_map_blocks,
};

static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio		= filemap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.read_folio		= blkdev_read_folio,
	.readahead		= blkdev_readahead,
	.writepages		= blkdev_writepages,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_folio	= generic_error_remove_folio,
	.migrate_folio		= filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * For a block special file, file_inode(file)->i_size is zero, so we
 * compute the size by hand (just as in block_read/write above).
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at file open flags and generate corresponding block open flags from
 * them. The function works both for files just being opened (e.g. during the
 * ->open callback) and for files that are already open. This is actually
 * non-trivial (see comment in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
	blk_mode_t mode = 0;

	if (file->f_mode & FMODE_READ)
		mode |= BLK_OPEN_READ;
	if (file->f_mode & FMODE_WRITE)
		mode |= BLK_OPEN_WRITE;
	/*
	 * do_dentry_open() clears O_EXCL from f_flags, use file->private_data
	 * to determine whether the open was exclusive for already open files.
	 */
	if (file->private_data)
		mode |= BLK_OPEN_EXCL;
	else if (file->f_flags & O_EXCL)
		mode |= BLK_OPEN_EXCL;
	if (file->f_flags & O_NDELAY)
		mode |= BLK_OPEN_NDELAY;

	/*
	 * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the floppy
	 * driver has historically allowed ioctls as if the file was opened for
	 * writing, but does not allow any actual reads or writes.
	 */
	if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
		mode |= BLK_OPEN_WRITE_IOCTL;

	return mode;
}

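/*
 * Open the block device identified by inode->i_rdev.  For exclusive
 * opens the file itself serves as the holder, recorded in
 * ->private_data before the permission check so that both bdev_open()
 * and later blkdev_release() see the same holder.
 */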
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;
	blk_mode_t mode;
	int ret;

	mode = file_to_blk_mode(filp);
	/* Use the file as the holder. */
	if (mode & BLK_OPEN_EXCL)
		filp->private_data = filp;
	ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
	if (ret)
		return ret;

	bdev = blkdev_get_no_open(inode->i_rdev, true);
	if (!bdev)
		return -ENXIO;

	if (bdev_can_atomic_write(bdev))
		filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;

	ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
	if (ret)
		blkdev_put_no_open(bdev);
	return ret;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
	bdev_release(filp);
	return 0;
}

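/*
 * Kick off a direct write, invalidating any cached pages over the
 * range first and again after a successful write so that buffered
 * readers do not see stale data.  An -EBUSY invalidation failure is
 * returned as 0 so that the caller falls back to buffered I/O.
 */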
static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	ssize_t written;

	written = kiocb_invalidate_pages(iocb, count);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = blkdev_direct_IO(iocb, from);
	if (written > 0) {
		kiocb_invalidate_post_direct_write(iocb, count);
		iocb->ki_pos += written;
		count -= written;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, count - iov_iter_count(from));
	return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL);
}

/*
 * Write data to the block device. Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(bd_inode);
	bool atomic = iocb->ki_flags & IOCB_ATOMIC;
	loff_t size = bdev_nr_bytes(bdev);
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	if (atomic) {
		ret = generic_atomic_write_valid(iocb, from);
		if (ret)
			return ret;
	}

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		if (atomic)
			return -EINVAL;
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	ret = file_update_time(file);
	if (ret)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = blkdev_direct_write(iocb, from);
		if (ret >= 0 && iov_iter_count(from))
			ret = direct_write_fallback(iocb, from, ret,
					blkdev_buffered_write(iocb, from));
	} else {
		/*
		 * Take i_rwsem and invalidate_lock to avoid racing with
		 * set_blocksize changing i_blkbits/folio order and punching
		 * out the pagecache.
		 */
		inode_lock_shared(bd_inode);
		ret = blkdev_buffered_write(iocb, from);
		inode_unlock_shared(bd_inode);
	}

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	return ret;
}

static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *bd_inode = bdev_file_inode(iocb->ki_filp);
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = kiocb_write_and_wait(iocb, count);
		if (ret < 0)
			goto reexpand;
		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret > 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		if (ret != -EIOCBQUEUED)
			iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	/*
	 * Take i_rwsem and invalidate_lock to avoid racing with set_blocksize
	 * changing i_blkbits/folio order and punching out the pagecache.
	 */
	inode_lock_shared(bd_inode);
	ret = filemap_read(iocb, to, ret);
	inode_unlock_shared(bd_inode);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

841
842#define BLKDEV_FALLOC_FL_SUPPORTED \
843 (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
ad01dada 844 FALLOC_FL_ZERO_RANGE)
cd82cca7
CH
845
846static long blkdev_fallocate(struct file *file, int mode, loff_t start,
847 loff_t len)
848{
f278eb3d
ML
849 struct inode *inode = bdev_file_inode(file);
850 struct block_device *bdev = I_BDEV(inode);
cd82cca7
CH
851 loff_t end = start + len - 1;
852 loff_t isize;
853 int error;
854
855 /* Fail if we don't recognize the flags. */
856 if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
857 return -EOPNOTSUPP;
858
859 /* Don't go off the end of the device. */
2a93ad8f 860 isize = bdev_nr_bytes(bdev);
cd82cca7
CH
861 if (start >= isize)
862 return -EINVAL;
863 if (end >= isize) {
864 if (mode & FALLOC_FL_KEEP_SIZE) {
865 len = isize - start;
866 end = start + len - 1;
867 } else
868 return -EINVAL;
869 }
870
871 /*
872 * Don't allow IO that isn't aligned to logical block size.
873 */
874 if ((start | len) & (bdev_logical_block_size(bdev) - 1))
875 return -EINVAL;
876
c0e473a0 877 inode_lock(inode);
f278eb3d
ML
878 filemap_invalidate_lock(inode->i_mapping);
879
1364a3c3
SK
880 /*
881 * Invalidate the page cache, including dirty pages, for valid
882 * de-allocate mode calls to fallocate().
883 */
cd82cca7
CH
884 switch (mode) {
885 case FALLOC_FL_ZERO_RANGE:
886 case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
1364a3c3
SK
887 error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
888 if (error)
889 goto fail;
890
6549a874
PB
891 error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
892 len >> SECTOR_SHIFT, GFP_KERNEL,
893 BLKDEV_ZERO_NOUNMAP);
cd82cca7
CH
894 break;
895 case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
1364a3c3
SK
896 error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
897 if (error)
898 goto fail;
899
6549a874
PB
900 error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
901 len >> SECTOR_SHIFT, GFP_KERNEL,
902 BLKDEV_ZERO_NOFALLBACK);
cd82cca7 903 break;
cd82cca7 904 default:
f278eb3d 905 error = -EOPNOTSUPP;
cd82cca7 906 }
cd82cca7 907
f278eb3d
ML
908 fail:
909 filemap_invalidate_unlock(inode->i_mapping);
c0e473a0 910 inode_unlock(inode);
f278eb3d 911 return error;
cd82cca7
CH
912}
913
static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(file);

	if (bdev_read_only(I_BDEV(bd_inode)))
		return generic_file_readonly_mmap(file, vma);

	return generic_file_mmap(file, vma);
}

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_release,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= blkdev_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
	.uring_cmd	= blkdev_uring_cmd,
	.fop_flags	= FOP_BUFFER_RASYNC,
};

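/*
 * blkdev_dio_pool fronts each bio allocation with enough padding that
 * container_of() on the embedded bio recovers the struct blkdev_dio.
 */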
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
				offsetof(struct blkdev_dio, bio),
				BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);