// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 *
 *  Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 *  Removed a lot of unnecessary code and simplified things now that
 *  the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 *  Speed up hash, lru, and free list operations.  Use gfp() for allocating
 *  hash table, use SLAB cache for buffer heads.  SMP threading.  -DaveM
 *
 *  Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 *  async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/iomap.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);
/*
 * Returns if the folio has dirty or writeback buffers. If all the buffers
 * are unlocked and clean then the folio_test_dirty information is stale. If
 * any of the buffers are locked, it is assumed they are locked for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}
/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}
/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 *
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only use the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * i_private_lock.
 *
 * Hack idea: for the blockdev mapping, i_private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take i_private_lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct folio *folio;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = ((loff_t)block << bd_inode->i_blkbits) / PAGE_SIZE;
	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		goto out;

	spin_lock(&bd_mapping->i_private_lock);
	head = folio_buffers(folio);
	if (!head)
		goto out_unlock;
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->i_private_lock);
	folio_put(folio);
out:
	return ret;
}
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
		folio_set_error(folio);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	folio_end_read(folio, folio_uptodate);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}
struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	struct inode *inode = folio->mapping->host;

	return fsverity_active(inode) &&
		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && need_fsverity(bh)) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}
/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	bool verify = need_fsverity(bh);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || verify)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}
/*
 * Completion handler for block_write_full_folio() - folios which are unlocked
 * during I/O, and which have the writeback flag cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		folio_set_error(folio);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}
/*
 * If a page's buffers are under async readin (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone starting new async I/O reads any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);
/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->i_private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for i_private_list is via the i_private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->i_private_lock does *not* protect mapping->i_private_list!  In fact,
 * mapping->i_private_list will always be protected by the backing blockdev's
 * ->i_private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->i_private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->i_private_list via these
 * utility functions are free to use i_private_lock and i_private_list for
 * whatever they want.  The only requirement is that list_empty(i_private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * dirtied.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */
/*
 * The buffer's backing address_space's i_private_lock must be held.
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.i_private_list);
}
/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->i_private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->i_private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->i_private_lock,
					&mapping->i_private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);
/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.  This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);
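
/*
 * Illustrative example (not part of the original file): a minimal sketch of
 * how a simple buffer-backed filesystem could wire generic_buffers_fsync()
 * up as its ->fsync method.  The "examplefs" names are hypothetical; a real
 * filesystem may need additional locking or journalling around this call.
 */
static int examplefs_fsync(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	return generic_buffers_fsync(file, start, end, datasync);
}

static const struct file_operations examplefs_file_operations = {
	.fsync	= examplefs_fsync,
};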
/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);

	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->i_private_data) {
		mapping->i_private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->i_private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->i_private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->i_private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
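
/*
 * Illustrative example (not part of the original file): the typical caller
 * pattern for mark_buffer_dirty_inode(), modelled loosely on how ext2 dirties
 * an indirect (metadata) block so that a later fsync() on the inode writes it
 * out via sync_mapping_buffers().  The helper name is hypothetical.
 */
static void examplefs_dirty_metadata(struct buffer_head *bh,
				     struct inode *inode)
{
	/* bh typically comes from sb_bread()/sb_getblk() on inode->i_sb */
	mark_buffer_dirty_inode(bh, inode);
	/* bh is now queued on inode->i_mapping->i_private_list */
}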
/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use i_private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->i_private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	folio_memcg_lock(folio);
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->i_private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	folio_memcg_unlock(folio);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->i_private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}
/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
 * assumes that all the buffers are against the blockdev.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);
/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);

			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	return ret;
}
/*
 * Create the appropriate buffers when given a folio for data area and
 * the size of each buffer..  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp)
{
	struct buffer_head *bh, *head;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	/* The folio lock pins the memcg */
	memcg = folio_memcg(folio);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = folio_size(folio);
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its folio */
		folio_set_bh(bh, folio, offset);
	}
out:
	set_active_memcg(old_memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
				       bool retry)
{
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;

	if (retry)
		gfp |= __GFP_NOFAIL;

	return folio_alloc_buffers(page_folio(page), size, gfp);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
static inline void link_dev_buffers(struct folio *folio,
		struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	folio_attach_private(folio, head);
}
static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = bdev_nr_bytes(bdev);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);

		retval = (sz >> sizebits);
	}
	return retval;
}
/*
 * Initialise the state of a blockdev folio's buffers.
 */
static sector_t folio_init_buffers(struct folio *folio,
		struct block_device *bdev, unsigned size)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;
	bool uptodate = folio_test_uptodate(folio);
	sector_t block = div_u64(folio_pos(folio), size);
	sector_t end_block = blkdev_max_block(bdev, size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}
/*
 * Create the page-cache folio that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 *
 * Returns false if we have a failure which cannot be cured by retrying
 * without sleeping.  Returns true if we succeeded, or the caller should retry.
 */
static bool grow_dev_folio(struct block_device *bdev, sector_t block,
		pgoff_t index, unsigned size, gfp_t gfp)
{
	struct inode *inode = bdev->bd_inode;
	struct folio *folio;
	struct buffer_head *bh;
	sector_t end_block = 0;

	folio = __filemap_get_folio(inode->i_mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return false;

	bh = folio_buffers(folio);
	if (bh) {
		if (bh->b_size == size) {
			end_block = folio_init_buffers(folio, bdev, size);
			goto unlock;
		}

		/*
		 * Retrying may succeed; for example the folio may finish
		 * writeback, or buffers may be cleaned.  This should not
		 * happen very often; maybe we have old buffers attached to
		 * this blockdev's page cache and we're trying to change
		 * the block size?
		 */
		if (!try_to_free_buffers(folio)) {
			end_block = ~0ULL;
			goto unlock;
		}
	}

	bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
	if (!bh)
		goto unlock;

	/*
	 * Link the folio to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the folio lock.
	 */
	spin_lock(&inode->i_mapping->i_private_lock);
	link_dev_buffers(folio, bh);
	end_block = folio_init_buffers(folio, bdev, size);
	spin_unlock(&inode->i_mapping->i_private_lock);
unlock:
	folio_unlock(folio);
	folio_put(folio);
	return block < end_block;
}
/*
 * Create buffers for the specified block device block's folio.  If
 * that folio was dirty, the buffers are set dirty also.  Returns false
 * if we've hit a permanent error.
 */
static bool grow_buffers(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	loff_t pos;

	/*
	 * Check for a block which lies outside our maximum possible
	 * pagecache index.
	 */
	if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return false;
	}

	/* Create a folio with the proper size buffers */
	return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
}
static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		if (!grow_buffers(bdev, block, size, gfp))
			return NULL;
	}
}
/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */
/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->i_private_lock,
 * i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct folio *folio = bh->b_folio;
		struct address_space *mapping = NULL;

		folio_memcg_lock(folio);
		if (!folio_test_set_dirty(folio)) {
			mapping = folio->mapping;
			if (mapping)
				__folio_mark_dirty(folio, mapping, 0);
		}
		folio_memcg_unlock(folio);
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);
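
/*
 * Illustrative example (not part of the original file): the common
 * read-modify-write pattern for a metadata block - read it uptodate with
 * sb_bread(), modify it, hand it to writeback with mark_buffer_dirty(), and
 * use sync_dirty_buffer() when the caller needs the write to have completed.
 * The function name and the blind memset() are purely for illustration.
 */
static int example_zero_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block);

	if (!bh)
		return -EIO;
	memset(bh->b_data, 0, sb->s_blocksize);
	mark_buffer_dirty(bh);
	/* optional: wait for the write when ordering matters */
	sync_dirty_buffer(bh);
	brelse(bh);
	return 0;
}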
void mark_buffer_write_io_error(struct buffer_head *bh)
{
	set_buffer_write_io_error(bh);
	/* FIXME: do we need to set this in both places? */
	if (bh->b_folio && bh->b_folio->mapping)
		mapping_set_error(bh->b_folio->mapping, -EIO);
	if (bh->b_assoc_map) {
		mapping_set_error(bh->b_assoc_map, -EIO);
		errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
	}
}
EXPORT_SYMBOL(mark_buffer_write_io_error);
/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head *buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_folio->mapping;

		spin_lock(&buffer_mapping->i_private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);
static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}
/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif
static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = bh;
	struct bh_lru *b;
	int i;

	check_irqs_on();
	bh_lru_lock();

	/*
	 * the refcount of buffer_head in bh_lru prevents dropping the
	 * attached page(i.e., try_to_free_buffers) so it could cause
	 * failing page migration.
	 * Skip putting upcoming bh into bh_lru until migration is done.
	 */
	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return;
	}

	b = this_cpu_ptr(&bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		swap(evictee, b->bhs[i]);
		if (evictee == bh) {
			bh_lru_unlock();
			return;
		}
	}

	get_bh(bh);
	bh_lru_unlock();
	brelse(evictee);
}
/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	if (cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return NULL;
	}
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}
/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}
EXPORT_SYMBOL(__find_get_block);
/**
 * bdev_getblk - Get a buffer_head in a block device's buffer cache.
 * @bdev: The block device.
 * @block: The block number.
 * @size: The size of buffer_heads for this @bdev.
 * @gfp: The memory allocation flags to use.
 *
 * Return: The buffer head, or NULL if memory could not be allocated.
 */
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_alloc(gfp);
	if (bh)
		return bh;

	return __getblk_slow(bdev, block, size, gfp);
}
EXPORT_SYMBOL(bdev_getblk);
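
/*
 * Illustrative example (not part of the original file): allocating a buffer
 * with bdev_getblk() without reading it from disk, filling it in memory and
 * handing it to writeback.  The gfp choice mirrors the __breadahead() caller
 * below; callers that must not fail typically add __GFP_NOFAIL instead.  The
 * function name is hypothetical.
 */
static int example_overwrite_block(struct block_device *bdev, sector_t block,
				   unsigned size, const void *data)
{
	struct buffer_head *bh;

	bh = bdev_getblk(bdev, block, size, GFP_NOWAIT | __GFP_MOVABLE);
	if (!bh)
		return -ENOMEM;

	lock_buffer(bh);
	memcpy(bh->b_data, data, size);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	brelse(bh);
	return 0;
}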
/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = bdev_getblk(bdev, block, size,
			GFP_NOWAIT | __GFP_MOVABLE);

	if (likely(bh)) {
		bh_readahead(bh, REQ_RAHEAD);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);
/**
 *  __bread_gfp() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *  @gfp: page allocation flag
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  The page cache can be allocated from non-movable area
 *  not to prevent page migration if you set gfp to zero.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread_gfp(struct block_device *bdev, sector_t block,
		   unsigned size, gfp_t gfp)
{
	struct buffer_head *bh;

	gfp |= mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);

	/*
	 * Prefer looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp |= __GFP_NOFAIL;

	bh = bdev_getblk(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);
static void __invalidate_bh_lrus(struct bh_lru *b)
{
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
}

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);

	__invalidate_bh_lrus(b);
	put_cpu_var(bh_lrus);
}

bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return true;
	}

	return false;
}

void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

/*
 * It's called from workqueue context so we need a bh_lru_lock to close
 * the race with preemption/irq.
 */
void invalidate_bh_lrus_cpu(void)
{
	struct bh_lru *b;

	bh_lru_lock();
	b = this_cpu_ptr(&bh_lrus);
	__invalidate_bh_lrus(b);
	bh_lru_unlock();
}
void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset)
{
	bh->b_folio = folio;
	BUG_ON(offset >= folio_size(folio));
	if (folio_test_highmem(folio))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = folio_address(folio) + offset;
}
EXPORT_SYMBOL(folio_set_bh);
/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head *bh)
{
	unsigned long b_state;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	b_state = READ_ONCE(bh->b_state);
	do {
	} while (!try_cmpxchg(&bh->b_state, &b_state,
			      b_state & ~BUFFER_FLAGS_DISCARD));
	unlock_buffer(bh);
}
/**
 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidate_folio() is called when all or part of the folio has been
 * invalidated by a truncate operation.
 *
 * block_invalidate_folio() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct buffer_head *head, *bh, *next;
	size_t curr_off = 0;
	size_t stop = length + offset;

	BUG_ON(!folio_test_locked(folio));

	/*
	 * Check for overflow
	 */
	BUG_ON(stop > folio_size(folio) || stop < length);

	head = folio_buffers(folio);
	if (!head)
		return;

	bh = head;
	do {
		size_t next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * Are we still fully in range ?
		 */
		if (next_off > stop)
			goto out;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire folio is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (length == folio_size(folio))
		filemap_release_folio(folio, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidate_folio);
/*
 * We attach and possibly dirty the buffers atomically wrt
 * block_dirty_folio() via i_private_lock.  try_to_free_buffers
 * is already excluded via the folio lock.
 */
struct buffer_head *create_empty_buffers(struct folio *folio,
		unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;

	head = folio_alloc_buffers(folio, blocksize, gfp);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&folio->mapping->i_private_lock);
	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
		bh = head;
		do {
			if (folio_test_dirty(folio))
				set_buffer_dirty(bh);
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	folio_attach_private(folio, head);
	spin_unlock(&folio->mapping->i_private_lock);

	return head;
}
EXPORT_SYMBOL(create_empty_buffers);
/**
 * clean_bdev_aliases: clean a range of buffers in block device
 * @bdev: Block device to clean buffers in
 * @block: Start of a range of blocks to clean
 * @len: Number of blocks to clean
 *
 * We are taking a range of blocks for data and we don't want writeback of any
 * buffer-cache aliases starting from return from this function and until the
 * moment when something will explicitly mark the buffer dirty (hopefully that
 * will not happen until we will free that block ;-) We don't even need to mark
 * it not-uptodate - nobody can expect anything from a newly allocated buffer
 * anyway. We used to use unmap_buffer() for such invalidation, but that was
 * wrong. We definitely don't want to mark the alias unmapped, for example - it
 * would confuse anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can be
 * writeout I/O going on against recently-freed buffers.  We don't wait on that
 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
 * need to.  That happens here.
 */
void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct folio_batch fbatch;
	pgoff_t index = ((loff_t)block << bd_inode->i_blkbits) / PAGE_SIZE;
	pgoff_t end;
	int i, count;
	struct buffer_head *bh;
	struct buffer_head *head;

	end = ((loff_t)(block + len - 1) << bd_inode->i_blkbits) / PAGE_SIZE;
	folio_batch_init(&fbatch);
	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
		count = folio_batch_count(&fbatch);
		for (i = 0; i < count; i++) {
			struct folio *folio = fbatch.folios[i];

			if (!folio_buffers(folio))
				continue;
			/*
			 * We use folio lock instead of bd_mapping->i_private_lock
			 * to pin buffers here since we can afford to sleep and
			 * it scales better than a global spinlock lock.
			 */
			folio_lock(folio);
			/* Recheck when the folio is locked which pins bhs */
			head = folio_buffers(folio);
			if (!head)
				goto unlock_page;
			bh = head;
			do {
				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
					goto next;
				if (bh->b_blocknr >= block + len)
					break;
				clear_buffer_dirty(bh);
				wait_on_buffer(bh);
				clear_buffer_req(bh);
next:
				bh = bh->b_this_page;
			} while (bh != head);
unlock_page:
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
		/* End of range already reached? */
		if (index > end || !index)
			break;
	}
}
EXPORT_SYMBOL(clean_bdev_aliases);
static struct buffer_head *folio_create_buffers(struct folio *folio,
						struct inode *inode,
						unsigned int b_state)
{
	struct buffer_head *bh;

	BUG_ON(!folio_test_locked(folio));

	bh = folio_buffers(folio);
	if (!bh)
		bh = create_empty_buffers(folio,
				1 << READ_ONCE(inode->i_blkbits), b_state);
	return bh;
}
/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_folio is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_folio() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.   This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 *
 * If block_write_full_folio() is called with wbc->sync_mode ==
 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
 * causes the writes to be flagged as synchronous writes.
 */
int __block_write_full_folio(struct inode *inode, struct folio *folio,
			get_block_t *get_block, struct writeback_control *wbc)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	size_t blocksize;
	int nr_underway = 0;
	blk_opf_t write_flags = wbc_to_write_flags(wbc);

	head = folio_create_buffers(folio, inode,
				    (1 << BH_Dirty) | (1 << BH_Uptodate));

	/*
	 * Be very careful.  We have no exclusion from block_dirty_folio
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the folio stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by block_dirty_folio;
	 * handle that here by just cleaning them.
	 */

	bh = head;
	blocksize = bh->b_size;

	block = div_u64(folio_pos(folio), blocksize);
	last_block = div_u64(i_size_read(inode) - 1, blocksize);

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
	do {
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this folio can be outside i_size when there is a
			 * truncate in progress.
			 *
			 * The buffer was zeroed by block_write_full_folio()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
			   buffer_dirty(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			clear_buffer_delay(bh);
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				clean_bdev_bh_alias(bh);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the folio.  Note that this can
		 * potentially cause a busy-wait loop from writeback threads
		 * and kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			folio_redirty_for_writepage(wbc, folio);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write_endio(bh,
				end_buffer_async_write);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The folio and its buffers are protected by the writeback flag,
	 * so we can drop the bh refcounts early.
	 */
	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	folio_unlock(folio);

	err = 0;
done:
	if (nr_underway == 0) {
		/*
		 * The folio was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * write_dirty_buffer/submit_bh.  A rare case.
		 */
		folio_end_writeback(folio);

		/*
		 * The folio and buffer_heads can be released at any time from
		 * here on.
		 */
	}
	return err;

recover:
	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 * The folio is currently locked and not marked for writeback
	 */
	bh = head;
	/* Recovery: lock and submit the mapped buffers */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh) &&
		    !buffer_delay(bh)) {
			lock_buffer(bh);
			mark_buffer_async_write_endio(bh,
				end_buffer_async_write);
		} else {
			/*
			 * The buffer may have been set dirty during
			 * attachment to a dirty folio.
			 */
			clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);
	folio_set_error(folio);
	BUG_ON(folio_test_writeback(folio));
	mapping_set_error(folio->mapping, err);
	folio_start_writeback(folio);
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	folio_unlock(folio);
	goto done;
}
EXPORT_SYMBOL(__block_write_full_folio);
/*
 * If a folio has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
{
	size_t block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!folio_test_locked(folio));
	head = folio_buffers(folio);
	if (!head)
		return;

	bh = head;
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!folio_test_uptodate(folio)) {
					size_t start, xend;

					start = max(from, block_start);
					xend = min(to, block_end);

					folio_zero_segment(folio, start, xend);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(folio_zero_new_buffers);
static int
iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
		const struct iomap *iomap)
{
	loff_t offset = (loff_t)block << inode->i_blkbits;

	bh->b_bdev = iomap->bdev;

	/*
	 * Block points to offset in file we need to map, iomap contains
	 * the offset at which the map starts. If the map ends before the
	 * current block, then do not map the buffer and let the caller
	 * handle it.
	 */
	if (offset >= iomap->offset + iomap->length)
		return -EIO;

	switch (iomap->type) {
	case IOMAP_HOLE:
		/*
		 * If the buffer is not up to date or beyond the current EOF,
		 * we need to mark it as new to ensure sub-block zeroing is
		 * executed if necessary.
		 */
		if (!buffer_uptodate(bh) ||
		    (offset >= i_size_read(inode)))
			set_buffer_new(bh);
		return 0;
	case IOMAP_DELALLOC:
		if (!buffer_uptodate(bh) ||
		    (offset >= i_size_read(inode)))
			set_buffer_new(bh);
		set_buffer_uptodate(bh);
		set_buffer_mapped(bh);
		set_buffer_delay(bh);
		return 0;
	case IOMAP_UNWRITTEN:
		/*
		 * For unwritten regions, we always need to ensure that regions
		 * in the block we are not writing to are zeroed. Mark the
		 * buffer as new to ensure this.
		 */
		set_buffer_new(bh);
		set_buffer_unwritten(bh);
		fallthrough;
	case IOMAP_MAPPED:
		if ((iomap->flags & IOMAP_F_NEW) ||
		    offset >= i_size_read(inode)) {
			/*
			 * This can happen if truncating the block device races
			 * with the check in the caller as i_size updates on
			 * block devices aren't synchronized by i_rwsem for
			 * block devices.
			 */
			if (S_ISBLK(inode->i_mode))
				return -EIO;
			set_buffer_new(bh);
		}
		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
				inode->i_blkbits;
		set_buffer_mapped(bh);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}
int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
		get_block_t *get_block, const struct iomap *iomap)
{
	size_t from = offset_in_folio(folio, pos);
	size_t to = from + len;
	struct inode *inode = folio->mapping->host;
	size_t block_start, block_end;
	sector_t block;
	int err = 0;
	size_t blocksize;
	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(to > folio_size(folio));
	BUG_ON(from > to);

	head = folio_create_buffers(folio, inode, 0);
	blocksize = head->b_size;
	block = div_u64(folio_pos(folio), blocksize);

	for (bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (folio_test_uptodate(folio)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			if (get_block)
				err = get_block(inode, block, bh, 1);
			else
				err = iomap_to_bh(inode, block, bh, iomap);
			if (err)
				break;

			if (buffer_new(bh)) {
				clean_bdev_bh_alias(bh);
				if (folio_test_uptodate(folio)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					folio_zero_segments(folio,
						to, block_end,
						block_start, from);
				continue;
			}
		}
		if (folio_test_uptodate(folio)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			bh_read_nowait(bh, 0);
			*wait_bh++=bh;
		}
	}
	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	if (unlikely(err))
		folio_zero_new_buffers(folio, from, to);
	return err;
}

int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block)
{
	return __block_write_begin_int(page_folio(page), pos, len, get_block,
				       NULL);
}
EXPORT_SYMBOL(__block_write_begin);
static void __block_commit_write(struct folio *folio, size_t from, size_t to)
{
	size_t block_start, block_end;
	bool partial = false;
	unsigned blocksize;
	struct buffer_head *bh, *head;

	bh = head = folio_buffers(folio);
	blocksize = bh->b_size;

	block_start = 0;
	do {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = true;
		} else {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
		clear_buffer_new(bh);

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus read_folio() for
	 * the next read(). Here we 'discover' whether the folio went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		folio_mark_uptodate(folio);
}
/*
 * block_write_begin takes care of the basic task of block allocation and
 * bringing partial write blocks uptodate first.
 *
 * The filesystem needs to handle block truncation upon failure.
 */
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		struct page **pagep, get_block_t *get_block)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status;

	page = grab_cache_page_write_begin(mapping, index);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, get_block);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}
EXPORT_SYMBOL(block_write_begin);
int block_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	size_t start = pos - folio_pos(folio);

	if (unlikely(copied < len)) {
		/*
		 * The buffers that were written will now be uptodate, so
		 * we don't have to worry about a read_folio reading them
		 * and overwriting a partial write. However if we have
		 * encountered a short write and only partially written
		 * into a buffer, it will not be marked uptodate, so a
		 * read_folio might come in and destroy our partial write.
		 *
		 * Do the simplest thing, and just treat any short write to a
		 * non uptodate folio as a zero-length write, and force the
		 * caller to redo the whole thing.
		 */
		if (!folio_test_uptodate(folio))
			copied = 0;

		folio_zero_new_buffers(folio, start + copied, start + len);
	}
	flush_dcache_folio(folio);

	/* This could be a short (even 0-length) commit */
	__block_commit_write(folio, start, start + copied);

	return copied;
}
EXPORT_SYMBOL(block_write_end);
int generic_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	bool i_size_changed = false;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size cannot change under us
	 * because we hold i_rwsem.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = true;
	}

	unlock_page(page);
	put_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		mark_inode_dirty(inode);
	return copied;
}
EXPORT_SYMBOL(generic_write_end);
/*
 * block_is_partially_uptodate checks whether buffers within a folio are
 * uptodate or not.
 *
 * Returns true if all buffers which correspond to the specified part
 * of the folio are uptodate.
 */
bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	unsigned block_start, block_end, blocksize;
	unsigned to;
	struct buffer_head *bh, *head;
	bool ret = true;

	head = folio_buffers(folio);
	if (!head)
		return false;
	blocksize = head->b_size;
	to = min_t(unsigned, folio_size(folio) - from, count);
	to = from + to;
	if (from < blocksize && to > folio_size(folio) - blocksize)
		return false;

	bh = head;
	block_start = 0;
	do {
		block_end = block_start + blocksize;
		if (block_end > from && block_start < to) {
			if (!buffer_uptodate(bh)) {
				ret = false;
				break;
			}
			if (block_end >= to)
				break;
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	return ret;
}
EXPORT_SYMBOL(block_is_partially_uptodate);
/*
 * Generic "read_folio" function for block devices that have the normal
 * get_block functionality. This is most of the block device filesystems.
 * Reads the folio asynchronously --- the unlock_buffer() and
 * set/clear_buffer_uptodate() functions propagate buffer state into the
 * folio once IO has completed.
 */
int block_read_full_folio(struct folio *folio, get_block_t *get_block)
{
	struct inode *inode = folio->mapping->host;
	sector_t iblock, lblock;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	size_t blocksize;
	int nr, i;
	int fully_mapped = 1;
	bool page_error = false;
	loff_t limit = i_size_read(inode);

	/* This is needed for ext4. */
	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
		limit = inode->i_sb->s_maxbytes;

	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);

	head = folio_create_buffers(folio, inode, 0);
	blocksize = head->b_size;

	iblock = div_u64(folio_pos(folio), blocksize);
	lblock = div_u64(limit + blocksize - 1, blocksize);
	bh = head;
	nr = 0;
	i = 0;

	do {
		if (buffer_uptodate(bh))
			continue;

		if (!buffer_mapped(bh)) {
			int err = 0;

			fully_mapped = 0;
			if (iblock < lblock) {
				WARN_ON(bh->b_size != blocksize);
				err = get_block(inode, iblock, bh, 0);
				if (err) {
					folio_set_error(folio);
					page_error = true;
				}
			}
			if (!buffer_mapped(bh)) {
				folio_zero_range(folio, i * blocksize,
						blocksize);
				if (!err)
					set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * get_block() might have updated the buffer
			 * synchronously
			 */
			if (buffer_uptodate(bh))
				continue;
		}
		arr[nr++] = bh;
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	if (fully_mapped)
		folio_set_mappedtodisk(folio);

	if (!nr) {
		/*
		 * All buffers are uptodate or get_block() returned an
		 * error when trying to map them - we can finish the read.
		 */
		folio_end_read(folio, !page_error);
		return 0;
	}

	/* Stage two: lock the buffers */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		lock_buffer(bh);
		mark_buffer_async_read(bh);
	}

	/*
	 * Stage 3: start the IO. Check for uptodateness
	 * inside the buffer lock in case another process reading
	 * the underlying blockdev brought it uptodate (the sct fix).
	 */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		if (buffer_uptodate(bh))
			end_buffer_async_read(bh, 1);
		else
			submit_bh(REQ_OP_READ, bh);
	}
	return 0;
}
EXPORT_SYMBOL(block_read_full_folio);
/*
 * utility function for filesystems that need to do work on expanding
 * truncates.  Uses filesystem pagecache writes to allow the filesystem to
 * deal with the hole.
 */
int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *aops = mapping->a_ops;
	struct page *page;
	void *fsdata = NULL;
	int err;

	err = inode_newsize_ok(inode, size);
	if (err)
		goto out;

	err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
	if (err)
		goto out;

	err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
	BUG_ON(err > 0);

out:
	return err;
}
EXPORT_SYMBOL(generic_cont_expand_simple);
static int cont_expand_zero(struct file *file, struct address_space *mapping,
			    loff_t pos, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	const struct address_space_operations *aops = mapping->a_ops;
	unsigned int blocksize = i_blocksize(inode);
	struct page *page;
	void *fsdata = NULL;
	pgoff_t index, curidx;
	loff_t curpos;
	unsigned zerofrom, offset, len;
	int err = 0;

	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;

	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
		zerofrom = curpos & ~PAGE_MASK;
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = PAGE_SIZE - zerofrom;

		err = aops->write_begin(file, mapping, curpos, len,
					&page, &fsdata);
		if (err)
			goto out;
		zero_user(page, zerofrom, len);
		err = aops->write_end(file, mapping, curpos, len, len,
					page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;

		balance_dirty_pages_ratelimited(mapping);

		if (fatal_signal_pending(current)) {
			err = -EINTR;
			goto out;
		}
	}

	/* page covers the boundary, find the boundary offset */
	if (index == curidx) {
		zerofrom = curpos & ~PAGE_MASK;
		/* if we will expand the thing last block will be filled */
		if (offset <= zerofrom) {
			goto out;
		}
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = offset - zerofrom;

		err = aops->write_begin(file, mapping, curpos, len,
					&page, &fsdata);
		if (err)
			goto out;
		zero_user(page, zerofrom, len);
		err = aops->write_end(file, mapping, curpos, len, len,
					page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;
	}
out:
	return err;
}
/*
 * For moronic filesystems that do not allow holes in file.
 * We may have to extend the file.
 */
int cont_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata,
			get_block_t *get_block, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned int blocksize = i_blocksize(inode);
	unsigned int zerofrom;
	int err;

	err = cont_expand_zero(file, mapping, pos, bytes);
	if (err)
		return err;

	zerofrom = *bytes & ~PAGE_MASK;
	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
		*bytes |= (blocksize-1);
		(*bytes)++;
	}

	return block_write_begin(mapping, pos, len, pagep, get_block);
}
EXPORT_SYMBOL(cont_write_begin);
void block_commit_write(struct page *page, unsigned from, unsigned to)
{
	struct folio *folio = page_folio(page);

	__block_commit_write(folio, from, to);
}
EXPORT_SYMBOL(block_commit_write);
/*
 * block_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * truncate writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 *
 * Direct callers of this function should protect against filesystem freezing
 * using sb_start_pagefault() - sb_end_pagefault() functions.
 */
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
			 get_block_t get_block)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vma->vm_file);
	unsigned long end;
	loff_t size;
	int ret;

	folio_lock(folio);
	size = i_size_read(inode);
	if ((folio->mapping != inode->i_mapping) ||
	    (folio_pos(folio) >= size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	end = folio_size(folio);
	/* folio is wholly or partially inside EOF */
	if (folio_pos(folio) + end > size)
		end = size - folio_pos(folio);

	ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
	if (unlikely(ret))
		goto out_unlock;

	__block_commit_write(folio, 0, end);

	folio_mark_dirty(folio);
	folio_wait_stable(folio);
	return 0;
out_unlock:
	folio_unlock(folio);
	return ret;
}
EXPORT_SYMBOL(block_page_mkwrite);
int block_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned blocksize;
	sector_t iblock;
	size_t offset, length, pos;
	struct inode *inode = mapping->host;
	struct folio *folio;
	struct buffer_head *bh;
	int err = 0;

	blocksize = i_blocksize(inode);
	length = from & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;

	folio = filemap_grab_folio(mapping, index);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	bh = folio_buffers(folio);
	if (!bh)
		bh = create_empty_buffers(folio, blocksize, 0);

	/* Find the buffer that contains "offset" */
	offset = offset_in_folio(folio, from);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		WARN_ON(bh->b_size != blocksize);
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (folio_test_uptodate(folio))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
		err = bh_read(bh, 0);
		/* Uhhuh. Read error. Complain and punt. */
		if (err < 0)
			goto unlock;
	}

	folio_zero_range(folio, offset, length);
	mark_buffer_dirty(bh);

unlock:
	folio_unlock(folio);
	folio_put(folio);

	return err;
}
EXPORT_SYMBOL(block_truncate_page);
/*
 * The generic ->writepage function for buffer-backed address_spaces
 */
int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
		void *get_block)
{
	struct inode * const inode = folio->mapping->host;
	loff_t i_size = i_size_read(inode);

	/* Is the folio fully inside i_size? */
	if (folio_pos(folio) + folio_size(folio) <= i_size)
		return __block_write_full_folio(inode, folio, get_block, wbc);

	/* Is the folio fully outside i_size? (truncate in progress) */
	if (folio_pos(folio) >= i_size) {
		folio_unlock(folio);
		return 0; /* don't care */
	}

	/*
	 * The folio straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	folio_zero_segment(folio, offset_in_folio(folio, i_size),
			folio_size(folio));
	return __block_write_full_folio(inode, folio, get_block, wbc);
}
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			    get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	struct buffer_head tmp = {
		.b_size = i_blocksize(inode),
	};

	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}
EXPORT_SYMBOL(generic_block_bmap);
static void end_bio_bh_io_sync(struct bio *bio)
{
	struct buffer_head *bh = bio->bi_private;

	if (unlikely(bio_flagged(bio, BIO_QUIET)))
		set_bit(BH_Quiet, &bh->b_state);

	bh->b_end_io(bh, !bio->bi_status);
	bio_put(bio);
}
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  struct writeback_control *wbc)
{
	const enum req_op op = opf & REQ_OP_MASK;
	struct bio *bio;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);
	BUG_ON(buffer_delay(bh));
	BUG_ON(buffer_unwritten(bh));

	/*
	 * Only clear out a write error when rewriting
	 */
	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
		clear_buffer_write_io_error(bh);

	if (buffer_meta(bh))
		opf |= REQ_META;
	if (buffer_prio(bh))
		opf |= REQ_PRIO;

	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);

	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);

	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);

	__bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	/* Take care of bh's that straddle the end of the device */
	guard_bio_eod(bio);

	if (wbc) {
		wbc_init_bio(wbc, bio);
		wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
	}

	submit_bio(bio);
}

void submit_bh(blk_opf_t opf, struct buffer_head *bh)
{
	submit_bh_wbc(opf, bh, NULL);
}
EXPORT_SYMBOL(submit_bh);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
{
	lock_buffer(bh);
	if (!test_clear_buffer_dirty(bh)) {
		unlock_buffer(bh);
		return;
	}
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
	submit_bh(REQ_OP_WRITE | op_flags, bh);
}
EXPORT_SYMBOL(write_dirty_buffer);
/*
 * For a data-integrity writeout, we need to wait upon any in-progress I/O
 * and then start new I/O and then wait upon it.  The caller must have a ref on
 * the buffer_head.
 */
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
{
	int ret = 0;

	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		/*
		 * The bh should be mapped, but it might not be if the
		 * device was hot-removed. Not much we can do but fail the I/O.
		 */
		if (!buffer_mapped(bh)) {
			unlock_buffer(bh);
			return -EIO;
		}

		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		submit_bh(REQ_OP_WRITE | op_flags, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);
	}
	return ret;
}
EXPORT_SYMBOL(__sync_dirty_buffer);

int sync_dirty_buffer(struct buffer_head *bh)
{
	return __sync_dirty_buffer(bh, REQ_SYNC);
}
EXPORT_SYMBOL(sync_dirty_buffer);
/*
 * try_to_free_buffers() checks if all the buffers on this particular folio
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the folio or by holding its mapping's i_private_lock.
 *
 * If the folio is dirty but all the buffers are clean then we need to
 * be sure to mark the folio clean as well.  This is because the folio
 * may be against a block device, and a later reattachment of buffers
 * to a dirty folio will set *all* buffers dirty.  Which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem folios: if all the buffers are
 * clean then we set the folio clean and proceed.  To do that, we require
 * total exclusion from block_dirty_folio().  That is obtained with
 * i_private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static bool
drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (bh->b_assoc_map)
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	folio_detach_private(folio);
	return true;
failed:
	return false;
}

bool try_to_free_buffers(struct folio *folio)
{
	struct address_space * const mapping = folio->mapping;
	struct buffer_head *buffers_to_free = NULL;
	bool ret = 0;

	BUG_ON(!folio_test_locked(folio));
	if (folio_test_writeback(folio))
		return false;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(folio, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->i_private_lock);
	ret = drop_buffers(folio, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (eg ext3)
	 * then we can have clean buffers against a dirty folio.  We
	 * clean the folio here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the folio's buffers clean.  We discover that here and clean
	 * the folio also.
	 *
	 * i_private_lock must be held over this entire operation in order
	 * to synchronise against block_dirty_folio and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
		folio_cancel_dirty(folio);
	spin_unlock(&mapping->i_private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);
/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep __ro_after_init;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static unsigned long max_buffer_heads __ro_after_init;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
		return;
	__this_cpu_write(bh_accounting.ratelimit, 0);
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
	if (ret) {
		INIT_LIST_HEAD(&ret->b_assoc_buffers);
		spin_lock_init(&ret->b_uptodate_lock);
		preempt_disable();
		__this_cpu_inc(bh_accounting.nr);
		recalc_bh_state();
		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	preempt_disable();
	__this_cpu_dec(bh_accounting.nr);
	recalc_bh_state();
	preempt_enable();
}
EXPORT_SYMBOL(free_buffer_head);
static int buffer_exit_cpu_dead(unsigned int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
	per_cpu(bh_accounting, cpu).nr = 0;
	return 0;
}
/**
 * bh_uptodate_or_lock - Test whether the buffer is uptodate
 * @bh: struct buffer_head
 *
 * Return true if the buffer is up-to-date and false,
 * with the buffer locked, if not.
 */
int bh_uptodate_or_lock(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (!buffer_uptodate(bh))
			return 0;
		unlock_buffer(bh);
	}
	return 1;
}
EXPORT_SYMBOL(bh_uptodate_or_lock);
/**
 * __bh_read - Submit read for a locked buffer
 * @bh: struct buffer_head
 * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
 * @wait: wait until reading finish
 *
 * Returns zero on success (or when not waiting), and -EIO on error.
 */
int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
{
	int ret = 0;

	BUG_ON(!buffer_locked(bh));

	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ | op_flags, bh);
	if (wait) {
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			ret = -EIO;
	}
	return ret;
}
EXPORT_SYMBOL(__bh_read);
/**
 * __bh_read_batch - Submit read for a batch of unlocked buffers
 * @nr: entry number of the buffer batch
 * @bhs: a batch of struct buffer_head
 * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
 * @force_lock: force to get a lock on the buffer if set, otherwise drops any
 *              buffer that cannot lock.
 *
 * Buffers that are already uptodate, or that cannot be locked when
 * @force_lock is false, are skipped.
 */
void __bh_read_batch(int nr, struct buffer_head *bhs[],
		     blk_opf_t op_flags, bool force_lock)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (buffer_uptodate(bh))
			continue;

		if (force_lock)
			lock_buffer(bh);
		else
			if (!trylock_buffer(bh))
				continue;

		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			continue;
		}

		bh->b_end_io = end_buffer_read_sync;
		get_bh(bh);
		submit_bh(REQ_OP_READ | op_flags, bh);
	}
}
EXPORT_SYMBOL(__bh_read_batch);
void __init buffer_init(void)
{
	unsigned long nrpages;
	int ret;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
				SLAB_MEM_SPREAD),
				NULL);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
					NULL, buffer_exit_cpu_dead);
	WARN_ON(ret < 0);
}