// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 *
 *  Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 *  Removed a lot of unnecessary code and simplified things now that
 *  the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 *  Speed up hash, lru, and free list operations.  Use gfp() for allocating
 *  hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 *  Added 32k buffer block sizes - these are required older ARM systems. - RMK
 *
 *  async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/iomap.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);
/*
 * Returns if the folio has dirty or writeback buffers. If all the buffers
 * are unlocked and clean then the folio_test_dirty information is stale. If
 * any of the buffers are locked, it is assumed they are locked for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				     bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}
/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}
/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct folio *folio;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		goto out;

	spin_lock(&bd_mapping->private_lock);
	head = folio_buffers(folio);
	if (!head)
		goto out_unlock;
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	folio_put(folio);
out:
	return ret;
}
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
		folio_set_error(folio);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	/*
	 * If all of the buffers are uptodate then we can set the page
	 * uptodate.
	 */
	if (folio_uptodate)
		folio_mark_uptodate(folio);
	folio_unlock(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}
struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}
static bool need_fsverity(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	struct inode *inode = folio->mapping->host;

	return fsverity_active(inode) &&
		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}
static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && need_fsverity(bh)) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}
/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	bool verify = need_fsverity(bh);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || verify)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}
/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		folio_set_error(folio);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);
/*
 * If a page's buffers are under async readin (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone starting new async I/O reads any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);
/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued for writeout.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */
/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}
/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * them.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);
/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure. This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);
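
/*
 * Example (illustrative sketch only, not used by this file): a simple
 * buffer-backed filesystem can service ->fsync() by delegating to
 * generic_buffers_fsync(), which writes out and waits on the associated
 * buffers and then issues a device cache flush.  The "examplefs" names
 * below are hypothetical.
 */
#if 0
static int examplefs_fsync(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	return generic_buffers_fsync(file, start, end, datasync);
}

static const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.fsync		= examplefs_fsync,
};
#endif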
/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}
676 void mark_buffer_dirty_inode(struct buffer_head
*bh
, struct inode
*inode
)
678 struct address_space
*mapping
= inode
->i_mapping
;
679 struct address_space
*buffer_mapping
= bh
->b_folio
->mapping
;
681 mark_buffer_dirty(bh
);
682 if (!mapping
->private_data
) {
683 mapping
->private_data
= buffer_mapping
;
685 BUG_ON(mapping
->private_data
!= buffer_mapping
);
687 if (!bh
->b_assoc_map
) {
688 spin_lock(&buffer_mapping
->private_lock
);
689 list_move_tail(&bh
->b_assoc_buffers
,
690 &mapping
->private_list
);
691 bh
->b_assoc_map
= mapping
;
692 spin_unlock(&buffer_mapping
->private_lock
);
695 EXPORT_SYMBOL(mark_buffer_dirty_inode
);
698 * Add a page to the dirty page list.
700 * It is a sad fact of life that this function is called from several places
701 * deeply under spinlocking. It may not sleep.
703 * If the page has buffers, the uptodate buffers are set dirty, to preserve
704 * dirty-state coherency between the page and the buffers. It the page does
705 * not have buffers then when they are later attached they will all be set
708 * The buffers are dirtied before the page is dirtied. There's a small race
709 * window in which a writepage caller may see the page cleanness but not the
710 * buffer dirtiness. That's fine. If this code were to set the page dirty
711 * before the buffers, a concurrent writepage caller could clear the page dirty
712 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
713 * page on the dirty page list.
715 * We use private_lock to lock against try_to_free_buffers while using the
716 * page's buffer list. Also use this to protect against clean buffers being
717 * added to the page after it was set dirty.
719 * FIXME: may need to call ->reservepage here as well. That's rather up to the
720 * address_space though.
722 bool block_dirty_folio(struct address_space
*mapping
, struct folio
*folio
)
724 struct buffer_head
*head
;
727 spin_lock(&mapping
->private_lock
);
728 head
= folio_buffers(folio
);
730 struct buffer_head
*bh
= head
;
733 set_buffer_dirty(bh
);
734 bh
= bh
->b_this_page
;
735 } while (bh
!= head
);
738 * Lock out page's memcg migration to keep PageDirty
739 * synchronized with per-memcg dirty page counters.
741 folio_memcg_lock(folio
);
742 newly_dirty
= !folio_test_set_dirty(folio
);
743 spin_unlock(&mapping
->private_lock
);
746 __folio_mark_dirty(folio
, mapping
, 1);
748 folio_memcg_unlock(folio
);
751 __mark_inode_dirty(mapping
->host
, I_DIRTY_PAGES
);
755 EXPORT_SYMBOL(block_dirty_folio
);
758 * Write out and wait upon a list of buffers.
760 * We have conflicting pressures: we want to make sure that all
761 * initially dirty buffers get waited on, but that any subsequently
762 * dirtied buffers don't. After all, we don't want fsync to last
763 * forever if somebody is actively writing to the file.
765 * Do this in two main stages: first we copy dirty buffers to a
766 * temporary inode list, queueing the writes as we go. Then we clean
767 * up, waiting for those writes to complete.
769 * During this second stage, any subsequent updates to the file may end
770 * up refiling the buffer on the original inode's dirty list again, so
771 * there is a chance we will end up with a buffer queued for write but
772 * not yet completed on that list. So, as a final cleanup we go through
773 * the osync code to catch these locked, dirty buffers without requeuing
774 * any newly dirty buffers for write.
776 static int fsync_buffers_list(spinlock_t
*lock
, struct list_head
*list
)
778 struct buffer_head
*bh
;
779 struct list_head tmp
;
780 struct address_space
*mapping
;
782 struct blk_plug plug
;
784 INIT_LIST_HEAD(&tmp
);
785 blk_start_plug(&plug
);
788 while (!list_empty(list
)) {
789 bh
= BH_ENTRY(list
->next
);
790 mapping
= bh
->b_assoc_map
;
791 __remove_assoc_queue(bh
);
792 /* Avoid race with mark_buffer_dirty_inode() which does
793 * a lockless check and we rely on seeing the dirty bit */
795 if (buffer_dirty(bh
) || buffer_locked(bh
)) {
796 list_add(&bh
->b_assoc_buffers
, &tmp
);
797 bh
->b_assoc_map
= mapping
;
798 if (buffer_dirty(bh
)) {
802 * Ensure any pending I/O completes so that
803 * write_dirty_buffer() actually writes the
804 * current contents - it is a noop if I/O is
805 * still in flight on potentially older
808 write_dirty_buffer(bh
, REQ_SYNC
);
811 * Kick off IO for the previous mapping. Note
812 * that we will not run the very last mapping,
813 * wait_on_buffer() will do that for us
814 * through sync_buffer().
823 blk_finish_plug(&plug
);
826 while (!list_empty(&tmp
)) {
827 bh
= BH_ENTRY(tmp
.prev
);
829 mapping
= bh
->b_assoc_map
;
830 __remove_assoc_queue(bh
);
831 /* Avoid race with mark_buffer_dirty_inode() which does
832 * a lockless check and we rely on seeing the dirty bit */
834 if (buffer_dirty(bh
)) {
835 list_add(&bh
->b_assoc_buffers
,
836 &mapping
->private_list
);
837 bh
->b_assoc_map
= mapping
;
841 if (!buffer_uptodate(bh
))
848 err2
= osync_buffers_list(lock
, list
);
856 * Invalidate any and all dirty buffers on a given inode. We are
857 * probably unmounting the fs, but that doesn't mean we have already
858 * done a sync(). Just drop the buffers from the inode list.
860 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
861 * assumes that all the buffers are against the blockdev. Not true
864 void invalidate_inode_buffers(struct inode
*inode
)
866 if (inode_has_buffers(inode
)) {
867 struct address_space
*mapping
= &inode
->i_data
;
868 struct list_head
*list
= &mapping
->private_list
;
869 struct address_space
*buffer_mapping
= mapping
->private_data
;
871 spin_lock(&buffer_mapping
->private_lock
);
872 while (!list_empty(list
))
873 __remove_assoc_queue(BH_ENTRY(list
->next
));
874 spin_unlock(&buffer_mapping
->private_lock
);
877 EXPORT_SYMBOL(invalidate_inode_buffers
);
880 * Remove any clean buffers from the inode's buffer list. This is called
881 * when we're trying to free the inode itself. Those buffers can pin it.
883 * Returns true if all buffers were removed.
885 int remove_inode_buffers(struct inode
*inode
)
889 if (inode_has_buffers(inode
)) {
890 struct address_space
*mapping
= &inode
->i_data
;
891 struct list_head
*list
= &mapping
->private_list
;
892 struct address_space
*buffer_mapping
= mapping
->private_data
;
894 spin_lock(&buffer_mapping
->private_lock
);
895 while (!list_empty(list
)) {
896 struct buffer_head
*bh
= BH_ENTRY(list
->next
);
897 if (buffer_dirty(bh
)) {
901 __remove_assoc_queue(bh
);
903 spin_unlock(&buffer_mapping
->private_lock
);
909 * Create the appropriate buffers when given a folio for data area and
910 * the size of each buffer.. Use the bh->b_this_page linked list to
911 * follow the buffers created. Return NULL if unable to create more
914 * The retry flag is used to differentiate async IO (paging, swapping)
915 * which may not fail from ordinary buffer allocations.
917 struct buffer_head
*folio_alloc_buffers(struct folio
*folio
, unsigned long size
,
920 struct buffer_head
*bh
, *head
;
921 gfp_t gfp
= GFP_NOFS
| __GFP_ACCOUNT
;
923 struct mem_cgroup
*memcg
, *old_memcg
;
928 /* The folio lock pins the memcg */
929 memcg
= folio_memcg(folio
);
930 old_memcg
= set_active_memcg(memcg
);
933 offset
= folio_size(folio
);
934 while ((offset
-= size
) >= 0) {
935 bh
= alloc_buffer_head(gfp
);
939 bh
->b_this_page
= head
;
945 /* Link the buffer to its folio */
946 folio_set_bh(bh
, folio
, offset
);
949 set_active_memcg(old_memcg
);
952 * In case anything failed, we just free everything we got.
958 head
= head
->b_this_page
;
959 free_buffer_head(bh
);
965 EXPORT_SYMBOL_GPL(folio_alloc_buffers
);
967 struct buffer_head
*alloc_page_buffers(struct page
*page
, unsigned long size
,
970 return folio_alloc_buffers(page_folio(page
), size
, retry
);
972 EXPORT_SYMBOL_GPL(alloc_page_buffers
);
974 static inline void link_dev_buffers(struct folio
*folio
,
975 struct buffer_head
*head
)
977 struct buffer_head
*bh
, *tail
;
982 bh
= bh
->b_this_page
;
984 tail
->b_this_page
= head
;
985 folio_attach_private(folio
, head
);
988 static sector_t
blkdev_max_block(struct block_device
*bdev
, unsigned int size
)
990 sector_t retval
= ~((sector_t
)0);
991 loff_t sz
= bdev_nr_bytes(bdev
);
994 unsigned int sizebits
= blksize_bits(size
);
995 retval
= (sz
>> sizebits
);
1001 * Initialise the state of a blockdev folio's buffers.
1003 static sector_t
folio_init_buffers(struct folio
*folio
,
1004 struct block_device
*bdev
, sector_t block
, int size
)
1006 struct buffer_head
*head
= folio_buffers(folio
);
1007 struct buffer_head
*bh
= head
;
1008 bool uptodate
= folio_test_uptodate(folio
);
1009 sector_t end_block
= blkdev_max_block(bdev
, size
);
1012 if (!buffer_mapped(bh
)) {
1013 bh
->b_end_io
= NULL
;
1014 bh
->b_private
= NULL
;
1016 bh
->b_blocknr
= block
;
1018 set_buffer_uptodate(bh
);
1019 if (block
< end_block
)
1020 set_buffer_mapped(bh
);
1023 bh
= bh
->b_this_page
;
1024 } while (bh
!= head
);
1027 * Caller needs to validate requested block against end of device.
1033 * Create the page-cache page that contains the requested block.
1035 * This is used purely for blockdev mappings.
1038 grow_dev_page(struct block_device
*bdev
, sector_t block
,
1039 pgoff_t index
, int size
, int sizebits
, gfp_t gfp
)
1041 struct inode
*inode
= bdev
->bd_inode
;
1042 struct folio
*folio
;
1043 struct buffer_head
*bh
;
1048 gfp_mask
= mapping_gfp_constraint(inode
->i_mapping
, ~__GFP_FS
) | gfp
;
1051 * XXX: __getblk_slow() can not really deal with failure and
1052 * will endlessly loop on improvised global reclaim. Prefer
1053 * looping in the allocator rather than here, at least that
1054 * code knows what it's doing.
1056 gfp_mask
|= __GFP_NOFAIL
;
1058 folio
= __filemap_get_folio(inode
->i_mapping
, index
,
1059 FGP_LOCK
| FGP_ACCESSED
| FGP_CREAT
, gfp_mask
);
1061 bh
= folio_buffers(folio
);
1063 if (bh
->b_size
== size
) {
1064 end_block
= folio_init_buffers(folio
, bdev
,
1065 (sector_t
)index
<< sizebits
, size
);
1068 if (!try_to_free_buffers(folio
))
1072 bh
= folio_alloc_buffers(folio
, size
, true);
1075 * Link the folio to the buffers and initialise them. Take the
1076 * lock to be atomic wrt __find_get_block(), which does not
1077 * run under the folio lock.
1079 spin_lock(&inode
->i_mapping
->private_lock
);
1080 link_dev_buffers(folio
, bh
);
1081 end_block
= folio_init_buffers(folio
, bdev
,
1082 (sector_t
)index
<< sizebits
, size
);
1083 spin_unlock(&inode
->i_mapping
->private_lock
);
1085 ret
= (block
< end_block
) ? 1 : -ENXIO
;
1087 folio_unlock(folio
);
1093 * Create buffers for the specified block device block's page. If
1094 * that page was dirty, the buffers are set dirty also.
1097 grow_buffers(struct block_device
*bdev
, sector_t block
, int size
, gfp_t gfp
)
1102 sizebits
= PAGE_SHIFT
- __ffs(size
);
1103 index
= block
>> sizebits
;
1106 * Check for a block which wants to lie outside our maximum possible
1107 * pagecache index. (this comparison is done using sector_t types).
1109 if (unlikely(index
!= block
>> sizebits
)) {
1110 printk(KERN_ERR
"%s: requested out-of-range block %llu for "
1112 __func__
, (unsigned long long)block
,
1117 /* Create a page with the proper size buffers.. */
1118 return grow_dev_page(bdev
, block
, index
, size
, sizebits
, gfp
);
1121 static struct buffer_head
*
1122 __getblk_slow(struct block_device
*bdev
, sector_t block
,
1123 unsigned size
, gfp_t gfp
)
1125 /* Size must be multiple of hard sectorsize */
1126 if (unlikely(size
& (bdev_logical_block_size(bdev
)-1) ||
1127 (size
< 512 || size
> PAGE_SIZE
))) {
1128 printk(KERN_ERR
"getblk(): invalid block size %d requested\n",
1130 printk(KERN_ERR
"logical block size: %d\n",
1131 bdev_logical_block_size(bdev
));
1138 struct buffer_head
*bh
;
1141 bh
= __find_get_block(bdev
, block
, size
);
1145 ret
= grow_buffers(bdev
, block
, size
, gfp
);
1152 * The relationship between dirty buffers and dirty pages:
1154 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1155 * the page is tagged dirty in the page cache.
1157 * At all times, the dirtiness of the buffers represents the dirtiness of
1158 * subsections of the page. If the page has buffers, the page dirty bit is
1159 * merely a hint about the true dirty state.
1161 * When a page is set dirty in its entirety, all its buffers are marked dirty
1162 * (if the page has buffers).
1164 * When a buffer is marked dirty, its page is dirtied, but the page's other
1167 * Also. When blockdev buffers are explicitly read with bread(), they
1168 * individually become uptodate. But their backing page remains not
1169 * uptodate - even if all of its buffers are uptodate. A subsequent
1170 * block_read_full_folio() against that folio will discover all the uptodate
1171 * buffers, will set the folio uptodate and will perform no I/O.
1175 * mark_buffer_dirty - mark a buffer_head as needing writeout
1176 * @bh: the buffer_head to mark dirty
1178 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1179 * its backing page dirty, then tag the page as dirty in the page cache
1180 * and then attach the address_space's inode to its superblock's dirty
1183 * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->private_lock,
1184 * i_pages lock and mapping->host->i_lock.
1186 void mark_buffer_dirty(struct buffer_head
*bh
)
1188 WARN_ON_ONCE(!buffer_uptodate(bh
));
1190 trace_block_dirty_buffer(bh
);
1193 * Very *carefully* optimize the it-is-already-dirty case.
1195 * Don't let the final "is it dirty" escape to before we
1196 * perhaps modified the buffer.
1198 if (buffer_dirty(bh
)) {
1200 if (buffer_dirty(bh
))
1204 if (!test_set_buffer_dirty(bh
)) {
1205 struct folio
*folio
= bh
->b_folio
;
1206 struct address_space
*mapping
= NULL
;
1208 folio_memcg_lock(folio
);
1209 if (!folio_test_set_dirty(folio
)) {
1210 mapping
= folio
->mapping
;
1212 __folio_mark_dirty(folio
, mapping
, 0);
1214 folio_memcg_unlock(folio
);
1216 __mark_inode_dirty(mapping
->host
, I_DIRTY_PAGES
);
1219 EXPORT_SYMBOL(mark_buffer_dirty
);
1221 void mark_buffer_write_io_error(struct buffer_head
*bh
)
1223 set_buffer_write_io_error(bh
);
1224 /* FIXME: do we need to set this in both places? */
1225 if (bh
->b_folio
&& bh
->b_folio
->mapping
)
1226 mapping_set_error(bh
->b_folio
->mapping
, -EIO
);
1227 if (bh
->b_assoc_map
) {
1228 mapping_set_error(bh
->b_assoc_map
, -EIO
);
1229 errseq_set(&bh
->b_assoc_map
->host
->i_sb
->s_wb_err
, -EIO
);
1232 EXPORT_SYMBOL(mark_buffer_write_io_error
);
1235 * Decrement a buffer_head's reference count. If all buffers against a page
1236 * have zero reference count, are clean and unlocked, and if the page is clean
1237 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1238 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1239 * a page but it ends up not being freed, and buffers may later be reattached).
1241 void __brelse(struct buffer_head
* buf
)
1243 if (atomic_read(&buf
->b_count
)) {
1247 WARN(1, KERN_ERR
"VFS: brelse: Trying to free free buffer\n");
1249 EXPORT_SYMBOL(__brelse
);
1252 * bforget() is like brelse(), except it discards any
1253 * potentially dirty data.
1255 void __bforget(struct buffer_head
*bh
)
1257 clear_buffer_dirty(bh
);
1258 if (bh
->b_assoc_map
) {
1259 struct address_space
*buffer_mapping
= bh
->b_folio
->mapping
;
1261 spin_lock(&buffer_mapping
->private_lock
);
1262 list_del_init(&bh
->b_assoc_buffers
);
1263 bh
->b_assoc_map
= NULL
;
1264 spin_unlock(&buffer_mapping
->private_lock
);
1268 EXPORT_SYMBOL(__bforget
);
1270 static struct buffer_head
*__bread_slow(struct buffer_head
*bh
)
1273 if (buffer_uptodate(bh
)) {
1278 bh
->b_end_io
= end_buffer_read_sync
;
1279 submit_bh(REQ_OP_READ
, bh
);
1281 if (buffer_uptodate(bh
))
1289 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1290 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1291 * refcount elevated by one when they're in an LRU. A buffer can only appear
1292 * once in a particular CPU's LRU. A single buffer can be present in multiple
1293 * CPU's LRUs at the same time.
1295 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1296 * sb_find_get_block().
1298 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1299 * a local interrupt disable for that.
1302 #define BH_LRU_SIZE 16
1305 struct buffer_head
*bhs
[BH_LRU_SIZE
];
1308 static DEFINE_PER_CPU(struct bh_lru
, bh_lrus
) = {{ NULL
}};
1311 #define bh_lru_lock() local_irq_disable()
1312 #define bh_lru_unlock() local_irq_enable()
1314 #define bh_lru_lock() preempt_disable()
1315 #define bh_lru_unlock() preempt_enable()
1318 static inline void check_irqs_on(void)
1320 #ifdef irqs_disabled
1321 BUG_ON(irqs_disabled());
1326 * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is
1327 * inserted at the front, and the buffer_head at the back if any is evicted.
1328 * Or, if already in the LRU it is moved to the front.
1330 static void bh_lru_install(struct buffer_head
*bh
)
1332 struct buffer_head
*evictee
= bh
;
1340 * the refcount of buffer_head in bh_lru prevents dropping the
1341 * attached page(i.e., try_to_free_buffers) so it could cause
1342 * failing page migration.
1343 * Skip putting upcoming bh into bh_lru until migration is done.
1345 if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
1350 b
= this_cpu_ptr(&bh_lrus
);
1351 for (i
= 0; i
< BH_LRU_SIZE
; i
++) {
1352 swap(evictee
, b
->bhs
[i
]);
1353 if (evictee
== bh
) {
1365 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1367 static struct buffer_head
*
1368 lookup_bh_lru(struct block_device
*bdev
, sector_t block
, unsigned size
)
1370 struct buffer_head
*ret
= NULL
;
1375 if (cpu_is_isolated(smp_processor_id())) {
1379 for (i
= 0; i
< BH_LRU_SIZE
; i
++) {
1380 struct buffer_head
*bh
= __this_cpu_read(bh_lrus
.bhs
[i
]);
1382 if (bh
&& bh
->b_blocknr
== block
&& bh
->b_bdev
== bdev
&&
1383 bh
->b_size
== size
) {
1386 __this_cpu_write(bh_lrus
.bhs
[i
],
1387 __this_cpu_read(bh_lrus
.bhs
[i
- 1]));
1390 __this_cpu_write(bh_lrus
.bhs
[0], bh
);
1402 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1403 * it in the LRU and mark it as accessed. If it is not present then return
1406 struct buffer_head
*
1407 __find_get_block(struct block_device
*bdev
, sector_t block
, unsigned size
)
1409 struct buffer_head
*bh
= lookup_bh_lru(bdev
, block
, size
);
1412 /* __find_get_block_slow will mark the page accessed */
1413 bh
= __find_get_block_slow(bdev
, block
);
1421 EXPORT_SYMBOL(__find_get_block
);
1424 * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
1425 * which corresponds to the passed block_device, block and size. The
1426 * returned buffer has its reference count incremented.
1428 * __getblk_gfp() will lock up the machine if grow_dev_page's
1429 * try_to_free_buffers() attempt is failing. FIXME, perhaps?
1431 struct buffer_head
*
1432 __getblk_gfp(struct block_device
*bdev
, sector_t block
,
1433 unsigned size
, gfp_t gfp
)
1435 struct buffer_head
*bh
= __find_get_block(bdev
, block
, size
);
1439 bh
= __getblk_slow(bdev
, block
, size
, gfp
);
1442 EXPORT_SYMBOL(__getblk_gfp
);
1445 * Do async read-ahead on a buffer..
1447 void __breadahead(struct block_device
*bdev
, sector_t block
, unsigned size
)
1449 struct buffer_head
*bh
= __getblk(bdev
, block
, size
);
1451 bh_readahead(bh
, REQ_RAHEAD
);
1455 EXPORT_SYMBOL(__breadahead
);
/**
 *  __bread_gfp() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *  @gfp: page allocation flag
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  If @gfp is zero, the page cache is allocated from the non-movable area
 *  so that the pinned buffer does not get in the way of page migration.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread_gfp(struct block_device *bdev, sector_t block,
		   unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);
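
/*
 * Example (illustrative sketch only): filesystem metadata is typically read
 * through __bread()/sb_bread(), which wrap __bread_gfp(), and the buffer is
 * released with brelse() once its contents have been used.  The "examplefs"
 * function and the on-disk layout it implies are hypothetical.
 */
#if 0
static int examplefs_check_super(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh;

	bh = __bread(sb->s_bdev, blocknr, sb->s_blocksize);
	if (!bh)
		return -EIO;		/* the block could not be read */

	/* ... interpret the raw block at bh->b_data here ... */

	brelse(bh);			/* drop the reference from __bread() */
	return 0;
}
#endif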
1481 static void __invalidate_bh_lrus(struct bh_lru
*b
)
1485 for (i
= 0; i
< BH_LRU_SIZE
; i
++) {
1491 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1492 * This doesn't race because it runs in each cpu either in irq
1493 * or with preempt disabled.
1495 static void invalidate_bh_lru(void *arg
)
1497 struct bh_lru
*b
= &get_cpu_var(bh_lrus
);
1499 __invalidate_bh_lrus(b
);
1500 put_cpu_var(bh_lrus
);
1503 bool has_bh_in_lru(int cpu
, void *dummy
)
1505 struct bh_lru
*b
= per_cpu_ptr(&bh_lrus
, cpu
);
1508 for (i
= 0; i
< BH_LRU_SIZE
; i
++) {
1516 void invalidate_bh_lrus(void)
1518 on_each_cpu_cond(has_bh_in_lru
, invalidate_bh_lru
, NULL
, 1);
1520 EXPORT_SYMBOL_GPL(invalidate_bh_lrus
);
1523 * It's called from workqueue context so we need a bh_lru_lock to close
1524 * the race with preemption/irq.
1526 void invalidate_bh_lrus_cpu(void)
1531 b
= this_cpu_ptr(&bh_lrus
);
1532 __invalidate_bh_lrus(b
);
1536 void folio_set_bh(struct buffer_head
*bh
, struct folio
*folio
,
1537 unsigned long offset
)
1539 bh
->b_folio
= folio
;
1540 BUG_ON(offset
>= folio_size(folio
));
1541 if (folio_test_highmem(folio
))
1543 * This catches illegal uses and preserves the offset:
1545 bh
->b_data
= (char *)(0 + offset
);
1547 bh
->b_data
= folio_address(folio
) + offset
;
1549 EXPORT_SYMBOL(folio_set_bh
);
1552 * Called when truncating a buffer on a page completely.
1555 /* Bits that are cleared during an invalidate */
1556 #define BUFFER_FLAGS_DISCARD \
1557 (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1558 1 << BH_Delay | 1 << BH_Unwritten)
1560 static void discard_buffer(struct buffer_head
* bh
)
1562 unsigned long b_state
;
1565 clear_buffer_dirty(bh
);
1567 b_state
= READ_ONCE(bh
->b_state
);
1569 } while (!try_cmpxchg(&bh
->b_state
, &b_state
,
1570 b_state
& ~BUFFER_FLAGS_DISCARD
));
1575 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
1576 * @folio: The folio which is affected.
1577 * @offset: start of the range to invalidate
1578 * @length: length of the range to invalidate
1580 * block_invalidate_folio() is called when all or part of the folio has been
1581 * invalidated by a truncate operation.
1583 * block_invalidate_folio() does not have to release all buffers, but it must
1584 * ensure that no dirty buffer is left outside @offset and that no I/O
1585 * is underway against any of the blocks which are outside the truncation
1586 * point. Because the caller is about to free (and possibly reuse) those
1589 void block_invalidate_folio(struct folio
*folio
, size_t offset
, size_t length
)
1591 struct buffer_head
*head
, *bh
, *next
;
1592 size_t curr_off
= 0;
1593 size_t stop
= length
+ offset
;
1595 BUG_ON(!folio_test_locked(folio
));
1598 * Check for overflow
1600 BUG_ON(stop
> folio_size(folio
) || stop
< length
);
1602 head
= folio_buffers(folio
);
1608 size_t next_off
= curr_off
+ bh
->b_size
;
1609 next
= bh
->b_this_page
;
1612 * Are we still fully in range ?
1614 if (next_off
> stop
)
1618 * is this block fully invalidated?
1620 if (offset
<= curr_off
)
1622 curr_off
= next_off
;
1624 } while (bh
!= head
);
1627 * We release buffers only if the entire folio is being invalidated.
1628 * The get_block cached value has been unconditionally invalidated,
1629 * so real IO is not possible anymore.
1631 if (length
== folio_size(folio
))
1632 filemap_release_folio(folio
, 0);
1636 EXPORT_SYMBOL(block_invalidate_folio
);
1639 * We attach and possibly dirty the buffers atomically wrt
1640 * block_dirty_folio() via private_lock. try_to_free_buffers
1641 * is already excluded via the folio lock.
1643 void folio_create_empty_buffers(struct folio
*folio
, unsigned long blocksize
,
1644 unsigned long b_state
)
1646 struct buffer_head
*bh
, *head
, *tail
;
1648 head
= folio_alloc_buffers(folio
, blocksize
, true);
1651 bh
->b_state
|= b_state
;
1653 bh
= bh
->b_this_page
;
1655 tail
->b_this_page
= head
;
1657 spin_lock(&folio
->mapping
->private_lock
);
1658 if (folio_test_uptodate(folio
) || folio_test_dirty(folio
)) {
1661 if (folio_test_dirty(folio
))
1662 set_buffer_dirty(bh
);
1663 if (folio_test_uptodate(folio
))
1664 set_buffer_uptodate(bh
);
1665 bh
= bh
->b_this_page
;
1666 } while (bh
!= head
);
1668 folio_attach_private(folio
, head
);
1669 spin_unlock(&folio
->mapping
->private_lock
);
1671 EXPORT_SYMBOL(folio_create_empty_buffers
);
1673 void create_empty_buffers(struct page
*page
,
1674 unsigned long blocksize
, unsigned long b_state
)
1676 folio_create_empty_buffers(page_folio(page
), blocksize
, b_state
);
1678 EXPORT_SYMBOL(create_empty_buffers
);
1681 * clean_bdev_aliases: clean a range of buffers in block device
1682 * @bdev: Block device to clean buffers in
1683 * @block: Start of a range of blocks to clean
1684 * @len: Number of blocks to clean
1686 * We are taking a range of blocks for data and we don't want writeback of any
1687 * buffer-cache aliases starting from return from this function and until the
1688 * moment when something will explicitly mark the buffer dirty (hopefully that
1689 * will not happen until we will free that block ;-) We don't even need to mark
1690 * it not-uptodate - nobody can expect anything from a newly allocated buffer
1691 * anyway. We used to use unmap_buffer() for such invalidation, but that was
1692 * wrong. We definitely don't want to mark the alias unmapped, for example - it
1693 * would confuse anyone who might pick it with bread() afterwards...
1695 * Also.. Note that bforget() doesn't lock the buffer. So there can be
1696 * writeout I/O going on against recently-freed buffers. We don't wait on that
1697 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1698 * need to. That happens here.
1700 void clean_bdev_aliases(struct block_device
*bdev
, sector_t block
, sector_t len
)
1702 struct inode
*bd_inode
= bdev
->bd_inode
;
1703 struct address_space
*bd_mapping
= bd_inode
->i_mapping
;
1704 struct folio_batch fbatch
;
1705 pgoff_t index
= block
>> (PAGE_SHIFT
- bd_inode
->i_blkbits
);
1708 struct buffer_head
*bh
;
1709 struct buffer_head
*head
;
1711 end
= (block
+ len
- 1) >> (PAGE_SHIFT
- bd_inode
->i_blkbits
);
1712 folio_batch_init(&fbatch
);
1713 while (filemap_get_folios(bd_mapping
, &index
, end
, &fbatch
)) {
1714 count
= folio_batch_count(&fbatch
);
1715 for (i
= 0; i
< count
; i
++) {
1716 struct folio
*folio
= fbatch
.folios
[i
];
1718 if (!folio_buffers(folio
))
1721 * We use folio lock instead of bd_mapping->private_lock
1722 * to pin buffers here since we can afford to sleep and
1723 * it scales better than a global spinlock lock.
1726 /* Recheck when the folio is locked which pins bhs */
1727 head
= folio_buffers(folio
);
1732 if (!buffer_mapped(bh
) || (bh
->b_blocknr
< block
))
1734 if (bh
->b_blocknr
>= block
+ len
)
1736 clear_buffer_dirty(bh
);
1738 clear_buffer_req(bh
);
1740 bh
= bh
->b_this_page
;
1741 } while (bh
!= head
);
1743 folio_unlock(folio
);
1745 folio_batch_release(&fbatch
);
1747 /* End of range already reached? */
1748 if (index
> end
|| !index
)
1752 EXPORT_SYMBOL(clean_bdev_aliases
);
1755 * Size is a power-of-two in the range 512..PAGE_SIZE,
1756 * and the case we care about most is PAGE_SIZE.
1758 * So this *could* possibly be written with those
1759 * constraints in mind (relevant mostly if some
1760 * architecture has a slow bit-scan instruction)
1762 static inline int block_size_bits(unsigned int blocksize
)
1764 return ilog2(blocksize
);
1767 static struct buffer_head
*folio_create_buffers(struct folio
*folio
,
1768 struct inode
*inode
,
1769 unsigned int b_state
)
1771 BUG_ON(!folio_test_locked(folio
));
1773 if (!folio_buffers(folio
))
1774 folio_create_empty_buffers(folio
,
1775 1 << READ_ONCE(inode
->i_blkbits
),
1777 return folio_buffers(folio
);
1781 * NOTE! All mapped/uptodate combinations are valid:
1783 * Mapped Uptodate Meaning
1785 * No No "unknown" - must do get_block()
1786 * No Yes "hole" - zero-filled
1787 * Yes No "allocated" - allocated on disk, not read in
1788 * Yes Yes "valid" - allocated and up-to-date in memory.
1790 * "Dirty" is valid only with the last case (mapped+uptodate).
1794 * While block_write_full_page is writing back the dirty buffers under
1795 * the page lock, whoever dirtied the buffers may decide to clean them
1796 * again at any time. We handle that by only looking at the buffer
1797 * state inside lock_buffer().
1799 * If block_write_full_page() is called for regular writeback
1800 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1801 * locked buffer. This only can happen if someone has written the buffer
1802 * directly, with submit_bh(). At the address_space level PageWriteback
1803 * prevents this contention from occurring.
1805 * If block_write_full_page() is called with wbc->sync_mode ==
1806 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1807 * causes the writes to be flagged as synchronous writes.
1809 int __block_write_full_folio(struct inode
*inode
, struct folio
*folio
,
1810 get_block_t
*get_block
, struct writeback_control
*wbc
,
1811 bh_end_io_t
*handler
)
1815 sector_t last_block
;
1816 struct buffer_head
*bh
, *head
;
1817 unsigned int blocksize
, bbits
;
1818 int nr_underway
= 0;
1819 blk_opf_t write_flags
= wbc_to_write_flags(wbc
);
1821 head
= folio_create_buffers(folio
, inode
,
1822 (1 << BH_Dirty
) | (1 << BH_Uptodate
));
1825 * Be very careful. We have no exclusion from block_dirty_folio
1826 * here, and the (potentially unmapped) buffers may become dirty at
1827 * any time. If a buffer becomes dirty here after we've inspected it
1828 * then we just miss that fact, and the folio stays dirty.
1830 * Buffers outside i_size may be dirtied by block_dirty_folio;
1831 * handle that here by just cleaning them.
1835 blocksize
= bh
->b_size
;
1836 bbits
= block_size_bits(blocksize
);
1838 block
= (sector_t
)folio
->index
<< (PAGE_SHIFT
- bbits
);
1839 last_block
= (i_size_read(inode
) - 1) >> bbits
;
1842 * Get all the dirty buffers mapped to disk addresses and
1843 * handle any aliases from the underlying blockdev's mapping.
1846 if (block
> last_block
) {
1848 * mapped buffers outside i_size will occur, because
1849 * this folio can be outside i_size when there is a
1850 * truncate in progress.
1853 * The buffer was zeroed by block_write_full_page()
1855 clear_buffer_dirty(bh
);
1856 set_buffer_uptodate(bh
);
1857 } else if ((!buffer_mapped(bh
) || buffer_delay(bh
)) &&
1859 WARN_ON(bh
->b_size
!= blocksize
);
1860 err
= get_block(inode
, block
, bh
, 1);
1863 clear_buffer_delay(bh
);
1864 if (buffer_new(bh
)) {
1865 /* blockdev mappings never come here */
1866 clear_buffer_new(bh
);
1867 clean_bdev_bh_alias(bh
);
1870 bh
= bh
->b_this_page
;
1872 } while (bh
!= head
);
1875 if (!buffer_mapped(bh
))
1878 * If it's a fully non-blocking write attempt and we cannot
1879 * lock the buffer then redirty the folio. Note that this can
1880 * potentially cause a busy-wait loop from writeback threads
1881 * and kswapd activity, but those code paths have their own
1882 * higher-level throttling.
1884 if (wbc
->sync_mode
!= WB_SYNC_NONE
) {
1886 } else if (!trylock_buffer(bh
)) {
1887 folio_redirty_for_writepage(wbc
, folio
);
1890 if (test_clear_buffer_dirty(bh
)) {
1891 mark_buffer_async_write_endio(bh
, handler
);
1895 } while ((bh
= bh
->b_this_page
) != head
);
1898 * The folio and its buffers are protected by the writeback flag,
1899 * so we can drop the bh refcounts early.
1901 BUG_ON(folio_test_writeback(folio
));
1902 folio_start_writeback(folio
);
1905 struct buffer_head
*next
= bh
->b_this_page
;
1906 if (buffer_async_write(bh
)) {
1907 submit_bh_wbc(REQ_OP_WRITE
| write_flags
, bh
, wbc
);
1911 } while (bh
!= head
);
1912 folio_unlock(folio
);
1916 if (nr_underway
== 0) {
1918 * The folio was marked dirty, but the buffers were
1919 * clean. Someone wrote them back by hand with
1920 * write_dirty_buffer/submit_bh. A rare case.
1922 folio_end_writeback(folio
);
1925 * The folio and buffer_heads can be released at any time from
1933 * ENOSPC, or some other error. We may already have added some
1934 * blocks to the file, so we need to write these out to avoid
1935 * exposing stale data.
1936 * The folio is currently locked and not marked for writeback
1939 /* Recovery: lock and submit the mapped buffers */
1941 if (buffer_mapped(bh
) && buffer_dirty(bh
) &&
1942 !buffer_delay(bh
)) {
1944 mark_buffer_async_write_endio(bh
, handler
);
1947 * The buffer may have been set dirty during
1948 * attachment to a dirty folio.
1950 clear_buffer_dirty(bh
);
1952 } while ((bh
= bh
->b_this_page
) != head
);
1953 folio_set_error(folio
);
1954 BUG_ON(folio_test_writeback(folio
));
1955 mapping_set_error(folio
->mapping
, err
);
1956 folio_start_writeback(folio
);
1958 struct buffer_head
*next
= bh
->b_this_page
;
1959 if (buffer_async_write(bh
)) {
1960 clear_buffer_dirty(bh
);
1961 submit_bh_wbc(REQ_OP_WRITE
| write_flags
, bh
, wbc
);
1965 } while (bh
!= head
);
1966 folio_unlock(folio
);
1969 EXPORT_SYMBOL(__block_write_full_folio
);
1972 * If a folio has any new buffers, zero them out here, and mark them uptodate
1973 * and dirty so they'll be written out (in order to prevent uninitialised
1974 * block data from leaking). And clear the new bit.
1976 void folio_zero_new_buffers(struct folio
*folio
, size_t from
, size_t to
)
1978 size_t block_start
, block_end
;
1979 struct buffer_head
*head
, *bh
;
1981 BUG_ON(!folio_test_locked(folio
));
1982 head
= folio_buffers(folio
);
1989 block_end
= block_start
+ bh
->b_size
;
1991 if (buffer_new(bh
)) {
1992 if (block_end
> from
&& block_start
< to
) {
1993 if (!folio_test_uptodate(folio
)) {
1996 start
= max(from
, block_start
);
1997 xend
= min(to
, block_end
);
1999 folio_zero_segment(folio
, start
, xend
);
2000 set_buffer_uptodate(bh
);
2003 clear_buffer_new(bh
);
2004 mark_buffer_dirty(bh
);
2008 block_start
= block_end
;
2009 bh
= bh
->b_this_page
;
2010 } while (bh
!= head
);
2012 EXPORT_SYMBOL(folio_zero_new_buffers
);
2015 iomap_to_bh(struct inode
*inode
, sector_t block
, struct buffer_head
*bh
,
2016 const struct iomap
*iomap
)
2018 loff_t offset
= block
<< inode
->i_blkbits
;
2020 bh
->b_bdev
= iomap
->bdev
;
2023 * Block points to offset in file we need to map, iomap contains
2024 * the offset at which the map starts. If the map ends before the
2025 * current block, then do not map the buffer and let the caller
2028 if (offset
>= iomap
->offset
+ iomap
->length
)
2031 switch (iomap
->type
) {
2034 * If the buffer is not up to date or beyond the current EOF,
2035 * we need to mark it as new to ensure sub-block zeroing is
2036 * executed if necessary.
2038 if (!buffer_uptodate(bh
) ||
2039 (offset
>= i_size_read(inode
)))
2042 case IOMAP_DELALLOC
:
2043 if (!buffer_uptodate(bh
) ||
2044 (offset
>= i_size_read(inode
)))
2046 set_buffer_uptodate(bh
);
2047 set_buffer_mapped(bh
);
2048 set_buffer_delay(bh
);
2050 case IOMAP_UNWRITTEN
:
2052 * For unwritten regions, we always need to ensure that regions
2053 * in the block we are not writing to are zeroed. Mark the
2054 * buffer as new to ensure this.
2057 set_buffer_unwritten(bh
);
2060 if ((iomap
->flags
& IOMAP_F_NEW
) ||
2061 offset
>= i_size_read(inode
)) {
2063 * This can happen if truncating the block device races
2064 * with the check in the caller as i_size updates on
2065 * block devices aren't synchronized by i_rwsem for
2068 if (S_ISBLK(inode
->i_mode
))
2072 bh
->b_blocknr
= (iomap
->addr
+ offset
- iomap
->offset
) >>
2074 set_buffer_mapped(bh
);
2082 int __block_write_begin_int(struct folio
*folio
, loff_t pos
, unsigned len
,
2083 get_block_t
*get_block
, const struct iomap
*iomap
)
2085 unsigned from
= pos
& (PAGE_SIZE
- 1);
2086 unsigned to
= from
+ len
;
2087 struct inode
*inode
= folio
->mapping
->host
;
2088 unsigned block_start
, block_end
;
2091 unsigned blocksize
, bbits
;
2092 struct buffer_head
*bh
, *head
, *wait
[2], **wait_bh
=wait
;
2094 BUG_ON(!folio_test_locked(folio
));
2095 BUG_ON(from
> PAGE_SIZE
);
2096 BUG_ON(to
> PAGE_SIZE
);
2099 head
= folio_create_buffers(folio
, inode
, 0);
2100 blocksize
= head
->b_size
;
2101 bbits
= block_size_bits(blocksize
);
2103 block
= (sector_t
)folio
->index
<< (PAGE_SHIFT
- bbits
);
2105 for(bh
= head
, block_start
= 0; bh
!= head
|| !block_start
;
2106 block
++, block_start
=block_end
, bh
= bh
->b_this_page
) {
2107 block_end
= block_start
+ blocksize
;
2108 if (block_end
<= from
|| block_start
>= to
) {
2109 if (folio_test_uptodate(folio
)) {
2110 if (!buffer_uptodate(bh
))
2111 set_buffer_uptodate(bh
);
2116 clear_buffer_new(bh
);
2117 if (!buffer_mapped(bh
)) {
2118 WARN_ON(bh
->b_size
!= blocksize
);
2120 err
= get_block(inode
, block
, bh
, 1);
2122 err
= iomap_to_bh(inode
, block
, bh
, iomap
);
2126 if (buffer_new(bh
)) {
2127 clean_bdev_bh_alias(bh
);
2128 if (folio_test_uptodate(folio
)) {
2129 clear_buffer_new(bh
);
2130 set_buffer_uptodate(bh
);
2131 mark_buffer_dirty(bh
);
2134 if (block_end
> to
|| block_start
< from
)
2135 folio_zero_segments(folio
,
2141 if (folio_test_uptodate(folio
)) {
2142 if (!buffer_uptodate(bh
))
2143 set_buffer_uptodate(bh
);
2146 if (!buffer_uptodate(bh
) && !buffer_delay(bh
) &&
2147 !buffer_unwritten(bh
) &&
2148 (block_start
< from
|| block_end
> to
)) {
2149 bh_read_nowait(bh
, 0);
2154 * If we issued read requests - let them complete.
2156 while(wait_bh
> wait
) {
2157 wait_on_buffer(*--wait_bh
);
2158 if (!buffer_uptodate(*wait_bh
))
2162 folio_zero_new_buffers(folio
, from
, to
);
2166 int __block_write_begin(struct page
*page
, loff_t pos
, unsigned len
,
2167 get_block_t
*get_block
)
2169 return __block_write_begin_int(page_folio(page
), pos
, len
, get_block
,
2172 EXPORT_SYMBOL(__block_write_begin
);
2174	static void __block_commit_write(struct folio *folio, size_t from, size_t to)
2176		size_t block_start, block_end;
2177		bool partial = false;
2179		struct buffer_head *bh, *head;
2181		bh = head = folio_buffers(folio);
2182		blocksize = bh->b_size;
2186			block_end = block_start + blocksize;
2187			if (block_end <= from || block_start >= to) {
2188				if (!buffer_uptodate(bh))
2191				set_buffer_uptodate(bh);
2192				mark_buffer_dirty(bh);
2195				clear_buffer_new(bh);
2197			block_start = block_end;
2198			bh = bh->b_this_page;
2199		} while (bh != head);
2202	 * If this is a partial write which happened to make all buffers
2203	 * uptodate then we can optimize away a bogus read_folio() for
2204	 * the next read(). Here we 'discover' whether the folio went
2205	 * uptodate as a result of this (potentially partial) write.
2208			folio_mark_uptodate(folio);
2212 * block_write_begin takes care of the basic task of block allocation and
2213 * bringing partial write blocks uptodate first.
2215 * The filesystem needs to handle block truncation upon failure.
2217	int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2218			struct page **pagep, get_block_t *get_block)
2220		pgoff_t index = pos >> PAGE_SHIFT;
2224		page = grab_cache_page_write_begin(mapping, index);
2228		status = __block_write_begin(page, pos, len, get_block);
2229		if (unlikely(status)) {
2238	EXPORT_SYMBOL(block_write_begin);
2240	int block_write_end(struct file *file, struct address_space *mapping,
2241			loff_t pos, unsigned len, unsigned copied,
2242			struct page *page, void *fsdata)
2244		struct folio *folio = page_folio(page);
2245		size_t start = pos - folio_pos(folio);
2247		if (unlikely(copied < len)) {
2249 * The buffers that were written will now be uptodate, so
2250 * we don't have to worry about a read_folio reading them
2251 * and overwriting a partial write. However if we have
2252 * encountered a short write and only partially written
2253 * into a buffer, it will not be marked uptodate, so a
2254 * read_folio might come in and destroy our partial write.
2256 * Do the simplest thing, and just treat any short write to a
2257 * non uptodate folio as a zero-length write, and force the
2258 * caller to redo the whole thing.
2260		if (!folio_test_uptodate(folio))
2263		folio_zero_new_buffers(folio, start + copied, start + len);
2265	flush_dcache_folio(folio);
2267	/* This could be a short (even 0-length) commit */
2268	__block_commit_write(folio, start, start + copied);
2272	EXPORT_SYMBOL(block_write_end);
2274	int generic_write_end(struct file *file, struct address_space *mapping,
2275			loff_t pos, unsigned len, unsigned copied,
2276			struct page *page, void *fsdata)
2278		struct inode *inode = mapping->host;
2279		loff_t old_size = inode->i_size;
2280		bool i_size_changed = false;
2282		copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2285 * No need to use i_size_read() here, the i_size cannot change under us
2286 * because we hold i_rwsem.
2288 * But it's important to update i_size while still holding page lock:
2289 * page writeout could otherwise come in and zero beyond i_size.
2291		if (pos + copied > inode->i_size) {
2292			i_size_write(inode, pos + copied);
2293			i_size_changed = true;
2300			pagecache_isize_extended(inode, old_size, pos);
2302 * Don't mark the inode dirty under page lock. First, it unnecessarily
2303 * makes the holding time of page lock longer. Second, it forces lock
2304 * ordering of page lock and transaction start for journaling filesystems.
2308		mark_inode_dirty(inode);
2311	EXPORT_SYMBOL(generic_write_end);
2314 * block_is_partially_uptodate checks whether buffers within a folio are uptodate or not.
2317 * Returns true if all buffers which correspond to the specified part
2318 * of the folio are uptodate.
2320	bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2322		unsigned block_start, block_end, blocksize;
2324		struct buffer_head *bh, *head;
2327		head = folio_buffers(folio);
2330		blocksize = head->b_size;
2331		to = min_t(unsigned, folio_size(folio) - from, count);
2333		if (from < blocksize && to > folio_size(folio) - blocksize)
2339			block_end = block_start + blocksize;
2340			if (block_end > from && block_start < to) {
2341				if (!buffer_uptodate(bh)) {
2345				if (block_end >= to)
2348			block_start = block_end;
2349			bh = bh->b_this_page;
2350		} while (bh != head);
2354	EXPORT_SYMBOL(block_is_partially_uptodate);
2357 * Generic "read_folio" function for block devices that have the normal
2358 * get_block functionality. This is most of the block device filesystems.
2359 * Reads the folio asynchronously --- the unlock_buffer() and
2360 * set/clear_buffer_uptodate() functions propagate buffer state into the
2361 * folio once IO has completed.
2363	int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2365		struct inode *inode = folio->mapping->host;
2366		sector_t iblock, lblock;
2367		struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2368		unsigned int blocksize, bbits;
2370		int fully_mapped = 1;
2371		bool page_error = false;
2372		loff_t limit = i_size_read(inode);
2374		/* This is needed for ext4. */
2375		if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2376			limit = inode->i_sb->s_maxbytes;
2378		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2380		head = folio_create_buffers(folio, inode, 0);
2381		blocksize = head->b_size;
2382		bbits = block_size_bits(blocksize);
2384		iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2385		lblock = (limit + blocksize - 1) >> bbits;
2391		if (buffer_uptodate(bh))
2394		if (!buffer_mapped(bh)) {
2398			if (iblock < lblock) {
2399				WARN_ON(bh->b_size != blocksize);
2400				err = get_block(inode, iblock, bh, 0);
2402					folio_set_error(folio);
2406			if (!buffer_mapped(bh)) {
2407				folio_zero_range(folio, i * blocksize,
2410					set_buffer_uptodate(bh);
2414 * get_block() might have updated the buffer
2417			if (buffer_uptodate(bh))
2421		} while (i++, iblock++, (bh = bh->b_this_page) != head);
2424			folio_set_mappedtodisk(folio);
2428		 * All buffers are uptodate - we can set the folio uptodate
2429		 * as well. But not if get_block() returned an error.
2432			folio_mark_uptodate(folio);
2433		folio_unlock(folio);
2437	/* Stage two: lock the buffers */
2438	for (i = 0; i < nr; i++) {
2441		mark_buffer_async_read(bh);
2445	 * Stage 3: start the IO.  Check for uptodateness
2446	 * inside the buffer lock in case another process reading
2447	 * the underlying blockdev brought it uptodate (the sct fix).
2449	for (i = 0; i < nr; i++) {
2451		if (buffer_uptodate(bh))
2452			end_buffer_async_read(bh, 1);
2454			submit_bh(REQ_OP_READ, bh);
2458	EXPORT_SYMBOL(block_read_full_folio);
2460 /* utility function for filesystems that need to do work on expanding
2461 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2462 * deal with the hole.
2464	int generic_cont_expand_simple(struct inode *inode, loff_t size)
2466		struct address_space *mapping = inode->i_mapping;
2467		const struct address_space_operations *aops = mapping->a_ops;
2469		void *fsdata = NULL;
2472		err = inode_newsize_ok(inode, size);
2476		err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
2480		err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
2486	EXPORT_SYMBOL(generic_cont_expand_simple);
2488	static int cont_expand_zero(struct file *file, struct address_space *mapping,
2489				loff_t pos, loff_t *bytes)
2491		struct inode *inode = mapping->host;
2492		const struct address_space_operations *aops = mapping->a_ops;
2493		unsigned int blocksize = i_blocksize(inode);
2495		void *fsdata = NULL;
2496		pgoff_t index, curidx;
2498		unsigned zerofrom, offset, len;
2501		index = pos >> PAGE_SHIFT;
2502		offset = pos & ~PAGE_MASK;
2504		while (index > (curidx = (curpos = *bytes) >> PAGE_SHIFT)) {
2505			zerofrom = curpos & ~PAGE_MASK;
2506			if (zerofrom & (blocksize - 1)) {
2507				*bytes |= (blocksize - 1);
2510			len = PAGE_SIZE - zerofrom;
2512			err = aops->write_begin(file, mapping, curpos, len,
2516			zero_user(page, zerofrom, len);
2517			err = aops->write_end(file, mapping, curpos, len, len,
2524			balance_dirty_pages_ratelimited(mapping);
2526			if (fatal_signal_pending(current)) {
2532 /* page covers the boundary, find the boundary offset */
2533		if (index == curidx) {
2534			zerofrom = curpos & ~PAGE_MASK;
2535			/* if we will expand the thing last block will be filled */
2536			if (offset <= zerofrom) {
2539			if (zerofrom & (blocksize - 1)) {
2540				*bytes |= (blocksize - 1);
2543			len = offset - zerofrom;
2545			err = aops->write_begin(file, mapping, curpos, len,
2549			zero_user(page, zerofrom, len);
2550			err = aops->write_end(file, mapping, curpos, len, len,
2562 * For moronic filesystems that do not allow holes in files.
2563 * We may have to extend the file.
2565	int cont_write_begin(struct file *file, struct address_space *mapping,
2566				loff_t pos, unsigned len,
2567				struct page **pagep, void **fsdata,
2568				get_block_t *get_block, loff_t *bytes)
2570		struct inode *inode = mapping->host;
2571		unsigned int blocksize = i_blocksize(inode);
2572		unsigned int zerofrom;
2575		err = cont_expand_zero(file, mapping, pos, bytes);
2579		zerofrom = *bytes & ~PAGE_MASK;
2580		if (pos + len > *bytes && zerofrom & (blocksize - 1)) {
2581			*bytes |= (blocksize - 1);
2585		return block_write_begin(mapping, pos, len, pagep, get_block);
2587	EXPORT_SYMBOL(cont_write_begin);
2589	void block_commit_write(struct page *page, unsigned from, unsigned to)
2591		struct folio *folio = page_folio(page);
2592		__block_commit_write(folio, from, to);
2594	EXPORT_SYMBOL(block_commit_write);
2597 * block_page_mkwrite() is not allowed to change the file size as it gets
2598 * called from a page fault handler when a page is first dirtied. Hence we must
2599 * be careful to check for EOF conditions here. We set the page up correctly
2600 * for a written page which means we get ENOSPC checking when writing into
2601 * holes and correct delalloc and unwritten extent mapping on filesystems that
2602 * support these features.
2604 * We are not allowed to take the i_mutex here so we have to play games to
2605 * protect against truncate races as the page could now be beyond EOF. Because
2606 * truncate writes the inode size before removing pages, once we have the
2607 * page lock we can determine safely if the page is beyond EOF. If it is not
2608 * beyond EOF, then the page is guaranteed safe against truncation until we
2611 * Direct callers of this function should protect against filesystem freezing
2612 * using sb_start_pagefault() - sb_end_pagefault() functions.
2614	int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2615			 get_block_t get_block)
2617		struct folio *folio = page_folio(vmf->page);
2618		struct inode *inode = file_inode(vma->vm_file);
2624		size = i_size_read(inode);
2625		if ((folio->mapping != inode->i_mapping) ||
2626		    (folio_pos(folio) >= size)) {
2627			/* We overload EFAULT to mean page got truncated */
2632		end = folio_size(folio);
2633		/* folio is wholly or partially inside EOF */
2634		if (folio_pos(folio) + end > size)
2635			end = size - folio_pos(folio);
2637		ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2641		__block_commit_write(folio, 0, end);
2643		folio_mark_dirty(folio);
2644		folio_wait_stable(folio);
2647		folio_unlock(folio);
2650	EXPORT_SYMBOL(block_page_mkwrite);
2652	int block_truncate_page(struct address_space *mapping,
2653				loff_t from, get_block_t *get_block)
2655		pgoff_t index = from >> PAGE_SHIFT;
2658		size_t offset, length, pos;
2659		struct inode *inode = mapping->host;
2660		struct folio *folio;
2661		struct buffer_head *bh;
2664		blocksize = i_blocksize(inode);
2665		length = from & (blocksize - 1);
2667		/* Block boundary? Nothing to do */
2671		length = blocksize - length;
2672		iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
2674		folio = filemap_grab_folio(mapping, index);
2676			return PTR_ERR(folio);
2678		bh = folio_buffers(folio);
2680			folio_create_empty_buffers(folio, blocksize, 0);
2681			bh = folio_buffers(folio);
2684		/* Find the buffer that contains "offset" */
2685		offset = offset_in_folio(folio, from);
2687		while (offset >= pos) {
2688			bh = bh->b_this_page;
2693		if (!buffer_mapped(bh)) {
2694			WARN_ON(bh->b_size != blocksize);
2695			err = get_block(inode, iblock, bh, 0);
2698			/* unmapped? It's a hole - nothing to do */
2699			if (!buffer_mapped(bh))
2703		/* Ok, it's mapped. Make sure it's up-to-date */
2704		if (folio_test_uptodate(folio))
2705			set_buffer_uptodate(bh);
2707		if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2708			err = bh_read(bh, 0);
2709			/* Uhhuh. Read error. Complain and punt. */
2714		folio_zero_range(folio, offset, length);
2715		mark_buffer_dirty(bh);
2718		folio_unlock(folio);
2723	EXPORT_SYMBOL(block_truncate_page);
2726 * The generic ->writepage function for buffer-backed address_spaces
2728	int block_write_full_page(struct page *page, get_block_t *get_block,
2729			struct writeback_control *wbc)
2731		struct folio *folio = page_folio(page);
2732		struct inode * const inode = folio->mapping->host;
2733		loff_t i_size = i_size_read(inode);
2735		/* Is the folio fully inside i_size? */
2736		if (folio_pos(folio) + folio_size(folio) <= i_size)
2737			return __block_write_full_folio(inode, folio, get_block, wbc,
2738						       end_buffer_async_write);
2740		/* Is the folio fully outside i_size? (truncate in progress) */
2741		if (folio_pos(folio) >= i_size) {
2742			folio_unlock(folio);
2743			return 0; /* don't care */
2747	 * The folio straddles i_size.  It must be zeroed out on each and every
2748	 * writepage invocation because it may be mmapped.  "A file is mapped
2749	 * in multiples of the page size.  For a file that is not a multiple of
2750	 * the page size, the remaining memory is zeroed when mapped, and
2751	 * writes to that region are not written out to the file."
2753		folio_zero_segment(folio, offset_in_folio(folio, i_size),
2755		return __block_write_full_folio(inode, folio, get_block, wbc,
2756						end_buffer_async_write);
2758	EXPORT_SYMBOL(block_write_full_page);
2760	sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2761			    get_block_t *get_block)
2763		struct inode *inode = mapping->host;
2764		struct buffer_head tmp = {
2765			.b_size = i_blocksize(inode),
2768		get_block(inode, block, &tmp, 0);
2769		return tmp.b_blocknr;
2771	EXPORT_SYMBOL(generic_block_bmap);
2773	static void end_bio_bh_io_sync(struct bio *bio)
2775		struct buffer_head *bh = bio->bi_private;
2777		if (unlikely(bio_flagged(bio, BIO_QUIET)))
2778			set_bit(BH_Quiet, &bh->b_state);
2780		bh->b_end_io(bh, !bio->bi_status);
2784	static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2785			  struct writeback_control *wbc)
2787		const enum req_op op = opf & REQ_OP_MASK;
2790		BUG_ON(!buffer_locked(bh));
2791		BUG_ON(!buffer_mapped(bh));
2792		BUG_ON(!bh->b_end_io);
2793		BUG_ON(buffer_delay(bh));
2794		BUG_ON(buffer_unwritten(bh));
2797		 * Only clear out a write error when rewriting
2799		if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2800			clear_buffer_write_io_error(bh);
2802		if (buffer_meta(bh))
2804		if (buffer_prio(bh))
2807		bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2809		fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2811		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2813		__bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
2815		bio->bi_end_io = end_bio_bh_io_sync;
2816		bio->bi_private = bh;
2818		/* Take care of bh's that straddle the end of the device */
2822			wbc_init_bio(wbc, bio);
2823			wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2829	void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2831		submit_bh_wbc(opf, bh, NULL);
2833	EXPORT_SYMBOL(submit_bh);
2835	void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2838		if (!test_clear_buffer_dirty(bh)) {
2842		bh->b_end_io = end_buffer_write_sync;
2844		submit_bh(REQ_OP_WRITE | op_flags, bh);
2846	EXPORT_SYMBOL(write_dirty_buffer);
2849 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2850 * and then start new I/O and then wait upon it. The caller must have a ref on the buffer_head.
2853	int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2855		WARN_ON(atomic_read(&bh->b_count) < 1);
2857		if (test_clear_buffer_dirty(bh)) {
2859			 * The bh should be mapped, but it might not be if the
2860			 * device was hot-removed. Not much we can do but fail the I/O.
2862			if (!buffer_mapped(bh)) {
2868			bh->b_end_io = end_buffer_write_sync;
2869			submit_bh(REQ_OP_WRITE | op_flags, bh);
2871			if (!buffer_uptodate(bh))
2878	EXPORT_SYMBOL(__sync_dirty_buffer);
2880	int sync_dirty_buffer(struct buffer_head *bh)
2882		return __sync_dirty_buffer(bh, REQ_SYNC);
2884	EXPORT_SYMBOL(sync_dirty_buffer);
2887 * try_to_free_buffers() checks if all the buffers on this particular folio
2888 * are unused, and releases them if so.
2890 * Exclusion against try_to_free_buffers may be obtained by either
2891 * locking the folio or by holding its mapping's private_lock.
2893 * If the folio is dirty but all the buffers are clean then we need to
2894 * be sure to mark the folio clean as well. This is because the folio
2895 * may be against a block device, and a later reattachment of buffers
2896 * to a dirty folio will set *all* buffers dirty. Which would corrupt
2897 * filesystem data on the same device.
2899 * The same applies to regular filesystem folios: if all the buffers are
2900 * clean then we set the folio clean and proceed. To do that, we require
2901 * total exclusion from block_dirty_folio(). That is obtained with
2904 * try_to_free_buffers() is non-blocking.
2906	static inline int buffer_busy(struct buffer_head *bh)
2908		return atomic_read(&bh->b_count) |
2909			(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2913	drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2915		struct buffer_head *head = folio_buffers(folio);
2916		struct buffer_head *bh;
2920			if (buffer_busy(bh))
2922			bh = bh->b_this_page;
2923		} while (bh != head);
2926			struct buffer_head *next = bh->b_this_page;
2928			if (bh->b_assoc_map)
2929				__remove_assoc_queue(bh);
2931		} while (bh != head);
2932		*buffers_to_free = head;
2933		folio_detach_private(folio);
2939	bool try_to_free_buffers(struct folio *folio)
2941		struct address_space * const mapping = folio->mapping;
2942		struct buffer_head *buffers_to_free = NULL;
2945		BUG_ON(!folio_test_locked(folio));
2946		if (folio_test_writeback(folio))
2949		if (mapping == NULL) {		/* can this still happen? */
2950			ret = drop_buffers(folio, &buffers_to_free);
2954		spin_lock(&mapping->private_lock);
2955		ret = drop_buffers(folio, &buffers_to_free);
2958 * If the filesystem writes its buffers by hand (eg ext3)
2959 * then we can have clean buffers against a dirty folio. We
2960 * clean the folio here; otherwise the VM will never notice
2961 * that the filesystem did any IO at all.
2963 * Also, during truncate, discard_buffer will have marked all
2964 * the folio's buffers clean. We discover that here and clean
2967 * private_lock must be held over this entire operation in order
2968 * to synchronise against block_dirty_folio and prevent the
2969 * dirty bit from being lost.
2972			folio_cancel_dirty(folio);
2973		spin_unlock(&mapping->private_lock);
2975		if (buffers_to_free) {
2976			struct buffer_head *bh = buffers_to_free;
2979				struct buffer_head *next = bh->b_this_page;
2980				free_buffer_head(bh);
2982			} while (bh != buffers_to_free);
2986	EXPORT_SYMBOL(try_to_free_buffers);
2989 * Buffer-head allocation
2991	static struct kmem_cache *bh_cachep __read_mostly;
2994	 * Once the number of bh's in the machine exceeds this level, we start
2995	 * stripping them in writeback.
2997	static unsigned long max_buffer_heads;
2999	int buffer_heads_over_limit;
3001	struct bh_accounting {
3002		int nr;			/* Number of live bh's */
3003		int ratelimit;		/* Limit cacheline bouncing */
3006	static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3008 static void recalc_bh_state(void)
3013		if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3015		__this_cpu_write(bh_accounting.ratelimit, 0);
3016		for_each_online_cpu(i)
3017			tot += per_cpu(bh_accounting, i).nr;
3018		buffer_heads_over_limit = (tot > max_buffer_heads);
3021	struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3023		struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3025			INIT_LIST_HEAD(&ret->b_assoc_buffers);
3026			spin_lock_init(&ret->b_uptodate_lock);
3028			__this_cpu_inc(bh_accounting.nr);
3034	EXPORT_SYMBOL(alloc_buffer_head);
3036	void free_buffer_head(struct buffer_head *bh)
3038		BUG_ON(!list_empty(&bh->b_assoc_buffers));
3039		kmem_cache_free(bh_cachep, bh);
3041		__this_cpu_dec(bh_accounting.nr);
3045	EXPORT_SYMBOL(free_buffer_head);
3047	static int buffer_exit_cpu_dead(unsigned int cpu)
3050		struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3052		for (i = 0; i < BH_LRU_SIZE; i++) {
3056		this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3057		per_cpu(bh_accounting, cpu).nr = 0;
3062 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3063 * @bh: struct buffer_head
3065 * Return true if the buffer is up-to-date and false,
3066 * with the buffer locked, if not.
3068	int bh_uptodate_or_lock(struct buffer_head *bh)
3070		if (!buffer_uptodate(bh)) {
3072			if (!buffer_uptodate(bh))
3078	EXPORT_SYMBOL(bh_uptodate_or_lock);
3081 * __bh_read - Submit read for a locked buffer
3082 * @bh: struct buffer_head
3083 * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3084 * @wait: wait until the read finishes
3086 * Returns zero on success (or when not waiting), and -EIO on error.
3088	int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3092		BUG_ON(!buffer_locked(bh));
3095		bh->b_end_io = end_buffer_read_sync;
3096		submit_bh(REQ_OP_READ | op_flags, bh);
3099			if (!buffer_uptodate(bh))
3104	EXPORT_SYMBOL(__bh_read);
3107 * __bh_read_batch - Submit read for a batch of unlocked buffers
3108 * @nr: entry number of the buffer batch
3109 * @bhs: a batch of struct buffer_head
3110 * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3111 * @force_lock: force to get a lock on the buffer if set, otherwise drops any
3112 * buffer that cannot be locked.
3114 * Returns zero on success or don't wait, and -EIO on error.
3116	void __bh_read_batch(int nr, struct buffer_head *bhs[],
3117			     blk_opf_t op_flags, bool force_lock)
3121		for (i = 0; i < nr; i++) {
3122			struct buffer_head *bh = bhs[i];
3124			if (buffer_uptodate(bh))
3130				if (!trylock_buffer(bh))
3133			if (buffer_uptodate(bh)) {
3138			bh->b_end_io = end_buffer_read_sync;
3140			submit_bh(REQ_OP_READ | op_flags, bh);
3143	EXPORT_SYMBOL(__bh_read_batch);
3145	void __init buffer_init(void)
3147		unsigned long nrpages;
3150		bh_cachep = kmem_cache_create("buffer_head",
3151				sizeof(struct buffer_head), 0,
3152				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3157		 * Limit the bh occupancy to 10% of ZONE_NORMAL
3159		nrpages = (nr_free_buffer_pages() * 10) / 100;
3160		max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3161		ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3162					NULL, buffer_exit_cpu_dead);