Merge tag 'f2fs-for-7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk...
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 14 Feb 2026 17:48:10 +0000 (09:48 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 14 Feb 2026 17:48:10 +0000 (09:48 -0800)
Pull f2fs updates from Jaegeuk Kim:
 "In this development cycle, we focused on several key performance
  optimizations:

   - introducing large folio support to enhance read speeds for
     immutable files

   - reducing checkpoint=enable latency by flushing only committed dirty
     pages

   - implementing tracepoints to diagnose and resolve lock priority
     inversion

  Additionally, we introduced the packed_ssa feature to optimize the SSA
  footprint when utilizing large block sizes.

  Detailed summary:

  Enhancements:
   - support large folio for immutable non-compressed case
   - support non-4KB block size without packed_ssa feature
   - optimize f2fs_enable_checkpoint() to avoid long delay
   - optimize f2fs_overwrite_io() for f2fs_iomap_begin
   - optimize NAT block loading during checkpoint write
   - add write latency stats for NAT and SIT blocks in
     f2fs_write_checkpoint
   - pin files do not require sbi->writepages lock for ordering
   - avoid f2fs_map_blocks() for consecutive holes in readpages
   - flush plug periodically during GC to maximize readahead effect
   - add tracepoints to catch lock overheads
   - add several sysfs entries to tune internal lock priorities

  Fixes:
   - fix lock priority inversion issue
   - fix incomplete block usage in compact SSA summaries
   - fix to show simulate_lock_timeout correctly
   - fix to avoid mapping wrong physical block for swapfile
   - fix IS_CHECKPOINTED flag inconsistency issue caused by
     concurrent atomic commit and checkpoint writes
   - fix to avoid UAF in f2fs_write_end_io()"
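
For context on the lock work called out above ("add tracepoints to catch
lock overheads"), the data.c diff below swaps plain f2fs_up_write() calls
for a traced variant, f2fs_up_write_trace(&io->io_rwsem, &lc). A minimal
sketch of how such a wrapper could be shaped follows; the lock-context
struct, the threshold constant, and the trace event name are illustrative
assumptions, not the committed implementation:

/*
 * Sketch only: a traced rwsem release matching the
 * f2fs_up_write_trace() call sites in the diff below.
 */
struct f2fs_lock_context {
	u64 locked_at;		/* ktime_get_ns() at acquisition */
};

static inline void f2fs_up_write_trace(struct f2fs_rwsem *sem,
				       struct f2fs_lock_context *lc)
{
	u64 held_ns = ktime_get_ns() - lc->locked_at;

	/* report only holds long enough to matter for inversion */
	if (held_ns > F2FS_LOCK_TRACE_THRESHOLD_NS)	/* assumed knob */
		trace_f2fs_lock_held(sem, held_ns);	/* assumed event */

	f2fs_up_write(sem);
}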

* tag 'f2fs-for-7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (61 commits)
  f2fs: sysfs: introduce critical_task_priority
  f2fs: introduce trace_f2fs_priority_update
  f2fs: fix lock priority inversion issue
  f2fs: optimize f2fs_overwrite_io() for f2fs_iomap_begin
  f2fs: fix incomplete block usage in compact SSA summaries
  f2fs: decrease maximum flush retry count in f2fs_enable_checkpoint()
  f2fs: optimize NAT block loading during checkpoint write
  f2fs: change size parameter of __has_cursum_space() to unsigned int
  f2fs: add write latency stats for NAT and SIT blocks in f2fs_write_checkpoint
  f2fs: pin files do not require sbi->writepages lock for ordering
  f2fs: fix to show simulate_lock_timeout correctly
  f2fs: introduce FAULT_SKIP_WRITE
  f2fs: check skipped write in f2fs_enable_checkpoint()
  Revert "f2fs: add timeout in f2fs_enable_checkpoint()"
  f2fs: fix to unlock folio in f2fs_read_data_large_folio()
  f2fs: fix error path handling in f2fs_read_data_large_folio()
  f2fs: use folio_end_read
  f2fs: fix to avoid mapping wrong physical block for swapfile
  f2fs: avoid f2fs_map_blocks() for consecutive holes in readpages
  f2fs: advance index and offset after zeroing in large folio read
  ...
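
On "f2fs: introduce FAULT_SKIP_WRITE" and the follow-up check in
f2fs_enable_checkpoint() above: f2fs already routes fault injection
through time_to_inject(). A rough sketch of how the new fault type would
plug in; the call site and the helper it falls through to are assumptions
for illustration:

/*
 * Sketch only: FAULT_SKIP_WRITE wired into f2fs's fault-injection
 * helper so checkpoint enabling can be tested against lost writes.
 */
static int f2fs_do_write_page(struct f2fs_sb_info *sbi, struct folio *folio)
{
	if (time_to_inject(sbi, FAULT_SKIP_WRITE)) {
		/* pretend the write happened; the caller must detect this */
		folio_unlock(folio);
		return 0;
	}

	return __f2fs_write_page(sbi, folio);	/* hypothetical helper */
}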

fs/f2fs/compress.c
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/inode.c
fs/f2fs/super.c

diff --cc fs/f2fs/compress.c
Simple merge
diff --cc fs/f2fs/data.c
index 491f6651120186fd8d77f980de788933339eccaa,f70efb040c732a74cc35645c5cd042f20802ce27..338df7a2aea6bec0dbf80c9e159ffd14aaea47f0
@@@ -189,7 -214,7 +216,7 @@@ static void f2fs_verify_bio(struct work
                        struct folio *folio = fi.folio;
  
                        if (!f2fs_is_compressed_page(folio) &&
-                           !fsverity_verify_page(vi, &folio->page)) {
 -                          !fsverity_verify_page(&folio->page)) {
++                          !fsverity_verify_folio(vi, folio)) {
                                bio->bi_status = BLK_STS_IOERR;
                                break;
                        }
        if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
                                !f2fs_is_checkpoint_ready(sbi))
                __submit_merged_bio(io);
-       f2fs_up_write(&io->io_rwsem);
+       f2fs_up_write_trace(&io->io_rwsem, &lc);
  }
  
 -static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
 +static struct bio *f2fs_grab_read_bio(struct inode *inode,
 +                                    struct fsverity_info *vi, block_t blkaddr,
                                      unsigned nr_pages, blk_opf_t op_flag,
                                      pgoff_t first_idx, bool for_write)
  {
  }
  #endif
  
 -                      folio_zero_range(folio, offset << PAGE_SHIFT, PAGE_SIZE);
 -                      if (f2fs_need_verity(inode, index) &&
 -                          !fsverity_verify_page(folio_file_page(folio,
 -                                                              index))) {
+ static struct f2fs_folio_state *ffs_find_or_alloc(struct folio *folio)
+ {
+       struct f2fs_folio_state *ffs = folio->private;
+
+       if (ffs)
+               return ffs;
+       ffs = f2fs_kmem_cache_alloc(ffs_entry_slab,
+                       GFP_NOIO | __GFP_ZERO, true, NULL);
+       spin_lock_init(&ffs->state_lock);
+       folio_attach_private(folio, ffs);
+       return ffs;
+ }
+
+ static void ffs_detach_free(struct folio *folio)
+ {
+       struct f2fs_folio_state *ffs;
+
+       if (!folio_test_large(folio)) {
+               folio_detach_private(folio);
+               return;
+       }
+       ffs = folio_detach_private(folio);
+       if (!ffs)
+               return;
+       WARN_ON_ONCE(ffs->read_pages_pending != 0);
+       kmem_cache_free(ffs_entry_slab, ffs);
+ }
+
+ static int f2fs_read_data_large_folio(struct inode *inode,
++              struct fsverity_info *vi,
+               struct readahead_control *rac, struct folio *folio)
+ {
+       struct bio *bio = NULL;
+       sector_t last_block_in_bio = 0;
+       struct f2fs_map_blocks map = {0, };
+       pgoff_t index, offset, next_pgofs = 0;
+       unsigned max_nr_pages = rac ? readahead_count(rac) :
+                               folio_nr_pages(folio);
+       unsigned nrpages;
+       struct f2fs_folio_state *ffs;
+       int ret = 0;
+       bool folio_in_bio;
+
+       if (!IS_IMMUTABLE(inode) || f2fs_compressed_file(inode)) {
+               if (folio)
+                       folio_unlock(folio);
+               return -EOPNOTSUPP;
+       }
+       map.m_seg_type = NO_CHECK_TYPE;
+       if (rac)
+               folio = readahead_folio(rac);
+ next_folio:
+       if (!folio)
+               goto out;
+       folio_in_bio = false;
+       index = folio->index;
+       offset = 0;
+       ffs = NULL;
+       nrpages = folio_nr_pages(folio);
+       for (; nrpages; nrpages--, max_nr_pages--, index++, offset++) {
+               sector_t block_nr;
+
+               /*
+                * Map blocks using the previous result first.
+                */
+               if (map.m_flags & F2FS_MAP_MAPPED) {
+                       if (index > map.m_lblk &&
+                               index < (map.m_lblk + map.m_len))
+                               goto got_it;
+               } else if (index < next_pgofs) {
+                       /* hole case */
+                       goto got_it;
+               }
+               /*
+                * Then do more f2fs_map_blocks() calls until we are
+                * done with this page.
+                */
+               memset(&map, 0, sizeof(map));
+               map.m_next_pgofs = &next_pgofs;
+               map.m_seg_type = NO_CHECK_TYPE;
+               map.m_lblk = index;
+               map.m_len = max_nr_pages;
+               ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
+               if (ret)
+                       goto err_out;
+ got_it:
+               if ((map.m_flags & F2FS_MAP_MAPPED)) {
+                       block_nr = map.m_pblk + index - map.m_lblk;
+                       if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
+                                               DATA_GENERIC_ENHANCE_READ)) {
+                               ret = -EFSCORRUPTED;
+                               goto err_out;
+                       }
+               } else {
 -                      bio = f2fs_grab_read_bio(inode, block_nr,
 -                                      max_nr_pages,
++                      size_t page_offset = offset << PAGE_SHIFT;
++                      folio_zero_range(folio, page_offset, PAGE_SIZE);
++                      if (vi && !fsverity_verify_blocks(vi, folio, PAGE_SIZE, page_offset)) {
+                               ret = -EIO;
+                               goto err_out;
+                       }
+                       continue;
+               }
+               /*
+                * We must increment read_pages_pending before submitting any
+                * bios, to prevent a premature folio_end_read() call on the
+                * folio.
+                */
+               if (folio_test_large(folio)) {
+                       ffs = ffs_find_or_alloc(folio);
+                       /* account this page so read completion waits for it */
+                       spin_lock_irq(&ffs->state_lock);
+                       ffs->read_pages_pending++;
+                       spin_unlock_irq(&ffs->state_lock);
+               }
+               /*
+                * This page will go to BIO.  Do we need to send this
+                * BIO off first?
+                */
+               if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
+                                               last_block_in_bio, block_nr) ||
+                       !f2fs_crypt_mergeable_bio(bio, inode, index, NULL))) {
+ submit_and_realloc:
+                       f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
+                       bio = NULL;
+               }
+               if (bio == NULL)
++                      bio = f2fs_grab_read_bio(inode, vi,
++                                      block_nr, max_nr_pages,
+                                       f2fs_ra_op_flags(rac),
+                                       index, false);
+               /*
+                * If the page is under writeback, we need to wait for
+                * its completion to see the correct decrypted data.
+                */
+               f2fs_wait_on_block_writeback(inode, block_nr);
+               if (!bio_add_folio(bio, folio, F2FS_BLKSIZE,
+                                       offset << PAGE_SHIFT))
+                       goto submit_and_realloc;
+               folio_in_bio = true;
+               inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
+               f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
+                               F2FS_BLKSIZE);
+               last_block_in_bio = block_nr;
+       }
+       trace_f2fs_read_folio(folio, DATA);
+ err_out:
+       if (!folio_in_bio) {
+               folio_end_read(folio, !ret);
+               if (ret)
+                       return ret;
+       }
+       if (rac) {
+               folio = readahead_folio(rac);
+               goto next_folio;
+       }
+ out:
+       f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
+       if (ret) {
+               /* Wait for in-flight bios and clear the uptodate flag. */
+               folio_lock(folio);
+               folio_clear_uptodate(folio);
+               folio_unlock(folio);
+       }
+       return ret;
+ }
  /*
   * This function was originally taken from fs/mpage.c, and customized for f2fs.
   * Major change was from block_size == page_size in f2fs by default.
@@@ -2371,6 -2624,9 +2635,9 @@@ static int f2fs_mpage_readpages(struct 
        unsigned max_nr_pages = nr_pages;
        int ret = 0;
  
 -              return f2fs_read_data_large_folio(inode, rac, folio);
+       if (mapping_large_folio_support(mapping))
++              return f2fs_read_data_large_folio(inode, vi, rac, folio);
  #ifdef CONFIG_F2FS_FS_COMPRESSION
        if (f2fs_compressed_file(inode)) {
                index = rac ? readahead_index(rac) : folio->index;
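
The hunk above shows only the submission side of the read_pages_pending
accounting (plus the WARN_ON_ONCE() in ffs_detach_free()); the bio
completion side is not part of this hunk. A sketch of what the end_io
counterpart would have to do, with the helper name assumed and the
locking mirroring ffs->state_lock from the diff:

/*
 * Sketch only: completion-side counterpart to the
 * ffs->read_pages_pending increment in f2fs_read_data_large_folio().
 */
static void f2fs_end_read_page(struct folio *folio, bool uptodate)
{
	struct f2fs_folio_state *ffs = folio->private;
	unsigned long flags;
	bool last;

	if (!folio_test_large(folio)) {
		folio_end_read(folio, uptodate);
		return;
	}

	spin_lock_irqsave(&ffs->state_lock, flags);
	last = --ffs->read_pages_pending == 0;
	spin_unlock_irqrestore(&ffs->state_lock, flags);

	/* only the last pending page may complete the whole folio */
	if (last)
		folio_end_read(folio, uptodate);
}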
diff --cc fs/f2fs/f2fs.h
Simple merge
diff --cc fs/f2fs/file.c
Simple merge
diff --cc fs/f2fs/inode.c
Simple merge
diff --cc fs/f2fs/super.c
Simple merge