iomap: simplify ->read_folio_range() error handling for reads
author     Joanne Koong <joannelkoong@gmail.com>
Tue, 11 Nov 2025 19:36:54 +0000 (11:36 -0800)
committer  Christian Brauner <brauner@kernel.org>
Wed, 12 Nov 2025 09:50:32 +0000 (10:50 +0100)
Instead of requiring that the caller call iomap_finish_folio_read()
even if the ->read_folio_range() callback returns an error, account for
this internally in iomap. This makes the interface simpler and brings
->read_folio_range() error handling in line with writeback's
expectations.

Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Link: https://patch.msgid.link/20251111193658.3495942-6-joannelkoong@gmail.com
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Documentation/filesystems/iomap/operations.rst
fs/fuse/file.c
fs/iomap/buffered-io.c
include/linux/iomap.h

Documentation/filesystems/iomap/operations.rst
index 4d30723be7fa50b562d16fc357a1d3bad87af37f..64f4baf5750e02f6fc53730337848698b9076937 100644
@@ -149,10 +149,9 @@ These ``struct kiocb`` flags are significant for buffered I/O with iomap:
 iomap calls these functions:
 
   - ``read_folio_range``: Called to read in the range. This must be provided
-    by the caller. The caller is responsible for calling
-    iomap_finish_folio_read() after reading in the folio range. This should be
-    done even if an error is encountered during the read. This returns 0 on
-    success or a negative error on failure.
+    by the caller. If this succeeds, iomap_finish_folio_read() must be called
+    after the range is read in, regardless of whether the read succeeded or
+    failed.
 
   - ``submit_read``: Submit any pending read requests. This function is
     optional.
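
To make the documented contract concrete, here is a minimal, illustrative sketch of a
synchronous ->read_folio_range() implementation under the new rules. The myfs_* names,
the myfs_do_read() helper, and the ctx->cur_folio access are assumptions made for the
example, not code from this commit; the callback signature is inferred from the call
site in iomap_read_folio_iter(), and iomap_finish_folio_read() and offset_in_folio()
are existing kernel APIs.

/*
 * Illustrative only: a synchronous read_folio_range() under the new
 * contract.  On failure the callback just returns the error and iomap
 * drops the pending-read accounting itself; iomap_finish_folio_read()
 * is only called once the range has actually been read in.
 */
static int myfs_read_folio_range(const struct iomap_iter *iter,
				 struct iomap_read_folio_ctx *ctx, size_t len)
{
	struct folio *folio = ctx->cur_folio;		/* assumed ctx field */
	size_t off = offset_in_folio(folio, iter->pos);
	int ret;

	ret = myfs_do_read(folio, off, len);		/* hypothetical helper */
	if (ret)
		return ret;	/* no iomap_finish_folio_read() on error */

	iomap_finish_folio_read(folio, off, len, 0);
	return 0;
}

static const struct iomap_read_ops myfs_iomap_read_ops = {
	.read_folio_range	= myfs_read_folio_range,
	/* ->submit_read is optional and omitted in this sketch */
};

This mirrors the simplified fuse synchronous path in the next hunk, where
iomap_finish_folio_read() is now only called when fuse_do_readfolio() succeeds.
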
fs/fuse/file.c
index b343a6f37563af68215d5e737b076234b4e7ff96..7bcb650a9f26d5b1091da359bcb8331a0d2efc5f 100644
@@ -922,13 +922,6 @@ static int fuse_iomap_read_folio_range_async(const struct iomap_iter *iter,
 
        if (ctx->rac) {
                ret = fuse_handle_readahead(folio, ctx->rac, data, pos, len);
-               /*
-                * If fuse_handle_readahead was successful, fuse_readpages_end
-                * will do the iomap_finish_folio_read, else we need to call it
-                * here
-                */
-               if (ret)
-                       iomap_finish_folio_read(folio, off, len, ret);
        } else {
                /*
                 *  for non-readahead read requests, do reads synchronously
@@ -936,7 +929,8 @@ static int fuse_iomap_read_folio_range_async(const struct iomap_iter *iter,
                 *  out-of-order reads
                 */
                ret = fuse_do_readfolio(file, folio, off, len);
-               iomap_finish_folio_read(folio, off, len, ret);
+               if (!ret)
+                       iomap_finish_folio_read(folio, off, len, ret);
        }
        return ret;
 }
fs/iomap/buffered-io.c
index 1873a2f74883e2ce5103c9d05beb6c1884d1c6d6..c82b5b24d4b3f529191808475d1fecdff815bf60 100644
@@ -398,7 +398,8 @@ static void iomap_read_init(struct folio *folio)
                 * has already finished reading in the entire folio.
                 */
                spin_lock_irq(&ifs->state_lock);
-               ifs->read_bytes_pending += len + 1;
+               WARN_ON_ONCE(ifs->read_bytes_pending != 0);
+               ifs->read_bytes_pending = len + 1;
                spin_unlock_irq(&ifs->state_lock);
        }
 }
@@ -414,43 +415,47 @@ static void iomap_read_init(struct folio *folio)
  */
 static void iomap_read_end(struct folio *folio, size_t bytes_submitted)
 {
-       struct iomap_folio_state *ifs;
-
-       /*
-        * If there are no bytes submitted, this means we are responsible for
-        * unlocking the folio here, since no IO helper has taken ownership of
-        * it.
-        */
-       if (!bytes_submitted) {
-               folio_unlock(folio);
-               return;
-       }
+       struct iomap_folio_state *ifs = folio->private;
 
-       ifs = folio->private;
        if (ifs) {
                bool end_read, uptodate;
-               /*
-                * Subtract any bytes that were initially accounted to
-                * read_bytes_pending but skipped for IO.
-                * The +1 accounts for the bias we added in iomap_read_init().
-                */
-               size_t bytes_not_submitted = folio_size(folio) + 1 -
-                               bytes_submitted;
 
                spin_lock_irq(&ifs->state_lock);
-               ifs->read_bytes_pending -= bytes_not_submitted;
-               /*
-                * If !ifs->read_bytes_pending, this means all pending reads
-                * by the IO helper have already completed, which means we need
-                * to end the folio read here. If ifs->read_bytes_pending != 0,
-                * the IO helper will end the folio read.
-                */
-               end_read = !ifs->read_bytes_pending;
+               if (!ifs->read_bytes_pending) {
+                       WARN_ON_ONCE(bytes_submitted);
+                       end_read = true;
+               } else {
+                       /*
+                        * Subtract any bytes that were initially accounted to
+                        * read_bytes_pending but skipped for IO. The +1
+                        * accounts for the bias we added in iomap_read_init().
+                        */
+                       size_t bytes_not_submitted = folio_size(folio) + 1 -
+                                       bytes_submitted;
+                       ifs->read_bytes_pending -= bytes_not_submitted;
+                       /*
+                        * If !ifs->read_bytes_pending, this means all pending
+                        * reads by the IO helper have already completed, which
+                        * means we need to end the folio read here. If
+                        * ifs->read_bytes_pending != 0, the IO helper will end
+                        * the folio read.
+                        */
+                       end_read = !ifs->read_bytes_pending;
+               }
                if (end_read)
                        uptodate = ifs_is_fully_uptodate(folio, ifs);
                spin_unlock_irq(&ifs->state_lock);
                if (end_read)
                        folio_end_read(folio, uptodate);
+       } else if (!bytes_submitted) {
+               /*
+                * If there were no bytes submitted, this means we are
+                * responsible for unlocking the folio here, since no IO helper
+                * has taken ownership of it. If there were bytes submitted,
+                * then the IO helper will end the read via
+                * iomap_finish_folio_read().
+                */
+               folio_unlock(folio);
        }
 }
 
@@ -498,10 +503,10 @@ static int iomap_read_folio_iter(struct iomap_iter *iter,
                } else {
                        if (!*bytes_submitted)
                                iomap_read_init(folio);
-                       *bytes_submitted += plen;
                        ret = ctx->ops->read_folio_range(iter, ctx, plen);
                        if (ret)
                                return ret;
+                       *bytes_submitted += plen;
                }
 
                ret = iomap_iter_advance(iter, plen);
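
As an illustrative walk-through of the accounting above (numbers chosen purely for the
example): for a 16 KiB folio, iomap_read_init() biases read_bytes_pending to
folio_size + 1 = 16384 + 1 = 16385. If a single 4096-byte range is then handed to
->read_folio_range() and the call succeeds, bytes_submitted becomes 4096, so
iomap_read_end() subtracts bytes_not_submitted = 16385 - 4096 = 12289 and leaves
read_bytes_pending at 4096, exactly what the IO helper will later drop to zero via
iomap_finish_folio_read() to end the folio read. If the helper already completed
before iomap_read_end() ran, the same subtraction reaches zero and iomap_read_end()
ends the read itself. Note that bytes_submitted is now only incremented after
->read_folio_range() returns success, so a failing callback no longer leaves bytes
accounted that nobody will ever finish.
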
include/linux/iomap.h
index b49e47f069dbd1fbe3af3df5d5b0c067df11b454..520e967cb501d335fd8fc9361cbbeba0042a9a4e 100644
@@ -495,9 +495,8 @@ struct iomap_read_ops {
        /*
         * Read in a folio range.
         *
-        * The caller is responsible for calling iomap_finish_folio_read() after
-        * reading in the folio range. This should be done even if an error is
-        * encountered during the read.
+        * If this succeeds, iomap_finish_folio_read() must be called after the
+        * range is read in, regardless of whether the read succeeded or failed.
         *
         * Returns 0 on success or a negative error on failure.
         */
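
For an asynchronous implementation the same division of responsibility applies on the
completion side: once ->read_folio_range() has returned 0, the eventual completion
reports the outcome, success or failure, through iomap_finish_folio_read(). A minimal
sketch, assuming the filesystem tracks the folio, offset and length of the in-flight
range itself (the myfs_* name is illustrative):

static void myfs_read_range_done(struct folio *folio, size_t off, size_t len,
				 int error)
{
	/* error is 0 on success, or a negative errno from the failed IO */
	iomap_finish_folio_read(folio, off, len, error);
}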