iomap: Rename IOMAP_ATOMIC -> IOMAP_ATOMIC_HW
author    John Garry <john.g.garry@oracle.com>
          Mon, 3 Mar 2025 17:11:10 +0000 (17:11 +0000)
committer Christian Brauner <brauner@kernel.org>
          Thu, 6 Mar 2025 10:00:12 +0000 (11:00 +0100)
In future, xfs will support a SW-based atomic write mode, so rename
IOMAP_ATOMIC -> IOMAP_ATOMIC_HW to make clear which mode is being used.

Also relocate the setting of IOMAP_ATOMIC_HW to the write path in
__iomap_dio_rw(), to make clear that this flag is only relevant to writes.

Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: John Garry <john.g.garry@oracle.com>
Link: https://lore.kernel.org/r/20250303171120.2837067-3-john.g.garry@oracle.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
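
For illustration only (not part of this commit): a minimal sketch of how a
filesystem's ->iomap_begin handler could test the renamed flag. The handler
name sketch_iomap_begin and its body are hypothetical; only the iomap_ops
callback signature and the IOMAP_ATOMIC_HW flag come from
include/linux/iomap.h, and the single-fs-block restriction mirrors the check
in iomap_dio_bio_iter() in the diff below.

    #include <linux/fs.h>
    #include <linux/iomap.h>

    /*
     * Hypothetical ->iomap_begin handler. When the caller requests a
     * HW-offload atomic (torn-write protected) write, the mapping that
     * is returned must be coverable by a single bio so that the iomap
     * DIO code can mark it REQ_ATOMIC without splitting it.
     */
    static int sketch_iomap_begin(struct inode *inode, loff_t pos,
                    loff_t length, unsigned flags, struct iomap *iomap,
                    struct iomap *srcmap)
    {
            if (flags & IOMAP_ATOMIC_HW) {
                    /* Mirror iomap_dio_bio_iter(): atomic writes span one fs block. */
                    if (length != i_blocksize(inode))
                            return -EINVAL;
            }

            /* ... fill in *iomap for the range [pos, pos + length) ... */
            return 0;
    }
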
Documentation/filesystems/iomap/operations.rst
fs/ext4/inode.c
fs/iomap/direct-io.c
fs/iomap/trace.h
include/linux/iomap.h

diff --git a/Documentation/filesystems/iomap/operations.rst b/Documentation/filesystems/iomap/operations.rst
index d1535109587ab2afd1ec850f48f3fecc38a1aecd..0b9d7be23bce08086ccece051d39ddf5060b4f92 100644
@@ -514,8 +514,8 @@ IOMAP_WRITE`` with any combination of the following enhancements:
    if the mapping is unwritten and the filesystem cannot handle zeroing
    the unaligned regions without exposing stale contents.
 
- * ``IOMAP_ATOMIC``: This write is being issued with torn-write
-   protection.
+ * ``IOMAP_ATOMIC_HW``: This write is being issued with torn-write
+   protection based on HW-offload support.
    Only a single bio can be created for the write, and the write must
    not be split into multiple I/O requests, i.e. flag REQ_ATOMIC must be
    set.
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7c54ae5fcbd4540e8ba69b942ffd98dded320339..ba2f1e3db7c739870dbe72c6575f00a191b2568b 100644
@@ -3467,7 +3467,7 @@ static inline bool ext4_want_directio_fallback(unsigned flags, ssize_t written)
                return false;
 
        /* atomic writes are all-or-nothing */
-       if (flags & IOMAP_ATOMIC)
+       if (flags & IOMAP_ATOMIC_HW)
                return false;
 
        /* can only try again if we wrote nothing */
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index e1e32e2bb0bfa2c76713abc788969ebae8c2afe9..c696ce980796bbba958fccb9ee5df671441a2819 100644
@@ -317,7 +317,7 @@ static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
  * clearing the WRITE_THROUGH flag in the dio request.
  */
 static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
-               const struct iomap *iomap, bool use_fua, bool atomic)
+               const struct iomap *iomap, bool use_fua, bool atomic_hw)
 {
        blk_opf_t opflags = REQ_SYNC | REQ_IDLE;
 
@@ -329,7 +329,7 @@ static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
                opflags |= REQ_FUA;
        else
                dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
-       if (atomic)
+       if (atomic_hw)
                opflags |= REQ_ATOMIC;
 
        return opflags;
@@ -340,8 +340,8 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
        const struct iomap *iomap = &iter->iomap;
        struct inode *inode = iter->inode;
        unsigned int fs_block_size = i_blocksize(inode), pad;
+       bool atomic_hw = iter->flags & IOMAP_ATOMIC_HW;
        const loff_t length = iomap_length(iter);
-       bool atomic = iter->flags & IOMAP_ATOMIC;
        loff_t pos = iter->pos;
        blk_opf_t bio_opf;
        struct bio *bio;
@@ -351,7 +351,7 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
        u64 copied = 0;
        size_t orig_count;
 
-       if (atomic && length != fs_block_size)
+       if (atomic_hw && length != fs_block_size)
                return -EINVAL;
 
        if ((pos | length) & (bdev_logical_block_size(iomap->bdev) - 1) ||
@@ -428,7 +428,7 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
                        goto out;
        }
 
-       bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua, atomic);
+       bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua, atomic_hw);
 
        nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
        do {
@@ -461,7 +461,7 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
                }
 
                n = bio->bi_iter.bi_size;
-               if (WARN_ON_ONCE(atomic && n != length)) {
+               if (WARN_ON_ONCE(atomic_hw && n != length)) {
                        /*
                         * This bio should have covered the complete length,
                         * which it doesn't, so error. We may need to zero out
@@ -652,9 +652,6 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        if (iocb->ki_flags & IOCB_NOWAIT)
                iomi.flags |= IOMAP_NOWAIT;
 
-       if (iocb->ki_flags & IOCB_ATOMIC)
-               iomi.flags |= IOMAP_ATOMIC;
-
        if (iov_iter_rw(iter) == READ) {
                /* reads can always complete inline */
                dio->flags |= IOMAP_DIO_INLINE_COMP;
@@ -689,6 +686,9 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                        iomi.flags |= IOMAP_OVERWRITE_ONLY;
                }
 
+               if (iocb->ki_flags & IOCB_ATOMIC)
+                       iomi.flags |= IOMAP_ATOMIC_HW;
+
                /* for data sync or sync, we need sync completion processing */
                if (iocb_is_dsync(iocb)) {
                        dio->flags |= IOMAP_DIO_NEED_SYNC;
diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h
index 9eab2c8ac3c5dc8143441beeab6a37f3faf50338..69af89044ebdb5d6134d810715d26eda47bf9595 100644
@@ -99,7 +99,7 @@ DEFINE_RANGE_EVENT(iomap_dio_rw_queued);
        { IOMAP_FAULT,          "FAULT" }, \
        { IOMAP_DIRECT,         "DIRECT" }, \
        { IOMAP_NOWAIT,         "NOWAIT" }, \
-       { IOMAP_ATOMIC,         "ATOMIC" }
+       { IOMAP_ATOMIC_HW,      "ATOMIC_HW" }
 
 #define IOMAP_F_FLAGS_STRINGS \
        { IOMAP_F_NEW,          "NEW" }, \
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index ea29388b2fba234ca5d3fa99cb58368127afe315..87cd7079aaf33a05a4b6478750b9d2d804a09904 100644
@@ -189,7 +189,7 @@ struct iomap_folio_ops {
 #else
 #define IOMAP_DAX              0
 #endif /* CONFIG_FS_DAX */
-#define IOMAP_ATOMIC           (1 << 9)
+#define IOMAP_ATOMIC_HW                (1 << 9)
 #define IOMAP_DONTCACHE                (1 << 10)
 
 struct iomap_ops {