6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 3 Apr 2023 08:22:27 +0000 (10:22 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 3 Apr 2023 08:22:27 +0000 (10:22 +0200)
added patches:
block-io_uring-pass-in-issue_flags-for-uring_cmd-task_work-handling.patch
io_uring-fix-poll-netmsg-alloc-caches.patch
io_uring-poll-clear-single-double-poll-flags-on-poll-arming.patch
io_uring-rsrc-fix-rogue-rsrc-node-grabbing.patch
zonefs-do-not-propagate-iomap_dio_rw-enotblk-error-to-user-space.patch

queue-6.1/block-io_uring-pass-in-issue_flags-for-uring_cmd-task_work-handling.patch [new file with mode: 0644]
queue-6.1/io_uring-fix-poll-netmsg-alloc-caches.patch [new file with mode: 0644]
queue-6.1/io_uring-poll-clear-single-double-poll-flags-on-poll-arming.patch [new file with mode: 0644]
queue-6.1/io_uring-rsrc-fix-rogue-rsrc-node-grabbing.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/zonefs-do-not-propagate-iomap_dio_rw-enotblk-error-to-user-space.patch [new file with mode: 0644]

diff --git a/queue-6.1/block-io_uring-pass-in-issue_flags-for-uring_cmd-task_work-handling.patch b/queue-6.1/block-io_uring-pass-in-issue_flags-for-uring_cmd-task_work-handling.patch
new file mode 100644 (file)
index 0000000..1c13762
--- /dev/null
@@ -0,0 +1,269 @@
+From 9d2789ac9d60c049d26ef6d3005d9c94c5a559e9 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 20 Mar 2023 20:01:25 -0600
+Subject: block/io_uring: pass in issue_flags for uring_cmd task_work handling
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 9d2789ac9d60c049d26ef6d3005d9c94c5a559e9 upstream.
+
+io_uring_cmd_done() currently assumes that the uring_lock is held
+when invoked, and while it generally is, this is not guaranteed.
+Pass in the associated issue_flags so that IO_URING_F_UNLOCKED is
+available, allowing the CQ ring to be locked appropriately when
+completing events.
+
+Cc: stable@vger.kernel.org
+Fixes: ee692a21e9bf ("fs,io_uring: add infrastructure for uring-cmd")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/ublk_drv.c  |   31 ++++++++++++++++++-------------
+ drivers/nvme/host/ioctl.c |   14 ++++++++------
+ include/linux/io_uring.h  |   11 ++++++-----
+ io_uring/uring_cmd.c      |   10 ++++++----
+ 4 files changed, 38 insertions(+), 28 deletions(-)
+
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -656,7 +656,8 @@ static void __ublk_fail_req(struct ublk_
+       }
+ }
+-static void ubq_complete_io_cmd(struct ublk_io *io, int res)
++static void ubq_complete_io_cmd(struct ublk_io *io, int res,
++                              unsigned issue_flags)
+ {
+       /* mark this cmd owned by ublksrv */
+       io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
+@@ -668,7 +669,7 @@ static void ubq_complete_io_cmd(struct u
+       io->flags &= ~UBLK_IO_FLAG_ACTIVE;
+       /* tell ublksrv one io request is coming */
+-      io_uring_cmd_done(io->cmd, res, 0);
++      io_uring_cmd_done(io->cmd, res, 0, issue_flags);
+ }
+ #define UBLK_REQUEUE_DELAY_MS 3
+@@ -685,7 +686,8 @@ static inline void __ublk_abort_rq(struc
+       mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
+ }
+-static inline void __ublk_rq_task_work(struct request *req)
++static inline void __ublk_rq_task_work(struct request *req,
++                                     unsigned issue_flags)
+ {
+       struct ublk_queue *ubq = req->mq_hctx->driver_data;
+       int tag = req->tag;
+@@ -723,7 +725,7 @@ static inline void __ublk_rq_task_work(s
+                       pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
+                                       __func__, io->cmd->cmd_op, ubq->q_id,
+                                       req->tag, io->flags);
+-                      ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
++                      ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
+                       return;
+               }
+               /*
+@@ -761,17 +763,18 @@ static inline void __ublk_rq_task_work(s
+                       mapped_bytes >> 9;
+       }
+-      ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
++      ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
+ }
+-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq)
++static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
++                                      unsigned issue_flags)
+ {
+       struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+       struct ublk_rq_data *data, *tmp;
+       io_cmds = llist_reverse_order(io_cmds);
+       llist_for_each_entry_safe(data, tmp, io_cmds, node)
+-              __ublk_rq_task_work(blk_mq_rq_from_pdu(data));
++              __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
+ }
+ static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
+@@ -783,12 +786,12 @@ static inline void ublk_abort_io_cmds(st
+               __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
+ }
+-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
++static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+ {
+       struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+       struct ublk_queue *ubq = pdu->ubq;
+-      ublk_forward_io_cmds(ubq);
++      ublk_forward_io_cmds(ubq, issue_flags);
+ }
+ static void ublk_rq_task_work_fn(struct callback_head *work)
+@@ -797,8 +800,9 @@ static void ublk_rq_task_work_fn(struct
+                       struct ublk_rq_data, work);
+       struct request *req = blk_mq_rq_from_pdu(data);
+       struct ublk_queue *ubq = req->mq_hctx->driver_data;
++      unsigned issue_flags = IO_URING_F_UNLOCKED;
+-      ublk_forward_io_cmds(ubq);
++      ublk_forward_io_cmds(ubq, issue_flags);
+ }
+ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
+@@ -1052,7 +1056,8 @@ static void ublk_cancel_queue(struct ubl
+               struct ublk_io *io = &ubq->ios[i];
+               if (io->flags & UBLK_IO_FLAG_ACTIVE)
+-                      io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
++                      io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
++                                              IO_URING_F_UNLOCKED);
+       }
+       /* all io commands are canceled */
+@@ -1295,7 +1300,7 @@ static int ublk_ch_uring_cmd(struct io_u
+       return -EIOCBQUEUED;
+  out:
+-      io_uring_cmd_done(cmd, ret, 0);
++      io_uring_cmd_done(cmd, ret, 0, issue_flags);
+       pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
+                       __func__, cmd_op, tag, ret, io->flags);
+       return -EIOCBQUEUED;
+@@ -2053,7 +2058,7 @@ static int ublk_ctrl_uring_cmd(struct io
+               break;
+       }
+  out:
+-      io_uring_cmd_done(cmd, ret, 0);
++      io_uring_cmd_done(cmd, ret, 0, issue_flags);
+       pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
+                       __func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
+       return -EIOCBQUEUED;
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -387,7 +387,8 @@ static inline struct nvme_uring_cmd_pdu
+       return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
+ }
+-static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
++static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
++                                  unsigned issue_flags)
+ {
+       struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+       struct request *req = pdu->req;
+@@ -408,17 +409,18 @@ static void nvme_uring_task_meta_cb(stru
+               blk_rq_unmap_user(req->bio);
+       blk_mq_free_request(req);
+-      io_uring_cmd_done(ioucmd, status, result);
++      io_uring_cmd_done(ioucmd, status, result, issue_flags);
+ }
+-static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
++static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
++                             unsigned issue_flags)
+ {
+       struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+       if (pdu->bio)
+               blk_rq_unmap_user(pdu->bio);
+-      io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result);
++      io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
+ }
+ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+@@ -440,7 +442,7 @@ static enum rq_end_io_ret nvme_uring_cmd
+        * Otherwise, move the completion to task work.
+        */
+       if (cookie != NULL && blk_rq_is_poll(req))
+-              nvme_uring_task_cb(ioucmd);
++              nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
+       else
+               io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+@@ -462,7 +464,7 @@ static enum rq_end_io_ret nvme_uring_cmd
+        * Otherwise, move the completion to task work.
+        */
+       if (cookie != NULL && blk_rq_is_poll(req))
+-              nvme_uring_task_meta_cb(ioucmd);
++              nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
+       else
+               io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);
+--- a/include/linux/io_uring.h
++++ b/include/linux/io_uring.h
+@@ -26,7 +26,7 @@ struct io_uring_cmd {
+       const void      *cmd;
+       union {
+               /* callback to defer completions to task context */
+-              void (*task_work_cb)(struct io_uring_cmd *cmd);
++              void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
+               /* used for polled completion */
+               void *cookie;
+       };
+@@ -38,9 +38,10 @@ struct io_uring_cmd {
+ #if defined(CONFIG_IO_URING)
+ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+                             struct iov_iter *iter, void *ioucmd);
+-void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2);
++void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
++                      unsigned issue_flags);
+ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+-                      void (*task_work_cb)(struct io_uring_cmd *));
++                      void (*task_work_cb)(struct io_uring_cmd *, unsigned));
+ struct sock *io_uring_get_socket(struct file *file);
+ void __io_uring_cancel(bool cancel_all);
+ void __io_uring_free(struct task_struct *tsk);
+@@ -71,11 +72,11 @@ static inline int io_uring_cmd_import_fi
+       return -EOPNOTSUPP;
+ }
+ static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
+-              ssize_t ret2)
++              ssize_t ret2, unsigned issue_flags)
+ {
+ }
+ static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+-                      void (*task_work_cb)(struct io_uring_cmd *))
++                      void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+ {
+ }
+ static inline struct sock *io_uring_get_socket(struct file *file)
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -15,12 +15,13 @@
+ static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
+ {
+       struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
++      unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
+-      ioucmd->task_work_cb(ioucmd);
++      ioucmd->task_work_cb(ioucmd, issue_flags);
+ }
+ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+-                      void (*task_work_cb)(struct io_uring_cmd *))
++                      void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+ {
+       struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+@@ -42,7 +43,8 @@ static inline void io_req_set_cqe32_extr
+  * Called by consumers of io_uring_cmd, if they originally returned
+  * -EIOCBQUEUED upon receiving the command.
+  */
+-void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
++void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
++                     unsigned issue_flags)
+ {
+       struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+@@ -56,7 +58,7 @@ void io_uring_cmd_done(struct io_uring_c
+               /* order with io_iopoll_req_issued() checking ->iopoll_complete */
+               smp_store_release(&req->iopoll_completed, 1);
+       else
+-              __io_req_complete(req, 0);
++              __io_req_complete(req, issue_flags);
+ }
+ EXPORT_SYMBOL_GPL(io_uring_cmd_done);
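
For illustration, a minimal sketch of a uring_cmd consumer following the
new calling convention. The my_drv_* names are hypothetical; only the
io_uring_cmd_done() and io_uring_cmd_complete_in_task() signatures come
from the patch above:

/* Hypothetical driver callback: io_uring core supplies issue_flags
 * (0 when the uring_lock is held, IO_URING_F_UNLOCKED when it is not),
 * and the callback must forward them to io_uring_cmd_done(). */
static void my_drv_task_cb(struct io_uring_cmd *ioucmd,
			   unsigned issue_flags)
{
	io_uring_cmd_done(ioucmd, 0, 0, issue_flags);
}

static int my_drv_uring_cmd(struct io_uring_cmd *ioucmd,
			    unsigned issue_flags)
{
	/* Defer completion to task context; the core passes issue_flags
	 * into the callback when it runs. */
	io_uring_cmd_complete_in_task(ioucmd, my_drv_task_cb);
	return -EIOCBQUEUED;
}

Completing directly from the issue path would instead forward the
issue_flags received by my_drv_uring_cmd() straight to
io_uring_cmd_done(), as the ublk and nvme hunks above do.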
diff --git a/queue-6.1/io_uring-fix-poll-netmsg-alloc-caches.patch b/queue-6.1/io_uring-fix-poll-netmsg-alloc-caches.patch
new file mode 100644 (file)
index 0000000..4e16227
--- /dev/null
@@ -0,0 +1,38 @@
+From fd30d1cdcc4ff405fc54765edf2e11b03f2ed4f3 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Thu, 30 Mar 2023 06:52:38 -0600
+Subject: io_uring: fix poll/netmsg alloc caches
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit fd30d1cdcc4ff405fc54765edf2e11b03f2ed4f3 upstream.
+
+We increase cache->nr_cached when we free into the cache but don't
+decrease it when we take from it, so over time we end up with an empty
+cache whose cache->nr_cached is larger than IO_ALLOC_CACHE_MAX, which
+makes io_alloc_cache_put() fail and effectively disables caching.
+
+Fixes: 9b797a37c4bd8 ("io_uring: add abstraction around apoll cache")
+Cc: stable@vger.kernel.org
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/alloc_cache.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
+index 729793ae9712..c2cde88aeed5 100644
+--- a/io_uring/alloc_cache.h
++++ b/io_uring/alloc_cache.h
+@@ -27,6 +27,7 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
+               struct hlist_node *node = cache->list.first;
+               hlist_del(node);
++              cache->nr_cached--;
+               return container_of(node, struct io_cache_entry, node);
+       }
+-- 
+2.40.0
+
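
For context, a condensed sketch of the accounting this one-liner
restores, simplified from io_uring/alloc_cache.h as patched (the
IO_ALLOC_CACHE_MAX value and struct layouts are abbreviated from 6.1):

#define IO_ALLOC_CACHE_MAX	512

struct io_cache_entry {
	struct hlist_node	node;
};

struct io_alloc_cache {
	struct hlist_head	list;
	unsigned int		nr_cached;
};

static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      struct io_cache_entry *entry)
{
	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
		cache->nr_cached++;	/* incremented on every put... */
		hlist_add_head(&entry->node, &cache->list);
		return true;
	}
	return false;
}

static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (!hlist_empty(&cache->list)) {
		struct hlist_node *node = cache->list.first;

		hlist_del(node);
		cache->nr_cached--;	/* ...so it must drop on every get */
		return container_of(node, struct io_cache_entry, node);
	}
	return NULL;
}

Without the decrement, nr_cached only ever grows, so eventually every
put is rejected by the IO_ALLOC_CACHE_MAX check even though the list is
empty.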
diff --git a/queue-6.1/io_uring-poll-clear-single-double-poll-flags-on-poll-arming.patch b/queue-6.1/io_uring-poll-clear-single-double-poll-flags-on-poll-arming.patch
new file mode 100644 (file)
index 0000000..7a55deb
--- /dev/null
@@ -0,0 +1,38 @@
+From 005308f7bdacf5685ed1a431244a183dbbb9e0e8 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 27 Mar 2023 19:56:18 -0600
+Subject: io_uring/poll: clear single/double poll flags on poll arming
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 005308f7bdacf5685ed1a431244a183dbbb9e0e8 upstream.
+
+Don't call into io_poll_remove_entries() unless we have at least one
+entry queued. Normally this isn't possible, but if we retry poll then
+->nr_entries can be cleared again while we're setting it up. If that
+happens on a poll retry, we'll still have at least REQ_F_SINGLE_POLL
+set, and io_poll_remove_entries() then thinks it has entries to
+remove.
+
+Clear REQ_F_SINGLE_POLL and REQ_F_DOUBLE_POLL unconditionally when
+arming a poll request.
+
+Fixes: c16bda37594f ("io_uring/poll: allow some retries for poll triggering spuriously")
+Cc: stable@vger.kernel.org
+Reported-by: Pengfei Xu <pengfei.xu@intel.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/poll.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -742,6 +742,7 @@ int io_arm_poll_handler(struct io_kiocb
+       apoll = io_req_alloc_apoll(req, issue_flags);
+       if (!apoll)
+               return IO_APOLL_ABORTED;
++      req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
+       req->flags |= REQ_F_POLLED;
+       ipt.pt._qproc = io_async_queue_proc;
diff --git a/queue-6.1/io_uring-rsrc-fix-rogue-rsrc-node-grabbing.patch b/queue-6.1/io_uring-rsrc-fix-rogue-rsrc-node-grabbing.patch
new file mode 100644 (file)
index 0000000..6f40d74
--- /dev/null
@@ -0,0 +1,46 @@
+From 4ff0b50de8cabba055efe50bbcb7506c41a69835 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Wed, 29 Mar 2023 15:03:43 +0100
+Subject: io_uring/rsrc: fix rogue rsrc node grabbing
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit 4ff0b50de8cabba055efe50bbcb7506c41a69835 upstream.
+
+We should not be looking at ctx->rsrc_node, or modifying the node in
+any way, without holding uring_lock; grabbing references that way is
+not safe either.
+
+Cc: stable@vger.kernel.org
+Fixes: 5106dd6e74ab6 ("io_uring: propagate issue_flags state down to file assignment")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/1202ede2d7bb90136e3482b2b84aad9ed483e5d6.1680098433.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/rsrc.h |   12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/io_uring/rsrc.h
++++ b/io_uring/rsrc.h
+@@ -143,15 +143,13 @@ static inline void io_req_set_rsrc_node(
+                                       unsigned int issue_flags)
+ {
+       if (!req->rsrc_node) {
+-              req->rsrc_node = ctx->rsrc_node;
++              io_ring_submit_lock(ctx, issue_flags);
+-              if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+-                      lockdep_assert_held(&ctx->uring_lock);
++              lockdep_assert_held(&ctx->uring_lock);
+-                      io_charge_rsrc_node(ctx);
+-              } else {
+-                      percpu_ref_get(&req->rsrc_node->refs);
+-              }
++              req->rsrc_node = ctx->rsrc_node;
++              io_charge_rsrc_node(ctx);
++              io_ring_submit_unlock(ctx, issue_flags);
+       }
+ }
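
The rewritten helper leans on io_ring_submit_lock()/io_ring_submit_unlock(),
which take the uring_lock only when the caller ran without it. A condensed
sketch of those helpers, approximated from io_uring's internal header in
6.1 (not part of this patch):

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/* Only a caller running unlocked needs to take the mutex. */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

With these, io_req_set_rsrc_node() always assigns and charges the rsrc
node under uring_lock, rather than taking an unserialized percpu
reference in the unlocked case.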
diff --git a/queue-6.1/series b/queue-6.1/series
index 69e05a21a654edbd3b435132f07b75d9a775dba5..d91ff0559fa8894b78b63390de945a31158e5ded 100644 (file)
--- a/queue-6.1/series
@@ -130,3 +130,8 @@ input-goodix-add-lenovo-yoga-book-x90f-to-nine_bytes_report-dmi-table.patch
 btrfs-fix-deadlock-when-aborting-transaction-during-relocation-with-scrub.patch
 btrfs-fix-race-between-quota-disable-and-quota-assign-ioctls.patch
 btrfs-scan-device-in-non-exclusive-mode.patch
+zonefs-do-not-propagate-iomap_dio_rw-enotblk-error-to-user-space.patch
+block-io_uring-pass-in-issue_flags-for-uring_cmd-task_work-handling.patch
+io_uring-poll-clear-single-double-poll-flags-on-poll-arming.patch
+io_uring-rsrc-fix-rogue-rsrc-node-grabbing.patch
+io_uring-fix-poll-netmsg-alloc-caches.patch
diff --git a/queue-6.1/zonefs-do-not-propagate-iomap_dio_rw-enotblk-error-to-user-space.patch b/queue-6.1/zonefs-do-not-propagate-iomap_dio_rw-enotblk-error-to-user-space.patch
new file mode 100644 (file)
index 0000000..3b19b93
--- /dev/null
@@ -0,0 +1,56 @@
+From 77af13ba3c7f91d91c377c7e2d122849bbc17128 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Date: Thu, 30 Mar 2023 09:47:58 +0900
+Subject: zonefs: Do not propagate iomap_dio_rw() ENOTBLK error to user space
+
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+
+commit 77af13ba3c7f91d91c377c7e2d122849bbc17128 upstream.
+
+The call to invalidate_inode_pages2_range() in __iomap_dio_rw() may
+fail, in which case -ENOTBLK is returned and this error code is
+propagated back to user space through the iomap_dio_rw() ->
+zonefs_file_dio_write() return chain. This error code is fairly obscure
+and may confuse the user. Avoid this and be consistent with the behavior
+of zonefs_file_dio_append() for similar invalidate_inode_pages2_range()
+errors by returning -EBUSY to user space when iomap_dio_rw() returns
+-ENOTBLK.
+
+Suggested-by: Christoph Hellwig <hch@infradead.org>
+Fixes: 8dcc1a9d90c1 ("fs: New zonefs file system")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Tested-by: Hans Holmberg <hans.holmberg@wdc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/zonefs/file.c |   14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/fs/zonefs/file.c
++++ b/fs/zonefs/file.c
+@@ -567,11 +567,21 @@ static ssize_t zonefs_file_dio_write(str
+               append = sync;
+       }
+-      if (append)
++      if (append) {
+               ret = zonefs_file_dio_append(iocb, from);
+-      else
++      } else {
++              /*
++               * iomap_dio_rw() may return ENOTBLK if there was an issue with
++               * page invalidation. Overwrite that error code with EBUSY to
++               * be consistent with zonefs_file_dio_append() return value for
++               * similar issues.
++               */
+               ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
+                                  &zonefs_write_dio_ops, 0, NULL, 0);
++              if (ret == -ENOTBLK)
++                      ret = -EBUSY;
++      }
++
+       if (zonefs_zone_is_seq(z) &&
+           (ret > 0 || ret == -EIOCBQUEUED)) {
+               if (ret > 0)