5.15-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 23 Jun 2022 16:01:09 +0000 (18:01 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 23 Jun 2022 16:01:09 +0000 (18:01 +0200)
added patches:
drm-amd-display-don-t-reinitialize-dmcub-on-s0ix-resume.patch
net-mana-add-handling-of-cqe_rx_truncated.patch
s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch
usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch
zonefs-fix-zonefs_iomap_begin-for-reads.patch

queue-5.15/drm-amd-display-don-t-reinitialize-dmcub-on-s0ix-resume.patch [new file with mode: 0644]
queue-5.15/net-mana-add-handling-of-cqe_rx_truncated.patch [new file with mode: 0644]
queue-5.15/s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch [new file with mode: 0644]
queue-5.15/series
queue-5.15/usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch [new file with mode: 0644]
queue-5.15/zonefs-fix-zonefs_iomap_begin-for-reads.patch [new file with mode: 0644]

diff --git a/queue-5.15/drm-amd-display-don-t-reinitialize-dmcub-on-s0ix-resume.patch b/queue-5.15/drm-amd-display-don-t-reinitialize-dmcub-on-s0ix-resume.patch
new file mode 100644 (file)
index 0000000..6fbe023
--- /dev/null
@@ -0,0 +1,86 @@
+From 79d6b9351f086e0f914a26915d96ab52286ec46c Mon Sep 17 00:00:00 2001
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Date: Wed, 15 Dec 2021 18:18:41 -0500
+Subject: drm/amd/display: Don't reinitialize DMCUB on s0ix resume
+
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+
+commit 79d6b9351f086e0f914a26915d96ab52286ec46c upstream.
+
+[Why]
+PSP will suspend and resume DMCUB. The driver should just wait for DMCUB
+to finish the auto-load before continuing, instead of placing it into
+reset, wiping its firmware state and reinitializing.
+
+If we don't let DMCUB fully finish initializing for S0ix then some state
+will be lost and screen corruption can occur due to incorrect address
+translation.
+
+[How]
+Use dmub_srv callbacks to determine if DMCUB is running and wait for
+auto-load to complete before continuing.
+
+In S0ix DMCUB will be running and will be DAL fw, so initialization is skipped.
+
+In S3 DMCUB will not be running and we will do a full hardware init.
+
+In S3 DMCUB will be running but will not be DAL fw, so we will also do
+a full hardware init.
+
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Reviewed-by: Mikita Lipski <Mikita.Lipski@amd.com>
+Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: "Limonciello, Mario" <Mario.Limonciello@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |   30 +++++++++++++++++++---
+ 1 file changed, 27 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -989,6 +989,32 @@ static int dm_dmub_hw_init(struct amdgpu
+       return 0;
+ }
++static void dm_dmub_hw_resume(struct amdgpu_device *adev)
++{
++      struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
++      enum dmub_status status;
++      bool init;
++
++      if (!dmub_srv) {
++              /* DMUB isn't supported on the ASIC. */
++              return;
++      }
++
++      status = dmub_srv_is_hw_init(dmub_srv, &init);
++      if (status != DMUB_STATUS_OK)
++              DRM_WARN("DMUB hardware init check failed: %d\n", status);
++
++      if (status == DMUB_STATUS_OK && init) {
++              /* Wait for firmware load to finish. */
++              status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
++              if (status != DMUB_STATUS_OK)
++                      DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
++      } else {
++              /* Perform the full hardware initialization. */
++              dm_dmub_hw_init(adev);
++      }
++}
++
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
+ {
+@@ -2268,9 +2294,7 @@ static int dm_resume(void *handle)
+               amdgpu_dm_outbox_init(adev);
+       /* Before powering on DC we need to re-initialize DMUB. */
+-      r = dm_dmub_hw_init(adev);
+-      if (r)
+-              DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
++      dm_dmub_hw_resume(adev);
+       /* power on hardware */
+       dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
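
Condensed, the resume logic added above reduces to a two-way branch. This is a sketch distilled from the dm_dmub_hw_resume() hunk, with the status checks and warning messages folded away; the timeout value is the one used by the patch:

	bool init = false;

	/* S0ix: DMCUB kept running across suspend, so only wait for its
	 * firmware auto-load to finish instead of resetting it. */
	if (dmub_srv_is_hw_init(dmub_srv, &init) == DMUB_STATUS_OK && init)
		dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	else
		/* S3, or DMCUB not running DAL fw: state was lost, so
		 * redo the full hardware init. */
		dm_dmub_hw_init(adev);
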
diff --git a/queue-5.15/net-mana-add-handling-of-cqe_rx_truncated.patch b/queue-5.15/net-mana-add-handling-of-cqe_rx_truncated.patch
new file mode 100644 (file)
index 0000000..c72ead2
--- /dev/null
@@ -0,0 +1,43 @@
+From e4b7621982d29f26ff4d39af389e5e675a4ffed4 Mon Sep 17 00:00:00 2001
+From: Haiyang Zhang <haiyangz@microsoft.com>
+Date: Fri, 4 Feb 2022 14:45:44 -0800
+Subject: net: mana: Add handling of CQE_RX_TRUNCATED
+
+From: Haiyang Zhang <haiyangz@microsoft.com>
+
+commit e4b7621982d29f26ff4d39af389e5e675a4ffed4 upstream.
+
+The proper way to drop this kind of CQE is to advance the rxq tail
+without indicating the packet to the upper network layer.
+
+Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
+Reviewed-by: Dexuan Cui <decui@microsoft.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/microsoft/mana/mana_en.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -980,8 +980,10 @@ static void mana_process_rx_cqe(struct m
+               break;
+       case CQE_RX_TRUNCATED:
+-              netdev_err(ndev, "Dropped a truncated packet\n");
+-              return;
++              ++ndev->stats.rx_dropped;
++              rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
++              netdev_warn_once(ndev, "Dropped a truncated packet\n");
++              goto drop;
+       case CQE_RX_COALESCED_4:
+               netdev_err(ndev, "RX coalescing is unsupported\n");
+@@ -1043,6 +1045,7 @@ static void mana_process_rx_cqe(struct m
+       mana_rx_skb(old_buf, oob, rxq);
++drop:
+       mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
+       mana_post_pkt_rxq(rxq);
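
For orientation, the drop path in the hunk above, condensed and commented (identifiers exactly as in the patch):

	case CQE_RX_TRUNCATED:
		++ndev->stats.rx_dropped;                  /* account, don't spam the log */
		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index]; /* WQE behind this CQE */
		netdev_warn_once(ndev, "Dropped a truncated packet\n");
		goto drop;                                 /* skip mana_rx_skb() */
	...
	drop:
		/* Consume the WQE and hand the buffer back to the hardware,
		 * exactly as the normal delivery path does. */
		mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
		mana_post_pkt_rxq(rxq);

The old code returned early, which dropped the packet but never advanced the receive queue tail or reposted the buffer; routing the truncated case through the shared tail-advance path fixes that.
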
diff --git a/queue-5.15/s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch b/queue-5.15/s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch
new file mode 100644 (file)
index 0000000..3abe0ea
--- /dev/null
@@ -0,0 +1,35 @@
+From 3ae11dbcfac906a8c3a480e98660a823130dc16a Mon Sep 17 00:00:00 2001
+From: Christian Borntraeger <borntraeger@linux.ibm.com>
+Date: Mon, 30 May 2022 11:27:06 +0200
+Subject: s390/mm: use non-quiescing sske for KVM switch to keyed guest
+
+From: Christian Borntraeger <borntraeger@linux.ibm.com>
+
+commit 3ae11dbcfac906a8c3a480e98660a823130dc16a upstream.
+
+The switch to a keyed guest does not require a classic sske as the other
+guest CPUs are not accessing the key before the switch is complete.
+By using the NQ SSKE, things are faster, especially with multiple guests.
+
+Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
+Suggested-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
+Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Link: https://lore.kernel.org/r/20220530092706.11637-3-borntraeger@linux.ibm.com
+Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/mm/pgtable.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -748,7 +748,7 @@ void ptep_zap_key(struct mm_struct *mm,
+       pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
+       ptev = pte_val(*ptep);
+       if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
+-              page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
++              page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
+       pgste_set_unlock(ptep, pgste);
+       preempt_enable();
+ }
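
Judging from the patch subject and this one-character change, the third argument of page_set_storage_key() selects the quiescing behaviour of the underlying SSKE instruction; the reading below is an assumption drawn from the commit message, not from the helper's documentation:

	/* Assumed semantics of the flipped argument:
	 *   1 -> classic (quiescing) SSKE, serialized against all CPUs;
	 *   0 -> NQ SSKE, skipping the quiescing broadcast, which is safe
	 *        here because no other guest CPU touches the key before
	 *        the switch to the keyed guest completes. */
	page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
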
diff --git a/queue-5.15/series b/queue-5.15/series
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..322fcbda834e955dfda7423964dfbb34a5e08813 100644 (file)
--- a/queue-5.15/series
@@ -0,0 +1,5 @@
+s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch
+drm-amd-display-don-t-reinitialize-dmcub-on-s0ix-resume.patch
+net-mana-add-handling-of-cqe_rx_truncated.patch
+zonefs-fix-zonefs_iomap_begin-for-reads.patch
+usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch
diff --git a/queue-5.15/usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch b/queue-5.15/usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch
new file mode 100644 (file)
index 0000000..14bb19c
--- /dev/null
@@ -0,0 +1,79 @@
+From b337af3a4d6147000b7ca6b3438bf5c820849b37 Mon Sep 17 00:00:00 2001
+From: Marian Postevca <posteuca@mutex.one>
+Date: Fri, 3 Jun 2022 18:34:59 +0300
+Subject: usb: gadget: u_ether: fix regression in setting fixed MAC address
+
+From: Marian Postevca <posteuca@mutex.one>
+
+commit b337af3a4d6147000b7ca6b3438bf5c820849b37 upstream.
+
+On systemd-based systems, setting a fixed MAC address through
+the "dev_addr" module argument fails systematically.
+When checking the MAC address after the interface is created,
+it always has the same MAC address, but one that differs from
+the address supplied as the argument.
+
+This is partially caused by systemd, which by default will
+set an internally generated permanent MAC address for interfaces
+that are marked as having a randomly generated address.
+
+Commit 890d5b40908bfd1a ("usb: gadget: u_ether: fix race in
+setting MAC address in setup phase") didn't take into account
+the fact that the interface must be marked as having a set
+MAC address when it is supplied via the module argument.
+
+Fix this by marking the interface with NET_ADDR_SET when
+the "dev_addr" module argument is supplied.
+
+Fixes: 890d5b40908bfd1a ("usb: gadget: u_ether: fix race in setting MAC address in setup phase")
+Cc: stable@vger.kernel.org
+Signed-off-by: Marian Postevca <posteuca@mutex.one>
+Link: https://lore.kernel.org/r/20220603153459.32722-1-posteuca@mutex.one
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/function/u_ether.c |   11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -774,9 +774,13 @@ struct eth_dev *gether_setup_name(struct
+       dev->qmult = qmult;
+       snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+-      if (get_ether_addr(dev_addr, net->dev_addr))
++      if (get_ether_addr(dev_addr, net->dev_addr)) {
++              net->addr_assign_type = NET_ADDR_RANDOM;
+               dev_warn(&g->dev,
+                       "using random %s ethernet address\n", "self");
++      } else {
++              net->addr_assign_type = NET_ADDR_SET;
++      }
+       if (get_ether_addr(host_addr, dev->host_mac))
+               dev_warn(&g->dev,
+                       "using random %s ethernet address\n", "host");
+@@ -833,6 +837,9 @@ struct net_device *gether_setup_name_def
+       INIT_LIST_HEAD(&dev->tx_reqs);
+       INIT_LIST_HEAD(&dev->rx_reqs);
++      /* by default we always have a random MAC address */
++      net->addr_assign_type = NET_ADDR_RANDOM;
++
+       skb_queue_head_init(&dev->rx_frames);
+       /* network device setup */
+@@ -869,7 +876,6 @@ int gether_register_netdev(struct net_de
+       dev = netdev_priv(net);
+       g = dev->gadget;
+-      net->addr_assign_type = NET_ADDR_RANDOM;
+       eth_hw_addr_set(net, dev->dev_mac);
+       status = register_netdev(net);
+@@ -910,6 +916,7 @@ int gether_set_dev_addr(struct net_devic
+       if (get_ether_addr(dev_addr, new_addr))
+               return -EINVAL;
+       memcpy(dev->dev_mac, new_addr, ETH_ALEN);
++      net->addr_assign_type = NET_ADDR_SET;
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(gether_set_dev_addr);
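
Why the marking matters: udev/systemd reads an interface's addr_assign_type (exposed as /sys/class/net/<iface>/addr_assign_type) and, under its default MACAddressPolicy, generates a replacement for addresses reported as randomly assigned while leaving administratively set ones alone. The policy the patch establishes, condensed from the hunks above:

	if (get_ether_addr(dev_addr, net->dev_addr)) {
		/* No usable "dev_addr" argument: keep the random MAC and
		 * say so, allowing userspace to replace it. */
		net->addr_assign_type = NET_ADDR_RANDOM;
	} else {
		/* A fixed MAC was supplied: mark it as set so that
		 * systemd/udev leaves it untouched. */
		net->addr_assign_type = NET_ADDR_SET;
	}
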
diff --git a/queue-5.15/zonefs-fix-zonefs_iomap_begin-for-reads.patch b/queue-5.15/zonefs-fix-zonefs_iomap_begin-for-reads.patch
new file mode 100644 (file)
index 0000000..88f0d4d
--- /dev/null
@@ -0,0 +1,261 @@
+From c1c1204c0d0c1dccc1310b9277fb2bd8b663d8fe Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Date: Mon, 23 May 2022 16:29:10 +0900
+Subject: zonefs: fix zonefs_iomap_begin() for reads
+
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+
+commit c1c1204c0d0c1dccc1310b9277fb2bd8b663d8fe upstream.
+
+If a readahead is issued to a sequential zone file with an offset
+exactly equal to the current file size, the iomap type is set to
+IOMAP_UNWRITTEN, which will prevent an IO, but the iomap length is
+calculated as 0. This causes a WARN_ON() in iomap_iter():
+
+[17309.548939] WARNING: CPU: 3 PID: 2137 at fs/iomap/iter.c:34 iomap_iter+0x9cf/0xe80
+[...]
+[17309.650907] RIP: 0010:iomap_iter+0x9cf/0xe80
+[...]
+[17309.754560] Call Trace:
+[17309.757078]  <TASK>
+[17309.759240]  ? lock_is_held_type+0xd8/0x130
+[17309.763531]  iomap_readahead+0x1a8/0x870
+[17309.767550]  ? iomap_read_folio+0x4c0/0x4c0
+[17309.771817]  ? lockdep_hardirqs_on_prepare+0x400/0x400
+[17309.778848]  ? lock_release+0x370/0x750
+[17309.784462]  ? folio_add_lru+0x217/0x3f0
+[17309.790220]  ? reacquire_held_locks+0x4e0/0x4e0
+[17309.796543]  read_pages+0x17d/0xb60
+[17309.801854]  ? folio_add_lru+0x238/0x3f0
+[17309.807573]  ? readahead_expand+0x5f0/0x5f0
+[17309.813554]  ? policy_node+0xb5/0x140
+[17309.819018]  page_cache_ra_unbounded+0x27d/0x450
+[17309.825439]  filemap_get_pages+0x500/0x1450
+[17309.831444]  ? filemap_add_folio+0x140/0x140
+[17309.837519]  ? lock_is_held_type+0xd8/0x130
+[17309.843509]  filemap_read+0x28c/0x9f0
+[17309.848953]  ? zonefs_file_read_iter+0x1ea/0x4d0 [zonefs]
+[17309.856162]  ? trace_contention_end+0xd6/0x130
+[17309.862416]  ? __mutex_lock+0x221/0x1480
+[17309.868151]  ? zonefs_file_read_iter+0x166/0x4d0 [zonefs]
+[17309.875364]  ? filemap_get_pages+0x1450/0x1450
+[17309.881647]  ? __mutex_unlock_slowpath+0x15e/0x620
+[17309.888248]  ? wait_for_completion_io_timeout+0x20/0x20
+[17309.895231]  ? lock_is_held_type+0xd8/0x130
+[17309.901115]  ? lock_is_held_type+0xd8/0x130
+[17309.906934]  zonefs_file_read_iter+0x356/0x4d0 [zonefs]
+[17309.913750]  new_sync_read+0x2d8/0x520
+[17309.919035]  ? __x64_sys_lseek+0x1d0/0x1d0
+
+Furthermore, this causes iomap_readahead() to loop forever as
+iomap_readahead_iter() always returns 0, making no progress.
+
+Fix this by treating reads after the file size as access to holes,
+setting the iomap type to IOMAP_HOLE, the iomap addr to IOMAP_NULL_ADDR
+and using the length argument as is for the iomap length. To simplify
+the code with this change, zonefs_iomap_begin() is split into the read
+variant, zonefs_read_iomap_begin() and zonefs_read_iomap_ops, and the
+write variant, zonefs_write_iomap_begin() and zonefs_write_iomap_ops.
+
+Reported-by: Jorgen Hansen <Jorgen.Hansen@wdc.com>
+Fixes: 8dcc1a9d90c1 ("fs: New zonefs file system")
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Jorgen Hansen <Jorgen.Hansen@wdc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/zonefs/super.c |   94 ++++++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 64 insertions(+), 30 deletions(-)
+
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -72,15 +72,51 @@ static inline void zonefs_i_size_write(s
+               zi->i_flags &= ~ZONEFS_ZONE_OPEN;
+ }
+-static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+-                            unsigned int flags, struct iomap *iomap,
+-                            struct iomap *srcmap)
++static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
++                                 loff_t length, unsigned int flags,
++                                 struct iomap *iomap, struct iomap *srcmap)
+ {
+       struct zonefs_inode_info *zi = ZONEFS_I(inode);
+       struct super_block *sb = inode->i_sb;
+       loff_t isize;
+-      /* All I/Os should always be within the file maximum size */
++      /*
++       * All blocks are always mapped below EOF. If reading past EOF,
++       * act as if there is a hole up to the file maximum size.
++       */
++      mutex_lock(&zi->i_truncate_mutex);
++      iomap->bdev = inode->i_sb->s_bdev;
++      iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
++      isize = i_size_read(inode);
++      if (iomap->offset >= isize) {
++              iomap->type = IOMAP_HOLE;
++              iomap->addr = IOMAP_NULL_ADDR;
++              iomap->length = length;
++      } else {
++              iomap->type = IOMAP_MAPPED;
++              iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
++              iomap->length = isize - iomap->offset;
++      }
++      mutex_unlock(&zi->i_truncate_mutex);
++
++      trace_zonefs_iomap_begin(inode, iomap);
++
++      return 0;
++}
++
++static const struct iomap_ops zonefs_read_iomap_ops = {
++      .iomap_begin    = zonefs_read_iomap_begin,
++};
++
++static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
++                                  loff_t length, unsigned int flags,
++                                  struct iomap *iomap, struct iomap *srcmap)
++{
++      struct zonefs_inode_info *zi = ZONEFS_I(inode);
++      struct super_block *sb = inode->i_sb;
++      loff_t isize;
++
++      /* All write I/Os should always be within the file maximum size */
+       if (WARN_ON_ONCE(offset + length > zi->i_max_size))
+               return -EIO;
+@@ -90,7 +126,7 @@ static int zonefs_iomap_begin(struct ino
+        * operation.
+        */
+       if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
+-                       (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)))
++                       !(flags & IOMAP_DIRECT)))
+               return -EIO;
+       /*
+@@ -99,47 +135,44 @@ static int zonefs_iomap_begin(struct ino
+        * write pointer) and unwriten beyond.
+        */
+       mutex_lock(&zi->i_truncate_mutex);
++      iomap->bdev = inode->i_sb->s_bdev;
++      iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
++      iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+       isize = i_size_read(inode);
+-      if (offset >= isize)
++      if (iomap->offset >= isize) {
+               iomap->type = IOMAP_UNWRITTEN;
+-      else
++              iomap->length = zi->i_max_size - iomap->offset;
++      } else {
+               iomap->type = IOMAP_MAPPED;
+-      if (flags & IOMAP_WRITE)
+-              length = zi->i_max_size - offset;
+-      else
+-              length = min(length, isize - offset);
++              iomap->length = isize - iomap->offset;
++      }
+       mutex_unlock(&zi->i_truncate_mutex);
+-      iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+-      iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset;
+-      iomap->bdev = inode->i_sb->s_bdev;
+-      iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+-
+       trace_zonefs_iomap_begin(inode, iomap);
+       return 0;
+ }
+-static const struct iomap_ops zonefs_iomap_ops = {
+-      .iomap_begin    = zonefs_iomap_begin,
++static const struct iomap_ops zonefs_write_iomap_ops = {
++      .iomap_begin    = zonefs_write_iomap_begin,
+ };
+ static int zonefs_readpage(struct file *unused, struct page *page)
+ {
+-      return iomap_readpage(page, &zonefs_iomap_ops);
++      return iomap_readpage(page, &zonefs_read_iomap_ops);
+ }
+ static void zonefs_readahead(struct readahead_control *rac)
+ {
+-      iomap_readahead(rac, &zonefs_iomap_ops);
++      iomap_readahead(rac, &zonefs_read_iomap_ops);
+ }
+ /*
+  * Map blocks for page writeback. This is used only on conventional zone files,
+  * which implies that the page range can only be within the fixed inode size.
+  */
+-static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
+-                           struct inode *inode, loff_t offset)
++static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
++                                 struct inode *inode, loff_t offset)
+ {
+       struct zonefs_inode_info *zi = ZONEFS_I(inode);
+@@ -153,12 +186,12 @@ static int zonefs_map_blocks(struct ioma
+           offset < wpc->iomap.offset + wpc->iomap.length)
+               return 0;
+-      return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset,
+-                                IOMAP_WRITE, &wpc->iomap, NULL);
++      return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset,
++                                      IOMAP_WRITE, &wpc->iomap, NULL);
+ }
+ static const struct iomap_writeback_ops zonefs_writeback_ops = {
+-      .map_blocks             = zonefs_map_blocks,
++      .map_blocks             = zonefs_write_map_blocks,
+ };
+ static int zonefs_writepage(struct page *page, struct writeback_control *wbc)
+@@ -188,7 +221,8 @@ static int zonefs_swap_activate(struct s
+               return -EINVAL;
+       }
+-      return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);
++      return iomap_swapfile_activate(sis, swap_file, span,
++                                     &zonefs_read_iomap_ops);
+ }
+ static const struct address_space_operations zonefs_file_aops = {
+@@ -607,7 +641,7 @@ static vm_fault_t zonefs_filemap_page_mk
+       /* Serialize against truncates */
+       filemap_invalidate_lock_shared(inode->i_mapping);
+-      ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
++      ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
+       filemap_invalidate_unlock_shared(inode->i_mapping);
+       sb_end_pagefault(inode->i_sb);
+@@ -862,7 +896,7 @@ static ssize_t zonefs_file_dio_write(str
+       if (append)
+               ret = zonefs_file_dio_append(iocb, from);
+       else
+-              ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
++              ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
+                                  &zonefs_write_dio_ops, 0, 0);
+       if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
+           (ret > 0 || ret == -EIOCBQUEUED)) {
+@@ -904,7 +938,7 @@ static ssize_t zonefs_file_buffered_writ
+       if (ret <= 0)
+               goto inode_unlock;
+-      ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
++      ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
+       if (ret > 0)
+               iocb->ki_pos += ret;
+       else if (ret == -EIO)
+@@ -997,7 +1031,7 @@ static ssize_t zonefs_file_read_iter(str
+                       goto inode_unlock;
+               }
+               file_accessed(iocb->ki_filp);
+-              ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops,
++              ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
+                                  &zonefs_read_dio_ops, 0, 0);
+       } else {
+               ret = generic_file_read_iter(iocb, to);
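
The crux of the read-side fix, distilled from zonefs_read_iomap_begin() above: a read at or past EOF must still produce a non-empty mapping, so it is now reported as a hole covering the requested length instead of a zero-length unwritten extent:

	isize = i_size_read(inode);
	if (iomap->offset >= isize) {
		/* Past EOF: report a hole of the requested length. The old
		 * zero-length mapping is what triggered the WARN_ON() in
		 * iomap_iter() and made iomap_readahead() loop forever. */
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->length = length;
	} else {
		/* Within EOF: file blocks map contiguously into the zone. */
		iomap->type = IOMAP_MAPPED;
		iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
		iomap->length = isize - iomap->offset;
	}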