]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.19-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 2 Apr 2026 12:13:11 +0000 (14:13 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 2 Apr 2026 12:13:11 +0000 (14:13 +0200)
added patches:
drm-amd-pm-disable-od_fan_curve-if-temp-or-pwm-range-invalid-for-smu-v13.patch
net-correctly-handle-tunneled-traffic-on-ipv6_csum-gso-fallback.patch
net-mana-fix-use-after-free-in-add_adev-error-path.patch
scsi-target-file-use-kzalloc_flex-for-aio_cmd.patch
scsi-target-tcm_loop-drain-commands-in-target_reset-handler.patch
series
xfs-close-crash-window-in-attr-dabtree-inactivation.patch
xfs-factor-out-xfs_attr3_leaf_init.patch
xfs-factor-out-xfs_attr3_node_entry_remove.patch

queue-6.19/drm-amd-pm-disable-od_fan_curve-if-temp-or-pwm-range-invalid-for-smu-v13.patch [new file with mode: 0644]
queue-6.19/net-correctly-handle-tunneled-traffic-on-ipv6_csum-gso-fallback.patch [new file with mode: 0644]
queue-6.19/net-mana-fix-use-after-free-in-add_adev-error-path.patch [new file with mode: 0644]
queue-6.19/scsi-target-file-use-kzalloc_flex-for-aio_cmd.patch [new file with mode: 0644]
queue-6.19/scsi-target-tcm_loop-drain-commands-in-target_reset-handler.patch [new file with mode: 0644]
queue-6.19/series [new file with mode: 0644]
queue-6.19/xfs-close-crash-window-in-attr-dabtree-inactivation.patch [new file with mode: 0644]
queue-6.19/xfs-factor-out-xfs_attr3_leaf_init.patch [new file with mode: 0644]
queue-6.19/xfs-factor-out-xfs_attr3_node_entry_remove.patch [new file with mode: 0644]

diff --git a/queue-6.19/drm-amd-pm-disable-od_fan_curve-if-temp-or-pwm-range-invalid-for-smu-v13.patch b/queue-6.19/drm-amd-pm-disable-od_fan_curve-if-temp-or-pwm-range-invalid-for-smu-v13.patch
new file mode 100644 (file)
index 0000000..83f9d26
--- /dev/null
@@ -0,0 +1,148 @@
+From stable+bounces-232812-greg=kroah.com@vger.kernel.org Wed Apr  1 18:37:48 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed,  1 Apr 2026 12:16:37 -0400
+Subject: drm/amd/pm: disable OD_FAN_CURVE if temp or pwm range invalid for smu v13
+To: stable@vger.kernel.org
+Cc: Yang Wang <kevinyang.wang@amd.com>, Alex Deucher <alexander.deucher@amd.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260401161637.114960-1-sashal@kernel.org>
+
+From: Yang Wang <kevinyang.wang@amd.com>
+
+[ Upstream commit 3e6dd28a11083e83e11a284d99fcc9eb748c321c ]
+
+Forcibly disable the OD_FAN_CURVE feature when temperature or PWM range is invalid,
+otherwise PMFW will reject this configuration on smu v13.0.x
+
+example:
+$ sudo cat /sys/bus/pci/devices/<BDF>/gpu_od/fan_ctrl/fan_curve
+
+OD_FAN_CURVE:
+0: 0C 0%
+1: 0C 0%
+2: 0C 0%
+3: 0C 0%
+4: 0C 0%
+OD_RANGE:
+FAN_CURVE(hotspot temp): 0C 0C
+FAN_CURVE(fan speed): 0% 0%
+
+$ echo "0 50 40" | sudo tee fan_curve
+
+kernel log:
+[  756.442527] amdgpu 0000:03:00.0: amdgpu: Fan curve temp setting(50) must be within [0, 0]!
+[  777.345800] amdgpu 0000:03:00.0: amdgpu: Fan curve temp setting(50) must be within [0, 0]!
+
+Closes: https://github.com/ROCm/amdgpu/issues/208
+Signed-off-by: Yang Wang <kevinyang.wang@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 470891606c5a97b1d0d937e0aa67a3bed9fcb056)
+Cc: stable@vger.kernel.org
+[ adapted forward declaration placement to existing FEATURE_MASK macro ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c |   33 ++++++++++++++++++-
+ drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c |   33 ++++++++++++++++++-
+ 2 files changed, 64 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -59,6 +59,10 @@
+ #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
++static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu,
++                                            int od_feature_bit,
++                                            int32_t *min, int32_t *max);
++
+ #define FEATURE_MASK(feature) (1ULL << feature)
+ #define SMC_DPM_FEATURE ( \
+       FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)     | \
+@@ -1061,8 +1065,35 @@ static bool smu_v13_0_0_is_od_feature_su
+       PPTable_t *pptable = smu->smu_table.driver_pptable;
+       const OverDriveLimits_t * const overdrive_upperlimits =
+                               &pptable->SkuTable.OverDriveLimitsBasicMax;
++      int32_t min_value, max_value;
++      bool feature_enabled;
++
++      switch (od_feature_bit) {
++      case PP_OD_FEATURE_FAN_CURVE_BIT:
++              feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
++              if (feature_enabled) {
++                      smu_v13_0_0_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_TEMP,
++                                                        &min_value, &max_value);
++                      if (!min_value && !max_value) {
++                              feature_enabled = false;
++                              goto out;
++                      }
++
++                      smu_v13_0_0_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_PWM,
++                                                        &min_value, &max_value);
++                      if (!min_value && !max_value) {
++                              feature_enabled = false;
++                              goto out;
++                      }
++              }
++              break;
++      default:
++              feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
++              break;
++      }
+-      return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
++out:
++      return feature_enabled;
+ }
+ static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu,
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -59,6 +59,10 @@
+ #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
++static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu,
++                                            int od_feature_bit,
++                                            int32_t *min, int32_t *max);
++
+ #define FEATURE_MASK(feature) (1ULL << feature)
+ #define SMC_DPM_FEATURE ( \
+       FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)     | \
+@@ -1050,8 +1054,35 @@ static bool smu_v13_0_7_is_od_feature_su
+       PPTable_t *pptable = smu->smu_table.driver_pptable;
+       const OverDriveLimits_t * const overdrive_upperlimits =
+                               &pptable->SkuTable.OverDriveLimitsBasicMax;
++      int32_t min_value, max_value;
++      bool feature_enabled;
++
++      switch (od_feature_bit) {
++      case PP_OD_FEATURE_FAN_CURVE_BIT:
++              feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
++              if (feature_enabled) {
++                      smu_v13_0_7_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_TEMP,
++                                                        &min_value, &max_value);
++                      if (!min_value && !max_value) {
++                              feature_enabled = false;
++                              goto out;
++                      }
++
++                      smu_v13_0_7_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_PWM,
++                                                        &min_value, &max_value);
++                      if (!min_value && !max_value) {
++                              feature_enabled = false;
++                              goto out;
++                      }
++              }
++              break;
++      default:
++              feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
++              break;
++      }
+-      return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
++out:
++      return feature_enabled;
+ }
+ static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu,
diff --git a/queue-6.19/net-correctly-handle-tunneled-traffic-on-ipv6_csum-gso-fallback.patch b/queue-6.19/net-correctly-handle-tunneled-traffic-on-ipv6_csum-gso-fallback.patch
new file mode 100644 (file)
index 0000000..d0597f4
--- /dev/null
@@ -0,0 +1,77 @@
+From stable+bounces-231459-greg=kroah.com@vger.kernel.org Tue Mar 31 18:18:57 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2026 12:00:30 -0400
+Subject: net: correctly handle tunneled traffic on IPV6_CSUM GSO fallback
+To: stable@vger.kernel.org
+Cc: Willem de Bruijn <willemb@google.com>, Tangxin Xie <xietangxin@yeah.net>, Paolo Abeni <pabeni@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260331160030.2677853-1-sashal@kernel.org>
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit c4336a07eb6b2526dc2b62928b5104b41a7f81f5 ]
+
+NETIF_F_IPV6_CSUM only advertises support for checksum offload of
+packets without IPv6 extension headers. Packets with extension
+headers must fall back onto software checksumming. Since TSO
+depends on checksum offload, those must revert to GSO.
+
+The below commit introduces that fallback. It always checks
+network header length. For tunneled packets, the inner header length
+must be checked instead. Extend the check accordingly.
+
+A special case is tunneled packets without inner IP protocol. Such as
+RFC 6951 SCTP in UDP. Those are not standard IPv6 followed by
+transport header either, so also must revert to the software GSO path.
+
+Cc: stable@vger.kernel.org
+Fixes: 864e3396976e ("net: gso: Forbid IPv6 TSO with extensions on devices with only IPV6_CSUM")
+Reported-by: Tangxin Xie <xietangxin@yeah.net>
+Closes: https://lore.kernel.org/netdev/0414e7e2-9a1c-4d7c-a99d-b9039cf68f40@yeah.net/
+Suggested-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20260320190148.2409107-1-willemdebruijn.kernel@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c |   22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3763,6 +3763,22 @@ static netdev_features_t dflt_features_c
+       return vlan_features_check(skb, features);
+ }
++static bool skb_gso_has_extension_hdr(const struct sk_buff *skb)
++{
++      if (!skb->encapsulation)
++              return ((skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
++                       (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
++                        vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&
++                      skb_transport_header_was_set(skb) &&
++                      skb_network_header_len(skb) != sizeof(struct ipv6hdr));
++      else
++              return (!skb_inner_network_header_was_set(skb) ||
++                      ((skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
++                        (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
++                         inner_ip_hdr(skb)->version == 6)) &&
++                       skb_inner_network_header_len(skb) != sizeof(struct ipv6hdr)));
++}
++
+ static netdev_features_t gso_features_check(const struct sk_buff *skb,
+                                           struct net_device *dev,
+                                           netdev_features_t features)
+@@ -3810,11 +3826,7 @@ static netdev_features_t gso_features_ch
+        * so neither does TSO that depends on it.
+        */
+       if (features & NETIF_F_IPV6_CSUM &&
+-          (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
+-           (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
+-            vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&
+-          skb_transport_header_was_set(skb) &&
+-          skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
++          skb_gso_has_extension_hdr(skb) &&
+           !ipv6_has_hopopt_jumbo(skb))
+               features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4);
diff --git a/queue-6.19/net-mana-fix-use-after-free-in-add_adev-error-path.patch b/queue-6.19/net-mana-fix-use-after-free-in-add_adev-error-path.patch
new file mode 100644 (file)
index 0000000..26a09da
--- /dev/null
@@ -0,0 +1,65 @@
+From stable+bounces-231458-greg=kroah.com@vger.kernel.org Tue Mar 31 18:04:26 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2026 12:00:15 -0400
+Subject: net: mana: fix use-after-free in add_adev() error path
+To: stable@vger.kernel.org
+Cc: Guangshuo Li <lgs201920130244@gmail.com>, Long Li <longli@microsoft.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260331160015.2677476-1-sashal@kernel.org>
+
+From: Guangshuo Li <lgs201920130244@gmail.com>
+
+[ Upstream commit c4ea7d8907cf72b259bf70bd8c2e791e1c4ff70f ]
+
+If auxiliary_device_add() fails, add_adev() jumps to add_fail and calls
+auxiliary_device_uninit(adev).
+
+The auxiliary device has its release callback set to adev_release(),
+which frees the containing struct mana_adev. Since adev is embedded in
+struct mana_adev, the subsequent fall-through to init_fail and access
+to adev->id may result in a use-after-free.
+
+Fix this by saving the allocated auxiliary device id in a local
+variable before calling auxiliary_device_add(), and use that saved id
+in the cleanup path after auxiliary_device_uninit().
+
+Fixes: a69839d4327d ("net: mana: Add support for auxiliary device")
+Cc: stable@vger.kernel.org
+Reviewed-by: Long Li <longli@microsoft.com>
+Signed-off-by: Guangshuo Li <lgs201920130244@gmail.com>
+Link: https://patch.msgid.link/20260323165730.945365-1-lgs201920130244@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/microsoft/mana/mana_en.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -3376,6 +3376,7 @@ static int add_adev(struct gdma_dev *gd,
+       struct auxiliary_device *adev;
+       struct mana_adev *madev;
+       int ret;
++      int id;
+       madev = kzalloc(sizeof(*madev), GFP_KERNEL);
+       if (!madev)
+@@ -3385,7 +3386,8 @@ static int add_adev(struct gdma_dev *gd,
+       ret = mana_adev_idx_alloc();
+       if (ret < 0)
+               goto idx_fail;
+-      adev->id = ret;
++      id = ret;
++      adev->id = id;
+       adev->name = name;
+       adev->dev.parent = gd->gdma_context->dev;
+@@ -3411,7 +3413,7 @@ add_fail:
+       auxiliary_device_uninit(adev);
+ init_fail:
+-      mana_adev_idx_free(adev->id);
++      mana_adev_idx_free(id);
+ idx_fail:
+       kfree(madev);
diff --git a/queue-6.19/scsi-target-file-use-kzalloc_flex-for-aio_cmd.patch b/queue-6.19/scsi-target-file-use-kzalloc_flex-for-aio_cmd.patch
new file mode 100644 (file)
index 0000000..4841fbf
--- /dev/null
@@ -0,0 +1,45 @@
+From stable+bounces-231445-greg=kroah.com@vger.kernel.org Tue Mar 31 17:27:58 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2026 11:21:15 -0400
+Subject: scsi: target: file: Use kzalloc_flex for aio_cmd
+To: stable@vger.kernel.org
+Cc: Thinh Nguyen <Thinh.Nguyen@synopsys.com>, "Martin K. Petersen" <martin.petersen@oracle.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260331152115.2613463-1-sashal@kernel.org>
+
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+
+[ Upstream commit 01f784fc9d0ab2a6dac45ee443620e517cb2a19b ]
+
+The target_core_file doesn't initialize the aio_cmd->iocb for the
+ki_write_stream. When a write command fd_execute_rw_aio() is executed,
+we may get a bogus ki_write_stream value, causing unintended write
+failure status when checking iocb->ki_write_stream > max_write_streams
+in the block device.
+
+Let's just use kzalloc_flex when allocating the aio_cmd and let
+ki_write_stream=0 to fix this issue.
+
+Fixes: 732f25a2895a ("fs: add a write stream field to the kiocb")
+Fixes: c27683da6406 ("block: expose write streams for block device nodes")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Link: https://patch.msgid.link/f1a2f81c62f043e31f80bb92d5f29893400c8ee2.1773450782.git.Thinh.Nguyen@synopsys.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+[ changed kmalloc() to kzalloc() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/target/target_core_file.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -276,7 +276,7 @@ fd_execute_rw_aio(struct se_cmd *cmd, st
+       ssize_t len = 0;
+       int ret = 0, i;
+-      aio_cmd = kmalloc(struct_size(aio_cmd, bvecs, sgl_nents), GFP_KERNEL);
++      aio_cmd = kzalloc(struct_size(aio_cmd, bvecs, sgl_nents), GFP_KERNEL);
+       if (!aio_cmd)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
diff --git a/queue-6.19/scsi-target-tcm_loop-drain-commands-in-target_reset-handler.patch b/queue-6.19/scsi-target-tcm_loop-drain-commands-in-target_reset-handler.patch
new file mode 100644 (file)
index 0000000..84482af
--- /dev/null
@@ -0,0 +1,147 @@
+From stable+bounces-231435-greg=kroah.com@vger.kernel.org Tue Mar 31 16:33:18 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2026 10:31:29 -0400
+Subject: scsi: target: tcm_loop: Drain commands in target_reset handler
+To: stable@vger.kernel.org
+Cc: Josef Bacik <josef@toxicpanda.com>, "Martin K. Petersen" <martin.petersen@oracle.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260331143129.2479621-1-sashal@kernel.org>
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 1333eee56cdf3f0cf67c6ab4114c2c9e0a952026 ]
+
+tcm_loop_target_reset() violates the SCSI EH contract: it returns SUCCESS
+without draining any in-flight commands.  The SCSI EH documentation
+(scsi_eh.rst) requires that when a reset handler returns SUCCESS the driver
+has made lower layers "forget about timed out scmds" and is ready for new
+commands.  Every other SCSI LLD (virtio_scsi, mpt3sas, ipr, scsi_debug,
+mpi3mr) enforces this by draining or completing outstanding commands before
+returning SUCCESS.
+
+Because tcm_loop_target_reset() doesn't drain, the SCSI EH reuses in-flight
+scsi_cmnd structures for recovery commands (e.g. TUR) while the target core
+still has async completion work queued for the old se_cmd.  The memset in
+queuecommand zeroes se_lun and lun_ref_active, causing
+transport_lun_remove_cmd() to skip its percpu_ref_put().  The leaked LUN
+reference prevents transport_clear_lun_ref() from completing, hanging
+configfs LUN unlink forever in D-state:
+
+  INFO: task rm:264 blocked for more than 122 seconds.
+  rm              D    0   264    258 0x00004000
+  Call Trace:
+   __schedule+0x3d0/0x8e0
+   schedule+0x36/0xf0
+   transport_clear_lun_ref+0x78/0x90 [target_core_mod]
+   core_tpg_remove_lun+0x28/0xb0 [target_core_mod]
+   target_fabric_port_unlink+0x50/0x60 [target_core_mod]
+   configfs_unlink+0x156/0x1f0 [configfs]
+   vfs_unlink+0x109/0x290
+   do_unlinkat+0x1d5/0x2d0
+
+Fix this by making tcm_loop_target_reset() actually drain commands:
+
+ 1. Issue TMR_LUN_RESET via tcm_loop_issue_tmr() to drain all commands that
+    the target core knows about (those not yet CMD_T_COMPLETE).
+
+ 2. Use blk_mq_tagset_busy_iter() to iterate all started requests and
+    flush_work() on each se_cmd — this drains any deferred completion work
+    for commands that already had CMD_T_COMPLETE set before the TMR (which
+    the TMR skips via __target_check_io_state()).  This is the same pattern
+    used by mpi3mr, scsi_debug, and libsas to drain outstanding commands
+    during reset.
+
+Fixes: e0eb5d38b732 ("scsi: target: tcm_loop: Use block cmd allocator for se_cmds")
+Cc: stable@vger.kernel.org
+Assisted-by: Claude:claude-opus-4-6
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Link: https://patch.msgid.link/27011aa34c8f6b1b94d2e3cf5655b6d037f53428.1773706803.git.josef@toxicpanda.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/target/loopback/tcm_loop.c |   52 ++++++++++++++++++++++++++++++++-----
+ 1 file changed, 46 insertions(+), 6 deletions(-)
+
+--- a/drivers/target/loopback/tcm_loop.c
++++ b/drivers/target/loopback/tcm_loop.c
+@@ -26,6 +26,7 @@
+ #include <linux/slab.h>
+ #include <linux/types.h>
+ #include <linux/configfs.h>
++#include <linux/blk-mq.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_tcq.h>
+ #include <scsi/scsi_host.h>
+@@ -268,15 +269,27 @@ static int tcm_loop_device_reset(struct
+       return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
+ }
++static bool tcm_loop_flush_work_iter(struct request *rq, void *data)
++{
++      struct scsi_cmnd *sc = blk_mq_rq_to_pdu(rq);
++      struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc);
++      struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
++
++      flush_work(&se_cmd->work);
++      return true;
++}
++
+ static int tcm_loop_target_reset(struct scsi_cmnd *sc)
+ {
+       struct tcm_loop_hba *tl_hba;
+       struct tcm_loop_tpg *tl_tpg;
++      struct Scsi_Host *sh = sc->device->host;
++      int ret;
+       /*
+        * Locate the tcm_loop_hba_t pointer
+        */
+-      tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
++      tl_hba = *(struct tcm_loop_hba **)shost_priv(sh);
+       if (!tl_hba) {
+               pr_err("Unable to perform device reset without active I_T Nexus\n");
+               return FAILED;
+@@ -285,11 +298,38 @@ static int tcm_loop_target_reset(struct
+        * Locate the tl_tpg pointer from TargetID in sc->device->id
+        */
+       tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+-      if (tl_tpg) {
+-              tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
+-              return SUCCESS;
+-      }
+-      return FAILED;
++      if (!tl_tpg)
++              return FAILED;
++
++      /*
++       * Issue a LUN_RESET to drain all commands that the target core
++       * knows about.  This handles commands not yet marked CMD_T_COMPLETE.
++       */
++      ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, 0, TMR_LUN_RESET);
++      if (ret != TMR_FUNCTION_COMPLETE)
++              return FAILED;
++
++      /*
++       * Flush any deferred target core completion work that may still be
++       * queued.  Commands that already had CMD_T_COMPLETE set before the TMR
++       * are skipped by the TMR drain, but their async completion work
++       * (transport_lun_remove_cmd → percpu_ref_put, release_cmd → scsi_done)
++       * may still be pending in target_completion_wq.
++       *
++       * The SCSI EH will reuse in-flight scsi_cmnd structures for recovery
++       * commands (e.g. TUR) immediately after this handler returns SUCCESS —
++       * if deferred work is still pending, the memset in queuecommand would
++       * zero the se_cmd while the work accesses it, leaking the LUN
++       * percpu_ref and hanging configfs unlink forever.
++       *
++       * Use blk_mq_tagset_busy_iter() to find all started requests and
++       * flush_work() on each — the same pattern used by mpi3mr, scsi_debug,
++       * and other SCSI drivers to drain outstanding commands during reset.
++       */
++      blk_mq_tagset_busy_iter(&sh->tag_set, tcm_loop_flush_work_iter, NULL);
++
++      tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
++      return SUCCESS;
+ }
+ static const struct scsi_host_template tcm_loop_driver_template = {
diff --git a/queue-6.19/series b/queue-6.19/series
new file mode 100644 (file)
index 0000000..229e3dd
--- /dev/null
@@ -0,0 +1,8 @@
+drm-amd-pm-disable-od_fan_curve-if-temp-or-pwm-range-invalid-for-smu-v13.patch
+net-correctly-handle-tunneled-traffic-on-ipv6_csum-gso-fallback.patch
+net-mana-fix-use-after-free-in-add_adev-error-path.patch
+scsi-target-file-use-kzalloc_flex-for-aio_cmd.patch
+scsi-target-tcm_loop-drain-commands-in-target_reset-handler.patch
+xfs-factor-out-xfs_attr3_node_entry_remove.patch
+xfs-factor-out-xfs_attr3_leaf_init.patch
+xfs-close-crash-window-in-attr-dabtree-inactivation.patch
diff --git a/queue-6.19/xfs-close-crash-window-in-attr-dabtree-inactivation.patch b/queue-6.19/xfs-close-crash-window-in-attr-dabtree-inactivation.patch
new file mode 100644 (file)
index 0000000..8caf618
--- /dev/null
@@ -0,0 +1,263 @@
+From stable+bounces-232960-greg=kroah.com@vger.kernel.org Thu Apr  2 11:44:36 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu,  2 Apr 2026 05:44:12 -0400
+Subject: xfs: close crash window in attr dabtree inactivation
+To: stable@vger.kernel.org
+Cc: Long Li <leo.lilong@huawei.com>, "Darrick J. Wong" <djwong@kernel.org>, Carlos Maiolino <cem@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260402094412.717776-3-sashal@kernel.org>
+
+From: Long Li <leo.lilong@huawei.com>
+
+[ Upstream commit b854e1c4eff3473b6d3a9ae74129ac5c48bc0b61 ]
+
+When inactivating an inode with node-format extended attributes,
+xfs_attr3_node_inactive() invalidates all child leaf/node blocks via
+xfs_trans_binval(), but intentionally does not remove the corresponding
+entries from their parent node blocks.  The implicit assumption is that
+xfs_attr_inactive() will truncate the entire attr fork to zero extents
+afterwards, so log recovery will never reach the root node and follow
+those stale pointers.
+
+However, if a log shutdown occurs after the leaf/node block cancellations
+commit but before the attr bmap truncation commits, this assumption
+breaks.  Recovery replays the attr bmap intact (the inode still has
+attr fork extents), but suppresses replay of all cancelled leaf/node
+blocks, maybe leaving them as stale data on disk.  On the next mount,
+xlog_recover_process_iunlinks() retries inactivation and attempts to
+read the root node via the attr bmap. If the root node was not replayed,
+reading the unreplayed root block triggers a metadata verification
+failure immediately; if it was replayed, following its child pointers
+to unreplayed child blocks triggers the same failure:
+
+ XFS (pmem0): Metadata corruption detected at
+ xfs_da3_node_read_verify+0x53/0x220, xfs_da3_node block 0x78
+ XFS (pmem0): Unmount and run xfs_repair
+ XFS (pmem0): First 128 bytes of corrupted metadata buffer:
+ 00000000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+ 00000010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+ 00000020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+ 00000030: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+ 00000040: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+ 00000050: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+ 00000060: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+ 00000070: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+ XFS (pmem0): metadata I/O error in "xfs_da_read_buf+0x104/0x190" at daddr 0x78 len 8 error 117
+
+Fix this in two places:
+
+In xfs_attr3_node_inactive(), after calling xfs_trans_binval() on a
+child block, immediately remove the entry that references it from the
+parent node in the same transaction.  This eliminates the window where
+the parent holds a pointer to a cancelled block.  Once all children are
+removed, the now-empty root node is converted to a leaf block within the
+same transaction. This node-to-leaf conversion is necessary for crash
+safety. If the system shutdown after the empty node is written to the
+log but before the second-phase bmap truncation commits, log recovery
+will attempt to verify the root block on disk. xfs_da3_node_verify()
+does not permit a node block with count == 0; such a block will fail
+verification and trigger a metadata corruption shutdown. On the other
+hand, leaf blocks are allowed to have this transient state.
+
+In xfs_attr_inactive(), split the attr fork truncation into two explicit
+phases.  First, truncate all extents beyond the root block (the child
+extents whose parent references have already been removed above).
+Second, invalidate the root block and truncate the attr bmap to zero in
+a single transaction.  The two operations in the second phase must be
+atomic: as long as the attr bmap has any non-zero length, recovery can
+follow it to the root block, so the root block invalidation must commit
+together with the bmap-to-zero truncation.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Long Li <leo.lilong@huawei.com>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_attr_inactive.c |   95 +++++++++++++++++++++++++++------------------
+ 1 file changed, 57 insertions(+), 38 deletions(-)
+
+--- a/fs/xfs/xfs_attr_inactive.c
++++ b/fs/xfs/xfs_attr_inactive.c
+@@ -140,7 +140,7 @@ xfs_attr3_node_inactive(
+       xfs_daddr_t             parent_blkno, child_blkno;
+       struct xfs_buf          *child_bp;
+       struct xfs_da3_icnode_hdr ichdr;
+-      int                     error, i;
++      int                     error;
+       /*
+        * Since this code is recursive (gasp!) we must protect ourselves.
+@@ -152,7 +152,7 @@ xfs_attr3_node_inactive(
+               return -EFSCORRUPTED;
+       }
+-      xfs_da3_node_hdr_from_disk(dp->i_mount, &ichdr, bp->b_addr);
++      xfs_da3_node_hdr_from_disk(mp, &ichdr, bp->b_addr);
+       parent_blkno = xfs_buf_daddr(bp);
+       if (!ichdr.count) {
+               xfs_trans_brelse(*trans, bp);
+@@ -167,7 +167,7 @@ xfs_attr3_node_inactive(
+        * over the leaves removing all of them.  If this is higher up
+        * in the tree, recurse downward.
+        */
+-      for (i = 0; i < ichdr.count; i++) {
++      while (ichdr.count > 0) {
+               /*
+                * Read the subsidiary block to see what we have to work with.
+                * Don't do this in a transaction.  This is a depth-first
+@@ -218,29 +218,32 @@ xfs_attr3_node_inactive(
+               xfs_trans_binval(*trans, child_bp);
+               child_bp = NULL;
++              error = xfs_da3_node_read_mapped(*trans, dp,
++                              parent_blkno, &bp, XFS_ATTR_FORK);
++              if (error)
++                      return error;
++
+               /*
+-               * If we're not done, re-read the parent to get the next
+-               * child block number.
++               * Remove entry from parent node, prevents being indexed to.
+                */
+-              if (i + 1 < ichdr.count) {
+-                      struct xfs_da3_icnode_hdr phdr;
++              xfs_attr3_node_entry_remove(*trans, dp, bp, 0);
+-                      error = xfs_da3_node_read_mapped(*trans, dp,
+-                                      parent_blkno, &bp, XFS_ATTR_FORK);
++              xfs_da3_node_hdr_from_disk(mp, &ichdr, bp->b_addr);
++              bp = NULL;
++
++              if (ichdr.count > 0) {
++                      /*
++                       * If we're not done, get the next child block number.
++                       */
++                      child_fsb = be32_to_cpu(ichdr.btree[0].before);
++
++                      /*
++                       * Atomically commit the whole invalidate stuff.
++                       */
++                      error = xfs_trans_roll_inode(trans, dp);
+                       if (error)
+                               return error;
+-                      xfs_da3_node_hdr_from_disk(dp->i_mount, &phdr,
+-                                                bp->b_addr);
+-                      child_fsb = be32_to_cpu(phdr.btree[i + 1].before);
+-                      xfs_trans_brelse(*trans, bp);
+-                      bp = NULL;
+               }
+-              /*
+-               * Atomically commit the whole invalidate stuff.
+-               */
+-              error = xfs_trans_roll_inode(trans, dp);
+-              if (error)
+-                      return  error;
+       }
+       return 0;
+@@ -257,10 +260,8 @@ xfs_attr3_root_inactive(
+       struct xfs_trans        **trans,
+       struct xfs_inode        *dp)
+ {
+-      struct xfs_mount        *mp = dp->i_mount;
+       struct xfs_da_blkinfo   *info;
+       struct xfs_buf          *bp;
+-      xfs_daddr_t             blkno;
+       int                     error;
+       /*
+@@ -272,7 +273,6 @@ xfs_attr3_root_inactive(
+       error = xfs_da3_node_read(*trans, dp, 0, &bp, XFS_ATTR_FORK);
+       if (error)
+               return error;
+-      blkno = xfs_buf_daddr(bp);
+       /*
+        * Invalidate the tree, even if the "tree" is only a single leaf block.
+@@ -283,10 +283,26 @@ xfs_attr3_root_inactive(
+       case cpu_to_be16(XFS_DA_NODE_MAGIC):
+       case cpu_to_be16(XFS_DA3_NODE_MAGIC):
+               error = xfs_attr3_node_inactive(trans, dp, bp, 1);
++              /*
++               * Empty root node block are not allowed, convert it to leaf.
++               */
++              if (!error)
++                      error = xfs_attr3_leaf_init(*trans, dp, 0);
++              if (!error)
++                      error = xfs_trans_roll_inode(trans, dp);
+               break;
+       case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
+       case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
+               error = xfs_attr3_leaf_inactive(trans, dp, bp);
++              /*
++               * Reinit the leaf before truncating extents so that a crash
++               * mid-truncation leaves an empty leaf rather than one with
++               * entries that may reference freed remote value blocks.
++               */
++              if (!error)
++                      error = xfs_attr3_leaf_init(*trans, dp, 0);
++              if (!error)
++                      error = xfs_trans_roll_inode(trans, dp);
+               break;
+       default:
+               xfs_dirattr_mark_sick(dp, XFS_ATTR_FORK);
+@@ -295,21 +311,6 @@ xfs_attr3_root_inactive(
+               xfs_trans_brelse(*trans, bp);
+               break;
+       }
+-      if (error)
+-              return error;
+-
+-      /*
+-       * Invalidate the incore copy of the root block.
+-       */
+-      error = xfs_trans_get_buf(*trans, mp->m_ddev_targp, blkno,
+-                      XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0, &bp);
+-      if (error)
+-              return error;
+-      xfs_trans_binval(*trans, bp);   /* remove from cache */
+-      /*
+-       * Commit the invalidate and start the next transaction.
+-       */
+-      error = xfs_trans_roll_inode(trans, dp);
+       return error;
+ }
+@@ -328,6 +329,7 @@ xfs_attr_inactive(
+ {
+       struct xfs_trans        *trans;
+       struct xfs_mount        *mp;
++      struct xfs_buf          *bp;
+       int                     lock_mode = XFS_ILOCK_SHARED;
+       int                     error = 0;
+@@ -363,10 +365,27 @@ xfs_attr_inactive(
+        * removal below.
+        */
+       if (dp->i_af.if_nextents > 0) {
++              /*
++               * Invalidate and truncate all blocks but leave the root block.
++               */
+               error = xfs_attr3_root_inactive(&trans, dp);
+               if (error)
+                       goto out_cancel;
++              error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK,
++                              XFS_FSB_TO_B(mp, mp->m_attr_geo->fsbcount));
++              if (error)
++                      goto out_cancel;
++
++              /*
++               * Invalidate and truncate the root block and ensure that the
++               * operation is completed within a single transaction.
++               */
++              error = xfs_da_get_buf(trans, dp, 0, &bp, XFS_ATTR_FORK);
++              if (error)
++                      goto out_cancel;
++
++              xfs_trans_binval(trans, bp);
+               error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
+               if (error)
+                       goto out_cancel;
diff --git a/queue-6.19/xfs-factor-out-xfs_attr3_leaf_init.patch b/queue-6.19/xfs-factor-out-xfs_attr3_leaf_init.patch
new file mode 100644 (file)
index 0000000..212e8f5
--- /dev/null
@@ -0,0 +1,69 @@
+From stable+bounces-232959-greg=kroah.com@vger.kernel.org Thu Apr  2 11:44:28 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu,  2 Apr 2026 05:44:11 -0400
+Subject: xfs: factor out xfs_attr3_leaf_init
+To: stable@vger.kernel.org
+Cc: Long Li <leo.lilong@huawei.com>, "Darrick J. Wong" <djwong@kernel.org>, Carlos Maiolino <cem@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260402094412.717776-2-sashal@kernel.org>
+
+From: Long Li <leo.lilong@huawei.com>
+
+[ Upstream commit e65bb55d7f8c2041c8fdb73cd29b0b4cad4ed847 ]
+
+Factor out wrapper xfs_attr3_leaf_init function, which exported for
+external use.
+
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Long Li <leo.lilong@huawei.com>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Stable-dep-of: b854e1c4eff3 ("xfs: close crash window in attr dabtree inactivation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/libxfs/xfs_attr_leaf.c |   22 ++++++++++++++++++++++
+ fs/xfs/libxfs/xfs_attr_leaf.h |    3 +++
+ 2 files changed, 25 insertions(+)
+
+--- a/fs/xfs/libxfs/xfs_attr_leaf.c
++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
+@@ -1317,6 +1317,28 @@ xfs_attr3_leaf_create(
+ }
+ /*
++ * Reinitialize an existing attr fork block as an empty leaf, and attach
++ * the buffer to tp.
++ */
++int
++xfs_attr3_leaf_init(
++      struct xfs_trans        *tp,
++      struct xfs_inode        *dp,
++      xfs_dablk_t             blkno)
++{
++      struct xfs_buf          *bp = NULL;
++      struct xfs_da_args      args = {
++              .trans          = tp,
++              .dp             = dp,
++              .owner          = dp->i_ino,
++              .geo            = dp->i_mount->m_attr_geo,
++      };
++
++      ASSERT(tp != NULL);
++
++      return xfs_attr3_leaf_create(&args, blkno, &bp);
++}
++/*
+  * Split the leaf node, rebalance, then add the new entry.
+  *
+  * Returns 0 if the entry was added, 1 if a further split is needed or a
+--- a/fs/xfs/libxfs/xfs_attr_leaf.h
++++ b/fs/xfs/libxfs/xfs_attr_leaf.h
+@@ -86,6 +86,9 @@ int  xfs_attr3_leaf_list_int(struct xfs_b
+ /*
+  * Routines used for shrinking the Btree.
+  */
++
++int   xfs_attr3_leaf_init(struct xfs_trans *tp, struct xfs_inode *dp,
++                              xfs_dablk_t blkno);
+ int   xfs_attr3_leaf_toosmall(struct xfs_da_state *state, int *retval);
+ void  xfs_attr3_leaf_unbalance(struct xfs_da_state *state,
+                                      struct xfs_da_state_blk *drop_blk,
diff --git a/queue-6.19/xfs-factor-out-xfs_attr3_node_entry_remove.patch b/queue-6.19/xfs-factor-out-xfs_attr3_node_entry_remove.patch
new file mode 100644 (file)
index 0000000..8155846
--- /dev/null
@@ -0,0 +1,128 @@
+From stable+bounces-232958-greg=kroah.com@vger.kernel.org Thu Apr  2 11:44:27 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu,  2 Apr 2026 05:44:10 -0400
+Subject: xfs: factor out xfs_attr3_node_entry_remove
+To: stable@vger.kernel.org
+Cc: Long Li <leo.lilong@huawei.com>, "Darrick J. Wong" <djwong@kernel.org>, Carlos Maiolino <cem@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260402094412.717776-1-sashal@kernel.org>
+
+From: Long Li <leo.lilong@huawei.com>
+
+[ Upstream commit ce4e789cf3561c9fac73cc24445bfed9ea0c514b ]
+
+Factor out wrapper xfs_attr3_node_entry_remove function, which
+exported for external use.
+
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Long Li <leo.lilong@huawei.com>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Stable-dep-of: b854e1c4eff3 ("xfs: close crash window in attr dabtree inactivation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/libxfs/xfs_da_btree.c |   53 ++++++++++++++++++++++++++++++++++---------
+ fs/xfs/libxfs/xfs_da_btree.h |    2 +
+ 2 files changed, 44 insertions(+), 11 deletions(-)
+
+--- a/fs/xfs/libxfs/xfs_da_btree.c
++++ b/fs/xfs/libxfs/xfs_da_btree.c
+@@ -1506,21 +1506,20 @@ xfs_da3_fixhashpath(
+ }
+ /*
+- * Remove an entry from an intermediate node.
++ * Internal implementation to remove an entry from an intermediate node.
+  */
+ STATIC void
+-xfs_da3_node_remove(
+-      struct xfs_da_state     *state,
+-      struct xfs_da_state_blk *drop_blk)
++__xfs_da3_node_remove(
++      struct xfs_trans        *tp,
++      struct xfs_inode        *dp,
++      struct xfs_da_geometry  *geo,
++      struct xfs_da_state_blk *drop_blk)
+ {
+       struct xfs_da_intnode   *node;
+       struct xfs_da3_icnode_hdr nodehdr;
+       struct xfs_da_node_entry *btree;
+       int                     index;
+       int                     tmp;
+-      struct xfs_inode        *dp = state->args->dp;
+-
+-      trace_xfs_da_node_remove(state->args);
+       node = drop_blk->bp->b_addr;
+       xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
+@@ -1536,17 +1535,17 @@ xfs_da3_node_remove(
+               tmp  = nodehdr.count - index - 1;
+               tmp *= (uint)sizeof(xfs_da_node_entry_t);
+               memmove(&btree[index], &btree[index + 1], tmp);
+-              xfs_trans_log_buf(state->args->trans, drop_blk->bp,
++              xfs_trans_log_buf(tp, drop_blk->bp,
+                   XFS_DA_LOGRANGE(node, &btree[index], tmp));
+               index = nodehdr.count - 1;
+       }
+       memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
+-      xfs_trans_log_buf(state->args->trans, drop_blk->bp,
++      xfs_trans_log_buf(tp, drop_blk->bp,
+           XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
+       nodehdr.count -= 1;
+       xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
+-      xfs_trans_log_buf(state->args->trans, drop_blk->bp,
+-          XFS_DA_LOGRANGE(node, &node->hdr, state->args->geo->node_hdr_size));
++      xfs_trans_log_buf(tp, drop_blk->bp,
++          XFS_DA_LOGRANGE(node, &node->hdr, geo->node_hdr_size));
+       /*
+        * Copy the last hash value from the block to propagate upwards.
+@@ -1555,6 +1554,38 @@ xfs_da3_node_remove(
+ }
+ /*
++ * Remove an entry from an intermediate node.
++ */
++STATIC void
++xfs_da3_node_remove(
++      struct xfs_da_state     *state,
++      struct xfs_da_state_blk *drop_blk)
++{
++      trace_xfs_da_node_remove(state->args);
++
++      __xfs_da3_node_remove(state->args->trans, state->args->dp,
++                      state->args->geo, drop_blk);
++}
++
++/*
++ * Remove an entry from an intermediate attr node at the specified index.
++ */
++void
++xfs_attr3_node_entry_remove(
++      struct xfs_trans        *tp,
++      struct xfs_inode        *dp,
++      struct xfs_buf          *bp,
++      int                     index)
++{
++      struct xfs_da_state_blk blk = {
++              .index          = index,
++              .bp             = bp,
++      };
++
++      __xfs_da3_node_remove(tp, dp, dp->i_mount->m_attr_geo, &blk);
++}
++
++/*
+  * Unbalance the elements between two intermediate nodes,
+  * move all Btree elements from one node into another.
+  */
+--- a/fs/xfs/libxfs/xfs_da_btree.h
++++ b/fs/xfs/libxfs/xfs_da_btree.h
+@@ -184,6 +184,8 @@ int        xfs_da3_split(xfs_da_state_t *state)
+ int   xfs_da3_join(xfs_da_state_t *state);
+ void  xfs_da3_fixhashpath(struct xfs_da_state *state,
+                           struct xfs_da_state_path *path_to_to_fix);
++void  xfs_attr3_node_entry_remove(struct xfs_trans *tp, struct xfs_inode *dp,
++                          struct xfs_buf *bp, int index);
+ /*
+  * Routines used for finding things in the Btree.