6.12-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 21 Jan 2026 15:15:19 +0000 (16:15 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 21 Jan 2026 15:15:19 +0000 (16:15 +0100)
added patches:
btrfs-fix-deadlock-in-wait_current_trans-due-to-ignored-transaction-type.patch
hid-intel-ish-hid-fix-wcast-function-type-strict-in-devm_ishtp_alloc_workqueue.patch
hid-intel-ish-hid-use-dedicated-unbound-workqueues-to-prevent-resume-blocking.patch
xfs-set-max_agbno-to-allow-sparse-alloc-of-last-full-inode-chunk.patch

queue-6.12/btrfs-fix-deadlock-in-wait_current_trans-due-to-ignored-transaction-type.patch [new file with mode: 0644]
queue-6.12/hid-intel-ish-hid-fix-wcast-function-type-strict-in-devm_ishtp_alloc_workqueue.patch [new file with mode: 0644]
queue-6.12/hid-intel-ish-hid-use-dedicated-unbound-workqueues-to-prevent-resume-blocking.patch [new file with mode: 0644]
queue-6.12/series
queue-6.12/xfs-set-max_agbno-to-allow-sparse-alloc-of-last-full-inode-chunk.patch [new file with mode: 0644]

diff --git a/queue-6.12/btrfs-fix-deadlock-in-wait_current_trans-due-to-ignored-transaction-type.patch b/queue-6.12/btrfs-fix-deadlock-in-wait_current_trans-due-to-ignored-transaction-type.patch
new file mode 100644 (file)
index 0000000..9a6d825
--- /dev/null
@@ -0,0 +1,154 @@
+From 5037b342825df7094a4906d1e2a9674baab50cb2 Mon Sep 17 00:00:00 2001
+From: Robbie Ko <robbieko@synology.com>
+Date: Thu, 11 Dec 2025 13:30:33 +0800
+Subject: btrfs: fix deadlock in wait_current_trans() due to ignored transaction type
+
+From: Robbie Ko <robbieko@synology.com>
+
+commit 5037b342825df7094a4906d1e2a9674baab50cb2 upstream.
+
+When wait_current_trans() is called during start_transaction(), it
+currently waits for a blocked transaction without considering whether
+the given transaction type actually needs to wait for that particular
+transaction state. The btrfs_blocked_trans_types[] array already defines
+which transaction types should wait for which transaction states, but
+this check was missing in wait_current_trans().
+
+This can lead to a deadlock scenario involving two transactions and
+pending ordered extents:
+
+  1. Transaction A is in TRANS_STATE_COMMIT_DOING state
+
+  2. A worker processing an ordered extent calls start_transaction()
+     with TRANS_JOIN
+
+  3. join_transaction() returns -EBUSY because Transaction A is in
+     TRANS_STATE_COMMIT_DOING
+
+  4. Transaction A moves to TRANS_STATE_UNBLOCKED and completes
+
+  5. A new Transaction B is created (TRANS_STATE_RUNNING)
+
+  6. The ordered extent from step 2 is added to Transaction B's
+     pending ordered extents
+
+  7. Another task immediately starts committing Transaction B, which
+     enters TRANS_STATE_COMMIT_START
+
+  8. The worker finally reaches wait_current_trans(), sees Transaction B
+     in TRANS_STATE_COMMIT_START (a blocked state), and waits
+     unconditionally
+
+  9. However, TRANS_JOIN should NOT wait for TRANS_STATE_COMMIT_START
+     according to btrfs_blocked_trans_types[]
+
+  10. Transaction B is waiting for pending ordered extents to complete
+
+  11. Deadlock: Transaction B waits for ordered extent, ordered extent
+      waits for Transaction B
+
+This can be illustrated by the following call stacks:
+  CPU0                              CPU1
+                                    btrfs_finish_ordered_io()
+                                      start_transaction(TRANS_JOIN)
+                                        join_transaction()
+                                          # -EBUSY (Transaction A is
+                                          # TRANS_STATE_COMMIT_DOING)
+  # Transaction A completes
+  # Transaction B created
+  # ordered extent added to
+  # Transaction B's pending list
+  btrfs_commit_transaction()
+    # Transaction B enters
+    # TRANS_STATE_COMMIT_START
+    # waiting for pending ordered
+    # extents
+                                        wait_current_trans()
+                                          # waits for Transaction B
+                                          # (should not wait!)
+
+Task bstore_kv_sync in btrfs_commit_transaction waiting for ordered
+extents:
+
+  __schedule+0x2e7/0x8a0
+  schedule+0x64/0xe0
+  btrfs_commit_transaction+0xbf7/0xda0 [btrfs]
+  btrfs_sync_file+0x342/0x4d0 [btrfs]
+  __x64_sys_fdatasync+0x4b/0x80
+  do_syscall_64+0x33/0x40
+  entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Task kworker in wait_current_trans waiting for transaction commit:
+
+  Workqueue: btrfs-syno_nocow btrfs_work_helper [btrfs]
+  __schedule+0x2e7/0x8a0
+  schedule+0x64/0xe0
+  wait_current_trans+0xb0/0x110 [btrfs]
+  start_transaction+0x346/0x5b0 [btrfs]
+  btrfs_finish_ordered_io.isra.0+0x49b/0x9c0 [btrfs]
+  btrfs_work_helper+0xe8/0x350 [btrfs]
+  process_one_work+0x1d3/0x3c0
+  worker_thread+0x4d/0x3e0
+  kthread+0x12d/0x150
+  ret_from_fork+0x1f/0x30
+
+Fix this by passing the transaction type to wait_current_trans() and
+checking btrfs_blocked_trans_types[cur_trans->state] against the given
+type before deciding to wait. This ensures that transaction types which
+are allowed to join during certain blocked states will not unnecessarily
+wait and cause deadlocks.
+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Robbie Ko <robbieko@synology.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Cc: Motiejus Jakštys <motiejus@jakstys.lt>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/transaction.c |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -520,13 +520,14 @@ static inline int is_transaction_blocked
+  * when this is done, it is safe to start a new transaction, but the current
+  * transaction might not be fully on disk.
+  */
+-static void wait_current_trans(struct btrfs_fs_info *fs_info)
++static void wait_current_trans(struct btrfs_fs_info *fs_info, unsigned int type)
+ {
+       struct btrfs_transaction *cur_trans;
+       spin_lock(&fs_info->trans_lock);
+       cur_trans = fs_info->running_transaction;
+-      if (cur_trans && is_transaction_blocked(cur_trans)) {
++      if (cur_trans && is_transaction_blocked(cur_trans) &&
++          (btrfs_blocked_trans_types[cur_trans->state] & type)) {
+               refcount_inc(&cur_trans->use_count);
+               spin_unlock(&fs_info->trans_lock);
+@@ -701,12 +702,12 @@ again:
+               sb_start_intwrite(fs_info->sb);
+       if (may_wait_transaction(fs_info, type))
+-              wait_current_trans(fs_info);
++              wait_current_trans(fs_info, type);
+       do {
+               ret = join_transaction(fs_info, type);
+               if (ret == -EBUSY) {
+-                      wait_current_trans(fs_info);
++                      wait_current_trans(fs_info, type);
+                       if (unlikely(type == TRANS_ATTACH ||
+                                    type == TRANS_JOIN_NOSTART))
+                               ret = -ENOENT;
+@@ -1003,7 +1004,7 @@ out:
+ void btrfs_throttle(struct btrfs_fs_info *fs_info)
+ {
+-      wait_current_trans(fs_info);
++      wait_current_trans(fs_info, TRANS_START);
+ }
+ bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
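
The gating the fix relies on is a per-state bitmask table in
fs/btrfs/transaction.c. A simplified sketch of its shape (trimmed for
illustration; the exact entries in 6.12 may differ):

  /*
   * For each transaction state, a mask of the transaction types that
   * are blocked from joining while the running transaction is in that
   * state. __TRANS_JOIN is absent from the COMMIT_START entry, so a
   * TRANS_JOIN caller may still join there and must not be made to
   * wait -- which is exactly what the fixed wait_current_trans() checks.
   */
  static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
          [TRANS_STATE_RUNNING]      = 0U,
          [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
          [TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
                                        __TRANS_ATTACH |
                                        __TRANS_JOIN |
                                        __TRANS_JOIN_NOSTART),
          /* later states block further types from joining */
  };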
diff --git a/queue-6.12/hid-intel-ish-hid-fix-wcast-function-type-strict-in-devm_ishtp_alloc_workqueue.patch b/queue-6.12/hid-intel-ish-hid-fix-wcast-function-type-strict-in-devm_ishtp_alloc_workqueue.patch
new file mode 100644 (file)
index 0000000..931f852
--- /dev/null
@@ -0,0 +1,64 @@
+From 3644f4411713f52bf231574aa8759e3d8e20b341 Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <nathan@kernel.org>
+Date: Wed, 22 Oct 2025 00:49:08 +0200
+Subject: HID: intel-ish-hid: Fix -Wcast-function-type-strict in devm_ishtp_alloc_workqueue()
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+commit 3644f4411713f52bf231574aa8759e3d8e20b341 upstream.
+
+Clang warns (or errors with CONFIG_WERROR=y / W=e):
+
+  drivers/hid/intel-ish-hid/ipc/ipc.c:935:36: error: cast from 'void (*)(struct workqueue_struct *)' to 'void (*)(void *)' converts to incompatible function type [-Werror,-Wcast-function-type-strict]
+    935 |         if (devm_add_action_or_reset(dev, (void (*)(void *))destroy_workqueue,
+        |                                           ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  include/linux/device/devres.h:168:34: note: expanded from macro 'devm_add_action_or_reset'
+    168 |         __devm_add_action_or_ireset(dev, action, data, #action)
+        |                                         ^~~~~~
+
+This warning points out that a kernel control flow integrity (kCFI /
+CONFIG_CFI=y) violation will occur due to this function cast when
+destroy_workqueue() is indirectly called via devm_action_release(),
+because the prototype of destroy_workqueue() does not match the
+prototype of (*action)().
+
+Use a local function with the correct prototype to wrap
+destroy_workqueue() to resolve the warning and CFI violation.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202510190103.qTZvfdjj-lkp@intel.com/
+Closes: https://github.com/ClangBuiltLinux/linux/issues/2139
+Fixes: 0d30dae38fe0 ("HID: intel-ish-hid: Use dedicated unbound workqueues to prevent resume blocking")
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Reviewed-by: Zhang Lixu <lixu.zhang@intel.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hid/intel-ish-hid/ipc/ipc.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
++++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
+@@ -932,6 +932,11 @@ static const struct ishtp_hw_ops ish_hw_
+       .dma_no_cache_snooping = _dma_no_cache_snooping
+ };
++static void ishtp_free_workqueue(void *wq)
++{
++      destroy_workqueue(wq);
++}
++
+ static struct workqueue_struct *devm_ishtp_alloc_workqueue(struct device *dev)
+ {
+       struct workqueue_struct *wq;
+@@ -940,8 +945,7 @@ static struct workqueue_struct *devm_ish
+       if (!wq)
+               return NULL;
+-      if (devm_add_action_or_reset(dev, (void (*)(void *))destroy_workqueue,
+-                                   wq))
++      if (devm_add_action_or_reset(dev, ishtp_free_workqueue, wq))
+               return NULL;
+       return wq;
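
The same pattern applies to any devm action whose destructor is not
void (*)(void *): keep the indirect call's prototype exact and let a
thin wrapper do the typed call. A minimal sketch under kCFI
(my_buf_free() and my_probe() are illustrative names, not from the
patch):

  /* devm_add_action_or_reset() invokes the action through a
   * void (*)(void *) pointer; kCFI verifies the target's prototype
   * at the indirect call site, so the wrapper must have exactly
   * that signature. */
  static void my_buf_free(void *data)
  {
          kfree(data);    /* typed destructor called directly, no cast */
  }

  static int my_probe(struct device *dev)
  {
          void *buf = kmalloc(64, GFP_KERNEL);

          if (!buf)
                  return -ENOMEM;
          /* no function-pointer cast needed: prototypes already match */
          return devm_add_action_or_reset(dev, my_buf_free, buf);
  }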
diff --git a/queue-6.12/hid-intel-ish-hid-use-dedicated-unbound-workqueues-to-prevent-resume-blocking.patch b/queue-6.12/hid-intel-ish-hid-use-dedicated-unbound-workqueues-to-prevent-resume-blocking.patch
new file mode 100644 (file)
index 0000000..ec089a5
--- /dev/null
@@ -0,0 +1,201 @@
+From 0d30dae38fe01cd1de358c6039a0b1184689fe51 Mon Sep 17 00:00:00 2001
+From: Zhang Lixu <lixu.zhang@intel.com>
+Date: Fri, 10 Oct 2025 13:52:54 +0800
+Subject: HID: intel-ish-hid: Use dedicated unbound workqueues to prevent resume blocking
+
+From: Zhang Lixu <lixu.zhang@intel.com>
+
+commit 0d30dae38fe01cd1de358c6039a0b1184689fe51 upstream.
+
+During suspend/resume tests with S2IDLE, some ISH functional failures were
+observed because of a delay in executing the ISH resume handler. Here
+schedule_work() is used from the resume handler to do the actual work.
+schedule_work() uses system_wq, which is a per-CPU workqueue. Although
+the queuing is not bound to a CPU, it prefers the local CPU of the
+caller, unless prohibited.
+
+Users of this workqueue are not supposed to queue long-running work.
+But in practice, there are scenarios where long-running work items are
+queued on other unbound workqueues, occupying the CPU. As a result, the
+ISH resume handler may not get a chance to execute in a timely manner.
+
+In one scenario, one of the ish_resume_handler() executions was delayed
+nearly 1 second because another work item on an unbound workqueue occupied
+the same CPU. This delay causes ISH functionality failures.
+
+A similar issue was previously observed where the ISH HID driver timed out
+while getting the HID descriptor during S4 resume in the recovery kernel,
+likely caused by the same workqueue contention problem.
+
+Create dedicated unbound workqueues for all ISH operations to allow work
+items to execute on any available CPU, eliminating CPU-specific bottlenecks
+and improving resume reliability under varying system loads. ISH also has
+three components: a bus driver which implements the ISH protocols, a PCI
+interface layer, and a HID interface. Use one dedicated workqueue for all
+of them.
+
+Signed-off-by: Zhang Lixu <lixu.zhang@intel.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hid/intel-ish-hid/ipc/ipc.c          |   21 ++++++++++++++++++++-
+ drivers/hid/intel-ish-hid/ipc/pci-ish.c      |    2 +-
+ drivers/hid/intel-ish-hid/ishtp-hid-client.c |    4 ++--
+ drivers/hid/intel-ish-hid/ishtp/bus.c        |   18 +++++++++++++++++-
+ drivers/hid/intel-ish-hid/ishtp/hbm.c        |    4 ++--
+ drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h  |    3 +++
+ include/linux/intel-ish-client-if.h          |    2 ++
+ 7 files changed, 47 insertions(+), 7 deletions(-)
+
+--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
++++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
+@@ -627,7 +627,7 @@ static void        recv_ipc(struct ishtp_device
+               if (!ishtp_dev) {
+                       ishtp_dev = dev;
+               }
+-              schedule_work(&fw_reset_work);
++              queue_work(dev->unbound_wq, &fw_reset_work);
+               break;
+       case MNG_RESET_NOTIFY_ACK:
+@@ -932,6 +932,21 @@ static const struct ishtp_hw_ops ish_hw_
+       .dma_no_cache_snooping = _dma_no_cache_snooping
+ };
++static struct workqueue_struct *devm_ishtp_alloc_workqueue(struct device *dev)
++{
++      struct workqueue_struct *wq;
++
++      wq = alloc_workqueue("ishtp_unbound_%d", WQ_UNBOUND, 0, dev->id);
++      if (!wq)
++              return NULL;
++
++      if (devm_add_action_or_reset(dev, (void (*)(void *))destroy_workqueue,
++                                   wq))
++              return NULL;
++
++      return wq;
++}
++
+ /**
+  * ish_dev_init() -Initialize ISH devoce
+  * @pdev: PCI device
+@@ -952,6 +967,10 @@ struct ishtp_device *ish_dev_init(struct
+       if (!dev)
+               return NULL;
++      dev->unbound_wq = devm_ishtp_alloc_workqueue(&pdev->dev);
++      if (!dev->unbound_wq)
++              return NULL;
++
+       dev->devc = &pdev->dev;
+       ishtp_device_init(dev);
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -381,7 +381,7 @@ static int __maybe_unused ish_resume(str
+       ish_resume_device = device;
+       dev->resume_flag = 1;
+-      schedule_work(&resume_work);
++      queue_work(dev->unbound_wq, &resume_work);
+       return 0;
+ }
+--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
++++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+@@ -858,7 +858,7 @@ static int hid_ishtp_cl_reset(struct ish
+       hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+                       hid_ishtp_cl);
+-      schedule_work(&client_data->work);
++      queue_work(ishtp_get_workqueue(cl_device), &client_data->work);
+       return 0;
+ }
+@@ -900,7 +900,7 @@ static int hid_ishtp_cl_resume(struct de
+       hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+                       hid_ishtp_cl);
+-      schedule_work(&client_data->resume_work);
++      queue_work(ishtp_get_workqueue(cl_device), &client_data->resume_work);
+       return 0;
+ }
+--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
++++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
+@@ -541,7 +541,7 @@ void ishtp_cl_bus_rx_event(struct ishtp_
+               return;
+       if (device->event_cb)
+-              schedule_work(&device->event_work);
++              queue_work(device->ishtp_dev->unbound_wq, &device->event_work);
+ }
+ /**
+@@ -880,6 +880,22 @@ struct device *ishtp_get_pci_device(stru
+ EXPORT_SYMBOL(ishtp_get_pci_device);
+ /**
++ * ishtp_get_workqueue - Retrieve the workqueue associated with an ISHTP device
++ * @cl_device: Pointer to the ISHTP client device structure
++ *
++ * Returns the workqueue_struct pointer (unbound_wq) associated with the given
++ * ISHTP client device. This workqueue is typically used for scheduling work
++ * related to the device.
++ *
++ * Return: Pointer to struct workqueue_struct.
++ */
++struct workqueue_struct *ishtp_get_workqueue(struct ishtp_cl_device *cl_device)
++{
++      return cl_device->ishtp_dev->unbound_wq;
++}
++EXPORT_SYMBOL(ishtp_get_workqueue);
++
++/**
+  * ishtp_trace_callback() - Return trace callback
+  * @cl_device: ISH-TP client device instance
+  *
+--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
++++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
+@@ -573,7 +573,7 @@ void ishtp_hbm_dispatch(struct ishtp_dev
+               /* Start firmware loading process if it has loader capability */
+               if (version_res->host_version_supported & ISHTP_SUPPORT_CAP_LOADER)
+-                      schedule_work(&dev->work_fw_loader);
++                      queue_work(dev->unbound_wq, &dev->work_fw_loader);
+               dev->version.major_version = HBM_MAJOR_VERSION;
+               dev->version.minor_version = HBM_MINOR_VERSION;
+@@ -864,7 +864,7 @@ void       recv_hbm(struct ishtp_device *dev,
+       dev->rd_msg_fifo_tail = (dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
+               (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
+       spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+-      schedule_work(&dev->bh_hbm_work);
++      queue_work(dev->unbound_wq, &dev->bh_hbm_work);
+ eoi:
+       return;
+ }
+--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
++++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+@@ -166,6 +166,9 @@ struct ishtp_device {
+       struct hbm_version version;
+       int transfer_path; /* Choice of transfer path: IPC or DMA */
++      /* Alloc a dedicated unbound workqueue for ishtp device */
++      struct workqueue_struct *unbound_wq;
++
+       /* work structure for scheduling firmware loading tasks */
+       struct work_struct work_fw_loader;
+       /* waitq for waiting for command response from the firmware loader */
+--- a/include/linux/intel-ish-client-if.h
++++ b/include/linux/intel-ish-client-if.h
+@@ -87,6 +87,8 @@ bool ishtp_wait_resume(struct ishtp_devi
+ ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device);
+ /* Get device pointer of PCI device for DMA acces */
+ struct device *ishtp_get_pci_device(struct ishtp_cl_device *cl_device);
++/* Get the ISHTP workqueue */
++struct workqueue_struct *ishtp_get_workqueue(struct ishtp_cl_device *cl_device);
+ struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device);
+ void ishtp_cl_free(struct ishtp_cl *cl);
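
For drivers outside ISH, the underlying pattern is generic: allocate an
unbound workqueue once, then queue latency-sensitive work on it instead
of on the per-CPU system_wq. A hedged sketch (all names illustrative):

  static struct workqueue_struct *my_wq;
  static struct work_struct my_resume_work;

  static int my_probe(void)
  {
          /* WQ_UNBOUND: work may run on any idle CPU rather than
           * preferring the (possibly busy) CPU that queued it */
          my_wq = alloc_workqueue("my_unbound", WQ_UNBOUND, 0);
          if (!my_wq)
                  return -ENOMEM;
          return 0;
  }

  static int my_resume(void)
  {
          /* was: schedule_work(&my_resume_work) via system_wq */
          queue_work(my_wq, &my_resume_work);
          return 0;
  }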
diff --git a/queue-6.12/series b/queue-6.12/series
index 2127b9e9aeb61482bb39eaaf80e7afb5c589cd9e..50a359ae1ec016da8395f0d1084c47e2a01d8810 100644 (file)
--- a/queue-6.12/series
+++ b/queue-6.12/series
@@ -119,3 +119,7 @@ dmaengine-stm32-dmamux-fix-of-node-leak-on-route-allocation-failure.patch
 dmaengine-ti-dma-crossbar-fix-device-leak-on-dra7x-route-allocation.patch
 dmaengine-ti-dma-crossbar-fix-device-leak-on-am335x-route-allocation.patch
 dmaengine-ti-k3-udma-fix-device-leak-on-udma-lookup.patch
+hid-intel-ish-hid-use-dedicated-unbound-workqueues-to-prevent-resume-blocking.patch
+hid-intel-ish-hid-fix-wcast-function-type-strict-in-devm_ishtp_alloc_workqueue.patch
+btrfs-fix-deadlock-in-wait_current_trans-due-to-ignored-transaction-type.patch
+xfs-set-max_agbno-to-allow-sparse-alloc-of-last-full-inode-chunk.patch
diff --git a/queue-6.12/xfs-set-max_agbno-to-allow-sparse-alloc-of-last-full-inode-chunk.patch b/queue-6.12/xfs-set-max_agbno-to-allow-sparse-alloc-of-last-full-inode-chunk.patch
new file mode 100644 (file)
index 0000000..f5d1e64
--- /dev/null
@@ -0,0 +1,82 @@
+From c360004c0160dbe345870f59f24595519008926f Mon Sep 17 00:00:00 2001
+From: Brian Foster <bfoster@redhat.com>
+Date: Fri, 9 Jan 2026 12:49:05 -0500
+Subject: xfs: set max_agbno to allow sparse alloc of last full inode chunk
+
+From: Brian Foster <bfoster@redhat.com>
+
+commit c360004c0160dbe345870f59f24595519008926f upstream.
+
+Sparse inode cluster allocation sets min/max agbno values to avoid
+allocating an inode cluster that might map to an invalid inode
+chunk. For example, we can't have an inode record mapped to agbno 0
+or that extends past the end of a runt AG of misaligned size.
+
+The initial calculation of max_agbno is unnecessarily conservative,
+however. This has triggered a corner case allocation failure where a
+small runt AG (i.e. 2063 blocks) is mostly full save for an extent
+to the EOFS boundary: [2050,13]. max_agbno is set to 2048 in this
+case, which happens to be the offset of the last possible valid
+inode chunk in the AG. In practice, we should be able to allocate
+the 4-block cluster at agbno 2052 to map to the parent inode record
+at agbno 2048, but the max_agbno value precludes it.
+
+Note that this can result in filesystem shutdown via dirty trans
+cancel on stable kernels prior to commit 9eb775968b68 ("xfs: walk
+all AGs if TRYLOCK passed to xfs_alloc_vextent_iterate_ags") because
+the tail AG selection by the allocator sets t_highest_agno on the
+transaction. If the inode allocator spins around and finds an inode
+chunk with free inodes in an earlier AG, the subsequent dir name
+creation path may still fail to allocate due to the AG restriction
+and cancel.
+
+To avoid this problem, update the max_agbno calculation to the agbno
+prior to the last chunk aligned agbno in the AG. This is not
+necessarily the last valid allocation target for a sparse chunk, but
+since inode chunks (i.e. records) are chunk aligned and sparse
+allocs are cluster sized/aligned, this allows the sb_spino_align
+alignment restriction to take over and round down the max effective
+agbno to within the last valid inode chunk in the AG.
+
+Note that even though the allocator improvements in the
+aforementioned commit seem to avoid this particular dirty trans
+cancel situation, the max_agbno logic improvement still applies as
+we should be able to allocate from an AG that has been appropriately
+selected. The more important target for this patch however are
+older/stable kernels prior to this allocator rework/improvement.
+
+Cc: stable@vger.kernel.org # v4.2
+Fixes: 56d1115c9bc7 ("xfs: allocate sparse inode chunks on full chunk allocation failure")
+Signed-off-by: Brian Foster <bfoster@redhat.com>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Signed-off-by: Brian Foster <bfoster@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/libxfs/xfs_ialloc.c |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/fs/xfs/libxfs/xfs_ialloc.c
++++ b/fs/xfs/libxfs/xfs_ialloc.c
+@@ -850,15 +850,16 @@ sparse_alloc:
+                * invalid inode records, such as records that start at agbno 0
+                * or extend beyond the AG.
+                *
+-               * Set min agbno to the first aligned, non-zero agbno and max to
+-               * the last aligned agbno that is at least one full chunk from
+-               * the end of the AG.
++               * Set min agbno to the first chunk aligned, non-zero agbno and
++               * max to one less than the last chunk aligned agbno from the
++               * end of the AG. We subtract 1 from max so that the cluster
++               * allocation alignment takes over and allows allocation within
++               * the last full inode chunk in the AG.
+                */
+               args.min_agbno = args.mp->m_sb.sb_inoalignmt;
+               args.max_agbno = round_down(xfs_ag_block_count(args.mp,
+                                                       pag->pag_agno),
+-                                          args.mp->m_sb.sb_inoalignmt) -
+-                               igeo->ialloc_blks;
++                                          args.mp->m_sb.sb_inoalignmt) - 1;
+               error = xfs_alloc_vextent_near_bno(&args,
+                               XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
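
A worked instance of the max_agbno arithmetic, using the runt AG from
the commit message (assuming 8-block inode chunks and inode alignment
and 4-block sparse clusters; those values fit the message's numbers but
are not stated in it):

  /*
   * agsize = 2063 blocks, free extent = [2050, 13]
   *
   * old: max_agbno = round_down(2063, 8) - 8 = 2056 - 8 = 2048
   *      -> the aligned 4-block cluster at agbno 2052 is ruled out,
   *         so the allocation fails even though the space is usable
   *
   * new: max_agbno = round_down(2063, 8) - 1 = 2055
   *      -> the cluster [2052, 4] ends at agbno 2055 and is allowed;
   *         sb_spino_align keeps its start aligned within the last
   *         full inode chunk at agbno 2048
   */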