git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.12-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 11 Feb 2025 10:32:26 +0000 (11:32 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 11 Feb 2025 10:32:26 +0000 (11:32 +0100)
added patches:
btrfs-avoid-monopolizing-a-core-when-activating-a-swap-file.patch
mips-ftrace-declare-ftrace_get_parent_ra_addr-as-static.patch
revert-btrfs-avoid-monopolizing-a-core-when-activating-a-swap-file.patch
s390-fpu-add-fpc-exception-handler-remove-fixup-section-again.patch
spi-atmel-qspi-memory-barriers-after-memory-mapped-i-o.patch
spi-atmel-quadspi-create-atmel_qspi_ops-to-support-newer-soc-families.patch
xfs-attach-dquot-buffer-to-dquot-log-item-buffer.patch
xfs-avoid-nested-calls-to-__xfs_trans_commit.patch
xfs-clean-up-log-item-accesses-in-xfs_qm_dqflush-_done.patch
xfs-convert-quotacheck-to-attach-dquot-buffers.patch
xfs-don-t-lose-solo-dquot-update-transactions.patch
xfs-don-t-lose-solo-superblock-counter-update-transactions.patch
xfs-fix-mount-hang-during-primary-superblock-recovery-failure.patch
xfs-lock-dquot-buffer-before-detaching-dquot-from-b_li_list.patch
xfs-release-the-dquot-buf-outside-of-qli_lock.patch
xfs-separate-dquot-buffer-reads-from-xfs_dqflush.patch

17 files changed:
queue-6.12/btrfs-avoid-monopolizing-a-core-when-activating-a-swap-file.patch [new file with mode: 0644]
queue-6.12/mips-ftrace-declare-ftrace_get_parent_ra_addr-as-static.patch [new file with mode: 0644]
queue-6.12/revert-btrfs-avoid-monopolizing-a-core-when-activating-a-swap-file.patch [new file with mode: 0644]
queue-6.12/s390-fpu-add-fpc-exception-handler-remove-fixup-section-again.patch [new file with mode: 0644]
queue-6.12/series
queue-6.12/spi-atmel-qspi-memory-barriers-after-memory-mapped-i-o.patch [new file with mode: 0644]
queue-6.12/spi-atmel-quadspi-create-atmel_qspi_ops-to-support-newer-soc-families.patch [new file with mode: 0644]
queue-6.12/xfs-attach-dquot-buffer-to-dquot-log-item-buffer.patch [new file with mode: 0644]
queue-6.12/xfs-avoid-nested-calls-to-__xfs_trans_commit.patch [new file with mode: 0644]
queue-6.12/xfs-clean-up-log-item-accesses-in-xfs_qm_dqflush-_done.patch [new file with mode: 0644]
queue-6.12/xfs-convert-quotacheck-to-attach-dquot-buffers.patch [new file with mode: 0644]
queue-6.12/xfs-don-t-lose-solo-dquot-update-transactions.patch [new file with mode: 0644]
queue-6.12/xfs-don-t-lose-solo-superblock-counter-update-transactions.patch [new file with mode: 0644]
queue-6.12/xfs-fix-mount-hang-during-primary-superblock-recovery-failure.patch [new file with mode: 0644]
queue-6.12/xfs-lock-dquot-buffer-before-detaching-dquot-from-b_li_list.patch [new file with mode: 0644]
queue-6.12/xfs-release-the-dquot-buf-outside-of-qli_lock.patch [new file with mode: 0644]
queue-6.12/xfs-separate-dquot-buffer-reads-from-xfs_dqflush.patch [new file with mode: 0644]

diff --git a/queue-6.12/btrfs-avoid-monopolizing-a-core-when-activating-a-swap-file.patch b/queue-6.12/btrfs-avoid-monopolizing-a-core-when-activating-a-swap-file.patch
new file mode 100644 (file)
index 0000000..8e353d7
--- /dev/null
@@ -0,0 +1,42 @@
+From stable+bounces-114136-greg=kroah.com@vger.kernel.org Thu Feb  6 17:23:10 2025
+From: Koichiro Den <koichiro.den@canonical.com>
+Date: Fri,  7 Feb 2025 01:22:17 +0900
+Subject: btrfs: avoid monopolizing a core when activating a swap file
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: wqu@suse.com, fdmanana@suse.com, dsterba@suse.com
+Message-ID: <20250206162217.1387360-2-koichiro.den@canonical.com>
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit 2c8507c63f5498d4ee4af404a8e44ceae4345056 upstream.
+
+This commit re-attempts the backport of the change to the linux-6.12.y
+branch. Commit 9f372e86b9bd ("btrfs: avoid monopolizing a core when
+activating a swap file") on this branch was reverted.
+
+During swap activation we iterate over the extents of a file and we can
+have many thousands of them, so we can end up in a busy loop monopolizing
+a core. Avoid this by doing a voluntary reschedule after processing each
+extent.
+
+CC: stable@vger.kernel.org # 5.4+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Koichiro Den <koichiro.den@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -10142,6 +10142,8 @@ static int btrfs_swap_activate(struct sw
+                       ret = -EINTR;
+                       goto out;
+               }
++
++              cond_resched();
+       }
+       if (bsi.block_len)
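
The fix itself is small; the shape of the change is a voluntary reschedule inside the extent-walk loop described in the commit message. A minimal sketch of the pattern, with illustrative names rather than the real btrfs helpers:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

/*
 * Illustrative only: walk a potentially huge number of per-file extents
 * without monopolizing a CPU, yielding once per iteration.
 */
static int walk_swap_extents(struct inode *inode, u64 nr_extents)
{
	u64 i;

	for (i = 0; i < nr_extents; i++) {
		if (fatal_signal_pending(current))
			return -EINTR;

		/* ... map and record one extent here ... */

		cond_resched();		/* the call this patch adds */
	}
	return 0;
}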
diff --git a/queue-6.12/mips-ftrace-declare-ftrace_get_parent_ra_addr-as-static.patch b/queue-6.12/mips-ftrace-declare-ftrace_get_parent_ra_addr-as-static.patch
new file mode 100644 (file)
index 0000000..33aeb3e
--- /dev/null
@@ -0,0 +1,47 @@
+From ddd068d81445b17ac0bed084dfeb9e58b4df3ddd Mon Sep 17 00:00:00 2001
+From: WangYuli <wangyuli@uniontech.com>
+Date: Sat, 4 Jan 2025 22:47:08 +0800
+Subject: MIPS: ftrace: Declare ftrace_get_parent_ra_addr() as static
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: WangYuli <wangyuli@uniontech.com>
+
+commit ddd068d81445b17ac0bed084dfeb9e58b4df3ddd upstream.
+
+Declare ftrace_get_parent_ra_addr() as static to suppress clang
+compiler warning that 'no previous prototype'. This function is
+not intended to be called from other parts.
+
+Fix follow error with clang-19:
+
+arch/mips/kernel/ftrace.c:251:15: error: no previous prototype for function 'ftrace_get_parent_ra_addr' [-Werror,-Wmissing-prototypes]
+  251 | unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+      |               ^
+arch/mips/kernel/ftrace.c:251:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
+  251 | unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+      | ^
+      | static
+1 error generated.
+
+Signed-off-by: WangYuli <wangyuli@uniontech.com>
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kernel/ftrace.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/kernel/ftrace.c
++++ b/arch/mips/kernel/ftrace.c
+@@ -248,7 +248,7 @@ int ftrace_disable_ftrace_graph_caller(v
+ #define S_R_SP        (0xafb0 << 16)  /* s{d,w} R, offset(sp) */
+ #define OFFSET_MASK   0xffff  /* stack offset range: 0 ~ PT_SIZE */
+-unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
++static unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+               old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
+ {
+       unsigned long sp, ip, tmp;
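
The warning being silenced is plain -Wmissing-prototypes, not anything MIPS-specific. A standalone illustration (hypothetical file, not kernel code) of why adding static is enough, compiled with clang -Wmissing-prototypes:

/* foo.c: clang -Wmissing-prototypes -c foo.c */

/* Global definition with no prior prototype: clang warns here. */
unsigned long helper_global(unsigned long x)
{
	return x + 1;
}

/* Internal linkage: no prototype is expected, so no warning. */
static unsigned long helper_static(unsigned long x)
{
	return x + 2;
}

unsigned long use_them(unsigned long x);	/* prototype, so no warning below */

unsigned long use_them(unsigned long x)
{
	return helper_global(x) + helper_static(x);
}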
diff --git a/queue-6.12/revert-btrfs-avoid-monopolizing-a-core-when-activating-a-swap-file.patch b/queue-6.12/revert-btrfs-avoid-monopolizing-a-core-when-activating-a-swap-file.patch
new file mode 100644 (file)
index 0000000..eb1d669
--- /dev/null
@@ -0,0 +1,36 @@
+From stable+bounces-114135-greg=kroah.com@vger.kernel.org Thu Feb  6 17:23:08 2025
+From: Koichiro Den <koichiro.den@canonical.com>
+Date: Fri,  7 Feb 2025 01:22:16 +0900
+Subject: Revert "btrfs: avoid monopolizing a core when activating a swap file"
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: wqu@suse.com, fdmanana@suse.com, dsterba@suse.com
+Message-ID: <20250206162217.1387360-1-koichiro.den@canonical.com>
+
+From: Koichiro Den <koichiro.den@canonical.com>
+
+This reverts commit 9f372e86b9bd1914df58c8f6e30939b7a224c6b0.
+
+The backport for linux-6.12.y, commit 9f372e86b9bd ("btrfs: avoid
+monopolizing a core when activating a swap file"), inserted
+cond_resched() in the wrong location.
+
+Revert it now; a subsequent commit will re-backport the original patch.
+
+Fixes: 9f372e86b9bd ("btrfs: avoid monopolizing a core when activating a swap file") # linux-6.12.y
+Signed-off-by: Koichiro Den <koichiro.den@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7200,8 +7200,6 @@ noinline int can_nocow_extent(struct ino
+                       ret = -EAGAIN;
+                       goto out;
+               }
+-
+-              cond_resched();
+       }
+       if (file_extent)
diff --git a/queue-6.12/s390-fpu-add-fpc-exception-handler-remove-fixup-section-again.patch b/queue-6.12/s390-fpu-add-fpc-exception-handler-remove-fixup-section-again.patch
new file mode 100644 (file)
index 0000000..4bda544
--- /dev/null
@@ -0,0 +1,115 @@
+From ae02615b7fcea9ce9a4ec40b3c5b5dafd322b179 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <hca@linux.ibm.com>
+Date: Fri, 10 Jan 2025 11:52:17 +0100
+Subject: s390/fpu: Add fpc exception handler / remove fixup section again
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+commit ae02615b7fcea9ce9a4ec40b3c5b5dafd322b179 upstream.
+
+The fixup section was added again by mistake when test_fp_ctl() was
+removed. The reason for the removal of the fixup section is described in
+commit 484a8ed8b7d1 ("s390/extable: add dedicated uaccess handler").
+Remove it again for the same reason.
+
+Add an exception handler which handles exceptions when the floating point
+control register is attempted to be set to invalid values. The exception
+handler sets the floating point control register to zero and continues
+execution at the specified address.
+
+The new sfpc inline assembly is open-coded to make back porting a bit
+easier.
+
+Fixes: 702644249d3e ("s390/fpu: get rid of test_fp_ctl()")
+Cc: stable@vger.kernel.org
+Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/include/asm/asm-extable.h |    4 ++++
+ arch/s390/include/asm/fpu-insn.h    |   17 +++++------------
+ arch/s390/kernel/vmlinux.lds.S      |    1 -
+ arch/s390/mm/extable.c              |    9 +++++++++
+ 4 files changed, 18 insertions(+), 13 deletions(-)
+
+--- a/arch/s390/include/asm/asm-extable.h
++++ b/arch/s390/include/asm/asm-extable.h
+@@ -14,6 +14,7 @@
+ #define EX_TYPE_UA_LOAD_REG   5
+ #define EX_TYPE_UA_LOAD_REGPAIR       6
+ #define EX_TYPE_ZEROPAD               7
++#define EX_TYPE_FPC           8
+ #define EX_DATA_REG_ERR_SHIFT 0
+ #define EX_DATA_REG_ERR               GENMASK(3, 0)
+@@ -84,4 +85,7 @@
+ #define EX_TABLE_ZEROPAD(_fault, _target, _regdata, _regaddr)         \
+       __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_ZEROPAD, _regdata, _regaddr, 0)
++#define EX_TABLE_FPC(_fault, _target)                                 \
++      __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FPC, __stringify(%%r0), __stringify(%%r0), 0)
++
+ #endif /* __ASM_EXTABLE_H */
+--- a/arch/s390/include/asm/fpu-insn.h
++++ b/arch/s390/include/asm/fpu-insn.h
+@@ -100,19 +100,12 @@ static __always_inline void fpu_lfpc(uns
+  */
+ static inline void fpu_lfpc_safe(unsigned int *fpc)
+ {
+-      u32 tmp;
+-
+       instrument_read(fpc, sizeof(*fpc));
+-      asm volatile("\n"
+-              "0:     lfpc    %[fpc]\n"
+-              "1:     nopr    %%r7\n"
+-              ".pushsection .fixup, \"ax\"\n"
+-              "2:     lghi    %[tmp],0\n"
+-              "       sfpc    %[tmp]\n"
+-              "       jg      1b\n"
+-              ".popsection\n"
+-              EX_TABLE(1b, 2b)
+-              : [tmp] "=d" (tmp)
++      asm_inline volatile(
++              "       lfpc    %[fpc]\n"
++              "0:     nopr    %%r7\n"
++              EX_TABLE_FPC(0b, 0b)
++              :
+               : [fpc] "Q" (*fpc)
+               : "memory");
+ }
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -52,7 +52,6 @@ SECTIONS
+               SOFTIRQENTRY_TEXT
+               FTRACE_HOTPATCH_TRAMPOLINES_TEXT
+               *(.text.*_indirect_*)
+-              *(.fixup)
+               *(.gnu.warning)
+               . = ALIGN(PAGE_SIZE);
+               _etext = .;             /* End of text section */
+--- a/arch/s390/mm/extable.c
++++ b/arch/s390/mm/extable.c
+@@ -77,6 +77,13 @@ static bool ex_handler_zeropad(const str
+       return true;
+ }
++static bool ex_handler_fpc(const struct exception_table_entry *ex, struct pt_regs *regs)
++{
++      asm volatile("sfpc      %[val]\n" : : [val] "d" (0));
++      regs->psw.addr = extable_fixup(ex);
++      return true;
++}
++
+ bool fixup_exception(struct pt_regs *regs)
+ {
+       const struct exception_table_entry *ex;
+@@ -99,6 +106,8 @@ bool fixup_exception(struct pt_regs *reg
+               return ex_handler_ua_load_reg(ex, true, regs);
+       case EX_TYPE_ZEROPAD:
+               return ex_handler_zeropad(ex, regs);
++      case EX_TYPE_FPC:
++              return ex_handler_fpc(ex, regs);
+       }
+       panic("invalid exception table entry");
+ }
diff --git a/queue-6.12/series b/queue-6.12/series
index b0cb73b2048a84f83709c958e7bfff4ff4e5af70..dfd22fc47cafa9ea6d65be50342ea6922aa1b118 100644 (file)
@@ -392,3 +392,19 @@ ptp-ensure-info-enable-callback-is-always-set.patch
 rdma-mlx5-fix-a-race-for-an-odp-mr-which-leads-to-cqe-with-error.patch
 rtc-zynqmp-fix-optional-clock-name-property.patch
 timers-migration-fix-off-by-one-root-mis-connection.patch
+s390-fpu-add-fpc-exception-handler-remove-fixup-section-again.patch
+mips-ftrace-declare-ftrace_get_parent_ra_addr-as-static.patch
+xfs-avoid-nested-calls-to-__xfs_trans_commit.patch
+xfs-don-t-lose-solo-superblock-counter-update-transactions.patch
+xfs-don-t-lose-solo-dquot-update-transactions.patch
+xfs-separate-dquot-buffer-reads-from-xfs_dqflush.patch
+xfs-clean-up-log-item-accesses-in-xfs_qm_dqflush-_done.patch
+xfs-attach-dquot-buffer-to-dquot-log-item-buffer.patch
+xfs-convert-quotacheck-to-attach-dquot-buffers.patch
+xfs-release-the-dquot-buf-outside-of-qli_lock.patch
+xfs-lock-dquot-buffer-before-detaching-dquot-from-b_li_list.patch
+xfs-fix-mount-hang-during-primary-superblock-recovery-failure.patch
+spi-atmel-quadspi-create-atmel_qspi_ops-to-support-newer-soc-families.patch
+spi-atmel-qspi-memory-barriers-after-memory-mapped-i-o.patch
+revert-btrfs-avoid-monopolizing-a-core-when-activating-a-swap-file.patch
+btrfs-avoid-monopolizing-a-core-when-activating-a-swap-file.patch
diff --git a/queue-6.12/spi-atmel-qspi-memory-barriers-after-memory-mapped-i-o.patch b/queue-6.12/spi-atmel-qspi-memory-barriers-after-memory-mapped-i-o.patch
new file mode 100644 (file)
index 0000000..32f79ab
--- /dev/null
@@ -0,0 +1,90 @@
+From be92ab2de0ee1a13291c3b47b2d7eb24d80c0a2c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Bence=20Cs=C3=B3k=C3=A1s?= <csokas.bence@prolan.hu>
+Date: Thu, 19 Dec 2024 10:12:58 +0100
+Subject: spi: atmel-qspi: Memory barriers after memory-mapped I/O
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bence Csókás <csokas.bence@prolan.hu>
+
+commit be92ab2de0ee1a13291c3b47b2d7eb24d80c0a2c upstream.
+
+The QSPI peripheral control and status registers are
+accessible via the SoC's APB bus, whereas MMIO transactions'
+data travels on the AHB bus.
+
+Microchip documentation and even sample code from Atmel
+emphasises the need for a memory barrier before the first
+MMIO transaction to the AHB-connected QSPI, and before the
+last write to its registers via APB. This is achieved by
+the following lines in `atmel_qspi_transfer()`:
+
+       /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+       (void)atmel_qspi_read(aq, QSPI_IFR);
+
+However, the current documentation makes no mention to
+synchronization requirements in the other direction, i.e.
+after the last data written via AHB, and before the first
+register access on APB.
+
+In our case, we were facing an issue where the QSPI peripheral
+would cease to send any new CSR (nCS Rise) interrupts,
+leading to a timeout in `atmel_qspi_wait_for_completion()`
+and ultimately this panic in higher levels:
+
+       ubi0 error: ubi_io_write: error -110 while writing 63108 bytes
+ to PEB 491:128, written 63104 bytes
+
+After months of extensive research of the codebase, fiddling
+around the debugger with kgdb, and back-and-forth with
+Microchip, we came to the conclusion that the issue is
+probably that the peripheral is still busy receiving on AHB
+when the LASTXFER bit is written to its Control Register
+on APB, therefore this write gets lost, and the peripheral
+still thinks there is more data to come in the MMIO transfer.
+This was first formulated when we noticed that doubling the
+write() of QSPI_CR_LASTXFER seemed to solve the problem.
+
+Ultimately, the solution is to introduce memory barriers
+after the AHB-mapped MMIO transfers, to ensure ordering.
+
+Fixes: d5433def3153 ("mtd: spi-nor: atmel-quadspi: Add spi-mem support to atmel-quadspi")
+Cc: Hari.PrasathGE@microchip.com
+Cc: Mahesh.Abotula@microchip.com
+Cc: Marco.Cardellini@microchip.com
+Cc: stable@vger.kernel.org # c0a0203cf579: ("spi: atmel-quadspi: Create `atmel_qspi_ops`"...)
+Cc: stable@vger.kernel.org # 6.x.y
+Signed-off-by: Bence Csókás <csokas.bence@prolan.hu>
+Link: https://patch.msgid.link/20241219091258.395187-1-csokas.bence@prolan.hu
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/atmel-quadspi.c |   11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/spi/atmel-quadspi.c
++++ b/drivers/spi/atmel-quadspi.c
+@@ -454,13 +454,20 @@ static int atmel_qspi_transfer(struct sp
+       (void)atmel_qspi_read(aq, QSPI_IFR);
+       /* Send/Receive data */
+-      if (op->data.dir == SPI_MEM_DATA_IN)
++      if (op->data.dir == SPI_MEM_DATA_IN) {
+               memcpy_fromio(op->data.buf.in, aq->mem + offset,
+                             op->data.nbytes);
+-      else
++
++              /* Synchronize AHB and APB accesses again */
++              rmb();
++      } else {
+               memcpy_toio(aq->mem + offset, op->data.buf.out,
+                           op->data.nbytes);
++              /* Synchronize AHB and APB accesses again */
++              wmb();
++      }
++
+       /* Release the chip-select */
+       atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
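
In generic terms, the ordering the patch enforces is: data streamed through the memory-mapped AHB window must be ordered ahead of the subsequent control-register write on APB, and symmetrically MMIO reads ahead of later status accesses. A sketch of that pattern with hypothetical names, not the actual atmel-quadspi helpers:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Illustrative sketch only; 'win' stands for the AHB data window and 'cr'
 * for an APB control register analogous to QSPI_CR.
 */
static void push_data_then_release_cs(void __iomem *win, void __iomem *cr,
				      const void *buf, size_t len, u32 lastxfer)
{
	memcpy_toio(win, buf, len);	/* data phase over AHB */
	wmb();				/* order the data ahead of ... */
	writel(lastxfer, cr);		/* ... the APB control write */
}

static void pull_data_then_check(void __iomem *win, void *buf, size_t len)
{
	memcpy_fromio(buf, win, len);	/* receive phase over AHB */
	rmb();				/* order reads before later APB accesses */
}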
diff --git a/queue-6.12/spi-atmel-quadspi-create-atmel_qspi_ops-to-support-newer-soc-families.patch b/queue-6.12/spi-atmel-quadspi-create-atmel_qspi_ops-to-support-newer-soc-families.patch
new file mode 100644 (file)
index 0000000..df88c88
--- /dev/null
@@ -0,0 +1,207 @@
+From c0a0203cf57963792d59b3e4317a1d07b73df42a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Cs=C3=B3k=C3=A1s=2C=20Bence?= <csokas.bence@prolan.hu>
+Date: Thu, 28 Nov 2024 18:43:14 +0100
+Subject: spi: atmel-quadspi: Create `atmel_qspi_ops` to support newer SoC families
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Csókás, Bence <csokas.bence@prolan.hu>
+
+commit c0a0203cf57963792d59b3e4317a1d07b73df42a upstream.
+
+Refactor the code to introduce an ops struct, to prepare for merging
+support for later SoCs, such as SAMA7G5. This code was based on the
+vendor's kernel (linux4microchip). Cc'ing original contributors.
+
+Signed-off-by: Csókás, Bence <csokas.bence@prolan.hu>
+Link: https://patch.msgid.link/20241128174316.3209354-2-csokas.bence@prolan.hu
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/atmel-quadspi.c |  111 ++++++++++++++++++++++++++++++--------------
+ 1 file changed, 77 insertions(+), 34 deletions(-)
+
+--- a/drivers/spi/atmel-quadspi.c
++++ b/drivers/spi/atmel-quadspi.c
+@@ -138,11 +138,15 @@
+ #define QSPI_WPSR_WPVSRC_MASK           GENMASK(15, 8)
+ #define QSPI_WPSR_WPVSRC(src)           (((src) << 8) & QSPI_WPSR_WPVSRC)
++#define ATMEL_QSPI_TIMEOUT            1000    /* ms */
++
+ struct atmel_qspi_caps {
+       bool has_qspick;
+       bool has_ricr;
+ };
++struct atmel_qspi_ops;
++
+ struct atmel_qspi {
+       void __iomem            *regs;
+       void __iomem            *mem;
+@@ -150,13 +154,22 @@ struct atmel_qspi {
+       struct clk              *qspick;
+       struct platform_device  *pdev;
+       const struct atmel_qspi_caps *caps;
++      const struct atmel_qspi_ops *ops;
+       resource_size_t         mmap_size;
+       u32                     pending;
++      u32                     irq_mask;
+       u32                     mr;
+       u32                     scr;
+       struct completion       cmd_completion;
+ };
++struct atmel_qspi_ops {
++      int (*set_cfg)(struct atmel_qspi *aq, const struct spi_mem_op *op,
++                     u32 *offset);
++      int (*transfer)(struct spi_mem *mem, const struct spi_mem_op *op,
++                      u32 offset);
++};
++
+ struct atmel_qspi_mode {
+       u8 cmd_buswidth;
+       u8 addr_buswidth;
+@@ -404,10 +417,60 @@ static int atmel_qspi_set_cfg(struct atm
+       return 0;
+ }
++static int atmel_qspi_wait_for_completion(struct atmel_qspi *aq, u32 irq_mask)
++{
++      int err = 0;
++      u32 sr;
++
++      /* Poll INSTRuction End status */
++      sr = atmel_qspi_read(aq, QSPI_SR);
++      if ((sr & irq_mask) == irq_mask)
++              return 0;
++
++      /* Wait for INSTRuction End interrupt */
++      reinit_completion(&aq->cmd_completion);
++      aq->pending = sr & irq_mask;
++      aq->irq_mask = irq_mask;
++      atmel_qspi_write(irq_mask, aq, QSPI_IER);
++      if (!wait_for_completion_timeout(&aq->cmd_completion,
++                                       msecs_to_jiffies(ATMEL_QSPI_TIMEOUT)))
++              err = -ETIMEDOUT;
++      atmel_qspi_write(irq_mask, aq, QSPI_IDR);
++
++      return err;
++}
++
++static int atmel_qspi_transfer(struct spi_mem *mem,
++                             const struct spi_mem_op *op, u32 offset)
++{
++      struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
++
++      /* Skip to the final steps if there is no data */
++      if (!op->data.nbytes)
++              return atmel_qspi_wait_for_completion(aq,
++                                                    QSPI_SR_CMD_COMPLETED);
++
++      /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
++      (void)atmel_qspi_read(aq, QSPI_IFR);
++
++      /* Send/Receive data */
++      if (op->data.dir == SPI_MEM_DATA_IN)
++              memcpy_fromio(op->data.buf.in, aq->mem + offset,
++                            op->data.nbytes);
++      else
++              memcpy_toio(aq->mem + offset, op->data.buf.out,
++                          op->data.nbytes);
++
++      /* Release the chip-select */
++      atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
++
++      return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED);
++}
++
+ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+ {
+       struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
+-      u32 sr, offset;
++      u32 offset;
+       int err;
+       /*
+@@ -416,46 +479,20 @@ static int atmel_qspi_exec_op(struct spi
+        * when the flash memories overrun the controller's memory space.
+        */
+       if (op->addr.val + op->data.nbytes > aq->mmap_size)
+-              return -ENOTSUPP;
++              return -EOPNOTSUPP;
++
++      if (op->addr.nbytes > 4)
++              return -EOPNOTSUPP;
+       err = pm_runtime_resume_and_get(&aq->pdev->dev);
+       if (err < 0)
+               return err;
+-      err = atmel_qspi_set_cfg(aq, op, &offset);
++      err = aq->ops->set_cfg(aq, op, &offset);
+       if (err)
+               goto pm_runtime_put;
+-      /* Skip to the final steps if there is no data */
+-      if (op->data.nbytes) {
+-              /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+-              (void)atmel_qspi_read(aq, QSPI_IFR);
+-
+-              /* Send/Receive data */
+-              if (op->data.dir == SPI_MEM_DATA_IN)
+-                      memcpy_fromio(op->data.buf.in, aq->mem + offset,
+-                                    op->data.nbytes);
+-              else
+-                      memcpy_toio(aq->mem + offset, op->data.buf.out,
+-                                  op->data.nbytes);
+-
+-              /* Release the chip-select */
+-              atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
+-      }
+-
+-      /* Poll INSTRuction End status */
+-      sr = atmel_qspi_read(aq, QSPI_SR);
+-      if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+-              goto pm_runtime_put;
+-
+-      /* Wait for INSTRuction End interrupt */
+-      reinit_completion(&aq->cmd_completion);
+-      aq->pending = sr & QSPI_SR_CMD_COMPLETED;
+-      atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER);
+-      if (!wait_for_completion_timeout(&aq->cmd_completion,
+-                                       msecs_to_jiffies(1000)))
+-              err = -ETIMEDOUT;
+-      atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR);
++      err = aq->ops->transfer(mem, op, offset);
+ pm_runtime_put:
+       pm_runtime_mark_last_busy(&aq->pdev->dev);
+@@ -571,12 +608,17 @@ static irqreturn_t atmel_qspi_interrupt(
+               return IRQ_NONE;
+       aq->pending |= pending;
+-      if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
++      if ((aq->pending & aq->irq_mask) == aq->irq_mask)
+               complete(&aq->cmd_completion);
+       return IRQ_HANDLED;
+ }
++static const struct atmel_qspi_ops atmel_qspi_ops = {
++      .set_cfg = atmel_qspi_set_cfg,
++      .transfer = atmel_qspi_transfer,
++};
++
+ static int atmel_qspi_probe(struct platform_device *pdev)
+ {
+       struct spi_controller *ctrl;
+@@ -601,6 +643,7 @@ static int atmel_qspi_probe(struct platf
+       init_completion(&aq->cmd_completion);
+       aq->pdev = pdev;
++      aq->ops = &atmel_qspi_ops;
+       /* Map the registers */
+       aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
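
The refactor follows the usual kernel idiom of a per-variant operations table selected once at probe time, so that later SoC families (such as SAMA7G5) only add a new ops instance instead of branching throughout the driver. A condensed sketch of the idiom with hypothetical names:

#include <linux/types.h>

struct mydev;

struct mydev_ops {				/* one instance per SoC family */
	int (*set_cfg)(struct mydev *dev, u32 cfg, u32 *offset);
	int (*transfer)(struct mydev *dev, void *buf, size_t len, u32 offset);
};

struct mydev {
	const struct mydev_ops *ops;		/* picked once in probe() */
};

static int mydev_exec(struct mydev *dev, u32 cfg, void *buf, size_t len)
{
	u32 offset;
	int err = dev->ops->set_cfg(dev, cfg, &offset);

	if (err)
		return err;
	return dev->ops->transfer(dev, buf, len, offset);
}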
diff --git a/queue-6.12/xfs-attach-dquot-buffer-to-dquot-log-item-buffer.patch b/queue-6.12/xfs-attach-dquot-buffer-to-dquot-log-item-buffer.patch
new file mode 100644 (file)
index 0000000..482263c
--- /dev/null
@@ -0,0 +1,462 @@
+From stable+bounces-114297-greg=kroah.com@vger.kernel.org Fri Feb  7 20:27:57 2025
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Fri, 07 Feb 2025 11:27:51 -0800
+Subject: xfs: attach dquot buffer to dquot log item buffer
+To: djwong@kernel.org, xfs-stable@lists.linux.dev
+Cc: hch@lst.de, stable@vger.kernel.org
+Message-ID: <173895601500.3373740.6190035885607102385.stgit@frogsfrogsfrogs>
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit acc8f8628c3737108f36e5637f4d5daeaf96d90e upstream
+
+Ever since 6.12-rc1, I've observed a pile of warnings from the kernel
+when running fstests with quotas enabled:
+
+WARNING: CPU: 1 PID: 458580 at mm/page_alloc.c:4221 __alloc_pages_noprof+0xc9c/0xf18
+CPU: 1 UID: 0 PID: 458580 Comm: xfsaild/sda3 Tainted: G        W          6.12.0-rc6-djwa #rc6 6ee3e0e531f6457e2d26aa008a3b65ff184b377c
+<snip>
+Call trace:
+ __alloc_pages_noprof+0xc9c/0xf18
+ alloc_pages_mpol_noprof+0x94/0x240
+ alloc_pages_noprof+0x68/0xf8
+ new_slab+0x3e0/0x568
+ ___slab_alloc+0x5a0/0xb88
+ __slab_alloc.constprop.0+0x7c/0xf8
+ __kmalloc_noprof+0x404/0x4d0
+ xfs_buf_get_map+0x594/0xde0 [xfs 384cb02810558b4c490343c164e9407332118f88]
+ xfs_buf_read_map+0x64/0x2e0 [xfs 384cb02810558b4c490343c164e9407332118f88]
+ xfs_trans_read_buf_map+0x1dc/0x518 [xfs 384cb02810558b4c490343c164e9407332118f88]
+ xfs_qm_dqflush+0xac/0x468 [xfs 384cb02810558b4c490343c164e9407332118f88]
+ xfs_qm_dquot_logitem_push+0xe4/0x148 [xfs 384cb02810558b4c490343c164e9407332118f88]
+ xfsaild+0x3f4/0xde8 [xfs 384cb02810558b4c490343c164e9407332118f88]
+ kthread+0x110/0x128
+ ret_from_fork+0x10/0x20
+---[ end trace 0000000000000000 ]---
+
+This corresponds to the line:
+
+       WARN_ON_ONCE(current->flags & PF_MEMALLOC);
+
+within the NOFAIL checks.  What's happening here is that the XFS AIL is
+trying to write a disk quota update back into the filesystem, but for
+that it needs to read the ondisk buffer for the dquot.  The buffer is
+not in memory anymore, probably because it was evicted.  Regardless, the
+buffer cache tries to allocate a new buffer, but those allocations are
+NOFAIL.  The AIL thread has marked itself PF_MEMALLOC (aka noreclaim)
+since commit 43ff2122e6492b ("xfs: on-stack delayed write buffer lists")
+presumably because reclaim can push on XFS to push on the AIL.
+
+An easy way to fix this probably would have been to drop the NOFAIL flag
+from the xfs_buf allocation and open code a retry loop, but then there's
+still the problem that for bs>ps filesystems, the buffer itself could
+require up to 64k worth of pages.
+
+Inode items had similar behavior (multi-page cluster buffers that we
+don't want to allocate in the AIL) which we solved by making transaction
+precommit attach the inode cluster buffers to the dirty log item.  Let's
+solve the dquot problem in the same way.
+
+So: Make a real precommit handler to read the dquot buffer and attach it
+to the log item; pass it to dqflush in the push method; and have the
+iodone function detach the buffer once we've flushed everything.  Add a
+state flag to the log item to track when a thread has entered the
+precommit -> push mechanism to skip the detaching if it turns out that
+the dquot is very busy, as we don't hold the dquot lock between log item
+commit and AIL push).
+
+Reading and attaching the dquot buffer in the precommit hook is inspired
+by the work done for inode cluster buffers some time ago.
+
+Cc: <stable@vger.kernel.org> # v6.12
+Fixes: 903edea6c53f09 ("mm: warn about illegal __GFP_NOFAIL usage in a more appropriate location and manner")
+Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_dquot.c      |  129 ++++++++++++++++++++++++++++++++++++++++++++++--
+ fs/xfs/xfs_dquot.h      |    6 +-
+ fs/xfs/xfs_dquot_item.c |   39 +++++++++-----
+ fs/xfs/xfs_dquot_item.h |    7 ++
+ fs/xfs/xfs_qm.c         |    9 ++-
+ fs/xfs/xfs_trans_ail.c  |    2 
+ 6 files changed, 168 insertions(+), 24 deletions(-)
+
+
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -69,6 +69,30 @@ xfs_dquot_mark_sick(
+ }
+ /*
++ * Detach the dquot buffer if it's still attached, because we can get called
++ * through dqpurge after a log shutdown.  Caller must hold the dqflock or have
++ * otherwise isolated the dquot.
++ */
++void
++xfs_dquot_detach_buf(
++      struct xfs_dquot        *dqp)
++{
++      struct xfs_dq_logitem   *qlip = &dqp->q_logitem;
++      struct xfs_buf          *bp = NULL;
++
++      spin_lock(&qlip->qli_lock);
++      if (qlip->qli_item.li_buf) {
++              bp = qlip->qli_item.li_buf;
++              qlip->qli_item.li_buf = NULL;
++      }
++      spin_unlock(&qlip->qli_lock);
++      if (bp) {
++              list_del_init(&qlip->qli_item.li_bio_list);
++              xfs_buf_rele(bp);
++      }
++}
++
++/*
+  * This is called to free all the memory associated with a dquot
+  */
+ void
+@@ -76,6 +100,7 @@ xfs_qm_dqdestroy(
+       struct xfs_dquot        *dqp)
+ {
+       ASSERT(list_empty(&dqp->q_lru));
++      ASSERT(dqp->q_logitem.qli_item.li_buf == NULL);
+       kvfree(dqp->q_logitem.qli_item.li_lv_shadow);
+       mutex_destroy(&dqp->q_qlock);
+@@ -1140,6 +1165,7 @@ xfs_qm_dqflush_done(
+                       container_of(lip, struct xfs_dq_logitem, qli_item);
+       struct xfs_dquot        *dqp = qlip->qli_dquot;
+       struct xfs_ail          *ailp = lip->li_ailp;
++      struct xfs_buf          *bp = NULL;
+       xfs_lsn_t               tail_lsn;
+       /*
+@@ -1169,6 +1195,19 @@ xfs_qm_dqflush_done(
+        * Release the dq's flush lock since we're done with it.
+        */
+       xfs_dqfunlock(dqp);
++
++      /*
++       * If this dquot hasn't been dirtied since initiating the last dqflush,
++       * release the buffer reference.
++       */
++      spin_lock(&qlip->qli_lock);
++      if (!qlip->qli_dirty) {
++              bp = lip->li_buf;
++              lip->li_buf = NULL;
++      }
++      spin_unlock(&qlip->qli_lock);
++      if (bp)
++              xfs_buf_rele(bp);
+ }
+ void
+@@ -1191,7 +1230,7 @@ xfs_buf_dquot_io_fail(
+       spin_lock(&bp->b_mount->m_ail->ail_lock);
+       list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
+-              xfs_set_li_failed(lip, bp);
++              set_bit(XFS_LI_FAILED, &lip->li_flags);
+       spin_unlock(&bp->b_mount->m_ail->ail_lock);
+ }
+@@ -1243,6 +1282,7 @@ int
+ xfs_dquot_read_buf(
+       struct xfs_trans        *tp,
+       struct xfs_dquot        *dqp,
++      xfs_buf_flags_t         xbf_flags,
+       struct xfs_buf          **bpp)
+ {
+       struct xfs_mount        *mp = dqp->q_mount;
+@@ -1250,7 +1290,7 @@ xfs_dquot_read_buf(
+       int                     error;
+       error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
+-                                 mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
++                                 mp->m_quotainfo->qi_dqchunklen, xbf_flags,
+                                  &bp, &xfs_dquot_buf_ops);
+       if (error == -EAGAIN)
+               return error;
+@@ -1270,6 +1310,77 @@ out_abort:
+ }
+ /*
++ * Attach a dquot buffer to this dquot to avoid allocating a buffer during a
++ * dqflush, since dqflush can be called from reclaim context.
++ */
++int
++xfs_dquot_attach_buf(
++      struct xfs_trans        *tp,
++      struct xfs_dquot        *dqp)
++{
++      struct xfs_dq_logitem   *qlip = &dqp->q_logitem;
++      struct xfs_log_item     *lip = &qlip->qli_item;
++      int                     error;
++
++      spin_lock(&qlip->qli_lock);
++      if (!lip->li_buf) {
++              struct xfs_buf  *bp = NULL;
++
++              spin_unlock(&qlip->qli_lock);
++              error = xfs_dquot_read_buf(tp, dqp, 0, &bp);
++              if (error)
++                      return error;
++
++              /*
++               * Attach the dquot to the buffer so that the AIL does not have
++               * to read the dquot buffer to push this item.
++               */
++              xfs_buf_hold(bp);
++              spin_lock(&qlip->qli_lock);
++              lip->li_buf = bp;
++              xfs_trans_brelse(tp, bp);
++      }
++      qlip->qli_dirty = true;
++      spin_unlock(&qlip->qli_lock);
++
++      return 0;
++}
++
++/*
++ * Get a new reference the dquot buffer attached to this dquot for a dqflush
++ * operation.
++ *
++ * Returns 0 and a NULL bp if none was attached to the dquot; 0 and a locked
++ * bp; or -EAGAIN if the buffer could not be locked.
++ */
++int
++xfs_dquot_use_attached_buf(
++      struct xfs_dquot        *dqp,
++      struct xfs_buf          **bpp)
++{
++      struct xfs_buf          *bp = dqp->q_logitem.qli_item.li_buf;
++
++      /*
++       * A NULL buffer can happen if the dquot dirty flag was set but the
++       * filesystem shut down before transaction commit happened.  In that
++       * case we're not going to flush anyway.
++       */
++      if (!bp) {
++              ASSERT(xfs_is_shutdown(dqp->q_mount));
++
++              *bpp = NULL;
++              return 0;
++      }
++
++      if (!xfs_buf_trylock(bp))
++              return -EAGAIN;
++
++      xfs_buf_hold(bp);
++      *bpp = bp;
++      return 0;
++}
++
++/*
+  * Write a modified dquot to disk.
+  * The dquot must be locked and the flush lock too taken by caller.
+  * The flush lock will not be unlocked until the dquot reaches the disk,
+@@ -1283,7 +1394,8 @@ xfs_qm_dqflush(
+       struct xfs_buf          *bp)
+ {
+       struct xfs_mount        *mp = dqp->q_mount;
+-      struct xfs_log_item     *lip = &dqp->q_logitem.qli_item;
++      struct xfs_dq_logitem   *qlip = &dqp->q_logitem;
++      struct xfs_log_item     *lip = &qlip->qli_item;
+       struct xfs_dqblk        *dqblk;
+       xfs_failaddr_t          fa;
+       int                     error;
+@@ -1313,8 +1425,15 @@ xfs_qm_dqflush(
+        */
+       dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
+-      xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
+-                      &lip->li_lsn);
++      /*
++       * We hold the dquot lock, so nobody can dirty it while we're
++       * scheduling the write out.  Clear the dirty-since-flush flag.
++       */
++      spin_lock(&qlip->qli_lock);
++      qlip->qli_dirty = false;
++      spin_unlock(&qlip->qli_lock);
++
++      xfs_trans_ail_copy_lsn(mp->m_ail, &qlip->qli_flush_lsn, &lip->li_lsn);
+       /*
+        * copy the lsn into the on-disk dquot now while we have the in memory
+--- a/fs/xfs/xfs_dquot.h
++++ b/fs/xfs/xfs_dquot.h
+@@ -205,7 +205,7 @@ void xfs_dquot_to_disk(struct xfs_disk_d
+ void          xfs_qm_dqdestroy(struct xfs_dquot *dqp);
+ int           xfs_dquot_read_buf(struct xfs_trans *tp, struct xfs_dquot *dqp,
+-                              struct xfs_buf **bpp);
++                              xfs_buf_flags_t flags, struct xfs_buf **bpp);
+ int           xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf *bp);
+ void          xfs_qm_dqunpin_wait(struct xfs_dquot *dqp);
+ void          xfs_qm_adjust_dqtimers(struct xfs_dquot *d);
+@@ -229,6 +229,10 @@ void              xfs_dqlockn(struct xfs_dqtrx *q);
+ void          xfs_dquot_set_prealloc_limits(struct xfs_dquot *);
++int xfs_dquot_attach_buf(struct xfs_trans *tp, struct xfs_dquot *dqp);
++int xfs_dquot_use_attached_buf(struct xfs_dquot *dqp, struct xfs_buf **bpp);
++void xfs_dquot_detach_buf(struct xfs_dquot *dqp);
++
+ static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
+ {
+       xfs_dqlock(dqp);
+--- a/fs/xfs/xfs_dquot_item.c
++++ b/fs/xfs/xfs_dquot_item.c
+@@ -123,8 +123,9 @@ xfs_qm_dquot_logitem_push(
+               __releases(&lip->li_ailp->ail_lock)
+               __acquires(&lip->li_ailp->ail_lock)
+ {
+-      struct xfs_dquot        *dqp = DQUOT_ITEM(lip)->qli_dquot;
+-      struct xfs_buf          *bp = lip->li_buf;
++      struct xfs_dq_logitem   *qlip = DQUOT_ITEM(lip);
++      struct xfs_dquot        *dqp = qlip->qli_dquot;
++      struct xfs_buf          *bp;
+       uint                    rval = XFS_ITEM_SUCCESS;
+       int                     error;
+@@ -155,11 +156,10 @@ xfs_qm_dquot_logitem_push(
+       spin_unlock(&lip->li_ailp->ail_lock);
+-      error = xfs_dquot_read_buf(NULL, dqp, &bp);
+-      if (error) {
+-              if (error == -EAGAIN)
+-                      rval = XFS_ITEM_LOCKED;
++      error = xfs_dquot_use_attached_buf(dqp, &bp);
++      if (error == -EAGAIN) {
+               xfs_dqfunlock(dqp);
++              rval = XFS_ITEM_LOCKED;
+               goto out_relock_ail;
+       }
+@@ -207,12 +207,10 @@ xfs_qm_dquot_logitem_committing(
+ }
+ #ifdef DEBUG_EXPENSIVE
+-static int
+-xfs_qm_dquot_logitem_precommit(
+-      struct xfs_trans        *tp,
+-      struct xfs_log_item     *lip)
++static void
++xfs_qm_dquot_logitem_precommit_check(
++      struct xfs_dquot        *dqp)
+ {
+-      struct xfs_dquot        *dqp = DQUOT_ITEM(lip)->qli_dquot;
+       struct xfs_mount        *mp = dqp->q_mount;
+       struct xfs_disk_dquot   ddq = { };
+       xfs_failaddr_t          fa;
+@@ -228,13 +226,24 @@ xfs_qm_dquot_logitem_precommit(
+               xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+               ASSERT(fa == NULL);
+       }
+-
+-      return 0;
+ }
+ #else
+-# define xfs_qm_dquot_logitem_precommit       NULL
++# define xfs_qm_dquot_logitem_precommit_check(...)    ((void)0)
+ #endif
++static int
++xfs_qm_dquot_logitem_precommit(
++      struct xfs_trans        *tp,
++      struct xfs_log_item     *lip)
++{
++      struct xfs_dq_logitem   *qlip = DQUOT_ITEM(lip);
++      struct xfs_dquot        *dqp = qlip->qli_dquot;
++
++      xfs_qm_dquot_logitem_precommit_check(dqp);
++
++      return xfs_dquot_attach_buf(tp, dqp);
++}
++
+ static const struct xfs_item_ops xfs_dquot_item_ops = {
+       .iop_size       = xfs_qm_dquot_logitem_size,
+       .iop_precommit  = xfs_qm_dquot_logitem_precommit,
+@@ -259,5 +268,7 @@ xfs_qm_dquot_logitem_init(
+       xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
+                                       &xfs_dquot_item_ops);
++      spin_lock_init(&lp->qli_lock);
+       lp->qli_dquot = dqp;
++      lp->qli_dirty = false;
+ }
+--- a/fs/xfs/xfs_dquot_item.h
++++ b/fs/xfs/xfs_dquot_item.h
+@@ -14,6 +14,13 @@ struct xfs_dq_logitem {
+       struct xfs_log_item     qli_item;       /* common portion */
+       struct xfs_dquot        *qli_dquot;     /* dquot ptr */
+       xfs_lsn_t               qli_flush_lsn;  /* lsn at last flush */
++
++      /*
++       * We use this spinlock to coordinate access to the li_buf pointer in
++       * the log item and the qli_dirty flag.
++       */
++      spinlock_t              qli_lock;
++      bool                    qli_dirty;      /* dirtied since last flush? */
+ };
+ void xfs_qm_dquot_logitem_init(struct xfs_dquot *dqp);
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -146,7 +146,7 @@ xfs_qm_dqpurge(
+                * We don't care about getting disk errors here. We need
+                * to purge this dquot anyway, so we go ahead regardless.
+                */
+-              error = xfs_dquot_read_buf(NULL, dqp, &bp);
++              error = xfs_dquot_read_buf(NULL, dqp, XBF_TRYLOCK, &bp);
+               if (error == -EAGAIN) {
+                       xfs_dqfunlock(dqp);
+                       dqp->q_flags &= ~XFS_DQFLAG_FREEING;
+@@ -166,6 +166,7 @@ xfs_qm_dqpurge(
+               }
+               xfs_dqflock(dqp);
+       }
++      xfs_dquot_detach_buf(dqp);
+ out_funlock:
+       ASSERT(atomic_read(&dqp->q_pincount) == 0);
+@@ -473,7 +474,7 @@ xfs_qm_dquot_isolate(
+               /* we have to drop the LRU lock to flush the dquot */
+               spin_unlock(lru_lock);
+-              error = xfs_dquot_read_buf(NULL, dqp, &bp);
++              error = xfs_dquot_read_buf(NULL, dqp, XBF_TRYLOCK, &bp);
+               if (error) {
+                       xfs_dqfunlock(dqp);
+                       goto out_unlock_dirty;
+@@ -491,6 +492,8 @@ xfs_qm_dquot_isolate(
+               xfs_buf_relse(bp);
+               goto out_unlock_dirty;
+       }
++
++      xfs_dquot_detach_buf(dqp);
+       xfs_dqfunlock(dqp);
+       /*
+@@ -1308,7 +1311,7 @@ xfs_qm_flush_one(
+               goto out_unlock;
+       }
+-      error = xfs_dquot_read_buf(NULL, dqp, &bp);
++      error = xfs_dquot_read_buf(NULL, dqp, XBF_TRYLOCK, &bp);
+       if (error)
+               goto out_unlock;
+--- a/fs/xfs/xfs_trans_ail.c
++++ b/fs/xfs/xfs_trans_ail.c
+@@ -360,7 +360,7 @@ xfsaild_resubmit_item(
+       /* protected by ail_lock */
+       list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
+-              if (bp->b_flags & _XBF_INODES)
++              if (bp->b_flags & (_XBF_INODES | _XBF_DQUOTS))
+                       clear_bit(XFS_LI_FAILED, &lip->li_flags);
+               else
+                       xfs_clear_li_failed(lip);
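
Condensed, the protocol the patch introduces is: the committing context (which may sleep and allocate) reads the buffer and parks a reference on the log item; the AIL push path only trylocks what is already attached; and flush completion drops the parked reference unless the dquot was re-dirtied in between. A simplified sketch with hypothetical types, not the real XFS structures:

#include <linux/spinlock.h>
#include <linux/types.h>

struct buf;				/* hypothetical buffer type */
void buf_hold(struct buf *bp);		/* hypothetical hold/release pair */
void buf_rele(struct buf *bp);

struct dq_log_item {
	spinlock_t	lock;		/* protects buf and dirty */
	struct buf	*buf;		/* held reference, or NULL */
	bool		dirty;		/* dirtied since the last flush? */
};

/* Called from transaction precommit: may block, so holding a buffer is safe. */
static void item_attach_buf(struct dq_log_item *it, struct buf *bp)
{
	spin_lock(&it->lock);
	if (!it->buf) {
		buf_hold(bp);		/* reference owned by the log item */
		it->buf = bp;
	}
	it->dirty = true;
	spin_unlock(&it->lock);
}

/*
 * Called from flush completion.  The flush path clears 'dirty' under the
 * lock just before submitting the write, so a false 'dirty' here means
 * nothing new was logged while the I/O was in flight and the parked
 * reference can be dropped.
 */
static void item_flush_done(struct dq_log_item *it)
{
	struct buf *bp = NULL;

	spin_lock(&it->lock);
	if (!it->dirty) {
		bp = it->buf;
		it->buf = NULL;
	}
	spin_unlock(&it->lock);
	if (bp)
		buf_rele(bp);
}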
diff --git a/queue-6.12/xfs-avoid-nested-calls-to-__xfs_trans_commit.patch b/queue-6.12/xfs-avoid-nested-calls-to-__xfs_trans_commit.patch
new file mode 100644 (file)
index 0000000..60ef8aa
--- /dev/null
@@ -0,0 +1,70 @@
+From stable+bounces-114291-greg=kroah.com@vger.kernel.org Fri Feb  7 20:26:38 2025
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Fri, 07 Feb 2025 11:26:33 -0800
+Subject: xfs: avoid nested calls to __xfs_trans_commit
+To: djwong@kernel.org, xfs-stable@lists.linux.dev
+Cc: hch@lst.de, stable@vger.kernel.org
+Message-ID: <173895601419.3373740.4927786739399794017.stgit@frogsfrogsfrogs>
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit e96c1e2f262e0993859e266e751977bfad3ca98a upstream
+
+Currently, __xfs_trans_commit calls xfs_defer_finish_noroll, which calls
+__xfs_trans_commit again on the same transaction.  In other words,
+there's function recursion that has caused minor amounts of confusion in
+the past.  There's no reason to keep this around, since there's only one
+place where we actually want the xfs_defer_finish_noroll, and that is in
+the top level xfs_trans_commit call.
+
+Fixes: 98719051e75ccf ("xfs: refactor internal dfops initialization")
+Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_trans.c |   26 ++++++++++++++------------
+ 1 file changed, 14 insertions(+), 12 deletions(-)
+
+
+--- a/fs/xfs/xfs_trans.c
++++ b/fs/xfs/xfs_trans.c
+@@ -834,18 +834,6 @@ __xfs_trans_commit(
+       trace_xfs_trans_commit(tp, _RET_IP_);
+-      /*
+-       * Finish deferred items on final commit. Only permanent transactions
+-       * should ever have deferred ops.
+-       */
+-      WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
+-                   !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
+-      if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
+-              error = xfs_defer_finish_noroll(&tp);
+-              if (error)
+-                      goto out_unreserve;
+-      }
+-
+       error = xfs_trans_run_precommits(tp);
+       if (error)
+               goto out_unreserve;
+@@ -924,6 +912,20 @@ int
+ xfs_trans_commit(
+       struct xfs_trans        *tp)
+ {
++      /*
++       * Finish deferred items on final commit. Only permanent transactions
++       * should ever have deferred ops.
++       */
++      WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
++                   !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
++      if (tp->t_flags & XFS_TRANS_PERM_LOG_RES) {
++              int error = xfs_defer_finish_noroll(&tp);
++              if (error) {
++                      xfs_trans_cancel(tp);
++                      return error;
++              }
++      }
++
+       return __xfs_trans_commit(tp, false);
+ }
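
The restructuring amounts to hoisting the deferred-work loop out of the recursive low-level helper and into the single public entry point, which now also cancels the transaction itself if finishing the deferred items fails. In outline, with hypothetical names rather than the real xfs_trans interfaces:

/* Outline only; not the real xfs_trans interfaces. */
struct txn;

int  commit_lowlevel(struct txn *tp);	/* never touches deferred ops */
int  finish_deferred(struct txn **tpp);	/* may roll *tpp repeatedly */
bool has_deferred_work(struct txn *tp);
void cancel_txn(struct txn *tp);

int commit(struct txn *tp)
{
	if (has_deferred_work(tp)) {
		int error = finish_deferred(&tp);

		if (error) {
			cancel_txn(tp);
			return error;
		}
	}
	return commit_lowlevel(tp);	/* no longer re-entered from above */
}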
diff --git a/queue-6.12/xfs-clean-up-log-item-accesses-in-xfs_qm_dqflush-_done.patch b/queue-6.12/xfs-clean-up-log-item-accesses-in-xfs_qm_dqflush-_done.patch
new file mode 100644 (file)
index 0000000..aa82ed0
--- /dev/null
@@ -0,0 +1,79 @@
+From stable+bounces-114296-greg=kroah.com@vger.kernel.org Fri Feb  7 20:27:40 2025
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Fri, 07 Feb 2025 11:27:35 -0800
+Subject: xfs: clean up log item accesses in xfs_qm_dqflush{,_done}
+To: djwong@kernel.org, xfs-stable@lists.linux.dev
+Cc: hch@lst.de, stable@vger.kernel.org
+Message-ID: <173895601484.3373740.13028897086121340654.stgit@frogsfrogsfrogs>
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit ec88b41b932d5731291dcc0d0d63ea13ab8e07d5 upstream
+
+Clean up these functions a little bit before we move on to the real
+modifications, and make the variable naming consistent for dquot log items.
+
+Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_dquot.c |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -1136,8 +1136,9 @@ static void
+ xfs_qm_dqflush_done(
+       struct xfs_log_item     *lip)
+ {
+-      struct xfs_dq_logitem   *qip = (struct xfs_dq_logitem *)lip;
+-      struct xfs_dquot        *dqp = qip->qli_dquot;
++      struct xfs_dq_logitem   *qlip =
++                      container_of(lip, struct xfs_dq_logitem, qli_item);
++      struct xfs_dquot        *dqp = qlip->qli_dquot;
+       struct xfs_ail          *ailp = lip->li_ailp;
+       xfs_lsn_t               tail_lsn;
+@@ -1150,12 +1151,12 @@ xfs_qm_dqflush_done(
+        * holding the lock before removing the dquot from the AIL.
+        */
+       if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
+-          ((lip->li_lsn == qip->qli_flush_lsn) ||
++          ((lip->li_lsn == qlip->qli_flush_lsn) ||
+            test_bit(XFS_LI_FAILED, &lip->li_flags))) {
+               spin_lock(&ailp->ail_lock);
+               xfs_clear_li_failed(lip);
+-              if (lip->li_lsn == qip->qli_flush_lsn) {
++              if (lip->li_lsn == qlip->qli_flush_lsn) {
+                       /* xfs_ail_update_finish() drops the AIL lock */
+                       tail_lsn = xfs_ail_delete_one(ailp, lip);
+                       xfs_ail_update_finish(ailp, tail_lsn);
+@@ -1313,7 +1314,7 @@ xfs_qm_dqflush(
+       dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
+       xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
+-                                      &dqp->q_logitem.qli_item.li_lsn);
++                      &lip->li_lsn);
+       /*
+        * copy the lsn into the on-disk dquot now while we have the in memory
+@@ -1325,7 +1326,7 @@ xfs_qm_dqflush(
+        * of a dquot without an up-to-date CRC getting to disk.
+        */
+       if (xfs_has_crc(mp)) {
+-              dqblk->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
++              dqblk->dd_lsn = cpu_to_be64(lip->li_lsn);
+               xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
+                                XFS_DQUOT_CRC_OFF);
+       }
+@@ -1335,7 +1336,7 @@ xfs_qm_dqflush(
+        * the AIL and release the flush lock once the dquot is synced to disk.
+        */
+       bp->b_flags |= _XBF_DQUOTS;
+-      list_add_tail(&dqp->q_logitem.qli_item.li_bio_list, &bp->b_li_list);
++      list_add_tail(&lip->li_bio_list, &bp->b_li_list);
+       /*
+        * If the buffer is pinned then push on the log so we won't
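
The substantive part of this cleanup is switching from an open-coded cast to container_of(), which recovers the enclosing structure from a pointer to an embedded member regardless of where that member sits in the layout. A standalone illustration with hypothetical types, not the actual XFS log-item structures:

#include <linux/container_of.h>

struct log_item {
	unsigned long	lsn;
};

struct dq_item {
	int		payload;
	struct log_item	li;	/* embedded, not necessarily first */
};

static struct dq_item *to_dq_item(struct log_item *lip)
{
	/*
	 * Correct no matter where 'li' sits inside 'struct dq_item',
	 * unlike the old '(struct dq_item *)lip' cast, which is only
	 * valid while the member stays at offset zero.
	 */
	return container_of(lip, struct dq_item, li);
}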
diff --git a/queue-6.12/xfs-convert-quotacheck-to-attach-dquot-buffers.patch b/queue-6.12/xfs-convert-quotacheck-to-attach-dquot-buffers.patch
new file mode 100644 (file)
index 0000000..225dce7
--- /dev/null
@@ -0,0 +1,132 @@
+From stable+bounces-114298-greg=kroah.com@vger.kernel.org Fri Feb  7 20:28:12 2025
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Fri, 07 Feb 2025 11:28:07 -0800
+Subject: xfs: convert quotacheck to attach dquot buffers
+To: djwong@kernel.org, xfs-stable@lists.linux.dev
+Cc: hch@lst.de, stable@vger.kernel.org
+Message-ID: <173895601517.3373740.8627744405060077168.stgit@frogsfrogsfrogs>
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit ca378189fdfa890a4f0622f85ee41b710bbac271 upstream
+
+Now that we've converted the dquot logging machinery to attach the dquot
+buffer to the li_buf pointer so that the AIL dqflush doesn't have to
+allocate or read buffers in a reclaim path, do the same for the
+quotacheck code so that the reclaim shrinker dqflush call doesn't have
+to do that either.
+
+Cc: <stable@vger.kernel.org> # v6.12
+Fixes: 903edea6c53f09 ("mm: warn about illegal __GFP_NOFAIL usage in a more appropriate location and manner")
+Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_dquot.c |    9 +++------
+ fs/xfs/xfs_dquot.h |    2 --
+ fs/xfs/xfs_qm.c    |   18 +++++++++++++-----
+ 3 files changed, 16 insertions(+), 13 deletions(-)
+
+
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -1278,11 +1278,10 @@ xfs_qm_dqflush_check(
+  * Requires dquot flush lock, will clear the dirty flag, delete the quota log
+  * item from the AIL, and shut down the system if something goes wrong.
+  */
+-int
++static int
+ xfs_dquot_read_buf(
+       struct xfs_trans        *tp,
+       struct xfs_dquot        *dqp,
+-      xfs_buf_flags_t         xbf_flags,
+       struct xfs_buf          **bpp)
+ {
+       struct xfs_mount        *mp = dqp->q_mount;
+@@ -1290,10 +1289,8 @@ xfs_dquot_read_buf(
+       int                     error;
+       error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
+-                                 mp->m_quotainfo->qi_dqchunklen, xbf_flags,
++                                 mp->m_quotainfo->qi_dqchunklen, 0,
+                                  &bp, &xfs_dquot_buf_ops);
+-      if (error == -EAGAIN)
+-              return error;
+       if (xfs_metadata_is_sick(error))
+               xfs_dquot_mark_sick(dqp);
+       if (error)
+@@ -1327,7 +1324,7 @@ xfs_dquot_attach_buf(
+               struct xfs_buf  *bp = NULL;
+               spin_unlock(&qlip->qli_lock);
+-              error = xfs_dquot_read_buf(tp, dqp, 0, &bp);
++              error = xfs_dquot_read_buf(tp, dqp, &bp);
+               if (error)
+                       return error;
+--- a/fs/xfs/xfs_dquot.h
++++ b/fs/xfs/xfs_dquot.h
+@@ -204,8 +204,6 @@ void xfs_dquot_to_disk(struct xfs_disk_d
+ #define XFS_DQ_IS_DIRTY(dqp)  ((dqp)->q_flags & XFS_DQFLAG_DIRTY)
+ void          xfs_qm_dqdestroy(struct xfs_dquot *dqp);
+-int           xfs_dquot_read_buf(struct xfs_trans *tp, struct xfs_dquot *dqp,
+-                              xfs_buf_flags_t flags, struct xfs_buf **bpp);
+ int           xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf *bp);
+ void          xfs_qm_dqunpin_wait(struct xfs_dquot *dqp);
+ void          xfs_qm_adjust_dqtimers(struct xfs_dquot *d);
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -146,13 +146,13 @@ xfs_qm_dqpurge(
+                * We don't care about getting disk errors here. We need
+                * to purge this dquot anyway, so we go ahead regardless.
+                */
+-              error = xfs_dquot_read_buf(NULL, dqp, XBF_TRYLOCK, &bp);
++              error = xfs_dquot_use_attached_buf(dqp, &bp);
+               if (error == -EAGAIN) {
+                       xfs_dqfunlock(dqp);
+                       dqp->q_flags &= ~XFS_DQFLAG_FREEING;
+                       goto out_unlock;
+               }
+-              if (error)
++              if (!bp)
+                       goto out_funlock;
+               /*
+@@ -474,8 +474,8 @@ xfs_qm_dquot_isolate(
+               /* we have to drop the LRU lock to flush the dquot */
+               spin_unlock(lru_lock);
+-              error = xfs_dquot_read_buf(NULL, dqp, XBF_TRYLOCK, &bp);
+-              if (error) {
++              error = xfs_dquot_use_attached_buf(dqp, &bp);
++              if (!bp || error == -EAGAIN) {
+                       xfs_dqfunlock(dqp);
+                       goto out_unlock_dirty;
+               }
+@@ -1132,6 +1132,10 @@ xfs_qm_quotacheck_dqadjust(
+               return error;
+       }
++      error = xfs_dquot_attach_buf(NULL, dqp);
++      if (error)
++              return error;
++
+       trace_xfs_dqadjust(dqp);
+       /*
+@@ -1311,9 +1315,13 @@ xfs_qm_flush_one(
+               goto out_unlock;
+       }
+-      error = xfs_dquot_read_buf(NULL, dqp, XBF_TRYLOCK, &bp);
++      error = xfs_dquot_use_attached_buf(dqp, &bp);
+       if (error)
+               goto out_unlock;
++      if (!bp) {
++              error = -EFSCORRUPTED;
++              goto out_unlock;
++      }
+       error = xfs_qm_dqflush(dqp, bp);
+       if (!error)
diff --git a/queue-6.12/xfs-don-t-lose-solo-dquot-update-transactions.patch b/queue-6.12/xfs-don-t-lose-solo-dquot-update-transactions.patch
new file mode 100644 (file)
index 0000000..44d6e6e
--- /dev/null
@@ -0,0 +1,165 @@
+From stable+bounces-114293-greg=kroah.com@vger.kernel.org Fri Feb  7 20:27:09 2025
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Fri, 07 Feb 2025 11:27:04 -0800
+Subject: xfs: don't lose solo dquot update transactions
+To: djwong@kernel.org, xfs-stable@lists.linux.dev
+Cc: hch@lst.de, stable@vger.kernel.org
+Message-ID: <173895601451.3373740.13218256058657142856.stgit@frogsfrogsfrogs>
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit d00ffba4adacd0d4d905f6e64bd8cd87011f5711 upstream
+
+Quota counter updates are tracked via incore objects which hang off the
+xfs_trans object.  These changes are then turned into dirty log items in
+xfs_trans_apply_dquot_deltas just prior to commiting the log items to
+the CIL.
+
+However, updating the incore deltas do not cause XFS_TRANS_DIRTY to be
+set on the transaction.  In other words, a pure quota counter update
+will be silently discarded if there are no other dirty log items
+attached to the transaction.
+
+This is currently not the case anywhere in the filesystem because quota
+updates always dirty at least one other metadata item, but a subsequent
+bug fix will add dquot log item precommits, so we actually need a dirty
+dquot log item prior to xfs_trans_run_precommits.  Also let's not leave
+a logic bomb.
+
+Cc: <stable@vger.kernel.org> # v2.6.35
+Fixes: 0924378a689ccb ("xfs: split out iclog writing from xfs_trans_commit()")
+Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_quota.h       |    7 ++++---
+ fs/xfs/xfs_trans.c       |   10 +++-------
+ fs/xfs/xfs_trans_dquot.c |   31 ++++++++++++++++++++++++++-----
+ 3 files changed, 33 insertions(+), 15 deletions(-)
+
+
+--- a/fs/xfs/xfs_quota.h
++++ b/fs/xfs/xfs_quota.h
+@@ -96,7 +96,8 @@ extern void xfs_trans_free_dqinfo(struct
+ extern void xfs_trans_mod_dquot_byino(struct xfs_trans *, struct xfs_inode *,
+               uint, int64_t);
+ extern void xfs_trans_apply_dquot_deltas(struct xfs_trans *);
+-extern void xfs_trans_unreserve_and_mod_dquots(struct xfs_trans *);
++void xfs_trans_unreserve_and_mod_dquots(struct xfs_trans *tp,
++              bool already_locked);
+ int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp, struct xfs_inode *ip,
+               int64_t dblocks, int64_t rblocks, bool force);
+ extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
+@@ -165,8 +166,8 @@ static inline void xfs_trans_mod_dquot_b
+               struct xfs_inode *ip, uint field, int64_t delta)
+ {
+ }
+-#define xfs_trans_apply_dquot_deltas(tp)
+-#define xfs_trans_unreserve_and_mod_dquots(tp)
++#define xfs_trans_apply_dquot_deltas(tp, a)
++#define xfs_trans_unreserve_and_mod_dquots(tp, a)
+ static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp,
+               struct xfs_inode *ip, int64_t dblocks, int64_t rblocks,
+               bool force)
+--- a/fs/xfs/xfs_trans.c
++++ b/fs/xfs/xfs_trans.c
+@@ -840,6 +840,7 @@ __xfs_trans_commit(
+        */
+       if (tp->t_flags & XFS_TRANS_SB_DIRTY)
+               xfs_trans_apply_sb_deltas(tp);
++      xfs_trans_apply_dquot_deltas(tp);
+       error = xfs_trans_run_precommits(tp);
+       if (error)
+@@ -868,11 +869,6 @@ __xfs_trans_commit(
+       ASSERT(tp->t_ticket != NULL);
+-      /*
+-       * If we need to update the superblock, then do it now.
+-       */
+-      xfs_trans_apply_dquot_deltas(tp);
+-
+       xlog_cil_commit(log, tp, &commit_seq, regrant);
+       xfs_trans_free(tp);
+@@ -898,7 +894,7 @@ out_unreserve:
+        * the dqinfo portion to be.  All that means is that we have some
+        * (non-persistent) quota reservations that need to be unreserved.
+        */
+-      xfs_trans_unreserve_and_mod_dquots(tp);
++      xfs_trans_unreserve_and_mod_dquots(tp, true);
+       if (tp->t_ticket) {
+               if (regrant && !xlog_is_shutdown(log))
+                       xfs_log_ticket_regrant(log, tp->t_ticket);
+@@ -992,7 +988,7 @@ xfs_trans_cancel(
+       }
+ #endif
+       xfs_trans_unreserve_and_mod_sb(tp);
+-      xfs_trans_unreserve_and_mod_dquots(tp);
++      xfs_trans_unreserve_and_mod_dquots(tp, false);
+       if (tp->t_ticket) {
+               xfs_log_ticket_ungrant(log, tp->t_ticket);
+--- a/fs/xfs/xfs_trans_dquot.c
++++ b/fs/xfs/xfs_trans_dquot.c
+@@ -602,6 +602,24 @@ xfs_trans_apply_dquot_deltas(
+                       ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
+                       ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
+                       ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
++
++                      /*
++                       * We've applied the count changes and given back
++                       * whatever reservation we didn't use.  Zero out the
++                       * dqtrx fields.
++                       */
++                      qtrx->qt_blk_res = 0;
++                      qtrx->qt_bcount_delta = 0;
++                      qtrx->qt_delbcnt_delta = 0;
++
++                      qtrx->qt_rtblk_res = 0;
++                      qtrx->qt_rtblk_res_used = 0;
++                      qtrx->qt_rtbcount_delta = 0;
++                      qtrx->qt_delrtb_delta = 0;
++
++                      qtrx->qt_ino_res = 0;
++                      qtrx->qt_ino_res_used = 0;
++                      qtrx->qt_icount_delta = 0;
+               }
+       }
+ }
+@@ -638,7 +656,8 @@ xfs_trans_unreserve_and_mod_dquots_hook(
+  */
+ void
+ xfs_trans_unreserve_and_mod_dquots(
+-      struct xfs_trans        *tp)
++      struct xfs_trans        *tp,
++      bool                    already_locked)
+ {
+       int                     i, j;
+       struct xfs_dquot        *dqp;
+@@ -667,10 +686,12 @@ xfs_trans_unreserve_and_mod_dquots(
+                        * about the number of blocks used field, or deltas.
+                        * Also we don't bother to zero the fields.
+                        */
+-                      locked = false;
++                      locked = already_locked;
+                       if (qtrx->qt_blk_res) {
+-                              xfs_dqlock(dqp);
+-                              locked = true;
++                              if (!locked) {
++                                      xfs_dqlock(dqp);
++                                      locked = true;
++                              }
+                               dqp->q_blk.reserved -=
+                                       (xfs_qcnt_t)qtrx->qt_blk_res;
+                       }
+@@ -691,7 +712,7 @@ xfs_trans_unreserve_and_mod_dquots(
+                               dqp->q_rtb.reserved -=
+                                       (xfs_qcnt_t)qtrx->qt_rtblk_res;
+                       }
+-                      if (locked)
++                      if (locked && !already_locked)
+                               xfs_dqunlock(dqp);
+               }
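
The second hunk above threads an "already_locked" flag through
xfs_trans_unreserve_and_mod_dquots() so a caller that already holds the
dquot lock can say so, and the helper then neither re-takes nor drops it.
A minimal userspace sketch of that convention, using a pthread mutex and
made-up names (struct dquot, unreserve()) rather than the kernel API:

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct dquot {
          pthread_mutex_t lock;
          long            blk_reserved;
  };

  static void unreserve(struct dquot *dq, long res, bool already_locked)
  {
          bool locked = already_locked;

          if (res) {
                  if (!locked) {
                          pthread_mutex_lock(&dq->lock);
                          locked = true;
                  }
                  dq->blk_reserved -= res;
          }

          /* Only drop the lock if this function took it. */
          if (locked && !already_locked)
                  pthread_mutex_unlock(&dq->lock);
  }

  int main(void)
  {
          struct dquot dq = {
                  .lock = PTHREAD_MUTEX_INITIALIZER,
                  .blk_reserved = 10,
          };

          /* Caller does not hold the lock: the helper takes and drops it. */
          unreserve(&dq, 4, false);

          /* Caller already holds the lock, as the commit-path caller above does. */
          pthread_mutex_lock(&dq.lock);
          unreserve(&dq, 2, true);
          pthread_mutex_unlock(&dq.lock);

          printf("reserved now %ld\n", dq.blk_reserved);
          return 0;
  }
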
diff --git a/queue-6.12/xfs-don-t-lose-solo-superblock-counter-update-transactions.patch b/queue-6.12/xfs-don-t-lose-solo-superblock-counter-update-transactions.patch
new file mode 100644 (file)
index 0000000..72f43ad
--- /dev/null
@@ -0,0 +1,61 @@
+From stable+bounces-114292-greg=kroah.com@vger.kernel.org Fri Feb  7 20:27:02 2025
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Fri, 07 Feb 2025 11:26:48 -0800
+Subject: xfs: don't lose solo superblock counter update transactions
+To: djwong@kernel.org, xfs-stable@lists.linux.dev
+Cc: hch@lst.de, stable@vger.kernel.org
+Message-ID: <173895601435.3373740.16661001569520012189.stgit@frogsfrogsfrogs>
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit c817aabd3b08e8770d89a9a29ae80fead561a1a1 upstream
+
+Superblock counter updates are tracked via per-transaction counters in
+the xfs_trans object.  These changes are then turned into dirty log
+items in xfs_trans_apply_sb_deltas just prior to committing the log items
+to the CIL.
+
+However, updating the per-transaction counter deltas does not cause
+XFS_TRANS_DIRTY to be set on the transaction.  In other words, a pure sb
+counter update will be silently discarded if there are no other dirty
+log items attached to the transaction.
+
+This is currently not the case anywhere in the filesystem because sb
+counter updates always dirty at least one other metadata item, but let's
+not leave a logic bomb.
+
+Cc: <stable@vger.kernel.org> # v2.6.35
+Fixes: 0924378a689ccb ("xfs: split out iclog writing from xfs_trans_commit()")
+Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_trans.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+
+--- a/fs/xfs/xfs_trans.c
++++ b/fs/xfs/xfs_trans.c
+@@ -834,6 +834,13 @@ __xfs_trans_commit(
+       trace_xfs_trans_commit(tp, _RET_IP_);
++      /*
++       * Commit per-transaction changes that are not already tracked through
++       * log items.  This can add dirty log items to the transaction.
++       */
++      if (tp->t_flags & XFS_TRANS_SB_DIRTY)
++              xfs_trans_apply_sb_deltas(tp);
++
+       error = xfs_trans_run_precommits(tp);
+       if (error)
+               goto out_unreserve;
+@@ -864,8 +871,6 @@ __xfs_trans_commit(
+       /*
+        * If we need to update the superblock, then do it now.
+        */
+-      if (tp->t_flags & XFS_TRANS_SB_DIRTY)
+-              xfs_trans_apply_sb_deltas(tp);
+       xfs_trans_apply_dquot_deltas(tp);
+       xlog_cil_commit(log, tp, &commit_seq, regrant);
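
The hunk above moves xfs_trans_apply_sb_deltas() ahead of
xfs_trans_run_precommits(), so a transaction whose only change is a
superblock counter delta has produced a dirty log item by the time the
commit path decides whether anything needs logging at all.  A toy
userspace model of why that ordering matters (plain C, not XFS code;
"trans", "apply_sb_deltas" and "commit" here are stand-ins):

  #include <stdbool.h>
  #include <stdio.h>

  struct trans {
          long sb_delta;          /* per-transaction counter change */
          bool item_dirty;        /* stand-in for a dirty log item  */
  };

  /* Turn pending deltas into a dirty "log item". */
  static void apply_sb_deltas(struct trans *tp)
  {
          if (tp->sb_delta)
                  tp->item_dirty = true;
  }

  static void commit(struct trans *tp, bool apply_before_check)
  {
          if (apply_before_check)
                  apply_sb_deltas(tp);

          if (!tp->item_dirty) {
                  /* Nothing dirty: the whole transaction is dropped. */
                  printf("delta %ld discarded\n", tp->sb_delta);
                  return;
          }

          if (!apply_before_check)
                  apply_sb_deltas(tp);    /* old ordering: too late */

          printf("delta %ld committed\n", tp->sb_delta);
  }

  int main(void)
  {
          struct trans lost = { .sb_delta = 5 };
          struct trans kept = { .sb_delta = 5 };

          commit(&lost, false);   /* prints "delta 5 discarded" */
          commit(&kept, true);    /* prints "delta 5 committed" */
          return 0;
  }
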
diff --git a/queue-6.12/xfs-fix-mount-hang-during-primary-superblock-recovery-failure.patch b/queue-6.12/xfs-fix-mount-hang-during-primary-superblock-recovery-failure.patch
new file mode 100644 (file)
index 0000000..36d462f
--- /dev/null
@@ -0,0 +1,110 @@
+From stable+bounces-114302-greg=kroah.com@vger.kernel.org Fri Feb  7 20:29:15 2025
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Fri, 07 Feb 2025 11:29:09 -0800
+Subject: xfs: fix mount hang during primary superblock recovery failure
+To: djwong@kernel.org, xfs-stable@lists.linux.dev
+Cc: stable@vger.kernel.org, leo.lilong@huawei.com, hch@lst.de, cem@kernel.org, stable@vger.kernel.org
+Message-ID: <173895601583.3373740.17804889699108024787.stgit@frogsfrogsfrogs>
+
+From: Long Li <leo.lilong@huawei.com>
+
+commit efebe42d95fbba91dca6e3e32cb9e0612eb56de5 upstream
+
+When mounting an image containing a log with sb modifications that require
+log replay, the mount process hangs forever, with the following stack:
+
+  [root@localhost ~]# cat /proc/557/stack
+  [<0>] xfs_buftarg_wait+0x31/0x70
+  [<0>] xfs_buftarg_drain+0x54/0x350
+  [<0>] xfs_mountfs+0x66e/0xe80
+  [<0>] xfs_fs_fill_super+0x7f1/0xec0
+  [<0>] get_tree_bdev_flags+0x186/0x280
+  [<0>] get_tree_bdev+0x18/0x30
+  [<0>] xfs_fs_get_tree+0x1d/0x30
+  [<0>] vfs_get_tree+0x2d/0x110
+  [<0>] path_mount+0xb59/0xfc0
+  [<0>] do_mount+0x92/0xc0
+  [<0>] __x64_sys_mount+0xc2/0x160
+  [<0>] x64_sys_call+0x2de4/0x45c0
+  [<0>] do_syscall_64+0xa7/0x240
+  [<0>] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+During log recovery, while updating the in-memory superblock from the
+primary SB buffer, if an error is encountered, such as superblock
+corruption or some other reason, we proceed to out_release and release
+the xfs_buf. However, this is insufficient because the xfs_buf's log
+item has already been initialized and the xfs_buf is held by the buffer
+log item, as shown below, so the xfs_buf will not be released, causing
+the mount thread to hang.
+
+  xlog_recover_do_primary_sb_buffer
+    xlog_recover_do_reg_buffer
+      xlog_recover_validate_buf_type
+        xfs_buf_item_init(bp, mp)
+
+The solution is straightforward: we simply need to allow it to be
+handled by the normal buffer write process. The filesystem will be
+shut down before the submission of buffer_list in xlog_do_recovery_pass(),
+ensuring the correct release of the xfs_buf as follows:
+
+  xlog_do_recovery_pass
+    error = xlog_recover_process
+      xlog_recover_process_data
+        xlog_recover_process_ophdr
+          xlog_recovery_process_trans
+            ...
+              xlog_recover_buf_commit_pass2
+                error = xlog_recover_do_primary_sb_buffer
+                  //Encounter error and return
+                if (error)
+                  goto out_writebuf
+                ...
+              out_writebuf:
+                xfs_buf_delwri_queue(bp, buffer_list) //add bp to list
+                return  error
+            ...
+    if (!list_empty(&buffer_list))
+      if (error)
+        xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); //shutdown first
+      xfs_buf_delwri_submit(&buffer_list); //submit buffers in list
+        __xfs_buf_submit
+          if (bp->b_mount->m_log && xlog_is_shutdown(bp->b_mount->m_log))
+            xfs_buf_ioend_fail(bp)  //release bp correctly
+
+Fixes: 6a18765b54e2 ("xfs: update the file system geometry after recoverying superblock buffers")
+Cc: stable@vger.kernel.org # v6.12
+Signed-off-by: Long Li <leo.lilong@huawei.com>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_buf_item_recover.c |   11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+
+--- a/fs/xfs/xfs_buf_item_recover.c
++++ b/fs/xfs/xfs_buf_item_recover.c
+@@ -1036,12 +1036,21 @@ xlog_recover_buf_commit_pass2(
+               error = xlog_recover_do_primary_sb_buffer(mp, item, bp, buf_f,
+                               current_lsn);
+               if (error)
+-                      goto out_release;
++                      goto out_writebuf;
+       } else {
+               xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
+       }
+       /*
++       * Buffer held by buf log item during 'normal' buffer recovery must
++       * be committed through buffer I/O submission path to ensure proper
++       * release. When error occurs during sb buffer recovery, log shutdown
++       * will be done before submitting buffer list so that buffers can be
++       * released correctly through ioend failure path.
++       */
++out_writebuf:
++
++      /*
+        * Perform delayed write on the buffer.  Asynchronous writes will be
+        * slower when taking into account all the buffers to be flushed.
+        *
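
The hang comes down to reference counting: once the buffer log item has
been initialized it holds its own reference to the xfs_buf, so an error
path that only performs the plain release leaves the count above zero
and the buftarg drain waits forever, while the delwri submission path
(with the log already shut down) lets the I/O completion drop the
remaining hold.  A simplified refcount sketch with invented fields, not
the real xfs_buf:

  #include <stdio.h>

  struct buf {
          int hold;       /* reference count                          */
          int item_hold;  /* reference owned by an attached log item  */
  };

  static void buf_item_init(struct buf *bp) { bp->hold++; bp->item_hold = 1; }
  static void buf_release(struct buf *bp)   { bp->hold--; }

  /* The ioend (or ioend-failure) path also drops the log item's hold. */
  static void buf_ioend(struct buf *bp)
  {
          if (bp->item_hold) {
                  bp->item_hold = 0;
                  bp->hold--;
          }
          buf_release(bp);
  }

  int main(void)
  {
          struct buf old_path = { .hold = 1 };
          struct buf new_path = { .hold = 1 };

          buf_item_init(&old_path);
          buf_release(&old_path);         /* old error path: plain release only */
          printf("old path: hold=%d (drain hangs)\n", old_path.hold);

          buf_item_init(&new_path);
          buf_ioend(&new_path);           /* fixed path: queued, ioend drops both */
          printf("new path: hold=%d (buffer freed)\n", new_path.hold);
          return 0;
  }
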
diff --git a/queue-6.12/xfs-lock-dquot-buffer-before-detaching-dquot-from-b_li_list.patch b/queue-6.12/xfs-lock-dquot-buffer-before-detaching-dquot-from-b_li_list.patch
new file mode 100644 (file)
index 0000000..fc4ebd9
--- /dev/null
@@ -0,0 +1,39 @@
+From stable+bounces-114301-greg=kroah.com@vger.kernel.org Fri Feb  7 20:28:59 2025
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Fri, 07 Feb 2025 11:28:54 -0800
+Subject: xfs: lock dquot buffer before detaching dquot from b_li_list
+To: djwong@kernel.org, xfs-stable@lists.linux.dev
+Cc: hch@lst.de, cem@kernel.org, stable@vger.kernel.org
+Message-ID: <173895601566.3373740.7315790259628729484.stgit@frogsfrogsfrogs>
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit 111d36d6278756128b7d7fab787fdcbf8221cd98 upstream
+
+We have to lock the buffer before we can delete the dquot log item from
+the buffer's log item list.
+
+Cc: <stable@vger.kernel.org> # v6.13-rc3
+Fixes: acc8f8628c3737 ("xfs: attach dquot buffer to dquot log item buffer")
+Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_dquot.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -87,8 +87,9 @@ xfs_dquot_detach_buf(
+       }
+       spin_unlock(&qlip->qli_lock);
+       if (bp) {
++              xfs_buf_lock(bp);
+               list_del_init(&qlip->qli_item.li_bio_list);
+-              xfs_buf_rele(bp);
++              xfs_buf_relse(bp);
+       }
+ }
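
The rule being enforced is that a buffer's b_li_list may only be
modified while the buffer lock is held, and that the final release
drops the lock together with the reference (which is what switching
from xfs_buf_rele() to xfs_buf_relse() does).  A small pthread sketch
of that lock-then-unlink-then-release pattern, with simplified stand-in
types rather than the kernel structures:

  #include <pthread.h>
  #include <stdio.h>

  struct item { struct item *next; };

  struct buf {
          pthread_mutex_t lock;           /* stands in for the xfs_buf lock */
          struct item     *li_list;       /* stands in for b_li_list        */
          int             hold;
  };

  static void buf_unlock_and_release(struct buf *bp)
  {
          pthread_mutex_unlock(&bp->lock);
          bp->hold--;
  }

  static void detach_item(struct buf *bp, struct item *ip)
  {
          pthread_mutex_lock(&bp->lock);  /* lock before touching the list */

          for (struct item **p = &bp->li_list; *p; p = &(*p)->next) {
                  if (*p == ip) {
                          *p = ip->next;
                          break;
                  }
          }

          buf_unlock_and_release(bp);     /* unlock + drop the reference */
  }

  int main(void)
  {
          struct item it = { 0 };
          struct buf bp = {
                  .lock = PTHREAD_MUTEX_INITIALIZER,
                  .li_list = &it,
                  .hold = 1,
          };

          detach_item(&bp, &it);
          printf("list empty: %s, hold=%d\n", bp.li_list ? "no" : "yes", bp.hold);
          return 0;
  }
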
diff --git a/queue-6.12/xfs-release-the-dquot-buf-outside-of-qli_lock.patch b/queue-6.12/xfs-release-the-dquot-buf-outside-of-qli_lock.patch
new file mode 100644 (file)
index 0000000..8a10d38
--- /dev/null
@@ -0,0 +1,77 @@
+From stable+bounces-114300-greg=kroah.com@vger.kernel.org Fri Feb  7 20:28:43 2025
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Fri, 07 Feb 2025 11:28:38 -0800
+Subject: xfs: release the dquot buf outside of qli_lock
+To: djwong@kernel.org, xfs-stable@lists.linux.dev
+Cc: syzbot+3126ab3db03db42e7a31@syzkaller.appspotmail.com, hch@lst.de, stable@vger.kernel.org
+Message-ID: <173895601550.3373740.12378168291194427407.stgit@frogsfrogsfrogs>
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit 1aacd3fac248902ea1f7607f2d12b93929a4833b upstream
+
+Lai Yi reported a lockdep complaint about circular locking:
+
+ Chain exists of:
+   &lp->qli_lock --> &bch->bc_lock --> &l->lock
+
+  Possible unsafe locking scenario:
+
+        CPU0                    CPU1
+        ----                    ----
+   lock(&l->lock);
+                                lock(&bch->bc_lock);
+                                lock(&l->lock);
+   lock(&lp->qli_lock);
+
+I /think/ the problem here is that xfs_dquot_attach_buf during
+quotacheck will release the buffer while it's holding the qli_lock.
+Because this is a cached buffer, xfs_buf_rele_cached takes b_lock before
+decrementing b_hold.  Other threads have taught lockdep that a locking
+dependency chain is bp->b_lock -> bch->bc_lock -> l(ru)->lock; and that
+another chain is l(ru)->lock -> lp->qli_lock.  Hence we do not want to
+take b_lock while holding qli_lock.
+
+Reported-by: syzbot+3126ab3db03db42e7a31@syzkaller.appspotmail.com
+Cc: <stable@vger.kernel.org> # v6.13-rc3
+Fixes: ca378189fdfa89 ("xfs: convert quotacheck to attach dquot buffers")
+Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_dquot.c |   12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -1308,7 +1308,8 @@ out_abort:
+ /*
+  * Attach a dquot buffer to this dquot to avoid allocating a buffer during a
+- * dqflush, since dqflush can be called from reclaim context.
++ * dqflush, since dqflush can be called from reclaim context.  Caller must hold
++ * the dqlock.
+  */
+ int
+ xfs_dquot_attach_buf(
+@@ -1329,13 +1330,16 @@ xfs_dquot_attach_buf(
+                       return error;
+               /*
+-               * Attach the dquot to the buffer so that the AIL does not have
+-               * to read the dquot buffer to push this item.
++               * Hold the dquot buffer so that we retain our ref to it after
++               * detaching it from the transaction, then give that ref to the
++               * dquot log item so that the AIL does not have to read the
++               * dquot buffer to push this item.
+                */
+               xfs_buf_hold(bp);
++              xfs_trans_brelse(tp, bp);
++
+               spin_lock(&qlip->qli_lock);
+               lip->li_buf = bp;
+-              xfs_trans_brelse(tp, bp);
+       }
+       qlip->qli_dirty = true;
+       spin_unlock(&qlip->qli_lock);
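
The fix is purely a reordering: the transaction release of the buffer,
which for a cached buffer can end up taking the buffer lock, now runs
before qli_lock is acquired, so no qli_lock -> b_lock dependency is
ever created and the circular chain reported by lockdep is broken.  A
tiny userspace illustration of that reordering, with two pthread
mutexes standing in for qli_lock and the buffer lock:

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t qli_lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t b_lock   = PTHREAD_MUTEX_INITIALIZER;

  /* A cached-buffer release may need the buffer lock. */
  static void release_buffer(void)
  {
          pthread_mutex_lock(&b_lock);
          pthread_mutex_unlock(&b_lock);
  }

  static void attach_buf_fixed(void)
  {
          /* Do the release (the b_lock user) first ... */
          release_buffer();

          /* ... then take qli_lock: no qli_lock -> b_lock edge exists. */
          pthread_mutex_lock(&qli_lock);
          /* ... publish the buffer pointer, mark the item dirty ... */
          pthread_mutex_unlock(&qli_lock);
  }

  int main(void)
  {
          attach_buf_fixed();
          printf("buffer attached without nesting b_lock inside qli_lock\n");
          return 0;
  }
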
diff --git a/queue-6.12/xfs-separate-dquot-buffer-reads-from-xfs_dqflush.patch b/queue-6.12/xfs-separate-dquot-buffer-reads-from-xfs_dqflush.patch
new file mode 100644 (file)
index 0000000..65f9c33
--- /dev/null
@@ -0,0 +1,245 @@
+From stable+bounces-114295-greg=kroah.com@vger.kernel.org Fri Feb  7 20:27:24 2025
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Fri, 07 Feb 2025 11:27:20 -0800
+Subject: xfs: separate dquot buffer reads from xfs_dqflush
+To: djwong@kernel.org, xfs-stable@lists.linux.dev
+Cc: hch@lst.de, stable@vger.kernel.org
+Message-ID: <173895601467.3373740.15562775549295313376.stgit@frogsfrogsfrogs>
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit a40fe30868ba433ac08376e30132400bec067583 upstream
+
+The first step towards holding the dquot buffer in the li_buf instead of
+reading it in the AIL is to separate the part that reads the buffer from
+the actual flush code.  There should be no functional changes.
+
+Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_dquot.c      |   57 +++++++++++++++++++++++++++++++-----------------
+ fs/xfs/xfs_dquot.h      |    4 ++-
+ fs/xfs/xfs_dquot_item.c |   20 +++++++++++++---
+ fs/xfs/xfs_qm.c         |   37 +++++++++++++++++++++++++------
+ 4 files changed, 86 insertions(+), 32 deletions(-)
+
+
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -1233,6 +1233,42 @@ xfs_qm_dqflush_check(
+ }
+ /*
++ * Get the buffer containing the on-disk dquot.
++ *
++ * Requires dquot flush lock, will clear the dirty flag, delete the quota log
++ * item from the AIL, and shut down the system if something goes wrong.
++ */
++int
++xfs_dquot_read_buf(
++      struct xfs_trans        *tp,
++      struct xfs_dquot        *dqp,
++      struct xfs_buf          **bpp)
++{
++      struct xfs_mount        *mp = dqp->q_mount;
++      struct xfs_buf          *bp = NULL;
++      int                     error;
++
++      error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
++                                 mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
++                                 &bp, &xfs_dquot_buf_ops);
++      if (error == -EAGAIN)
++              return error;
++      if (xfs_metadata_is_sick(error))
++              xfs_dquot_mark_sick(dqp);
++      if (error)
++              goto out_abort;
++
++      *bpp = bp;
++      return 0;
++
++out_abort:
++      dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
++      xfs_trans_ail_delete(&dqp->q_logitem.qli_item, 0);
++      xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
++      return error;
++}
++
++/*
+  * Write a modified dquot to disk.
+  * The dquot must be locked and the flush lock too taken by caller.
+  * The flush lock will not be unlocked until the dquot reaches the disk,
+@@ -1243,11 +1279,10 @@ xfs_qm_dqflush_check(
+ int
+ xfs_qm_dqflush(
+       struct xfs_dquot        *dqp,
+-      struct xfs_buf          **bpp)
++      struct xfs_buf          *bp)
+ {
+       struct xfs_mount        *mp = dqp->q_mount;
+       struct xfs_log_item     *lip = &dqp->q_logitem.qli_item;
+-      struct xfs_buf          *bp;
+       struct xfs_dqblk        *dqblk;
+       xfs_failaddr_t          fa;
+       int                     error;
+@@ -1257,28 +1292,12 @@ xfs_qm_dqflush(
+       trace_xfs_dqflush(dqp);
+-      *bpp = NULL;
+-
+       xfs_qm_dqunpin_wait(dqp);
+-      /*
+-       * Get the buffer containing the on-disk dquot
+-       */
+-      error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
+-                                 mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
+-                                 &bp, &xfs_dquot_buf_ops);
+-      if (error == -EAGAIN)
+-              goto out_unlock;
+-      if (xfs_metadata_is_sick(error))
+-              xfs_dquot_mark_sick(dqp);
+-      if (error)
+-              goto out_abort;
+-
+       fa = xfs_qm_dqflush_check(dqp);
+       if (fa) {
+               xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
+                               dqp->q_id, fa);
+-              xfs_buf_relse(bp);
+               xfs_dquot_mark_sick(dqp);
+               error = -EFSCORRUPTED;
+               goto out_abort;
+@@ -1328,14 +1347,12 @@ xfs_qm_dqflush(
+       }
+       trace_xfs_dqflush_done(dqp);
+-      *bpp = bp;
+       return 0;
+ out_abort:
+       dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
+       xfs_trans_ail_delete(lip, 0);
+       xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+-out_unlock:
+       xfs_dqfunlock(dqp);
+       return error;
+ }
+--- a/fs/xfs/xfs_dquot.h
++++ b/fs/xfs/xfs_dquot.h
+@@ -204,7 +204,9 @@ void xfs_dquot_to_disk(struct xfs_disk_d
+ #define XFS_DQ_IS_DIRTY(dqp)  ((dqp)->q_flags & XFS_DQFLAG_DIRTY)
+ void          xfs_qm_dqdestroy(struct xfs_dquot *dqp);
+-int           xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf **bpp);
++int           xfs_dquot_read_buf(struct xfs_trans *tp, struct xfs_dquot *dqp,
++                              struct xfs_buf **bpp);
++int           xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf *bp);
+ void          xfs_qm_dqunpin_wait(struct xfs_dquot *dqp);
+ void          xfs_qm_adjust_dqtimers(struct xfs_dquot *d);
+ void          xfs_qm_adjust_dqlimits(struct xfs_dquot *d);
+--- a/fs/xfs/xfs_dquot_item.c
++++ b/fs/xfs/xfs_dquot_item.c
+@@ -155,14 +155,26 @@ xfs_qm_dquot_logitem_push(
+       spin_unlock(&lip->li_ailp->ail_lock);
+-      error = xfs_qm_dqflush(dqp, &bp);
++      error = xfs_dquot_read_buf(NULL, dqp, &bp);
++      if (error) {
++              if (error == -EAGAIN)
++                      rval = XFS_ITEM_LOCKED;
++              xfs_dqfunlock(dqp);
++              goto out_relock_ail;
++      }
++
++      /*
++       * dqflush completes dqflock on error, and the delwri ioend does it on
++       * success.
++       */
++      error = xfs_qm_dqflush(dqp, bp);
+       if (!error) {
+               if (!xfs_buf_delwri_queue(bp, buffer_list))
+                       rval = XFS_ITEM_FLUSHING;
+-              xfs_buf_relse(bp);
+-      } else if (error == -EAGAIN)
+-              rval = XFS_ITEM_LOCKED;
++      }
++      xfs_buf_relse(bp);
++out_relock_ail:
+       spin_lock(&lip->li_ailp->ail_lock);
+ out_unlock:
+       xfs_dqunlock(dqp);
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -146,17 +146,28 @@ xfs_qm_dqpurge(
+                * We don't care about getting disk errors here. We need
+                * to purge this dquot anyway, so we go ahead regardless.
+                */
+-              error = xfs_qm_dqflush(dqp, &bp);
++              error = xfs_dquot_read_buf(NULL, dqp, &bp);
++              if (error == -EAGAIN) {
++                      xfs_dqfunlock(dqp);
++                      dqp->q_flags &= ~XFS_DQFLAG_FREEING;
++                      goto out_unlock;
++              }
++              if (error)
++                      goto out_funlock;
++
++              /*
++               * dqflush completes dqflock on error, and the bwrite ioend
++               * does it on success.
++               */
++              error = xfs_qm_dqflush(dqp, bp);
+               if (!error) {
+                       error = xfs_bwrite(bp);
+                       xfs_buf_relse(bp);
+-              } else if (error == -EAGAIN) {
+-                      dqp->q_flags &= ~XFS_DQFLAG_FREEING;
+-                      goto out_unlock;
+               }
+               xfs_dqflock(dqp);
+       }
++out_funlock:
+       ASSERT(atomic_read(&dqp->q_pincount) == 0);
+       ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
+               !test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
+@@ -462,7 +473,17 @@ xfs_qm_dquot_isolate(
+               /* we have to drop the LRU lock to flush the dquot */
+               spin_unlock(lru_lock);
+-              error = xfs_qm_dqflush(dqp, &bp);
++              error = xfs_dquot_read_buf(NULL, dqp, &bp);
++              if (error) {
++                      xfs_dqfunlock(dqp);
++                      goto out_unlock_dirty;
++              }
++
++              /*
++               * dqflush completes dqflock on error, and the delwri ioend
++               * does it on success.
++               */
++              error = xfs_qm_dqflush(dqp, bp);
+               if (error)
+                       goto out_unlock_dirty;
+@@ -1287,11 +1308,13 @@ xfs_qm_flush_one(
+               goto out_unlock;
+       }
+-      error = xfs_qm_dqflush(dqp, &bp);
++      error = xfs_dquot_read_buf(NULL, dqp, &bp);
+       if (error)
+               goto out_unlock;
+-      xfs_buf_delwri_queue(bp, buffer_list);
++      error = xfs_qm_dqflush(dqp, bp);
++      if (!error)
++              xfs_buf_delwri_queue(bp, buffer_list);
+       xfs_buf_relse(bp);
+ out_unlock:
+       xfs_dqunlock(dqp);
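
The refactor splits one job into two: xfs_dquot_read_buf() obtains the
backing buffer, and xfs_qm_dqflush() now only formats the dquot into a
buffer the caller hands it, which is what later allows an already
attached buffer to be substituted for a fresh read.  A stripped-down
sketch of that split in plain C, with simplified types that are not the
kernel API:

  #include <stdio.h>

  struct buf   { char data[64]; };
  struct dquot { int id; int dirty; };

  /* Step 1: getting the backing buffer is its own operation ... */
  static int dquot_read_buf(struct dquot *dq, struct buf **bpp)
  {
          static struct buf backing;      /* stands in for the on-disk block */

          (void)dq;                       /* a real read would map dq to a block */
          *bpp = &backing;
          return 0;
  }

  /* ... step 2: flushing only formats the dquot into the given buffer. */
  static int dquot_flush(struct dquot *dq, struct buf *bp)
  {
          snprintf(bp->data, sizeof(bp->data), "dquot %d", dq->id);
          dq->dirty = 0;
          return 0;
  }

  int main(void)
  {
          struct dquot dq = { .id = 7, .dirty = 1 };
          struct buf *bp;

          if (dquot_read_buf(&dq, &bp) == 0 && dquot_flush(&dq, bp) == 0)
                  printf("flushed: %s (dirty=%d)\n", bp->data, dq.dirty);
          return 0;
  }
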