--- /dev/null
+From 6bc907b3f5e547e475a5ed1adecca2870735e857 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Oct 2025 09:35:27 +0000
+Subject: btrfs: mark dirty extent range for out of bound prealloc extents
+
+From: austinchang <austinchang@synology.com>
+
+[ Upstream commit 3b1a4a59a2086badab391687a6a0b86e03048393 ]
+
+In btrfs_fallocate(), when the allocated range overlaps with a prealloc
+extent and the extent starts after i_size, the range doesn't get marked
+dirty in file_extent_tree. This results in persisting an incorrect
+disk_i_size for the inode when not using the no-holes feature.
+
+The bug has been reproducible since commit 41a2ee75aab0 ("btrfs: introduce
+per-inode file extent tree"), was then hidden by commit 3d7db6e8bd22
+("btrfs: don't allocate file extent tree for non regular files") and became
+visible again after commit 8679d2687c35 ("btrfs: initialize
+inode::file_extent_tree after i_mode has been set"), which fixes the
+previous commit.
+
+The following reproducer triggers the problem:
+
+$ cat test.sh
+
+MNT=/mnt/test
+DEV=/dev/vdb
+
+mkdir -p $MNT
+
+mkfs.btrfs -f -O ^no-holes $DEV
+mount $DEV $MNT
+
+touch $MNT/file1
+fallocate -n -o 1M -l 2M $MNT/file1
+
+umount $MNT
+mount $DEV $MNT
+
+len=$((1 * 1024 * 1024))
+
+fallocate -o 1M -l $len $MNT/file1
+
+du --bytes $MNT/file1
+
+umount $MNT
+mount $DEV $MNT
+
+du --bytes $MNT/file1
+
+umount $MNT
+
+Running the reproducer gives the following result:
+
+$ ./test.sh
+(...)
+2097152 /mnt/test/file1
+1048576 /mnt/test/file1
+
+The difference is exactly the 1048576 bytes we allocated.
+
+Fix by adding a call to btrfs_inode_set_file_extent_range() in
+btrfs_fallocate_update_isize().
+
+Fixes: 41a2ee75aab0 ("btrfs: introduce per-inode file extent tree")
+Signed-off-by: austinchang <austinchang@synology.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/file.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 53a3c32a0f8ce..38a293a9d0644 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -3049,12 +3049,22 @@ static int btrfs_fallocate_update_isize(struct inode *inode,
+ {
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
++ u64 range_start;
++ u64 range_end;
+ int ret;
+ int ret2;
+
+ if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
+ return 0;
+
++ range_start = round_down(i_size_read(inode), root->fs_info->sectorsize);
++ range_end = round_up(end, root->fs_info->sectorsize);
++
++ ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), range_start,
++ range_end - range_start);
++ if (ret)
++ return ret;
++
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+--
+2.51.0
+
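+A standalone sketch (not part of the queued patch) of the range
+arithmetic the fix performs, assuming a 4K sectorsize and the
+reproducer's 1M i_size with a 2M end offset:
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /* Same rounding the kernel's round_down()/round_up() macros do
+     * for power-of-two alignments. */
+    static uint64_t round_down_p2(uint64_t x, uint64_t a) { return x & ~(a - 1); }
+    static uint64_t round_up_p2(uint64_t x, uint64_t a) { return (x + a - 1) & ~(a - 1); }
+
+    int main(void)
+    {
+        uint64_t sectorsize = 4096;            /* assumed sector size */
+        uint64_t i_size = 1 * 1024 * 1024;     /* i_size before fallocate */
+        uint64_t end = 2 * 1024 * 1024;        /* end of allocated range */
+        uint64_t start = round_down_p2(i_size, sectorsize);
+        uint64_t stop = round_up_p2(end, sectorsize);
+
+        /* [start, stop) is the span the fix marks dirty in the file
+         * extent tree before disk_i_size is updated. */
+        printf("mark dirty: [%llu, %llu), len %llu\n",
+               (unsigned long long)start, (unsigned long long)stop,
+               (unsigned long long)(stop - start));
+        return 0;
+    }
+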
page_pool-clamp-pool-size-to-max-16k-pages.patch
orangefs-fix-xattr-related-buffer-overflow.patch
acpica-update-dsmethod.c-to-get-rid-of-unused-variab.patch
+btrfs-mark-dirty-extent-range-for-out-of-bound-preal.patch
--- /dev/null
+From 2ee909d150f52eed752c06b2d6ae627719c15e77 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Oct 2025 09:35:27 +0000
+Subject: btrfs: mark dirty extent range for out of bound prealloc extents
+
+From: austinchang <austinchang@synology.com>
+
+[ Upstream commit 3b1a4a59a2086badab391687a6a0b86e03048393 ]
+
+In btrfs_fallocate(), when the allocated range overlaps with a prealloc
+extent and the extent starts after i_size, the range doesn't get marked
+dirty in file_extent_tree. This results in persisting an incorrect
+disk_i_size for the inode when not using the no-holes feature.
+
+The bug has been reproducible since commit 41a2ee75aab0 ("btrfs: introduce
+per-inode file extent tree"), was then hidden by commit 3d7db6e8bd22
+("btrfs: don't allocate file extent tree for non regular files") and became
+visible again after commit 8679d2687c35 ("btrfs: initialize
+inode::file_extent_tree after i_mode has been set"), which fixes the
+previous commit.
+
+The following reproducer triggers the problem:
+
+$ cat test.sh
+
+MNT=/mnt/test
+DEV=/dev/vdb
+
+mkdir -p $MNT
+
+mkfs.btrfs -f -O ^no-holes $DEV
+mount $DEV $MNT
+
+touch $MNT/file1
+fallocate -n -o 1M -l 2M $MNT/file1
+
+umount $MNT
+mount $DEV $MNT
+
+len=$((1 * 1024 * 1024))
+
+fallocate -o 1M -l $len $MNT/file1
+
+du --bytes $MNT/file1
+
+umount $MNT
+mount $DEV $MNT
+
+du --bytes $MNT/file1
+
+umount $MNT
+
+Running the reproducer gives the following result:
+
+$ ./test.sh
+(...)
+2097152 /mnt/test/file1
+1048576 /mnt/test/file1
+
+The difference is exactly the 1048576 bytes we allocated.
+
+Fix by adding a call to btrfs_inode_set_file_extent_range() in
+btrfs_fallocate_update_isize().
+
+Fixes: 41a2ee75aab0 ("btrfs: introduce per-inode file extent tree")
+Signed-off-by: austinchang <austinchang@synology.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/file.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 0dd5a90feca34..cfc515f0d25f9 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -3216,12 +3216,22 @@ static int btrfs_fallocate_update_isize(struct inode *inode,
+ {
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
++ u64 range_start;
++ u64 range_end;
+ int ret;
+ int ret2;
+
+ if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
+ return 0;
+
++ range_start = round_down(i_size_read(inode), root->fs_info->sectorsize);
++ range_end = round_up(end, root->fs_info->sectorsize);
++
++ ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), range_start,
++ range_end - range_start);
++ if (ret)
++ return ret;
++
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+--
+2.51.0
+
--- /dev/null
+From 084b8cd017260f323d0e0050782e3e83d8d4f0ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 19:40:50 +0800
+Subject: RDMA/hns: Fix wrong WQE data when QP wraps around
+
+From: Junxian Huang <huangjunxian6@hisilicon.com>
+
+[ Upstream commit fe9622011f955e35ba84d3af7b2f2fed31cf8ca1 ]
+
+When a QP wraps around, WQE data from the previous use of the same
+position remains, since the driver does not clear it. The WQE field
+layout differs across opcodes, so fields that are not explicitly
+assigned for the current opcode retain stale values and are issued
+to HW by mistake. The affected fields are:
+
+* MSG_START_SGE_IDX field in ATOMIC WQE
+* BLOCK_SIZE and ZBVA fields in FRMR WQE
+* DirectWQE fields when DirectWQE not used
+
+For the ATOMIC WQE, always set the latest sge index in
+MSG_START_SGE_IDX as required by HW.
+
+For the FRMR WQE and DirectWQE, clear only the unassigned fields
+instead of the entire WQE to avoid a performance penalty.
+
+Fixes: 68a997c5d28c ("RDMA/hns: Add FRMR support for hip08")
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20251016114051.1963197-4-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 74f48e201031d..8baf6fb2d1fa5 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -125,6 +125,8 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+ hr_reg_clear(fseg, FRMR_BLK_MODE);
++ hr_reg_clear(fseg, FRMR_BLOCK_SIZE);
++ hr_reg_clear(fseg, FRMR_ZBVA);
+ }
+
+ static void set_atomic_seg(const struct ib_send_wr *wr,
+@@ -309,9 +311,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ int j = 0;
+ int i;
+
+- hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
+- (*sge_ind) & (qp->sge.sge_cnt - 1));
+-
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
+ !!(wr->send_flags & IB_SEND_INLINE));
+ if (wr->send_flags & IB_SEND_INLINE)
+@@ -574,6 +573,9 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
++ curr_idx & (qp->sge.sge_cnt - 1));
++
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+ set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
+@@ -717,6 +719,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
+ owner_bit =
+ ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+
++ /* RC and UD share the same DirectWQE field layout */
++ ((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0;
++
+ /* Corresponding to the QP type, wqe process separately */
+ if (ibqp->qp_type == IB_QPT_RC)
+ ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+--
+2.51.0
+
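+A minimal standalone illustration (not the driver code) of why reused
+ring slots carry stale data: WQE slots are addressed as idx & (cnt - 1)
+with cnt a power of two, so after a wrap-around any field the current
+opcode does not write still holds the previous occupant's value:
+
+    #include <stdint.h>
+    #include <stdio.h>
+    #include <string.h>
+
+    struct fake_wqe { uint32_t opcode; uint32_t start_sge_idx; };
+
+    int main(void)
+    {
+        struct fake_wqe ring[4];       /* cnt = 4, a power of two */
+        memset(ring, 0, sizeof(ring));
+
+        /* First pass: an opcode that assigns start_sge_idx. */
+        ring[0 & 3] = (struct fake_wqe){ .opcode = 1, .start_sge_idx = 7 };
+
+        /* After wrap-around (slot 4 & 3 == slot 0): an opcode that
+         * skips the field; without an explicit write, HW sees 7. */
+        ring[4 & 3].opcode = 2;
+        printf("stale start_sge_idx = %u\n", ring[4 & 3].start_sge_idx);
+        return 0;
+    }
+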
--- /dev/null
+From 52e0140d6b8621c22b1d399426615ffecde45932 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 19:08:50 +0000
+Subject: RDMA/irdma: Fix SD index calculation
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 8d158f47f1f33d8747e80c3afbea5aa337e59d41 ]
+
+In some cases, it is possible for pble_rsrc->next_fpm_addr to be
+larger than what fits in a u32, so remove the u32 cast to avoid
+unintentional truncation.
+
+This fixes the following error that can be observed when registering
+massive memory regions:
+
+[ 447.227494] (NULL ib_device): cqp opcode = 0x1f maj_err_code = 0xffff min_err_code = 0x800c
+[ 447.227505] (NULL ib_device): [Update PE SDs Cmd Error][op_code=21] status=-5 waiting=1 completion_err=1 maj=0xffff min=0x800c
+
+Fixes: e8c4dbc2fcac ("RDMA/irdma: Add PBLE resource manager")
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Link: https://patch.msgid.link/20250923190850.1022773-1-jmoroni@google.com
+Acked-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/pble.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
+index fed49da770f3b..6562592695b70 100644
+--- a/drivers/infiniband/hw/irdma/pble.c
++++ b/drivers/infiniband/hw/irdma/pble.c
+@@ -74,7 +74,7 @@ irdma_hmc_init_pble(struct irdma_sc_dev *dev,
+ static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct sd_pd_idx *idx)
+ {
+- idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
++ idx->sd_idx = pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
+ idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
+ idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
+ }
+--
+2.51.0
+
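+A standalone demonstration (not driver code) of the truncation the
+removed cast caused, with an illustrative block size: casting the u64
+address to u32 before the division corrupts the SD index once
+next_fpm_addr exceeds 4 GiB:
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    #define DIRECT_BP_SIZE (2ULL * 1024 * 1024)  /* illustrative value */
+
+    int main(void)
+    {
+        uint64_t next_fpm_addr = 6ULL * 1024 * 1024 * 1024;  /* 6 GiB */
+
+        uint32_t bad  = (uint32_t)next_fpm_addr / DIRECT_BP_SIZE; /* old */
+        uint64_t good = next_fpm_addr / DIRECT_BP_SIZE;           /* fixed */
+
+        printf("truncated sd_idx = %u, correct sd_idx = %llu\n",
+               bad, (unsigned long long)good);
+        return 0;
+    }
+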
--- /dev/null
+From d699c23ecefb3583a38ba302db7e6d6f4c7e7302 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 14:21:28 +0000
+Subject: RDMA/irdma: Remove unused struct irdma_cq fields
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 880245fd029a8f8ee8fd557c2681d077c1b1a959 ]
+
+These fields were set but not used anywhere, so remove them.
+
+Link: https://patch.msgid.link/r/20250923142128.943240-1-jmoroni@google.com
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Stable-dep-of: 5575b7646b94 ("RDMA/irdma: Set irdma_cq cq_num field during CQ create")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/verbs.c | 3 ---
+ drivers/infiniband/hw/irdma/verbs.h | 6 ------
+ 2 files changed, 9 deletions(-)
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index b2bf147883edb..8896cbf9ec4d0 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -2006,8 +2006,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ goto cq_free_rsrc;
+ }
+
+- iwcq->iwpbl = iwpbl;
+- iwcq->cq_mem_size = 0;
+ cqmr = &iwpbl->cq_mr;
+
+ if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
+@@ -2022,7 +2020,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ err_code = -EPROTO;
+ goto cq_free_rsrc;
+ }
+- iwcq->iwpbl_shadow = iwpbl_shadow;
+ cqmr_shadow = &iwpbl_shadow->cq_mr;
+ info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
+ cqmr->split = true;
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index a934c985dbb4d..a74b24429b246 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -107,19 +107,13 @@ struct irdma_mr {
+ struct irdma_cq {
+ struct ib_cq ibcq;
+ struct irdma_sc_cq sc_cq;
+- u16 cq_head;
+- u16 cq_size;
+ u16 cq_num;
+ bool user_mode;
+ atomic_t armed;
+ enum irdma_cmpl_notify last_notify;
+- u32 polled_cmpls;
+- u32 cq_mem_size;
+ struct irdma_dma_mem kmem;
+ struct irdma_dma_mem kmem_shadow;
+ spinlock_t lock; /* for poll cq */
+- struct irdma_pbl *iwpbl;
+- struct irdma_pbl *iwpbl_shadow;
+ struct list_head resize_list;
+ struct irdma_cq_poll_info cur_cqe;
+ struct list_head cmpl_generated;
+--
+2.51.0
+
--- /dev/null
+From e1ad17e208536a72a88e37a40ce2f2c5c4c836d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 14:24:39 +0000
+Subject: RDMA/irdma: Set irdma_cq cq_num field during CQ create
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 5575b7646b94c0afb0f4c0d86e00e13cf3397a62 ]
+
+The driver maintains a CQ table that is used to ensure that a CQ is
+still valid when processing CQ related AEs. When a CQ is destroyed,
+the table entry is cleared, using irdma_cq.cq_num as the index. This
+field was never set, so the driver was always clearing entry 0.
+
+Additionally, the cq_num field size was increased to accommodate HW
+supporting more than 64K CQs.
+
+Fixes: b48c24c2d710 ("RDMA/irdma: Implement device supported verb APIs")
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Link: https://patch.msgid.link/20250923142439.943930-1-jmoroni@google.com
+Acked-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/verbs.c | 1 +
+ drivers/infiniband/hw/irdma/verbs.h | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 8896cbf9ec4d0..e62a825622834 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -1968,6 +1968,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ spin_lock_init(&iwcq->lock);
+ INIT_LIST_HEAD(&iwcq->resize_list);
+ INIT_LIST_HEAD(&iwcq->cmpl_generated);
++ iwcq->cq_num = cq_num;
+ info.dev = dev;
+ ukinfo->cq_size = max(entries, 4);
+ ukinfo->cq_id = cq_num;
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index a74b24429b246..13c66908411f7 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -107,7 +107,7 @@ struct irdma_mr {
+ struct irdma_cq {
+ struct ib_cq ibcq;
+ struct irdma_sc_cq sc_cq;
+- u16 cq_num;
++ u32 cq_num;
+ bool user_mode;
+ atomic_t armed;
+ enum irdma_cmpl_notify last_notify;
+--
+2.51.0
+
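+A standalone sketch (not driver code) of the failure mode: with cq_num
+left at its zero default, every destroy clears table slot 0 instead of
+the CQ's own slot, leaving stale entries behind:
+
+    #include <stdio.h>
+
+    #define CQ_TABLE_SIZE 8
+
+    struct cq { unsigned int cq_num; };
+
+    static struct cq *cq_table[CQ_TABLE_SIZE];
+
+    int main(void)
+    {
+        struct cq a = { 0 }, b = { 0 };  /* cq_num never assigned */
+
+        cq_table[3] = &a;                /* created as CQ 3 */
+        cq_table[5] = &b;                /* created as CQ 5 */
+
+        /* The destroy path indexes the table with cq_num, which is 0,
+         * so slots 3 and 5 keep dangling pointers after destroy. */
+        cq_table[a.cq_num] = NULL;
+        cq_table[b.cq_num] = NULL;
+        printf("slot3=%p slot5=%p\n",
+               (void *)cq_table[3], (void *)cq_table[5]);
+        return 0;
+    }
+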
page_pool-clamp-pool-size-to-max-16k-pages.patch
orangefs-fix-xattr-related-buffer-overflow.patch
acpica-update-dsmethod.c-to-get-rid-of-unused-variab.patch
+rdma-irdma-fix-sd-index-calculation.patch
+rdma-irdma-remove-unused-struct-irdma_cq-fields.patch
+rdma-irdma-set-irdma_cq-cq_num-field-during-cq-creat.patch
+rdma-hns-fix-wrong-wqe-data-when-qp-wraps-around.patch
+btrfs-mark-dirty-extent-range-for-out-of-bound-preal.patch
--- /dev/null
+From cc74a1322763ebc4d445444404271c55aeb4b246 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Oct 2025 09:35:27 +0000
+Subject: btrfs: mark dirty extent range for out of bound prealloc extents
+
+From: austinchang <austinchang@synology.com>
+
+[ Upstream commit 3b1a4a59a2086badab391687a6a0b86e03048393 ]
+
+In btrfs_fallocate(), when the allocated range overlaps with a prealloc
+extent and the extent starts after i_size, the range doesn't get marked
+dirty in file_extent_tree. This results in persisting an incorrect
+disk_i_size for the inode when not using the no-holes feature.
+
+The bug has been reproducible since commit 41a2ee75aab0 ("btrfs: introduce
+per-inode file extent tree"), was then hidden by commit 3d7db6e8bd22
+("btrfs: don't allocate file extent tree for non regular files") and became
+visible again after commit 8679d2687c35 ("btrfs: initialize
+inode::file_extent_tree after i_mode has been set"), which fixes the
+previous commit.
+
+The following reproducer triggers the problem:
+
+$ cat test.sh
+
+MNT=/mnt/test
+DEV=/dev/vdb
+
+mkdir -p $MNT
+
+mkfs.btrfs -f -O ^no-holes $DEV
+mount $DEV $MNT
+
+touch $MNT/file1
+fallocate -n -o 1M -l 2M $MNT/file1
+
+umount $MNT
+mount $DEV $MNT
+
+len=$((1 * 1024 * 1024))
+
+fallocate -o 1M -l $len $MNT/file1
+
+du --bytes $MNT/file1
+
+umount $MNT
+mount $DEV $MNT
+
+du --bytes $MNT/file1
+
+umount $MNT
+
+Running the reproducer gives the following result:
+
+$ ./test.sh
+(...)
+2097152 /mnt/test/file1
+1048576 /mnt/test/file1
+
+The difference is exactly the 1048576 bytes we allocated.
+
+Fix by adding a call to btrfs_inode_set_file_extent_range() in
+btrfs_fallocate_update_isize().
+
+Fixes: 41a2ee75aab0 ("btrfs: introduce per-inode file extent tree")
+Signed-off-by: austinchang <austinchang@synology.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/file.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 3814f09dc4ae0..b670d5d72a382 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2828,12 +2828,22 @@ static int btrfs_fallocate_update_isize(struct inode *inode,
+ {
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
++ u64 range_start;
++ u64 range_end;
+ int ret;
+ int ret2;
+
+ if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
+ return 0;
+
++ range_start = round_down(i_size_read(inode), root->fs_info->sectorsize);
++ range_end = round_up(end, root->fs_info->sectorsize);
++
++ ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), range_start,
++ range_end - range_start);
++ if (ret)
++ return ret;
++
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+--
+2.51.0
+
--- /dev/null
+From 4acadb6365d5253f5e96c7755e16d9bab32b0ce5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 19:40:49 +0800
+Subject: RDMA/hns: Fix the modification of max_send_sge
+
+From: wenglianfa <wenglianfa@huawei.com>
+
+[ Upstream commit f5a7cbea5411668d429eb4ffe96c4063fe8dac9e ]
+
+The actual sge number may exceed the value specified in init_attr->cap
+when HW needs extra sges to enable the inline feature. Since these
+extra sges are not expected by the ULP, return the user-specified value
+to the ULP instead of the expanded sge number.
+
+Fixes: 0c5e259b06a8 ("RDMA/hns: Fix incorrect sge nums calculation")
+Signed-off-by: wenglianfa <wenglianfa@huawei.com>
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20251016114051.1963197-3-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 0f0351abe9b46..72787ff924f44 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -661,7 +661,6 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
+
+ hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+ hr_qp->sq.wqe_cnt = cnt;
+- cap->max_send_sge = hr_qp->sq.max_gs;
+
+ return 0;
+ }
+@@ -743,7 +742,6 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
+
+ /* sync the parameters of kernel QP to user's configuration */
+ cap->max_send_wr = cnt;
+- cap->max_send_sge = hr_qp->sq.max_gs;
+
+ return 0;
+ }
+--
+2.51.0
+
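+A standalone illustration (not driver code) of the contract the fix
+preserves: the driver may size the SQ with extra internal sges for
+inline support, but the capability it reports back must remain what
+the ULP asked for:
+
+    #include <stdio.h>
+
+    struct caps { unsigned int max_send_sge; };
+
+    int main(void)
+    {
+        struct caps cap = { .max_send_sge = 4 };  /* ULP's request */
+
+        /* The driver internally expands to, say, 8 sges for inline
+         * data, but after the fix it no longer writes that value
+         * back into cap. */
+        unsigned int internal_max_gs = 8;
+
+        printf("internal sges = %u, reported to ULP = %u\n",
+               internal_max_gs, cap.max_send_sge);
+        return 0;
+    }
+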
--- /dev/null
+From f505dcc1f94ac7313a75428b8396cb902d265625 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 19:40:50 +0800
+Subject: RDMA/hns: Fix wrong WQE data when QP wraps around
+
+From: Junxian Huang <huangjunxian6@hisilicon.com>
+
+[ Upstream commit fe9622011f955e35ba84d3af7b2f2fed31cf8ca1 ]
+
+When a QP wraps around, WQE data from the previous use of the same
+position remains, since the driver does not clear it. The WQE field
+layout differs across opcodes, so fields that are not explicitly
+assigned for the current opcode retain stale values and are issued
+to HW by mistake. The affected fields are:
+
+* MSG_START_SGE_IDX field in ATOMIC WQE
+* BLOCK_SIZE and ZBVA fields in FRMR WQE
+* DirectWQE fields when DirectWQE not used
+
+For the ATOMIC WQE, always set the latest sge index in
+MSG_START_SGE_IDX as required by HW.
+
+For the FRMR WQE and DirectWQE, clear only the unassigned fields
+instead of the entire WQE to avoid a performance penalty.
+
+Fixes: 68a997c5d28c ("RDMA/hns: Add FRMR support for hip08")
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20251016114051.1963197-4-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 72c719805af32..5fdab366fb32d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -161,6 +161,8 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+ hr_reg_clear(fseg, FRMR_BLK_MODE);
++ hr_reg_clear(fseg, FRMR_BLOCK_SIZE);
++ hr_reg_clear(fseg, FRMR_ZBVA);
+ }
+
+ static void set_atomic_seg(const struct ib_send_wr *wr,
+@@ -335,9 +337,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ int j = 0;
+ int i;
+
+- hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
+- (*sge_ind) & (qp->sge.sge_cnt - 1));
+-
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
+ !!(wr->send_flags & IB_SEND_INLINE));
+ if (wr->send_flags & IB_SEND_INLINE)
+@@ -586,6 +585,9 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
++ curr_idx & (qp->sge.sge_cnt - 1));
++
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ if (msg_len != ATOMIC_WR_LEN)
+@@ -734,6 +736,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
+ owner_bit =
+ ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+
++ /* RC and UD share the same DirectWQE field layout */
++ ((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0;
++
+ /* Corresponding to the QP type, wqe process separately */
+ if (ibqp->qp_type == IB_QPT_RC)
+ ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+--
+2.51.0
+
--- /dev/null
+From 3064f3bc1a6bceb8aed90fe8e9ecb3a05c031096 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 19:08:50 +0000
+Subject: RDMA/irdma: Fix SD index calculation
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 8d158f47f1f33d8747e80c3afbea5aa337e59d41 ]
+
+In some cases, it is possible for pble_rsrc->next_fpm_addr to be
+larger than what fits in a u32, so remove the u32 cast to avoid
+unintentional truncation.
+
+This fixes the following error that can be observed when registering
+massive memory regions:
+
+[ 447.227494] (NULL ib_device): cqp opcode = 0x1f maj_err_code = 0xffff min_err_code = 0x800c
+[ 447.227505] (NULL ib_device): [Update PE SDs Cmd Error][op_code=21] status=-5 waiting=1 completion_err=1 maj=0xffff min=0x800c
+
+Fixes: e8c4dbc2fcac ("RDMA/irdma: Add PBLE resource manager")
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Link: https://patch.msgid.link/20250923190850.1022773-1-jmoroni@google.com
+Acked-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/pble.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
+index cdc0b8a6ed483..8dd9e44ed2a4c 100644
+--- a/drivers/infiniband/hw/irdma/pble.c
++++ b/drivers/infiniband/hw/irdma/pble.c
+@@ -71,7 +71,7 @@ int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
+ static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct sd_pd_idx *idx)
+ {
+- idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
++ idx->sd_idx = pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
+ idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
+ idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
+ }
+--
+2.51.0
+
--- /dev/null
+From 82a6c250d8128fcd9469f229a69eaa0d9dc459b7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 14:21:28 +0000
+Subject: RDMA/irdma: Remove unused struct irdma_cq fields
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 880245fd029a8f8ee8fd557c2681d077c1b1a959 ]
+
+These fields were set but not used anywhere, so remove them.
+
+Link: https://patch.msgid.link/r/20250923142128.943240-1-jmoroni@google.com
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Stable-dep-of: 5575b7646b94 ("RDMA/irdma: Set irdma_cq cq_num field during CQ create")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/verbs.c | 3 ---
+ drivers/infiniband/hw/irdma/verbs.h | 6 ------
+ 2 files changed, 9 deletions(-)
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index baa3dff6faab1..fb02017a1aa63 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -2088,8 +2088,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ goto cq_free_rsrc;
+ }
+
+- iwcq->iwpbl = iwpbl;
+- iwcq->cq_mem_size = 0;
+ cqmr = &iwpbl->cq_mr;
+
+ if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
+@@ -2104,7 +2102,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ err_code = -EPROTO;
+ goto cq_free_rsrc;
+ }
+- iwcq->iwpbl_shadow = iwpbl_shadow;
+ cqmr_shadow = &iwpbl_shadow->cq_mr;
+ info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
+ cqmr->split = true;
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index 0bc0d0faa0868..b55d30df96261 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -113,19 +113,13 @@ struct irdma_mr {
+ struct irdma_cq {
+ struct ib_cq ibcq;
+ struct irdma_sc_cq sc_cq;
+- u16 cq_head;
+- u16 cq_size;
+ u16 cq_num;
+ bool user_mode;
+ atomic_t armed;
+ enum irdma_cmpl_notify last_notify;
+- u32 polled_cmpls;
+- u32 cq_mem_size;
+ struct irdma_dma_mem kmem;
+ struct irdma_dma_mem kmem_shadow;
+ spinlock_t lock; /* for poll cq */
+- struct irdma_pbl *iwpbl;
+- struct irdma_pbl *iwpbl_shadow;
+ struct list_head resize_list;
+ struct irdma_cq_poll_info cur_cqe;
+ struct list_head cmpl_generated;
+--
+2.51.0
+
--- /dev/null
+From 0c48d6a941bca3d1b8cfe4a2567d8ac6b98128c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 14:24:39 +0000
+Subject: RDMA/irdma: Set irdma_cq cq_num field during CQ create
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 5575b7646b94c0afb0f4c0d86e00e13cf3397a62 ]
+
+The driver maintains a CQ table that is used to ensure that a CQ is
+still valid when processing CQ related AEs. When a CQ is destroyed,
+the table entry is cleared, using irdma_cq.cq_num as the index. This
+field was never set, so the driver was always clearing entry 0.
+
+Additionally, the cq_num field size was increased to accommodate HW
+supporting more than 64K CQs.
+
+Fixes: b48c24c2d710 ("RDMA/irdma: Implement device supported verb APIs")
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Link: https://patch.msgid.link/20250923142439.943930-1-jmoroni@google.com
+Acked-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/verbs.c | 1 +
+ drivers/infiniband/hw/irdma/verbs.h | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index fb02017a1aa63..6fc622e3eb07a 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -2050,6 +2050,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ spin_lock_init(&iwcq->lock);
+ INIT_LIST_HEAD(&iwcq->resize_list);
+ INIT_LIST_HEAD(&iwcq->cmpl_generated);
++ iwcq->cq_num = cq_num;
+ info.dev = dev;
+ ukinfo->cq_size = max(entries, 4);
+ ukinfo->cq_id = cq_num;
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index b55d30df96261..8809465020e13 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -113,7 +113,7 @@ struct irdma_mr {
+ struct irdma_cq {
+ struct ib_cq ibcq;
+ struct irdma_sc_cq sc_cq;
+- u16 cq_num;
++ u32 cq_num;
+ bool user_mode;
+ atomic_t armed;
+ enum irdma_cmpl_notify last_notify;
+--
+2.51.0
+
ksmbd-use-sock_create_kern-interface-to-create-kerne.patch
smb-client-transport-avoid-reconnects-triggered-by-p.patch
acpica-update-dsmethod.c-to-get-rid-of-unused-variab.patch
+rdma-irdma-fix-sd-index-calculation.patch
+rdma-irdma-remove-unused-struct-irdma_cq-fields.patch
+rdma-irdma-set-irdma_cq-cq_num-field-during-cq-creat.patch
+rdma-hns-fix-the-modification-of-max_send_sge.patch
+rdma-hns-fix-wrong-wqe-data-when-qp-wraps-around.patch
+btrfs-mark-dirty-extent-range-for-out-of-bound-preal.patch
--- /dev/null
+From e6c6c953cccb5d4b23c7c63944ccba73835f07b5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Oct 2025 01:30:21 +0530
+Subject: btrfs: fix memory leak of qgroup_list in btrfs_add_qgroup_relation
+
+From: Shardul Bankar <shardulsb08@gmail.com>
+
+[ Upstream commit f260c6aff0b8af236084012d14f9f1bf792ea883 ]
+
+When btrfs_add_qgroup_relation() is called with invalid qgroup levels
+(src >= dst), the function returns -EINVAL directly without freeing the
+preallocated qgroup_list structure passed by the caller. This causes a
+memory leak because the caller unconditionally sets the pointer to NULL
+after the call, preventing any cleanup.
+
+The issue occurs because the level validation check happens before the
+mutex is acquired and before any error handling path that would free
+the prealloc pointer. On this early return, the cleanup code at the
+'out' label (which includes kfree(prealloc)) is never reached.
+
+In btrfs_ioctl_qgroup_assign(), the code pattern is:
+
+ prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
+ ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst, prealloc);
+ prealloc = NULL; // Always set to NULL regardless of return value
+ ...
+ kfree(prealloc); // This becomes kfree(NULL), does nothing
+
+When the level check fails, 'prealloc' is never freed by either the
+callee or the caller, resulting in a 64-byte memory leak per failed
+operation. This can be triggered repeatedly by an unprivileged user
+with access to a writable btrfs mount, potentially exhausting kernel
+memory.
+
+Fix this by freeing prealloc before the early return, ensuring prealloc
+is always freed on all error paths.
+
+Fixes: 4addc1ffd67a ("btrfs: qgroup: preallocate memory before adding a relation")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Shardul Bankar <shardulsb08@gmail.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/qgroup.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 2c9b38ae40da2..3c77f3506faf3 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1585,8 +1585,10 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst
+ ASSERT(prealloc);
+
+ /* Check the level of src and dst first */
+- if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
++ if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) {
++ kfree(prealloc);
+ return -EINVAL;
++ }
+
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
+ if (!fs_info->quota_root) {
+--
+2.51.0
+
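+A minimal standalone sketch of the ownership convention the fix
+restores, with a hypothetical stub mirroring the fixed pattern: once
+the caller hands over the preallocated node, the callee must free it
+on every failure path, including the early validation return:
+
+    #include <stdlib.h>
+
+    struct qgroup_list_stub { struct qgroup_list_stub *next; };
+
+    static int add_relation_stub(int src_level, int dst_level,
+                                 struct qgroup_list_stub *prealloc)
+    {
+        if (src_level >= dst_level) {
+            free(prealloc);          /* the fix: no leak on -EINVAL */
+            return -1;
+        }
+        /* ... the normal path consumes or frees prealloc ... */
+        free(prealloc);
+        return 0;
+    }
+
+    int main(void)
+    {
+        struct qgroup_list_stub *p = calloc(1, sizeof(*p));
+
+        if (!p)
+            return 1;
+        return add_relation_stub(1, 1, p) ? 1 : 0;  /* invalid levels */
+    }
+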
--- /dev/null
+From f5cb87db44bfe4a42d5260a78a51295c1264ee27 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Oct 2025 09:35:27 +0000
+Subject: btrfs: mark dirty extent range for out of bound prealloc extents
+
+From: austinchang <austinchang@synology.com>
+
+[ Upstream commit 3b1a4a59a2086badab391687a6a0b86e03048393 ]
+
+In btrfs_fallocate(), when the allocated range overlaps with a prealloc
+extent and the extent starts after i_size, the range doesn't get marked
+dirty in file_extent_tree. This results in persisting an incorrect
+disk_i_size for the inode when not using the no-holes feature.
+
+The bug has been reproducible since commit 41a2ee75aab0 ("btrfs: introduce
+per-inode file extent tree"), was then hidden by commit 3d7db6e8bd22
+("btrfs: don't allocate file extent tree for non regular files") and became
+visible again after commit 8679d2687c35 ("btrfs: initialize
+inode::file_extent_tree after i_mode has been set"), which fixes the
+previous commit.
+
+The following reproducer triggers the problem:
+
+$ cat test.sh
+
+MNT=/mnt/test
+DEV=/dev/vdb
+
+mkdir -p $MNT
+
+mkfs.btrfs -f -O ^no-holes $DEV
+mount $DEV $MNT
+
+touch $MNT/file1
+fallocate -n -o 1M -l 2M $MNT/file1
+
+umount $MNT
+mount $DEV $MNT
+
+len=$((1 * 1024 * 1024))
+
+fallocate -o 1M -l $len $MNT/file1
+
+du --bytes $MNT/file1
+
+umount $MNT
+mount $DEV $MNT
+
+du --bytes $MNT/file1
+
+umount $MNT
+
+Running the reproducer gives the following result:
+
+$ ./test.sh
+(...)
+2097152 /mnt/test/file1
+1048576 /mnt/test/file1
+
+The difference is exactly the 1048576 bytes we allocated.
+
+Fix by adding a call to btrfs_inode_set_file_extent_range() in
+btrfs_fallocate_update_isize().
+
+Fixes: 41a2ee75aab0 ("btrfs: introduce per-inode file extent tree")
+Signed-off-by: austinchang <austinchang@synology.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/file.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 0e63603ac5c78..9ed2771f303c9 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2839,12 +2839,22 @@ static int btrfs_fallocate_update_isize(struct inode *inode,
+ {
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
++ u64 range_start;
++ u64 range_end;
+ int ret;
+ int ret2;
+
+ if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
+ return 0;
+
++ range_start = round_down(i_size_read(inode), root->fs_info->sectorsize);
++ range_end = round_up(end, root->fs_info->sectorsize);
++
++ ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), range_start,
++ range_end - range_start);
++ if (ret)
++ return ret;
++
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+--
+2.51.0
+
--- /dev/null
+From d39f367b11696beda8497781e28010ad306b3d6c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 19:40:48 +0800
+Subject: RDMA/hns: Fix recv CQ and QP cache affinity
+
+From: Chengchang Tang <tangchengchang@huawei.com>
+
+[ Upstream commit c4b67b514af8c2d73c64b36e0cd99e9b26b9ac82 ]
+
+Currently the driver enforces affinity between the QP cache and the
+send CQ cache, which helps improve send performance, but doesn't set
+affinity with the recv CQ cache, resulting in suboptimal receive
+performance.
+
+Use one CQ bank per context to ensure affinity among the QP, send CQ
+and recv CQ. For kernel ULPs, the CQ bank is fixed to 0.
+
+Fixes: 9e03dbea2b06 ("RDMA/hns: Fix CQ and QP cache affinity")
+Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20251016114051.1963197-2-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_cq.c | 58 +++++++++++++++++++--
+ drivers/infiniband/hw/hns/hns_roce_device.h | 4 ++
+ drivers/infiniband/hw/hns/hns_roce_main.c | 4 ++
+ 3 files changed, 63 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
+index 3a5c93c9fb3e6..6aa82fe9dd3df 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
+@@ -30,6 +30,7 @@
+ * SOFTWARE.
+ */
+
++#include <linux/pci.h>
+ #include <rdma/ib_umem.h>
+ #include <rdma/uverbs_ioctl.h>
+ #include "hns_roce_device.h"
+@@ -37,6 +38,43 @@
+ #include "hns_roce_hem.h"
+ #include "hns_roce_common.h"
+
++void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
++{
++ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
++ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
++
++ if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
++ return;
++
++ mutex_lock(&cq_table->bank_mutex);
++ cq_table->ctx_num[uctx->cq_bank_id]--;
++ mutex_unlock(&cq_table->bank_mutex);
++}
++
++void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
++{
++ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
++ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
++ u32 least_load = cq_table->ctx_num[0];
++ u8 bankid = 0;
++ u8 i;
++
++ if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
++ return;
++
++ mutex_lock(&cq_table->bank_mutex);
++ for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
++ if (cq_table->ctx_num[i] < least_load) {
++ least_load = cq_table->ctx_num[i];
++ bankid = i;
++ }
++ }
++ cq_table->ctx_num[bankid]++;
++ mutex_unlock(&cq_table->bank_mutex);
++
++ uctx->cq_bank_id = bankid;
++}
++
+ static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
+ {
+ u32 least_load = bank[0].inuse;
+@@ -55,7 +93,21 @@ static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
+ return bankid;
+ }
+
+-static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
++static u8 select_cq_bankid(struct hns_roce_dev *hr_dev,
++ struct hns_roce_bank *bank, struct ib_udata *udata)
++{
++ struct hns_roce_ucontext *uctx = udata ?
++ rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
++ ibucontext) : NULL;
++
++ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
++ return uctx ? uctx->cq_bank_id : 0;
++
++ return get_least_load_bankid_for_cq(bank);
++}
++
++static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
++ struct ib_udata *udata)
+ {
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct hns_roce_bank *bank;
+@@ -63,7 +115,7 @@ static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ int id;
+
+ mutex_lock(&cq_table->bank_mutex);
+- bankid = get_least_load_bankid_for_cq(cq_table->bank);
++ bankid = select_cq_bankid(hr_dev, cq_table->bank, udata);
+ bank = &cq_table->bank[bankid];
+
+ id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
+@@ -396,7 +448,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ goto err_cq_buf;
+ }
+
+- ret = alloc_cqn(hr_dev, hr_cq);
++ ret = alloc_cqn(hr_dev, hr_cq, udata);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
+ goto err_cq_db;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index cbe73d9ad5253..e184a715d1661 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -217,6 +217,7 @@ struct hns_roce_ucontext {
+ struct mutex page_mutex;
+ struct hns_user_mmap_entry *db_mmap_entry;
+ u32 config;
++ u8 cq_bank_id;
+ };
+
+ struct hns_roce_pd {
+@@ -505,6 +506,7 @@ struct hns_roce_cq_table {
+ struct hns_roce_hem_table table;
+ struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
+ struct mutex bank_mutex;
++ u32 ctx_num[HNS_ROCE_CQ_BANK_NUM];
+ };
+
+ struct hns_roce_srq_table {
+@@ -1303,5 +1305,7 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
+ size_t length,
+ enum hns_roce_mmap_type mmap_type);
+ bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl);
++void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
++void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
+
+ #endif /* _HNS_ROCE_DEVICE_H */
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index 11fa64044a8d8..ed9b5e5778ed8 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -425,6 +425,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
+ if (ret)
+ goto error_fail_copy_to_udata;
+
++ hns_roce_get_cq_bankid_for_uctx(context);
++
+ return 0;
+
+ error_fail_copy_to_udata:
+@@ -447,6 +449,8 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
+ struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
+
++ hns_roce_put_cq_bankid_for_uctx(context);
++
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
+ mutex_destroy(&context->page_mutex);
+--
+2.51.0
+
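+A standalone sketch (not the driver code) of the least-loaded bank
+selection used to assign each user context a CQ bank, assuming a small
+fixed bank count:
+
+    #include <stdio.h>
+
+    #define BANK_NUM 4
+
+    static unsigned int ctx_num[BANK_NUM];   /* contexts per bank */
+
+    /* Pick the bank serving the fewest contexts, mirroring the scan
+     * in hns_roce_get_cq_bankid_for_uctx(). */
+    static unsigned int pick_bank(void)
+    {
+        unsigned int least = ctx_num[0], bank = 0;
+        unsigned int i;
+
+        for (i = 1; i < BANK_NUM; i++) {
+            if (ctx_num[i] < least) {
+                least = ctx_num[i];
+                bank = i;
+            }
+        }
+        ctx_num[bank]++;
+        return bank;
+    }
+
+    int main(void)
+    {
+        int i;
+
+        for (i = 0; i < 6; i++)
+            printf("context %d -> bank %u\n", i, pick_bank());
+        return 0;
+    }
+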
--- /dev/null
+From 9426d3cfc483f4f7b3cabadc76b1092ae24d37e1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 19:40:49 +0800
+Subject: RDMA/hns: Fix the modification of max_send_sge
+
+From: wenglianfa <wenglianfa@huawei.com>
+
+[ Upstream commit f5a7cbea5411668d429eb4ffe96c4063fe8dac9e ]
+
+The actual sge number may exceed the value specified in init_attr->cap
+when HW needs extra sges to enable the inline feature. Since these
+extra sges are not expected by the ULP, return the user-specified value
+to the ULP instead of the expanded sge number.
+
+Fixes: 0c5e259b06a8 ("RDMA/hns: Fix incorrect sge nums calculation")
+Signed-off-by: wenglianfa <wenglianfa@huawei.com>
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20251016114051.1963197-3-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 8901c142c1b65..66d4c693694e9 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -662,7 +662,6 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
+
+ hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+ hr_qp->sq.wqe_cnt = cnt;
+- cap->max_send_sge = hr_qp->sq.max_gs;
+
+ return 0;
+ }
+@@ -744,7 +743,6 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
+
+ /* sync the parameters of kernel QP to user's configuration */
+ cap->max_send_wr = cnt;
+- cap->max_send_sge = hr_qp->sq.max_gs;
+
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From a670c28d24b7913722347c04b3b530935c20c092 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 19:40:50 +0800
+Subject: RDMA/hns: Fix wrong WQE data when QP wraps around
+
+From: Junxian Huang <huangjunxian6@hisilicon.com>
+
+[ Upstream commit fe9622011f955e35ba84d3af7b2f2fed31cf8ca1 ]
+
+When a QP wraps around, WQE data from the previous use of the same
+position remains, since the driver does not clear it. The WQE field
+layout differs across opcodes, so fields that are not explicitly
+assigned for the current opcode retain stale values and are issued
+to HW by mistake. The affected fields are:
+
+* MSG_START_SGE_IDX field in ATOMIC WQE
+* BLOCK_SIZE and ZBVA fields in FRMR WQE
+* DirectWQE fields when DirectWQE not used
+
+For the ATOMIC WQE, always set the latest sge index in
+MSG_START_SGE_IDX as required by HW.
+
+For the FRMR WQE and DirectWQE, clear only the unassigned fields
+instead of the entire WQE to avoid a performance penalty.
+
+Fixes: 68a997c5d28c ("RDMA/hns: Add FRMR support for hip08")
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20251016114051.1963197-4-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 6a6daca9f606c..f9356cb89497b 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -162,6 +162,8 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+ hr_reg_clear(fseg, FRMR_BLK_MODE);
++ hr_reg_clear(fseg, FRMR_BLOCK_SIZE);
++ hr_reg_clear(fseg, FRMR_ZBVA);
+ }
+
+ static void set_atomic_seg(const struct ib_send_wr *wr,
+@@ -336,9 +338,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ int j = 0;
+ int i;
+
+- hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
+- (*sge_ind) & (qp->sge.sge_cnt - 1));
+-
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
+ !!(wr->send_flags & IB_SEND_INLINE));
+ if (wr->send_flags & IB_SEND_INLINE)
+@@ -583,6 +582,9 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
++ curr_idx & (qp->sge.sge_cnt - 1));
++
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ if (msg_len != ATOMIC_WR_LEN)
+@@ -731,6 +733,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
+ owner_bit =
+ ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+
++ /* RC and UD share the same DirectWQE field layout */
++ ((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0;
++
+ /* Corresponding to the QP type, wqe process separately */
+ if (ibqp->qp_type == IB_QPT_RC)
+ ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+--
+2.51.0
+
--- /dev/null
+From db052dc439faf8b41f2306241e6dee4528cfec06 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 19:08:50 +0000
+Subject: RDMA/irdma: Fix SD index calculation
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 8d158f47f1f33d8747e80c3afbea5aa337e59d41 ]
+
+In some cases, it is possible for pble_rsrc->next_fpm_addr to be
+larger than what fits in a u32, so remove the u32 cast to avoid
+unintentional truncation.
+
+This fixes the following error that can be observed when registering
+massive memory regions:
+
+[ 447.227494] (NULL ib_device): cqp opcode = 0x1f maj_err_code = 0xffff min_err_code = 0x800c
+[ 447.227505] (NULL ib_device): [Update PE SDs Cmd Error][op_code=21] status=-5 waiting=1 completion_err=1 maj=0xffff min=0x800c
+
+Fixes: e8c4dbc2fcac ("RDMA/irdma: Add PBLE resource manager")
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Link: https://patch.msgid.link/20250923190850.1022773-1-jmoroni@google.com
+Acked-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/pble.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
+index e7ce6840755fd..f381b8d51f532 100644
+--- a/drivers/infiniband/hw/irdma/pble.c
++++ b/drivers/infiniband/hw/irdma/pble.c
+@@ -71,7 +71,7 @@ int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
+ static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct sd_pd_idx *idx)
+ {
+- idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
++ idx->sd_idx = pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
+ idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
+ idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
+ }
+--
+2.51.0
+
--- /dev/null
+From fa38325881312a7528cb227e7b2dc9e84d3842ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 14:21:28 +0000
+Subject: RDMA/irdma: Remove unused struct irdma_cq fields
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 880245fd029a8f8ee8fd557c2681d077c1b1a959 ]
+
+These fields were set but not used anywhere, so remove them.
+
+Link: https://patch.msgid.link/r/20250923142128.943240-1-jmoroni@google.com
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Stable-dep-of: 5575b7646b94 ("RDMA/irdma: Set irdma_cq cq_num field during CQ create")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/verbs.c | 3 ---
+ drivers/infiniband/hw/irdma/verbs.h | 6 ------
+ 2 files changed, 9 deletions(-)
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index eeb932e587303..ba844f4b1c21a 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -2116,8 +2116,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ goto cq_free_rsrc;
+ }
+
+- iwcq->iwpbl = iwpbl;
+- iwcq->cq_mem_size = 0;
+ cqmr = &iwpbl->cq_mr;
+
+ if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
+@@ -2132,7 +2130,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ err_code = -EPROTO;
+ goto cq_free_rsrc;
+ }
+- iwcq->iwpbl_shadow = iwpbl_shadow;
+ cqmr_shadow = &iwpbl_shadow->cq_mr;
+ info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
+ cqmr->split = true;
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index cfa140b36395a..4381e5dbe782a 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -115,21 +115,15 @@ struct irdma_mr {
+ struct irdma_cq {
+ struct ib_cq ibcq;
+ struct irdma_sc_cq sc_cq;
+- u16 cq_head;
+- u16 cq_size;
+ u16 cq_num;
+ bool user_mode;
+ atomic_t armed;
+ enum irdma_cmpl_notify last_notify;
+- u32 polled_cmpls;
+- u32 cq_mem_size;
+ struct irdma_dma_mem kmem;
+ struct irdma_dma_mem kmem_shadow;
+ struct completion free_cq;
+ refcount_t refcnt;
+ spinlock_t lock; /* for poll cq */
+- struct irdma_pbl *iwpbl;
+- struct irdma_pbl *iwpbl_shadow;
+ struct list_head resize_list;
+ struct irdma_cq_poll_info cur_cqe;
+ struct list_head cmpl_generated;
+--
+2.51.0
+
--- /dev/null
+From eb10946d1d109a10657e9dc5451f9cdc2dc78cb8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 14:24:39 +0000
+Subject: RDMA/irdma: Set irdma_cq cq_num field during CQ create
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 5575b7646b94c0afb0f4c0d86e00e13cf3397a62 ]
+
+The driver maintains a CQ table that is used to ensure that a CQ is
+still valid when processing CQ related AEs. When a CQ is destroyed,
+the table entry is cleared, using irdma_cq.cq_num as the index. This
+field was never set, so the driver was always clearing entry 0.
+
+Additionally, the cq_num field size was increased to accommodate HW
+supporting more than 64K CQs.
+
+Fixes: b48c24c2d710 ("RDMA/irdma: Implement device supported verb APIs")
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Link: https://patch.msgid.link/20250923142439.943930-1-jmoroni@google.com
+Acked-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/verbs.c | 1 +
+ drivers/infiniband/hw/irdma/verbs.h | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index ba844f4b1c21a..63d07fcab6569 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -2078,6 +2078,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ spin_lock_init(&iwcq->lock);
+ INIT_LIST_HEAD(&iwcq->resize_list);
+ INIT_LIST_HEAD(&iwcq->cmpl_generated);
++ iwcq->cq_num = cq_num;
+ info.dev = dev;
+ ukinfo->cq_size = max(entries, 4);
+ ukinfo->cq_id = cq_num;
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index 4381e5dbe782a..36ff8dd712f00 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -115,7 +115,7 @@ struct irdma_mr {
+ struct irdma_cq {
+ struct ib_cq ibcq;
+ struct irdma_sc_cq sc_cq;
+- u16 cq_num;
++ u32 cq_num;
+ bool user_mode;
+ atomic_t armed;
+ enum irdma_cmpl_notify last_notify;
+--
+2.51.0
+
char-misc-restrict-the-dynamic-range-to-exclude-rese.patch
drm-amd-display-add-fallback-path-for-ycbcr422.patch
acpica-update-dsmethod.c-to-get-rid-of-unused-variab.patch
+rdma-irdma-fix-sd-index-calculation.patch
+rdma-irdma-remove-unused-struct-irdma_cq-fields.patch
+rdma-irdma-set-irdma_cq-cq_num-field-during-cq-creat.patch
+rdma-hns-fix-recv-cq-and-qp-cache-affinity.patch
+rdma-hns-fix-the-modification-of-max_send_sge.patch
+rdma-hns-fix-wrong-wqe-data-when-qp-wraps-around.patch
+btrfs-fix-memory-leak-of-qgroup_list-in-btrfs_add_qg.patch
+btrfs-mark-dirty-extent-range-for-out-of-bound-preal.patch
--- /dev/null
+From 225f3c5c94229b24f396e1a28b684520638be7a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Oct 2025 01:30:21 +0530
+Subject: btrfs: fix memory leak of qgroup_list in btrfs_add_qgroup_relation
+
+From: Shardul Bankar <shardulsb08@gmail.com>
+
+[ Upstream commit f260c6aff0b8af236084012d14f9f1bf792ea883 ]
+
+When btrfs_add_qgroup_relation() is called with invalid qgroup levels
+(src >= dst), the function returns -EINVAL directly without freeing the
+preallocated qgroup_list structure passed by the caller. This causes a
+memory leak because the caller unconditionally sets the pointer to NULL
+after the call, preventing any cleanup.
+
+The issue occurs because the level validation check happens before the
+mutex is acquired and before any error handling path that would free
+the prealloc pointer. On this early return, the cleanup code at the
+'out' label (which includes kfree(prealloc)) is never reached.
+
+In btrfs_ioctl_qgroup_assign(), the code pattern is:
+
+ prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
+ ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst, prealloc);
+ prealloc = NULL; // Always set to NULL regardless of return value
+ ...
+ kfree(prealloc); // This becomes kfree(NULL), does nothing
+
+When the level check fails, 'prealloc' is never freed by either the
+callee or the caller, resulting in a 64-byte memory leak per failed
+operation. This can be triggered repeatedly by an unprivileged user
+with access to a writable btrfs mount, potentially exhausting kernel
+memory.
+
+Fix this by freeing prealloc before the early return, ensuring prealloc
+is always freed on all error paths.
+
+Fixes: 4addc1ffd67a ("btrfs: qgroup: preallocate memory before adding a relation")
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Shardul Bankar <shardulsb08@gmail.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/qgroup.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index da102da169fde..4958c6b324291 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1539,8 +1539,10 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst
+ ASSERT(prealloc);
+
+ /* Check the level of src and dst first */
+- if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
++ if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) {
++ kfree(prealloc);
+ return -EINVAL;
++ }
+
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
+ if (!fs_info->quota_root) {
+--
+2.51.0
+
--- /dev/null
+From 05d430775bb02f9dfdb5260e0d36bf4fffe67359 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Oct 2025 09:35:27 +0000
+Subject: btrfs: mark dirty extent range for out of bound prealloc extents
+
+From: austinchang <austinchang@synology.com>
+
+[ Upstream commit 3b1a4a59a2086badab391687a6a0b86e03048393 ]
+
+In btrfs_fallocate(), when the allocated range overlaps with a prealloc
+extent and the extent starts after i_size, the range doesn't get marked
+dirty in file_extent_tree. This results in persisting an incorrect
+disk_i_size for the inode when not using the no-holes feature.
+
+This has been reproducible since commit 41a2ee75aab0 ("btrfs: introduce
+per-inode file extent tree"), was then hidden by commit 3d7db6e8bd22
+("btrfs: don't allocate file extent tree for non regular files"), and
+became visible again after commit 8679d2687c35 ("btrfs: initialize
+inode::file_extent_tree after i_mode has been set"), which fixed the
+previous commit.
+
+The following reproducer triggers the problem:
+
+$ cat test.sh
+
+MNT=/mnt/test
+DEV=/dev/vdb
+
+mkdir -p $MNT
+
+mkfs.btrfs -f -O ^no-holes $DEV
+mount $DEV $MNT
+
+touch $MNT/file1
+fallocate -n -o 1M -l 2M $MNT/file1
+
+umount $MNT
+mount $DEV $MNT
+
+len=$((1 * 1024 * 1024))
+
+fallocate -o 1M -l $len $MNT/file1
+
+du --bytes $MNT/file1
+
+umount $MNT
+mount $DEV $MNT
+
+du --bytes $MNT/file1
+
+umount $MNT
+
+Running the reproducer gives the following result:
+
+$ ./test.sh
+(...)
+2097152 /mnt/test/file1
+1048576 /mnt/test/file1
+
+The difference is exactly the 1048576 bytes we allocated.
+
+Fix by adding a call to btrfs_inode_set_file_extent_range() in
+btrfs_fallocate_update_isize().
+
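+For reference, a small userspace sketch of the sector rounding the fix
+performs, assuming a 4096-byte sectorsize (the kernel's round_down()
+and round_up() macros are reimplemented here for power-of-two
+alignment):
+
+#include <stdint.h>
+#include <stdio.h>
+
+/* power-of-two rounding, mirroring the kernel's round_down()/round_up() */
+static uint64_t rdown(uint64_t x, uint64_t a) { return x & ~(a - 1); }
+static uint64_t rup(uint64_t x, uint64_t a) { return (x + a - 1) & ~(a - 1); }
+
+int main(void)
+{
+        uint64_t sectorsize = 4096;
+        uint64_t isize = 1048576 + 100;    /* i_size, not sector aligned */
+        uint64_t end = 2 * 1048576;        /* end of the fallocated range */
+
+        uint64_t range_start = rdown(isize, sectorsize);
+        uint64_t range_end = rup(end, sectorsize);
+
+        /* the whole [range_start, range_end) span gets marked dirty */
+        printf("start=%llu len=%llu\n",
+               (unsigned long long)range_start,
+               (unsigned long long)(range_end - range_start));
+        return 0;
+}
+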
+Fixes: 41a2ee75aab0 ("btrfs: introduce per-inode file extent tree")
+Signed-off-by: austinchang <austinchang@synology.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/file.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 204674934795c..9f6dcae252189 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2855,12 +2855,22 @@ static int btrfs_fallocate_update_isize(struct inode *inode,
+ {
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
++ u64 range_start;
++ u64 range_end;
+ int ret;
+ int ret2;
+
+ if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
+ return 0;
+
++ range_start = round_down(i_size_read(inode), root->fs_info->sectorsize);
++ range_end = round_up(end, root->fs_info->sectorsize);
++
++ ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), range_start,
++ range_end - range_start);
++ if (ret)
++ return ret;
++
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+--
+2.51.0
+
--- /dev/null
+From 3ec3bc8b9631a51b2cae5ef4f40e42b242bfedf1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Sep 2025 14:14:44 +0800
+Subject: RDMA/bnxt_re: Fix a potential memory leak in destroy_gsi_sqp
+
+From: YanLong Dai <daiyanlong@kylinos.cn>
+
+[ Upstream commit 88de89f184661ebb946804a5abdf2bdec7f0a7ab ]
+
+The current error handling path in bnxt_re_destroy_gsi_sqp() could lead
+to a resource leak. When bnxt_qplib_destroy_qp() fails, the function
+jumps to the 'fail' label and returns immediately, skipping the call
+to bnxt_qplib_free_qp_res().
+
+Continue the resource teardown even if bnxt_qplib_destroy_qp() fails,
+which aligns with the driver's general error handling strategy and
+prevents the potential leak.
+
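+The teardown pattern adopted here, in miniature (hypothetical names,
+not the bnxt_re API): log the hardware failure and keep releasing the
+remaining resources rather than returning early.
+
+#include <stdio.h>
+
+static int destroy_hw_object(void) { return -5; }  /* pretend the HW op fails */
+static void free_sw_resources(void) { puts("sw resources freed"); }
+
+static void teardown(void)
+{
+        int rc = destroy_hw_object();
+
+        if (rc)
+                fprintf(stderr, "destroy failed: %d\n", rc);   /* log only */
+
+        /* always reached: no early return, so nothing is leaked */
+        free_sw_resources();
+}
+
+int main(void)
+{
+        teardown();
+        return 0;
+}
+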
+Fixes: 8dae419f9ec73 ("RDMA/bnxt_re: Refactor queue pair creation code")
+Signed-off-by: YanLong Dai <daiyanlong@kylinos.cn>
+Link: https://patch.msgid.link/20250924061444.11288-1-daiyanlong@kylinos.cn
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/ib_verbs.c | 11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 260dc67b8b87c..12fee23de81e7 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -911,7 +911,7 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
+ spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
+ }
+
+-static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
++static void bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
+ {
+ struct bnxt_re_qp *gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
+@@ -931,10 +931,9 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
+
+ ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
+ rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+- if (rc) {
++ if (rc)
+ ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
+- goto fail;
+- }
++
+ bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+
+ /* remove from active qp list */
+@@ -949,10 +948,6 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
+ rdev->gsi_ctx.gsi_sqp = NULL;
+ rdev->gsi_ctx.gsi_sah = NULL;
+ rdev->gsi_ctx.sqp_tbl = NULL;
+-
+- return 0;
+-fail:
+- return rc;
+ }
+
+ /* Queue Pairs */
+--
+2.51.0
+
--- /dev/null
+From c0ce859aaf64198f91456c0d79a83c1e0f2149dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 19:40:48 +0800
+Subject: RDMA/hns: Fix recv CQ and QP cache affinity
+
+From: Chengchang Tang <tangchengchang@huawei.com>
+
+[ Upstream commit c4b67b514af8c2d73c64b36e0cd99e9b26b9ac82 ]
+
+Currently the driver enforces affinity between the QP cache and the
+send CQ cache, which helps improve send performance, but it doesn't
+set affinity with the recv CQ cache, resulting in suboptimal receive
+performance.
+
+Use one CQ bank per context to ensure affinity among the QP, the send
+CQ and the recv CQ. For kernel ULPs, the CQ bank is fixed to 0.
+
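+A minimal sketch of the balancing idea (hypothetical names): each new
+user context is pinned to the bank with the fewest contexts, and every
+CQ of that context then allocates from that same bank.
+
+#include <stdio.h>
+
+#define BANK_NUM 4
+
+static unsigned int ctx_num[BANK_NUM];   /* contexts using each bank */
+
+/* pick the least-loaded bank once per context; all its CQs reuse it */
+static unsigned int get_bankid_for_ctx(void)
+{
+        unsigned int bankid = 0;
+        unsigned int i;
+
+        for (i = 1; i < BANK_NUM; i++)
+                if (ctx_num[i] < ctx_num[bankid])
+                        bankid = i;
+        ctx_num[bankid]++;
+        return bankid;
+}
+
+int main(void)
+{
+        int c;
+
+        for (c = 0; c < 6; c++)
+                printf("ctx %d -> bank %u\n", c, get_bankid_for_ctx());
+        return 0;
+}
+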
+Fixes: 9e03dbea2b06 ("RDMA/hns: Fix CQ and QP cache affinity")
+Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20251016114051.1963197-2-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_cq.c | 58 +++++++++++++++++++--
+ drivers/infiniband/hw/hns/hns_roce_device.h | 4 ++
+ drivers/infiniband/hw/hns/hns_roce_main.c | 4 ++
+ 3 files changed, 63 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
+index 3a5c93c9fb3e6..6aa82fe9dd3df 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
+@@ -30,6 +30,7 @@
+ * SOFTWARE.
+ */
+
++#include <linux/pci.h>
+ #include <rdma/ib_umem.h>
+ #include <rdma/uverbs_ioctl.h>
+ #include "hns_roce_device.h"
+@@ -37,6 +38,43 @@
+ #include "hns_roce_hem.h"
+ #include "hns_roce_common.h"
+
++void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
++{
++ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
++ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
++
++ if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
++ return;
++
++ mutex_lock(&cq_table->bank_mutex);
++ cq_table->ctx_num[uctx->cq_bank_id]--;
++ mutex_unlock(&cq_table->bank_mutex);
++}
++
++void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
++{
++ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
++ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
++ u32 least_load = cq_table->ctx_num[0];
++ u8 bankid = 0;
++ u8 i;
++
++ if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
++ return;
++
++ mutex_lock(&cq_table->bank_mutex);
++ for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
++ if (cq_table->ctx_num[i] < least_load) {
++ least_load = cq_table->ctx_num[i];
++ bankid = i;
++ }
++ }
++ cq_table->ctx_num[bankid]++;
++ mutex_unlock(&cq_table->bank_mutex);
++
++ uctx->cq_bank_id = bankid;
++}
++
+ static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
+ {
+ u32 least_load = bank[0].inuse;
+@@ -55,7 +93,21 @@ static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
+ return bankid;
+ }
+
+-static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
++static u8 select_cq_bankid(struct hns_roce_dev *hr_dev,
++ struct hns_roce_bank *bank, struct ib_udata *udata)
++{
++ struct hns_roce_ucontext *uctx = udata ?
++ rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
++ ibucontext) : NULL;
++
++ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
++ return uctx ? uctx->cq_bank_id : 0;
++
++ return get_least_load_bankid_for_cq(bank);
++}
++
++static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
++ struct ib_udata *udata)
+ {
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct hns_roce_bank *bank;
+@@ -63,7 +115,7 @@ static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ int id;
+
+ mutex_lock(&cq_table->bank_mutex);
+- bankid = get_least_load_bankid_for_cq(cq_table->bank);
++ bankid = select_cq_bankid(hr_dev, cq_table->bank, udata);
+ bank = &cq_table->bank[bankid];
+
+ id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
+@@ -396,7 +448,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ goto err_cq_buf;
+ }
+
+- ret = alloc_cqn(hr_dev, hr_cq);
++ ret = alloc_cqn(hr_dev, hr_cq, udata);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
+ goto err_cq_db;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index 78ee04a48a74a..06832c0ac0556 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -217,6 +217,7 @@ struct hns_roce_ucontext {
+ struct mutex page_mutex;
+ struct hns_user_mmap_entry *db_mmap_entry;
+ u32 config;
++ u8 cq_bank_id;
+ };
+
+ struct hns_roce_pd {
+@@ -495,6 +496,7 @@ struct hns_roce_cq_table {
+ struct hns_roce_hem_table table;
+ struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
+ struct mutex bank_mutex;
++ u32 ctx_num[HNS_ROCE_CQ_BANK_NUM];
+ };
+
+ struct hns_roce_srq_table {
+@@ -1305,5 +1307,7 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
+ size_t length,
+ enum hns_roce_mmap_type mmap_type);
+ bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl);
++void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
++void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
+
+ #endif /* _HNS_ROCE_DEVICE_H */
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index d50f36f8a1107..f3607fe107a7f 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -425,6 +425,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
+ if (ret)
+ goto error_fail_copy_to_udata;
+
++ hns_roce_get_cq_bankid_for_uctx(context);
++
+ return 0;
+
+ error_fail_copy_to_udata:
+@@ -447,6 +449,8 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
+ struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
+
++ hns_roce_put_cq_bankid_for_uctx(context);
++
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
+ mutex_destroy(&context->page_mutex);
+--
+2.51.0
+
--- /dev/null
+From eb34d5a929c57a1e6121a10702e44df7be3a35f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 19:40:49 +0800
+Subject: RDMA/hns: Fix the modification of max_send_sge
+
+From: wenglianfa <wenglianfa@huawei.com>
+
+[ Upstream commit f5a7cbea5411668d429eb4ffe96c4063fe8dac9e ]
+
+The actual SGE number may exceed the value specified in init_attr->cap
+when HW needs extra SGEs to enable the inline feature. Since these
+extra SGEs are not expected by the ULP, return the user-specified
+value to the ULP instead of the expanded SGE number.
+
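+The idea in miniature (hypothetical names): the driver may still size
+the queue with an extra SGE for inline support, but the value echoed
+back to the ULP stays what the ULP asked for.
+
+#include <stdio.h>
+
+struct sq_cfg { unsigned int max_gs; };   /* internal, possibly expanded */
+
+static void set_sq_size(struct sq_cfg *sq, unsigned int requested_sge,
+                        unsigned int *reported_sge)
+{
+        /* HW wants one extra SGE to support the inline feature */
+        sq->max_gs = requested_sge + 1;
+
+        /* report the user-specified value, not the expanded one */
+        *reported_sge = requested_sge;
+}
+
+int main(void)
+{
+        struct sq_cfg sq;
+        unsigned int cap = 0;
+
+        set_sq_size(&sq, 4, &cap);
+        printf("internal max_gs=%u, reported max_send_sge=%u\n",
+               sq.max_gs, cap);
+        return 0;
+}
+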
+Fixes: 0c5e259b06a8 ("RDMA/hns: Fix incorrect sge nums calculation")
+Signed-off-by: wenglianfa <wenglianfa@huawei.com>
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20251016114051.1963197-3-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 6ff1b8ce580c5..bdd879ac12dda 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -662,7 +662,6 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
+
+ hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+ hr_qp->sq.wqe_cnt = cnt;
+- cap->max_send_sge = hr_qp->sq.max_gs;
+
+ return 0;
+ }
+@@ -744,7 +743,6 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
+
+ /* sync the parameters of kernel QP to user's configuration */
+ cap->max_send_wr = cnt;
+- cap->max_send_sge = hr_qp->sq.max_gs;
+
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From 8d6bfb55357600aa05ba675e182136a86297b3d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 19:40:50 +0800
+Subject: RDMA/hns: Fix wrong WQE data when QP wraps around
+
+From: Junxian Huang <huangjunxian6@hisilicon.com>
+
+[ Upstream commit fe9622011f955e35ba84d3af7b2f2fed31cf8ca1 ]
+
+When a QP wraps around, WQE data from the previous use of the same
+position still remains, as the driver does not clear it. The WQE
+field layout differs across opcodes, so fields that are not
+explicitly assigned for the current opcode retain stale values and
+are issued to HW by mistake. Such fields are as follows:
+
+* MSG_START_SGE_IDX field in ATOMIC WQE
+* BLOCK_SIZE and ZBVA fields in FRMR WQE
+* DirectWQE fields when DirectWQE not used
+
+For ATOMIC WQE, always set the latest SGE index in MSG_START_SGE_IDX,
+as required by HW.
+
+For FRMR WQE and DirectWQE, clear only the unassigned fields instead
+of clearing the entire WQE, to avoid a performance penalty.
+
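+A compact illustration of the hazard (hypothetical field layout): two
+opcodes share a slot in a ring buffer but assign different fields, so
+a field skipped by the second opcode silently keeps the first opcode's
+value unless it is cleared.
+
+#include <stdio.h>
+
+struct wqe { unsigned int op, field_a, field_b; };  /* field_b: op 1 only */
+
+static struct wqe ring[2];   /* tiny ring, wraps after two posts */
+
+static void post(unsigned int idx, unsigned int op)
+{
+        struct wqe *w = &ring[idx % 2];
+
+        w->op = op;
+        w->field_a = 42;
+        if (op == 1)
+                w->field_b = 7;
+        else
+                w->field_b = 0;   /* the fix: clear fields this op skips */
+}
+
+int main(void)
+{
+        post(0, 1);   /* writes field_b = 7 */
+        post(2, 0);   /* wraps around to the same slot */
+        printf("field_b seen by HW: %u\n", ring[0].field_b);   /* 0, not 7 */
+        return 0;
+}
+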
+Fixes: 68a997c5d28c ("RDMA/hns: Add FRMR support for hip08")
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20251016114051.1963197-4-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index f82bdd46a9174..ab378525b296a 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -165,6 +165,8 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+ hr_reg_clear(fseg, FRMR_BLK_MODE);
++ hr_reg_clear(fseg, FRMR_BLOCK_SIZE);
++ hr_reg_clear(fseg, FRMR_ZBVA);
+ }
+
+ static void set_atomic_seg(const struct ib_send_wr *wr,
+@@ -339,9 +341,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ int j = 0;
+ int i;
+
+- hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
+- (*sge_ind) & (qp->sge.sge_cnt - 1));
+-
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
+ !!(wr->send_flags & IB_SEND_INLINE));
+ if (wr->send_flags & IB_SEND_INLINE)
+@@ -586,6 +585,9 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
++ curr_idx & (qp->sge.sge_cnt - 1));
++
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ if (msg_len != ATOMIC_WR_LEN)
+@@ -734,6 +736,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
+ owner_bit =
+ ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+
++ /* RC and UD share the same DirectWQE field layout */
++ ((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0;
++
+ /* Corresponding to the QP type, wqe process separately */
+ if (ibqp->qp_type == IB_QPT_RC)
+ ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+--
+2.51.0
+
--- /dev/null
+From bb00cbc970e7bd7d4ca2193210fef31dba33d521 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 19:08:50 +0000
+Subject: RDMA/irdma: Fix SD index calculation
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 8d158f47f1f33d8747e80c3afbea5aa337e59d41 ]
+
+In some cases, it is possible for pble_rsrc->next_fpm_addr to be
+larger than u32, so remove the u32 cast to avoid unintentional
+truncation.
+
+This fixes the following error that can be observed when registering
+massive memory regions:
+
+[ 447.227494] (NULL ib_device): cqp opcode = 0x1f maj_err_code = 0xffff min_err_code = 0x800c
+[ 447.227505] (NULL ib_device): [Update PE SDs Cmd Error][op_code=21] status=-5 waiting=1 completion_err=1 maj=0xffff min=0x800c
+
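+The truncation is easy to demonstrate in isolation: casting the 64-bit
+address to u32 before the division drops the high bits, so the
+computed index wraps once the address crosses 4 GiB (the block size
+below is illustrative, not the real IRDMA constant):
+
+#include <stdint.h>
+#include <stdio.h>
+
+#define DIRECT_BP_SIZE (2ULL * 1024 * 1024)   /* illustrative block size */
+
+int main(void)
+{
+        uint64_t next_fpm_addr = 5ULL * 1024 * 1024 * 1024;   /* 5 GiB */
+
+        uint64_t bad = (uint32_t)next_fpm_addr / DIRECT_BP_SIZE; /* truncated */
+        uint64_t good = next_fpm_addr / DIRECT_BP_SIZE;
+
+        printf("bad sd_idx=%llu good sd_idx=%llu\n",
+               (unsigned long long)bad, (unsigned long long)good);
+        return 0;
+}
+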
+Fixes: e8c4dbc2fcac ("RDMA/irdma: Add PBLE resource manager")
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Link: https://patch.msgid.link/20250923190850.1022773-1-jmoroni@google.com
+Acked-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/pble.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
+index 37ce35cb10e74..24f455e6dbbc8 100644
+--- a/drivers/infiniband/hw/irdma/pble.c
++++ b/drivers/infiniband/hw/irdma/pble.c
+@@ -71,7 +71,7 @@ int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
+ static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct sd_pd_idx *idx)
+ {
+- idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
++ idx->sd_idx = pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
+ idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
+ idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
+ }
+--
+2.51.0
+
--- /dev/null
+From 77485afd15171687a62d9f10c1a80624a543de76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 14:21:28 +0000
+Subject: RDMA/irdma: Remove unused struct irdma_cq fields
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 880245fd029a8f8ee8fd557c2681d077c1b1a959 ]
+
+These fields were set but not used anywhere, so remove them.
+
+Link: https://patch.msgid.link/r/20250923142128.943240-1-jmoroni@google.com
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Stable-dep-of: 5575b7646b94 ("RDMA/irdma: Set irdma_cq cq_num field during CQ create")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/verbs.c | 3 ---
+ drivers/infiniband/hw/irdma/verbs.h | 6 ------
+ 2 files changed, 9 deletions(-)
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index da5a41b275d83..105ffb1764b80 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -2116,8 +2116,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ goto cq_free_rsrc;
+ }
+
+- iwcq->iwpbl = iwpbl;
+- iwcq->cq_mem_size = 0;
+ cqmr = &iwpbl->cq_mr;
+
+ if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
+@@ -2132,7 +2130,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ err_code = -EPROTO;
+ goto cq_free_rsrc;
+ }
+- iwcq->iwpbl_shadow = iwpbl_shadow;
+ cqmr_shadow = &iwpbl_shadow->cq_mr;
+ info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
+ cqmr->split = true;
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index cfa140b36395a..4381e5dbe782a 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -115,21 +115,15 @@ struct irdma_mr {
+ struct irdma_cq {
+ struct ib_cq ibcq;
+ struct irdma_sc_cq sc_cq;
+- u16 cq_head;
+- u16 cq_size;
+ u16 cq_num;
+ bool user_mode;
+ atomic_t armed;
+ enum irdma_cmpl_notify last_notify;
+- u32 polled_cmpls;
+- u32 cq_mem_size;
+ struct irdma_dma_mem kmem;
+ struct irdma_dma_mem kmem_shadow;
+ struct completion free_cq;
+ refcount_t refcnt;
+ spinlock_t lock; /* for poll cq */
+- struct irdma_pbl *iwpbl;
+- struct irdma_pbl *iwpbl_shadow;
+ struct list_head resize_list;
+ struct irdma_cq_poll_info cur_cqe;
+ struct list_head cmpl_generated;
+--
+2.51.0
+
--- /dev/null
+From 03a73728572e08d4863bfe17d5ca8f67456c10c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 14:24:39 +0000
+Subject: RDMA/irdma: Set irdma_cq cq_num field during CQ create
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 5575b7646b94c0afb0f4c0d86e00e13cf3397a62 ]
+
+The driver maintains a CQ table that is used to ensure that a CQ is
+still valid when processing CQ-related AEs. When a CQ is destroyed,
+the table entry is cleared, using irdma_cq.cq_num as the index. This
+field was never set, so the driver was always clearing out entry 0.
+
+Additionally, the cq_num field size was increased to accommodate HW
+supporting more than 64K CQs.
+
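+A reduced model of the bug (hypothetical names): destruction clears
+cq_table[cq->cq_num], so leaving cq_num at its zero default means
+every destroy clears entry 0 while stale pointers survive at the real
+indices.
+
+#include <stddef.h>
+#include <stdio.h>
+
+#define CQ_TABLE_SIZE 8
+
+struct cq { unsigned int cq_num; };
+
+static struct cq *cq_table[CQ_TABLE_SIZE];
+
+static void create_cq(struct cq *cq, unsigned int num)
+{
+        cq->cq_num = num;   /* the fix: remember our own table index */
+        cq_table[num] = cq;
+}
+
+static void destroy_cq(struct cq *cq)
+{
+        cq_table[cq->cq_num] = NULL;   /* without the fix: always entry 0 */
+}
+
+int main(void)
+{
+        struct cq a;
+
+        create_cq(&a, 3);
+        destroy_cq(&a);
+        printf("entry 3 %s\n", cq_table[3] ? "still set (stale)" : "cleared");
+        return 0;
+}
+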
+Fixes: b48c24c2d710 ("RDMA/irdma: Implement device supported verb APIs")
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Link: https://patch.msgid.link/20250923142439.943930-1-jmoroni@google.com
+Acked-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/verbs.c | 1 +
+ drivers/infiniband/hw/irdma/verbs.h | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 105ffb1764b80..eb4683b248af9 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -2078,6 +2078,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ spin_lock_init(&iwcq->lock);
+ INIT_LIST_HEAD(&iwcq->resize_list);
+ INIT_LIST_HEAD(&iwcq->cmpl_generated);
++ iwcq->cq_num = cq_num;
+ info.dev = dev;
+ ukinfo->cq_size = max(entries, 4);
+ ukinfo->cq_id = cq_num;
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index 4381e5dbe782a..36ff8dd712f00 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -115,7 +115,7 @@ struct irdma_mr {
+ struct irdma_cq {
+ struct ib_cq ibcq;
+ struct irdma_sc_cq sc_cq;
+- u16 cq_num;
++ u32 cq_num;
+ bool user_mode;
+ atomic_t armed;
+ enum irdma_cmpl_notify last_notify;
+--
+2.51.0
+
--- /dev/null
+From 9095a95a9bd784eeea2b8f9499b01b834bc9b4d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Oct 2025 10:55:17 +0800
+Subject: RDMA/uverbs: Fix umem release in UVERBS_METHOD_CQ_CREATE
+
+From: Shuhao Fu <sfual@cse.ust.hk>
+
+[ Upstream commit d8713158faad0fd4418cb2f4e432c3876ad53a1f ]
+
+In `UVERBS_METHOD_CQ_CREATE`, the umem should be released if anything
+goes wrong. Currently, if `create_cq_umem` fails, the umem is neither
+released nor referenced, causing a possible leak.
+
+With this patch, the umem is released in `UVERBS_METHOD_CQ_CREATE`;
+the driver should not release it if it returns an error code.
+
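+A minimal sketch of the ownership convention being established
+(hypothetical names): the driver hook leaves the umem alone when it
+fails, and the single caller releases it on any error.
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+struct umem { int mapped; };
+
+static void umem_release(struct umem *u)
+{
+        free(u);
+        puts("umem released");
+}
+
+/* driver hook: on failure it must NOT release the umem it was given */
+static int driver_create_cq(struct umem *u)
+{
+        (void)u;
+        return -EINVAL;   /* simulate a validation failure */
+}
+
+int main(void)
+{
+        struct umem *u = calloc(1, sizeof(*u));
+        int ret = driver_create_cq(u);
+
+        if (ret)
+                umem_release(u);   /* exactly one owner of the error path */
+        return ret ? 1 : 0;
+}
+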
+Fixes: 1a40c362ae26 ("RDMA/uverbs: Add a common way to create CQ with umem")
+Signed-off-by: Shuhao Fu <sfual@cse.ust.hk>
+Link: https://patch.msgid.link/aOh1le4YqtYwj-hH@osx.local
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/uverbs_std_types_cq.c | 1 +
+ drivers/infiniband/hw/efa/efa_verbs.c | 16 +++++++---------
+ 2 files changed, 8 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
+index 37cd375565104..fab5d914029dd 100644
+--- a/drivers/infiniband/core/uverbs_std_types_cq.c
++++ b/drivers/infiniband/core/uverbs_std_types_cq.c
+@@ -206,6 +206,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
+ return ret;
+
+ err_free:
++ ib_umem_release(umem);
+ rdma_restrack_put(&cq->res);
+ kfree(cq);
+ err_event_file:
+diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
+index 886923d5fe506..542d25e191ea6 100644
+--- a/drivers/infiniband/hw/efa/efa_verbs.c
++++ b/drivers/infiniband/hw/efa/efa_verbs.c
+@@ -1216,13 +1216,13 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ if (umem->length < cq->size) {
+ ibdev_dbg(&dev->ibdev, "External memory too small\n");
+ err = -EINVAL;
+- goto err_free_mem;
++ goto err_out;
+ }
+
+ if (!ib_umem_is_contiguous(umem)) {
+ ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n");
+ err = -EINVAL;
+- goto err_free_mem;
++ goto err_out;
+ }
+
+ cq->cpu_addr = NULL;
+@@ -1251,7 +1251,7 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+
+ err = efa_com_create_cq(&dev->edev, ¶ms, &result);
+ if (err)
+- goto err_free_mem;
++ goto err_free_mapped;
+
+ resp.db_off = result.db_off;
+ resp.cq_idx = result.cq_idx;
+@@ -1299,12 +1299,10 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ efa_cq_user_mmap_entries_remove(cq);
+ err_destroy_cq:
+ efa_destroy_cq_idx(dev, cq->cq_idx);
+-err_free_mem:
+- if (umem)
+- ib_umem_release(umem);
+- else
+- efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
+-
++err_free_mapped:
++ if (!umem)
++ efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
++ DMA_FROM_DEVICE);
+ err_out:
+ atomic64_inc(&dev->stats.create_cq_err);
+ return err;
+--
+2.51.0
+
usb-xhci-pci-fix-usb2-only-root-hub-registration.patch
drm-amd-display-add-fallback-path-for-ycbcr422.patch
acpica-update-dsmethod.c-to-get-rid-of-unused-variab.patch
+rdma-bnxt_re-fix-a-potential-memory-leak-in-destroy_.patch
+rdma-irdma-fix-sd-index-calculation.patch
+rdma-irdma-remove-unused-struct-irdma_cq-fields.patch
+rdma-irdma-set-irdma_cq-cq_num-field-during-cq-creat.patch
+rdma-uverbs-fix-umem-release-in-uverbs_method_cq_cre.patch
+rdma-hns-fix-recv-cq-and-qp-cache-affinity.patch
+rdma-hns-fix-the-modification-of-max_send_sge.patch
+rdma-hns-fix-wrong-wqe-data-when-qp-wraps-around.patch
+btrfs-fix-memory-leak-of-qgroup_list-in-btrfs_add_qg.patch
+btrfs-mark-dirty-extent-range-for-out-of-bound-preal.patch
--- /dev/null
+From 292847ecd9093c460f6631facf3416d6a267dc62 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Oct 2025 09:35:27 +0000
+Subject: btrfs: mark dirty extent range for out of bound prealloc extents
+
+From: austinchang <austinchang@synology.com>
+
+[ Upstream commit 3b1a4a59a2086badab391687a6a0b86e03048393 ]
+
+In btrfs_fallocate(), when the allocated range overlaps with a prealloc
+extent and the extent starts after i_size, the range doesn't get marked
+dirty in file_extent_tree. This results in persisting an incorrect
+disk_i_size for the inode when not using the no-holes feature.
+
+This has been reproducible since commit 41a2ee75aab0 ("btrfs: introduce
+per-inode file extent tree"), was then hidden by commit 3d7db6e8bd22
+("btrfs: don't allocate file extent tree for non regular files"), and
+became visible again after commit 8679d2687c35 ("btrfs: initialize
+inode::file_extent_tree after i_mode has been set"), which fixed the
+previous commit.
+
+The following reproducer triggers the problem:
+
+$ cat test.sh
+
+MNT=/mnt/test
+DEV=/dev/vdb
+
+mkdir -p $MNT
+
+mkfs.btrfs -f -O ^no-holes $DEV
+mount $DEV $MNT
+
+touch $MNT/file1
+fallocate -n -o 1M -l 2M $MNT/file1
+
+umount $MNT
+mount $DEV $MNT
+
+len=$((1 * 1024 * 1024))
+
+fallocate -o 1M -l $len $MNT/file1
+
+du --bytes $MNT/file1
+
+umount $MNT
+mount $DEV $MNT
+
+du --bytes $MNT/file1
+
+umount $MNT
+
+Running the reproducer gives the following result:
+
+$ ./test.sh
+(...)
+2097152 /mnt/test/file1
+1048576 /mnt/test/file1
+
+The difference is exactly the 1048576 bytes we allocated.
+
+Fix by adding a call to btrfs_inode_set_file_extent_range() in
+btrfs_fallocate_update_isize().
+
+Fixes: 41a2ee75aab0 ("btrfs: introduce per-inode file extent tree")
+Signed-off-by: austinchang <austinchang@synology.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/file.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index e794606e7c780..9ef543db8aab9 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2825,12 +2825,22 @@ static int btrfs_fallocate_update_isize(struct inode *inode,
+ {
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
++ u64 range_start;
++ u64 range_end;
+ int ret;
+ int ret2;
+
+ if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
+ return 0;
+
++ range_start = round_down(i_size_read(inode), root->fs_info->sectorsize);
++ range_end = round_up(end, root->fs_info->sectorsize);
++
++ ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), range_start,
++ range_end - range_start);
++ if (ret)
++ return ret;
++
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+--
+2.51.0
+
--- /dev/null
+From 1ee629196eec3bf4fb95b52b9a9b81f88cb69b2b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 19:40:49 +0800
+Subject: RDMA/hns: Fix the modification of max_send_sge
+
+From: wenglianfa <wenglianfa@huawei.com>
+
+[ Upstream commit f5a7cbea5411668d429eb4ffe96c4063fe8dac9e ]
+
+The actual SGE number may exceed the value specified in init_attr->cap
+when HW needs extra SGEs to enable the inline feature. Since these
+extra SGEs are not expected by the ULP, return the user-specified
+value to the ULP instead of the expanded SGE number.
+
+Fixes: 0c5e259b06a8 ("RDMA/hns: Fix incorrect sge nums calculation")
+Signed-off-by: wenglianfa <wenglianfa@huawei.com>
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20251016114051.1963197-3-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 0cad6fc7bf32c..26784b296ffa6 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -654,7 +654,6 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
+
+ hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+ hr_qp->sq.wqe_cnt = cnt;
+- cap->max_send_sge = hr_qp->sq.max_gs;
+
+ return 0;
+ }
+@@ -736,7 +735,6 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
+
+ /* sync the parameters of kernel QP to user's configuration */
+ cap->max_send_wr = cnt;
+- cap->max_send_sge = hr_qp->sq.max_gs;
+
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From e881456a569d4505980c484fda36f3b29473e1a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 19:40:50 +0800
+Subject: RDMA/hns: Fix wrong WQE data when QP wraps around
+
+From: Junxian Huang <huangjunxian6@hisilicon.com>
+
+[ Upstream commit fe9622011f955e35ba84d3af7b2f2fed31cf8ca1 ]
+
+When a QP wraps around, WQE data from the previous use of the same
+position still remains, as the driver does not clear it. The WQE
+field layout differs across opcodes, so fields that are not
+explicitly assigned for the current opcode retain stale values and
+are issued to HW by mistake. Such fields are as follows:
+
+* MSG_START_SGE_IDX field in ATOMIC WQE
+* BLOCK_SIZE and ZBVA fields in FRMR WQE
+* DirectWQE fields when DirectWQE not used
+
+For ATOMIC WQE, always set the latest SGE index in MSG_START_SGE_IDX,
+as required by HW.
+
+For FRMR WQE and DirectWQE, clear only the unassigned fields instead
+of clearing the entire WQE, to avoid a performance penalty.
+
+Fixes: 68a997c5d28c ("RDMA/hns: Add FRMR support for hip08")
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20251016114051.1963197-4-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 4a10b826d15a3..f1d4494c7d008 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -161,6 +161,8 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+ hr_reg_clear(fseg, FRMR_BLK_MODE);
++ hr_reg_clear(fseg, FRMR_BLOCK_SIZE);
++ hr_reg_clear(fseg, FRMR_ZBVA);
+ }
+
+ static void set_atomic_seg(const struct ib_send_wr *wr,
+@@ -335,9 +337,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ int j = 0;
+ int i;
+
+- hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
+- (*sge_ind) & (qp->sge.sge_cnt - 1));
+-
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
+ !!(wr->send_flags & IB_SEND_INLINE));
+ if (wr->send_flags & IB_SEND_INLINE)
+@@ -586,6 +585,9 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
++ curr_idx & (qp->sge.sge_cnt - 1));
++
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ if (msg_len != ATOMIC_WR_LEN)
+@@ -734,6 +736,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
+ owner_bit =
+ ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+
++ /* RC and UD share the same DirectWQE field layout */
++ ((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0;
++
+ /* Corresponding to the QP type, wqe process separately */
+ if (ibqp->qp_type == IB_QPT_RC)
+ ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+--
+2.51.0
+
--- /dev/null
+From 27867be563235724ce509d3ceaebc68d473b33c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 19:08:50 +0000
+Subject: RDMA/irdma: Fix SD index calculation
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 8d158f47f1f33d8747e80c3afbea5aa337e59d41 ]
+
+In some cases, it is possible for pble_rsrc->next_fpm_addr to be
+larger than u32, so remove the u32 cast to avoid unintentional
+truncation.
+
+This fixes the following error that can be observed when registering
+massive memory regions:
+
+[ 447.227494] (NULL ib_device): cqp opcode = 0x1f maj_err_code = 0xffff min_err_code = 0x800c
+[ 447.227505] (NULL ib_device): [Update PE SDs Cmd Error][op_code=21] status=-5 waiting=1 completion_err=1 maj=0xffff min=0x800c
+
+Fixes: e8c4dbc2fcac ("RDMA/irdma: Add PBLE resource manager")
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Link: https://patch.msgid.link/20250923190850.1022773-1-jmoroni@google.com
+Acked-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/pble.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
+index c0bef11436b94..fa096557adc83 100644
+--- a/drivers/infiniband/hw/irdma/pble.c
++++ b/drivers/infiniband/hw/irdma/pble.c
+@@ -71,7 +71,7 @@ int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
+ static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct sd_pd_idx *idx)
+ {
+- idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
++ idx->sd_idx = pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
+ idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
+ idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
+ }
+--
+2.51.0
+
--- /dev/null
+From 214d25afe280e52b16350fc71eba1bebe31cb3fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 14:21:28 +0000
+Subject: RDMA/irdma: Remove unused struct irdma_cq fields
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 880245fd029a8f8ee8fd557c2681d077c1b1a959 ]
+
+These fields were set but not used anywhere, so remove them.
+
+Link: https://patch.msgid.link/r/20250923142128.943240-1-jmoroni@google.com
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Stable-dep-of: 5575b7646b94 ("RDMA/irdma: Set irdma_cq cq_num field during CQ create")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/verbs.c | 3 ---
+ drivers/infiniband/hw/irdma/verbs.h | 6 ------
+ 2 files changed, 9 deletions(-)
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 38cecb28d322e..7243255c224f4 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -2115,8 +2115,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ goto cq_free_rsrc;
+ }
+
+- iwcq->iwpbl = iwpbl;
+- iwcq->cq_mem_size = 0;
+ cqmr = &iwpbl->cq_mr;
+
+ if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
+@@ -2131,7 +2129,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ err_code = -EPROTO;
+ goto cq_free_rsrc;
+ }
+- iwcq->iwpbl_shadow = iwpbl_shadow;
+ cqmr_shadow = &iwpbl_shadow->cq_mr;
+ info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
+ cqmr->split = true;
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index 20297a14c9a61..50c2610d1cbfb 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -113,21 +113,15 @@ struct irdma_mr {
+ struct irdma_cq {
+ struct ib_cq ibcq;
+ struct irdma_sc_cq sc_cq;
+- u16 cq_head;
+- u16 cq_size;
+ u16 cq_num;
+ bool user_mode;
+ atomic_t armed;
+ enum irdma_cmpl_notify last_notify;
+- u32 polled_cmpls;
+- u32 cq_mem_size;
+ struct irdma_dma_mem kmem;
+ struct irdma_dma_mem kmem_shadow;
+ struct completion free_cq;
+ refcount_t refcnt;
+ spinlock_t lock; /* for poll cq */
+- struct irdma_pbl *iwpbl;
+- struct irdma_pbl *iwpbl_shadow;
+ struct list_head resize_list;
+ struct irdma_cq_poll_info cur_cqe;
+ struct list_head cmpl_generated;
+--
+2.51.0
+
--- /dev/null
+From 117e2de256feaa0db98bde4fc8bc639691d7e068 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 14:24:39 +0000
+Subject: RDMA/irdma: Set irdma_cq cq_num field during CQ create
+
+From: Jacob Moroni <jmoroni@google.com>
+
+[ Upstream commit 5575b7646b94c0afb0f4c0d86e00e13cf3397a62 ]
+
+The driver maintains a CQ table that is used to ensure that a CQ is
+still valid when processing CQ-related AEs. When a CQ is destroyed,
+the table entry is cleared, using irdma_cq.cq_num as the index. This
+field was never set, so the driver was always clearing out entry 0.
+
+Additionally, the cq_num field size was increased to accommodate HW
+supporting more than 64K CQs.
+
+Fixes: b48c24c2d710 ("RDMA/irdma: Implement device supported verb APIs")
+Signed-off-by: Jacob Moroni <jmoroni@google.com>
+Link: https://patch.msgid.link/20250923142439.943930-1-jmoroni@google.com
+Acked-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/verbs.c | 1 +
+ drivers/infiniband/hw/irdma/verbs.h | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 7243255c224f4..29540b2b2373c 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -2077,6 +2077,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ spin_lock_init(&iwcq->lock);
+ INIT_LIST_HEAD(&iwcq->resize_list);
+ INIT_LIST_HEAD(&iwcq->cmpl_generated);
++ iwcq->cq_num = cq_num;
+ info.dev = dev;
+ ukinfo->cq_size = max(entries, 4);
+ ukinfo->cq_id = cq_num;
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index 50c2610d1cbfb..bb9ab945938e0 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -113,7 +113,7 @@ struct irdma_mr {
+ struct irdma_cq {
+ struct ib_cq ibcq;
+ struct irdma_sc_cq sc_cq;
+- u16 cq_num;
++ u32 cq_num;
+ bool user_mode;
+ atomic_t armed;
+ enum irdma_cmpl_notify last_notify;
+--
+2.51.0
+
ima-don-t-clear-ima_digsig-flag-when-setting-or-remo.patch
char-misc-restrict-the-dynamic-range-to-exclude-rese.patch
acpica-update-dsmethod.c-to-get-rid-of-unused-variab.patch
+rdma-irdma-fix-sd-index-calculation.patch
+rdma-irdma-remove-unused-struct-irdma_cq-fields.patch
+rdma-irdma-set-irdma_cq-cq_num-field-during-cq-creat.patch
+rdma-hns-fix-the-modification-of-max_send_sge.patch
+rdma-hns-fix-wrong-wqe-data-when-qp-wraps-around.patch
+btrfs-mark-dirty-extent-range-for-out-of-bound-preal.patch