--- /dev/null
+From 75e91c888989cf2df5c78b251b07de1f5052e30e Mon Sep 17 00:00:00 2001
+From: Chao Yu <yuchao0@huawei.com>
+Date: Wed, 9 Dec 2020 16:42:14 +0800
+Subject: f2fs: compress: fix compression chksum
+
+From: Chao Yu <yuchao0@huawei.com>
+
+commit 75e91c888989cf2df5c78b251b07de1f5052e30e upstream.
+
+This patch addresses two minor issues in compression chksum: it wraps
+the COMPRESS_CHKSUM flag test in parentheses to make the intended
+precedence explicit, and it drops the WARN_ON_ONCE() on checksum
+mismatch, since a mismatch indicates on-disk corruption rather than a
+kernel bug and SBI_NEED_FSCK already reports it.
+
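+A minimal sketch of the check as it reads after this patch (names as
+in fs/f2fs/compress.c; "<<" binds tighter than "&" in C, so the added
+parentheses do not change behavior):
+
+	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
+		u32 provided = le32_to_cpu(dic->cbuf->chksum);
+		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
+
+		if (provided != calculated) {
+			/* ratelimited "checksum invalid" message elided */
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+		}
+	}
+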
+Fixes: b28f047b28c5 ("f2fs: compress: support chksum")
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/compress.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -783,7 +783,7 @@ void f2fs_decompress_pages(struct bio *b
+
+ ret = cops->decompress_pages(dic);
+
+- if (!ret && fi->i_compress_flag & 1 << COMPRESS_CHKSUM) {
++ if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
+ u32 provided = le32_to_cpu(dic->cbuf->chksum);
+ u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
+
+@@ -796,7 +796,6 @@ void f2fs_decompress_pages(struct bio *b
+ provided, calculated);
+ }
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+- WARN_ON_ONCE(1);
+ }
+ }
+
--- /dev/null
+From 9e03dbea2b0634b21a45946b4f8097e0dc86ebe1 Mon Sep 17 00:00:00 2001
+From: Chengchang Tang <tangchengchang@huawei.com>
+Date: Fri, 4 Aug 2023 09:27:11 +0800
+Subject: RDMA/hns: Fix CQ and QP cache affinity
+
+From: Chengchang Tang <tangchengchang@huawei.com>
+
+commit 9e03dbea2b0634b21a45946b4f8097e0dc86ebe1 upstream.
+
+Currently, the affinity between the QP cache and the CQ cache is not
+considered when assigning a QPN, which hurts the HW message rate.
+
+Allocate QPNs from the QP bank with better CQ affinity to get better
+performance.
+
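+A sketch of the resulting mapping (8 QP banks per HNS_ROCE_QP_BANK_NUM,
+4 CQ banks per CQ_BANKID_MASK = GENMASK(1, 0)):
+
+	/* QP banks 2i and 2i+1 are affine to CQ bank i */
+	static u8 get_affinity_cq_bank(u8 qp_bank)
+	{
+		return (qp_bank >> 1) & CQ_BANKID_MASK;
+	}
+
+E.g. a send CQ whose cqn & CQ_BANKID_MASK equals 1 restricts QPN
+allocation to QP banks 2 and 3; when no send CQ is supplied, every
+bank remains a candidate.
+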
+Fixes: 71586dd20010 ("RDMA/hns: Create QP with selected QPN for bank load balance")
+Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://lore.kernel.org/r/20230804012711.808069-5-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_device.h | 2 ++
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 28 ++++++++++++++++++++++------
+ 2 files changed, 24 insertions(+), 6 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -122,6 +122,8 @@
+ */
+ #define EQ_DEPTH_COEFF 2
+
++#define CQ_BANKID_MASK GENMASK(1, 0)
++
+ enum {
+ SERV_TYPE_RC,
+ SERV_TYPE_UC,
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -154,14 +154,29 @@ static void hns_roce_ib_qp_event(struct
+ }
+ }
+
+-static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank)
++static u8 get_affinity_cq_bank(u8 qp_bank)
+ {
+- u32 least_load = bank[0].inuse;
++ return (qp_bank >> 1) & CQ_BANKID_MASK;
++}
++
++static u8 get_least_load_bankid_for_qp(struct ib_qp_init_attr *init_attr,
++ struct hns_roce_bank *bank)
++{
++#define INVALID_LOAD_QPNUM 0xFFFFFFFF
++ struct ib_cq *scq = init_attr->send_cq;
++ u32 least_load = INVALID_LOAD_QPNUM;
++ unsigned long cqn = 0;
+ u8 bankid = 0;
+ u32 bankcnt;
+ u8 i;
+
+- for (i = 1; i < HNS_ROCE_QP_BANK_NUM; i++) {
++ if (scq)
++ cqn = to_hr_cq(scq)->cqn;
++
++ for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
++ if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))
++ continue;
++
+ bankcnt = bank[i].inuse;
+ if (bankcnt < least_load) {
+ least_load = bankcnt;
+@@ -193,7 +208,8 @@ static int alloc_qpn_with_bankid(struct
+
+ return 0;
+ }
+-static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
++static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
++ struct ib_qp_init_attr *init_attr)
+ {
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ unsigned long num = 0;
+@@ -211,7 +227,7 @@ static int alloc_qpn(struct hns_roce_dev
+ hr_qp->doorbell_qpn = 1;
+ } else {
+ mutex_lock(&qp_table->bank_mutex);
+- bankid = get_least_load_bankid_for_qp(qp_table->bank);
++ bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank);
+
+ ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
+ &num);
+@@ -1005,7 +1021,7 @@ static int hns_roce_create_qp_common(str
+ goto err_db;
+ }
+
+- ret = alloc_qpn(hr_dev, hr_qp);
++ ret = alloc_qpn(hr_dev, hr_qp, init_attr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
+ goto err_buf;
--- /dev/null
+From 9293d3fcb70583f2c786f04ca788af026b7c4c5c Mon Sep 17 00:00:00 2001
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Tue, 19 Jan 2021 17:28:33 +0800
+Subject: RDMA/hns: Use mutex instead of spinlock for ida allocation
+
+From: Yangyang Li <liyangyang20@huawei.com>
+
+commit 9293d3fcb70583f2c786f04ca788af026b7c4c5c upstream.
+
+ida_alloc_range() may sleep when called with GFP_KERNEL, but sleeping
+is not allowed while holding the spinlock that covers this call, so
+the spinlock needs to be changed to a mutex.
+
+GFP_ATOMIC is not a suitable alternative for the QP allocation path,
+as atomic allocations have a non-trivial chance of failure.
+
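+As a simplified sketch of the old alloc_qpn() path (the ida call
+actually sits in alloc_qpn_with_bankid(); field names abridged):
+
+	spin_lock(&qp_table->bank_lock);
+	bankid = get_least_load_bankid_for_qp(qp_table->bank);
+	/* GFP_KERNEL allocation may sleep while holding the spinlock */
+	id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
+	...
+	spin_unlock(&qp_table->bank_lock);
+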
+Fixes: 71586dd20010 ("RDMA/hns: Create QP with selected QPN for bank load balance")
+Link: https://lore.kernel.org/r/1611048513-28663-1-git-send-email-liweihang@huawei.com
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Weihang Li <liweihang@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_device.h | 2 +-
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 11 ++++++-----
+ 2 files changed, 7 insertions(+), 6 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -537,7 +537,7 @@ struct hns_roce_qp_table {
+ struct hns_roce_hem_table sccc_table;
+ struct mutex scc_mutex;
+ struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
+- spinlock_t bank_lock;
++ struct mutex bank_mutex;
+ };
+
+ struct hns_roce_cq_table {
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -210,7 +210,7 @@ static int alloc_qpn(struct hns_roce_dev
+
+ hr_qp->doorbell_qpn = 1;
+ } else {
+- spin_lock(&qp_table->bank_lock);
++ mutex_lock(&qp_table->bank_mutex);
+ bankid = get_least_load_bankid_for_qp(qp_table->bank);
+
+ ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
+@@ -218,12 +218,12 @@ static int alloc_qpn(struct hns_roce_dev
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to alloc QPN, ret = %d\n", ret);
+- spin_unlock(&qp_table->bank_lock);
++ mutex_unlock(&qp_table->bank_mutex);
+ return ret;
+ }
+
+ qp_table->bank[bankid].inuse++;
+- spin_unlock(&qp_table->bank_lock);
++ mutex_unlock(&qp_table->bank_mutex);
+
+ hr_qp->doorbell_qpn = (u32)num;
+ }
+@@ -409,9 +409,9 @@ static void free_qpn(struct hns_roce_dev
+
+ ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
+
+- spin_lock(&hr_dev->qp_table.bank_lock);
++ mutex_lock(&hr_dev->qp_table.bank_mutex);
+ hr_dev->qp_table.bank[bankid].inuse--;
+- spin_unlock(&hr_dev->qp_table.bank_lock);
++ mutex_unlock(&hr_dev->qp_table.bank_mutex);
+ }
+
+ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+@@ -1358,6 +1358,7 @@ int hns_roce_init_qp_table(struct hns_ro
+ unsigned int i;
+
+ mutex_init(&qp_table->scc_mutex);
++ mutex_init(&qp_table->bank_mutex);
+ xa_init(&hr_dev->qp_table_xa);
+
+ reserved_from_bot = hr_dev->caps.reserved_qps;
nfs-fix-undefined-behavior-in-nfs_block_bits.patch
nfs-fix-read_plus-when-server-doesn-t-support-op_read_plus.patch
scsi-ufs-ufs-qcom-clear-qunipro_g4_sel-for-hw-major-version-5.patch
+f2fs-compress-fix-compression-chksum.patch
+rdma-hns-use-mutex-instead-of-spinlock-for-ida-allocation.patch
+rdma-hns-fix-cq-and-qp-cache-affinity.patch