--- /dev/null
+From 19fc1004537aadc78e6b4ba167bc25d2977b285f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 14:34:10 +0100
+Subject: crypto: qat - add backlog mechanism
+
+From: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+
+[ Upstream commit 38682383973280e5be2802ba8a8d4a636d36cb19 ]
+
+The implementations of the crypto algorithms (aead, skcipher, etc) in
+the QAT driver do not properly support requests with the
+CRYPTO_TFM_REQ_MAY_BACKLOG flag set. If the HW queue is full, the driver
+returns -EBUSY but does not enqueue the request. This can result in
+applications like dm-crypt waiting indefinitely for the completion of a
+request that was never submitted to the hardware.
+
+Fix this by adding a software backlog queue: if the ring buffer is more
+than eighty percent full, the request is enqueued on a backlog list and
+-EBUSY is returned to the caller.
+Requests in the backlog queue are resubmitted at a later time, in the
+context of the callback of a previously submitted request.
+A request for which -EBUSY was returned is then completed with
+-EINPROGRESS once it has been submitted to the HW queue.
+
+The submission loop inside qat_alg_send_message() has been modified to
+choose the submission policy based on the request flags. If the request
+does not have the CRYPTO_TFM_REQ_MAY_BACKLOG flag set, the previous
+behaviour is preserved.
+
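+Callers that set CRYPTO_TFM_REQ_MAY_BACKLOG must treat -EBUSY as
+"request queued" rather than as a failure, and wait for the completion
+callback (which first fires with -EINPROGRESS) before considering the
+request submitted. A minimal sketch of that contract using the generic
+crypto_wait_req() helper (illustrative only, not part of this patch):
+
+	DECLARE_CRYPTO_WAIT(wait);
+
+	skcipher_request_set_callback(req,
+				      CRYPTO_TFM_REQ_MAY_BACKLOG |
+				      CRYPTO_TFM_REQ_MAY_SLEEP,
+				      crypto_req_done, &wait);
+
+	/* With MAY_BACKLOG, -EBUSY means "backlogged", not "dropped";
+	 * crypto_wait_req() sleeps until the driver completes it.
+	 */
+	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+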
+Based on a patch by
+Vishnu Das Ramachandran <vishnu.dasx.ramachandran@intel.com>
+
+Cc: stable@vger.kernel.org
+Fixes: d370cec32194 ("crypto: qat - Intel(R) QAT crypto interface")
+Reported-by: Mikulas Patocka <mpatocka@redhat.com>
+Reported-by: Kyle Sanderson <kyle.leet@gmail.com>
+Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Reviewed-by: Marco Chiappero <marco.chiappero@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/qat/qat_common/adf_transport.c | 11 +++
+ drivers/crypto/qat/qat_common/adf_transport.h | 1 +
+ .../qat/qat_common/adf_transport_internal.h | 1 +
+ drivers/crypto/qat/qat_common/qat_algs.c | 24 ++++---
+ drivers/crypto/qat/qat_common/qat_algs_send.c | 67 ++++++++++++++++++-
+ drivers/crypto/qat/qat_common/qat_algs_send.h | 1 +
+ drivers/crypto/qat/qat_common/qat_asym_algs.c | 23 ++++---
+ drivers/crypto/qat/qat_common/qat_crypto.c | 3 +
+ drivers/crypto/qat/qat_common/qat_crypto.h | 10 +++
+ 9 files changed, 123 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
+index 8ba28409fb74..630d0483c4e0 100644
+--- a/drivers/crypto/qat/qat_common/adf_transport.c
++++ b/drivers/crypto/qat/qat_common/adf_transport.c
+@@ -8,6 +8,9 @@
+ #include "adf_cfg.h"
+ #include "adf_common_drv.h"
+
++#define ADF_MAX_RING_THRESHOLD 80
++#define ADF_PERCENT(tot, percent) (((tot) * (percent)) / 100)
++
+ static inline u32 adf_modulo(u32 data, u32 shift)
+ {
+ u32 div = data >> shift;
+@@ -77,6 +80,11 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
+ bank->irq_mask);
+ }
+
++bool adf_ring_nearly_full(struct adf_etr_ring_data *ring)
++{
++ return atomic_read(ring->inflights) > ring->threshold;
++}
++
+ int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
+ {
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
+@@ -217,6 +225,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+ struct adf_etr_bank_data *bank;
+ struct adf_etr_ring_data *ring;
+ char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
++ int max_inflights;
+ u32 ring_num;
+ int ret;
+
+@@ -263,6 +272,8 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+ ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
+ ring->head = 0;
+ ring->tail = 0;
++ max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
++ ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD);
+ atomic_set(ring->inflights, 0);
+ ret = adf_init_ring(ring);
+ if (ret)
+diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h
+index 2c95f1697c76..e6ef6f9b7691 100644
+--- a/drivers/crypto/qat/qat_common/adf_transport.h
++++ b/drivers/crypto/qat/qat_common/adf_transport.h
+@@ -14,6 +14,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+ const char *ring_name, adf_callback_fn callback,
+ int poll_mode, struct adf_etr_ring_data **ring_ptr);
+
++bool adf_ring_nearly_full(struct adf_etr_ring_data *ring);
+ int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg);
+ void adf_remove_ring(struct adf_etr_ring_data *ring);
+ #endif
+diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
+index 501bcf0f1809..8b2c92ba7ca1 100644
+--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h
++++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
+@@ -22,6 +22,7 @@ struct adf_etr_ring_data {
+ spinlock_t lock; /* protects ring data struct */
+ u16 head;
+ u16 tail;
++ u32 threshold;
+ u8 ring_number;
+ u8 ring_size;
+ u8 msg_size;
+diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
+index 6017ae82c713..873533dc43a7 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -935,19 +935,25 @@ void qat_alg_callback(void *resp)
+ struct icp_qat_fw_la_resp *qat_resp = resp;
+ struct qat_crypto_request *qat_req =
+ (void *)(__force long)qat_resp->opaque_data;
++ struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
+
+ qat_req->cb(qat_resp, qat_req);
++
++ qat_alg_send_backlog(backlog);
+ }
+
+ static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
+- struct qat_crypto_instance *inst)
++ struct qat_crypto_instance *inst,
++ struct crypto_async_request *base)
+ {
+- struct qat_alg_req req;
++ struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+- req.fw_req = (u32 *)&qat_req->req;
+- req.tx_ring = inst->sym_tx;
++ alg_req->fw_req = (u32 *)&qat_req->req;
++ alg_req->tx_ring = inst->sym_tx;
++ alg_req->base = base;
++ alg_req->backlog = &inst->backlog;
+
+- return qat_alg_send_message(&req);
++ return qat_alg_send_message(alg_req);
+ }
+
+ static int qat_alg_aead_dec(struct aead_request *areq)
+@@ -987,7 +993,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
+ auth_param->auth_off = 0;
+ auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
+
+- ret = qat_alg_send_sym_message(qat_req, ctx->inst);
++ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ if (ret == -ENOSPC)
+ qat_alg_free_bufl(ctx->inst, qat_req);
+
+@@ -1031,7 +1037,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
+ auth_param->auth_off = 0;
+ auth_param->auth_len = areq->assoclen + areq->cryptlen;
+
+- ret = qat_alg_send_sym_message(qat_req, ctx->inst);
++ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ if (ret == -ENOSPC)
+ qat_alg_free_bufl(ctx->inst, qat_req);
+
+@@ -1212,7 +1218,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
+
+ qat_alg_set_req_iv(qat_req);
+
+- ret = qat_alg_send_sym_message(qat_req, ctx->inst);
++ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ if (ret == -ENOSPC)
+ qat_alg_free_bufl(ctx->inst, qat_req);
+
+@@ -1278,7 +1284,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
+ qat_alg_set_req_iv(qat_req);
+ qat_alg_update_iv(qat_req);
+
+- ret = qat_alg_send_sym_message(qat_req, ctx->inst);
++ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ if (ret == -ENOSPC)
+ qat_alg_free_bufl(ctx->inst, qat_req);
+
+diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/qat/qat_common/qat_algs_send.c
+index 78f1bb8c26c0..ff5b4347f783 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs_send.c
++++ b/drivers/crypto/qat/qat_common/qat_algs_send.c
+@@ -6,7 +6,7 @@
+
+ #define ADF_MAX_RETRIES 20
+
+-int qat_alg_send_message(struct qat_alg_req *req)
++static int qat_alg_send_message_retry(struct qat_alg_req *req)
+ {
+ int ret = 0, ctr = 0;
+
+@@ -19,3 +19,68 @@ int qat_alg_send_message(struct qat_alg_req *req)
+
+ return -EINPROGRESS;
+ }
++
++void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
++{
++ struct qat_alg_req *req, *tmp;
++
++ spin_lock_bh(&backlog->lock);
++ list_for_each_entry_safe(req, tmp, &backlog->list, list) {
++ if (adf_send_message(req->tx_ring, req->fw_req)) {
++ /* The HW ring is full. Do nothing.
++ * qat_alg_send_backlog() will be invoked again by
++ * another callback.
++ */
++ break;
++ }
++ list_del(&req->list);
++ req->base->complete(req->base, -EINPROGRESS);
++ }
++ spin_unlock_bh(&backlog->lock);
++}
++
++static void qat_alg_backlog_req(struct qat_alg_req *req,
++ struct qat_instance_backlog *backlog)
++{
++ INIT_LIST_HEAD(&req->list);
++
++ spin_lock_bh(&backlog->lock);
++ list_add_tail(&req->list, &backlog->list);
++ spin_unlock_bh(&backlog->lock);
++}
++
++static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
++{
++ struct qat_instance_backlog *backlog = req->backlog;
++ struct adf_etr_ring_data *tx_ring = req->tx_ring;
++ u32 *fw_req = req->fw_req;
++
++ /* If any request is already backlogged, then add to backlog list */
++ if (!list_empty(&backlog->list))
++ goto enqueue;
++
++ /* If ring is nearly full, then add to backlog list */
++ if (adf_ring_nearly_full(tx_ring))
++ goto enqueue;
++
++ /* If adding request to HW ring fails, then add to backlog list */
++ if (adf_send_message(tx_ring, fw_req))
++ goto enqueue;
++
++ return -EINPROGRESS;
++
++enqueue:
++ qat_alg_backlog_req(req, backlog);
++
++ return -EBUSY;
++}
++
++int qat_alg_send_message(struct qat_alg_req *req)
++{
++ u32 flags = req->base->flags;
++
++ if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
++ return qat_alg_send_message_maybacklog(req);
++ else
++ return qat_alg_send_message_retry(req);
++}
+diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.h b/drivers/crypto/qat/qat_common/qat_algs_send.h
+index 3fa685d0c293..5ce9f4f69d8f 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs_send.h
++++ b/drivers/crypto/qat/qat_common/qat_algs_send.h
+@@ -6,5 +6,6 @@
+ #include "qat_crypto.h"
+
+ int qat_alg_send_message(struct qat_alg_req *req);
++void qat_alg_send_backlog(struct qat_instance_backlog *backlog);
+
+ #endif
+diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+index 08b8d83e070a..ff7249c093c9 100644
+--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+@@ -136,17 +136,21 @@ struct qat_asym_request {
+ } areq;
+ int err;
+ void (*cb)(struct icp_qat_fw_pke_resp *resp);
++ struct qat_alg_req alg_req;
+ } __aligned(64);
+
+ static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
+- struct qat_crypto_instance *inst)
++ struct qat_crypto_instance *inst,
++ struct crypto_async_request *base)
+ {
+- struct qat_alg_req req;
++ struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+- req.fw_req = (u32 *)&qat_req->req;
+- req.tx_ring = inst->pke_tx;
++ alg_req->fw_req = (u32 *)&qat_req->req;
++ alg_req->tx_ring = inst->pke_tx;
++ alg_req->base = base;
++ alg_req->backlog = &inst->backlog;
+
+- return qat_alg_send_message(&req);
++ return qat_alg_send_message(alg_req);
+ }
+
+ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
+@@ -350,7 +354,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
+ msg->input_param_count = n_input_params;
+ msg->output_param_count = 1;
+
+- ret = qat_alg_send_asym_message(qat_req, ctx->inst);
++ ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+ if (ret == -ENOSPC)
+ goto unmap_all;
+
+@@ -557,8 +561,11 @@ void qat_alg_asym_callback(void *_resp)
+ {
+ struct icp_qat_fw_pke_resp *resp = _resp;
+ struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
++ struct qat_instance_backlog *backlog = areq->alg_req.backlog;
+
+ areq->cb(resp);
++
++ qat_alg_send_backlog(backlog);
+ }
+
+ #define PKE_RSA_EP_512 0x1c161b21
+@@ -748,7 +755,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
+ msg->input_param_count = 3;
+ msg->output_param_count = 1;
+
+- ret = qat_alg_send_asym_message(qat_req, ctx->inst);
++ ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+ if (ret == -ENOSPC)
+ goto unmap_all;
+
+@@ -901,7 +908,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
+
+ msg->output_param_count = 1;
+
+- ret = qat_alg_send_asym_message(qat_req, ctx->inst);
++ ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+ if (ret == -ENOSPC)
+ goto unmap_all;
+
+diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
+index 67c9588e89df..80d905ed102e 100644
+--- a/drivers/crypto/qat/qat_common/qat_crypto.c
++++ b/drivers/crypto/qat/qat_common/qat_crypto.c
+@@ -353,6 +353,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
+ &inst->pke_rx);
+ if (ret)
+ goto err;
++
++ INIT_LIST_HEAD(&inst->backlog.list);
++ spin_lock_init(&inst->backlog.lock);
+ }
+ return 0;
+ err:
+diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
+index 0dcba6fc358c..245b6d9a3650 100644
+--- a/drivers/crypto/qat/qat_common/qat_crypto.h
++++ b/drivers/crypto/qat/qat_common/qat_crypto.h
+@@ -9,9 +9,17 @@
+ #include "adf_accel_devices.h"
+ #include "icp_qat_fw_la.h"
+
++struct qat_instance_backlog {
++ struct list_head list;
++ spinlock_t lock; /* protects backlog list */
++};
++
+ struct qat_alg_req {
+ u32 *fw_req;
+ struct adf_etr_ring_data *tx_ring;
++ struct crypto_async_request *base;
++ struct list_head list;
++ struct qat_instance_backlog *backlog;
+ };
+
+ struct qat_crypto_instance {
+@@ -24,6 +32,7 @@ struct qat_crypto_instance {
+ unsigned long state;
+ int id;
+ atomic_t refctr;
++ struct qat_instance_backlog backlog;
+ };
+
+ #define QAT_MAX_BUFF_DESC 4
+@@ -82,6 +91,7 @@ struct qat_crypto_request {
+ u8 iv[AES_BLOCK_SIZE];
+ };
+ bool encryption;
++ struct qat_alg_req alg_req;
+ };
+
+ static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
+--
+2.35.1
+
--- /dev/null
+From 4bafe3c81c4a62558c26171ed449631a4ebc7a55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 14:34:15 +0100
+Subject: crypto: qat - add param check for DH
+
+From: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+
+[ Upstream commit 2acbb8771f6ac82422886e63832ee7a0f4b1635b ]
+
+Reject requests with a source buffer that is bigger than the size of the
+key. This is to prevent a possible integer underflow that might happen
+when copying the source scatterlist into a linear buffer.
+
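+Concretely, the linearisation path computes a shift as the difference
+between the key size and the source length, roughly (simplified from
+the driver):
+
+	int shift = ctx->p_size - req->src_len;
+
+	scatterwalk_map_and_copy(qat_req->src_align + shift,
+				 req->src, 0, req->src_len, 0);
+
+If req->src_len is larger than ctx->p_size, shift goes negative and
+the copy starts before the allocated buffer. Rejecting oversized
+source buffers with -EINVAL closes that window.
+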
+Cc: stable@vger.kernel.org
+Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Reviewed-by: Adam Guerin <adam.guerin@intel.com>
+Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/qat/qat_common/qat_asym_algs.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+index 947eeff181b4..7173a2a0a484 100644
+--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+@@ -235,6 +235,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
+ req->dst_len = ctx->p_size;
+ return -EOVERFLOW;
+ }
++
++ if (req->src_len > ctx->p_size)
++ return -EINVAL;
++
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+--
+2.35.1
+
--- /dev/null
+From c8a43abd20431ad5f6dc039539322fa0b97c19f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 14:34:14 +0100
+Subject: crypto: qat - add param check for RSA
+
+From: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+
+[ Upstream commit 9714061423b8b24b8afb31b8eb4df977c63f19c4 ]
+
+Reject requests with a source buffer that is bigger than the size of the
+key. This is to prevent a possible integer underflow that might happen
+when copying the source scatterlist into a linear buffer.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Reviewed-by: Adam Guerin <adam.guerin@intel.com>
+Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/qat/qat_common/qat_asym_algs.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+index 25bbd22085c3..947eeff181b4 100644
+--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+@@ -656,6 +656,10 @@ static int qat_rsa_enc(struct akcipher_request *req)
+ req->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
++
++ if (req->src_len > ctx->key_sz)
++ return -EINVAL;
++
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+@@ -785,6 +789,10 @@ static int qat_rsa_dec(struct akcipher_request *req)
+ req->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
++
++ if (req->src_len > ctx->key_sz)
++ return -EINVAL;
++
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+--
+2.35.1
+
--- /dev/null
+From 0319165b726c69e86a682436c7183779982ddbd1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 14:34:11 +0100
+Subject: crypto: qat - fix memory leak in RSA
+
+From: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+
+[ Upstream commit 80a52e1ee7757b742f96bfb0d58f0c14eb6583d0 ]
+
+When an RSA key represented in form 2 (as defined in PKCS #1 V2.1, i.e.
+using the CRT components p, q, dP, dQ and qInv) is used, some components
+of the private key persist even after the TFM is released.
+Replace the explicit calls to free the buffers in qat_rsa_exit_tfm()
+with a call to qat_rsa_clear_ctx(), which frees all buffers referenced
+in the TFM context, including the CRT components that were previously
+leaked.
+
+Cc: stable@vger.kernel.org
+Fixes: 879f77e9071f ("crypto: qat - Add RSA CRT mode")
+Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Reviewed-by: Adam Guerin <adam.guerin@intel.com>
+Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/qat/qat_common/qat_asym_algs.c | 12 +-----------
+ 1 file changed, 1 insertion(+), 11 deletions(-)
+
+diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+index ff7249c093c9..2bc02c75398e 100644
+--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+@@ -1257,18 +1257,8 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+- if (ctx->n)
+- dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+- if (ctx->e)
+- dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+- if (ctx->d) {
+- memset(ctx->d, '\0', ctx->key_sz);
+- dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+- }
++ qat_rsa_clear_ctx(dev, ctx);
+ qat_crypto_put_instance(ctx->inst);
+- ctx->n = NULL;
+- ctx->e = NULL;
+- ctx->d = NULL;
+ }
+
+ static struct akcipher_alg rsa = {
+--
+2.35.1
+
--- /dev/null
+From 04d304a8d98fd8601bc5b1747767e1e5b5c1e49c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 14:34:17 +0100
+Subject: crypto: qat - re-enable registration of algorithms
+
+From: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+
+[ Upstream commit d09144745959bf7852ccafd73243dd7d1eaeb163 ]
+
+Re-enable the registration of algorithms after fixes to (1) use
+pre-allocated buffers in the datapath and (2) support the
+CRYPTO_TFM_REQ_MAY_BACKLOG flag.
+
+This reverts commit 8893d27ffcaf6ec6267038a177cb87bcde4dd3de.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Reviewed-by: Marco Chiappero <marco.chiappero@intel.com>
+Reviewed-by: Adam Guerin <adam.guerin@intel.com>
+Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/qat/qat_4xxx/adf_drv.c | 7 -------
+ drivers/crypto/qat/qat_common/qat_crypto.c | 7 -------
+ 2 files changed, 14 deletions(-)
+
+diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
+index fa4c350c1bf9..a6c78b9c730b 100644
+--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
++++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c
+@@ -75,13 +75,6 @@ static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+ if (ret)
+ goto err;
+
+- /* Temporarily set the number of crypto instances to zero to avoid
+- * registering the crypto algorithms.
+- * This will be removed when the algorithms will support the
+- * CRYPTO_TFM_REQ_MAY_BACKLOG flag
+- */
+- instances = 0;
+-
+ for (i = 0; i < instances; i++) {
+ val = i;
+ bank = i * 2;
+diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
+index 80d905ed102e..9341d892533a 100644
+--- a/drivers/crypto/qat/qat_common/qat_crypto.c
++++ b/drivers/crypto/qat/qat_common/qat_crypto.c
+@@ -161,13 +161,6 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
+ if (ret)
+ goto err;
+
+- /* Temporarily set the number of crypto instances to zero to avoid
+- * registering the crypto algorithms.
+- * This will be removed when the algorithms will support the
+- * CRYPTO_TFM_REQ_MAY_BACKLOG flag
+- */
+- instances = 0;
+-
+ for (i = 0; i < instances; i++) {
+ val = i;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+--
+2.35.1
+
--- /dev/null
+From a1dfd989edb768ece14929f35acb8d617ae9f765 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 14:34:09 +0100
+Subject: crypto: qat - refactor submission logic
+
+From: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+
+[ Upstream commit af88d3c109aa5edfaa11c9a26d9c0ff21ddf501c ]
+
+All the algorithms in qat_algs.c and qat_asym_algs.c use the same
+pattern to submit messages to the HW queues. Move the submission loop
+to a new function, qat_alg_send_message(), and share it between the
+symmetric and the asymmetric algorithms.
+
+As part of this rework, since the number of retries before returning an
+error is inconsistent between the symmetric and asymmetric
+implementations, set it to a value that works for both (i.e. 20; it was
+10 in qat_algs.c and 100 in qat_asym_algs.c).
+
+In addition, fix the return code reported when the HW queues are full:
+in that case return -ENOSPC instead of -EBUSY, leaving -EBUSY free to
+indicate a backlogged request.
+
+Including stable in CC since (1) the error code returned when the HW
+queues are full is incorrect and (2) it facilitates the backport of the
+next fix, "crypto: qat - add backlog mechanism".
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Reviewed-by: Marco Chiappero <marco.chiappero@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/qat/qat_common/Makefile | 1 +
+ drivers/crypto/qat/qat_common/qat_algs.c | 68 +++++++++----------
+ drivers/crypto/qat/qat_common/qat_algs_send.c | 21 ++++++
+ drivers/crypto/qat/qat_common/qat_algs_send.h | 10 +++
+ drivers/crypto/qat/qat_common/qat_asym_algs.c | 50 +++++++++-----
+ drivers/crypto/qat/qat_common/qat_crypto.h | 5 ++
+ 6 files changed, 101 insertions(+), 54 deletions(-)
+ create mode 100644 drivers/crypto/qat/qat_common/qat_algs_send.c
+ create mode 100644 drivers/crypto/qat/qat_common/qat_algs_send.h
+
+diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
+index f25a6c8edfc7..04f058acc4d3 100644
+--- a/drivers/crypto/qat/qat_common/Makefile
++++ b/drivers/crypto/qat/qat_common/Makefile
+@@ -16,6 +16,7 @@ intel_qat-objs := adf_cfg.o \
+ qat_crypto.o \
+ qat_algs.o \
+ qat_asym_algs.o \
++ qat_algs_send.o \
+ qat_uclo.o \
+ qat_hal.o
+
+diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
+index ec635fe44c1f..6017ae82c713 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -17,7 +17,7 @@
+ #include <crypto/xts.h>
+ #include <linux/dma-mapping.h>
+ #include "adf_accel_devices.h"
+-#include "adf_transport.h"
++#include "qat_algs_send.h"
+ #include "adf_common_drv.h"
+ #include "qat_crypto.h"
+ #include "icp_qat_hw.h"
+@@ -939,6 +939,17 @@ void qat_alg_callback(void *resp)
+ qat_req->cb(qat_resp, qat_req);
+ }
+
++static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
++ struct qat_crypto_instance *inst)
++{
++ struct qat_alg_req req;
++
++ req.fw_req = (u32 *)&qat_req->req;
++ req.tx_ring = inst->sym_tx;
++
++ return qat_alg_send_message(&req);
++}
++
+ static int qat_alg_aead_dec(struct aead_request *areq)
+ {
+ struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+@@ -949,7 +960,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ struct icp_qat_fw_la_bulk_req *msg;
+ int digst_size = crypto_aead_authsize(aead_tfm);
+- int ret, ctr = 0;
++ int ret;
+ u32 cipher_len;
+
+ cipher_len = areq->cryptlen - digst_size;
+@@ -975,15 +986,12 @@ static int qat_alg_aead_dec(struct aead_request *areq)
+ auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
+ auth_param->auth_off = 0;
+ auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
+- do {
+- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
+- } while (ret == -EAGAIN && ctr++ < 10);
+
+- if (ret == -EAGAIN) {
++ ret = qat_alg_send_sym_message(qat_req, ctx->inst);
++ if (ret == -ENOSPC)
+ qat_alg_free_bufl(ctx->inst, qat_req);
+- return -EBUSY;
+- }
+- return -EINPROGRESS;
++
++ return ret;
+ }
+
+ static int qat_alg_aead_enc(struct aead_request *areq)
+@@ -996,7 +1004,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ struct icp_qat_fw_la_bulk_req *msg;
+ u8 *iv = areq->iv;
+- int ret, ctr = 0;
++ int ret;
+
+ if (areq->cryptlen % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+@@ -1023,15 +1031,11 @@ static int qat_alg_aead_enc(struct aead_request *areq)
+ auth_param->auth_off = 0;
+ auth_param->auth_len = areq->assoclen + areq->cryptlen;
+
+- do {
+- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
+- } while (ret == -EAGAIN && ctr++ < 10);
+-
+- if (ret == -EAGAIN) {
++ ret = qat_alg_send_sym_message(qat_req, ctx->inst);
++ if (ret == -ENOSPC)
+ qat_alg_free_bufl(ctx->inst, qat_req);
+- return -EBUSY;
+- }
+- return -EINPROGRESS;
++
++ return ret;
+ }
+
+ static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
+@@ -1184,7 +1188,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_bulk_req *msg;
+- int ret, ctr = 0;
++ int ret;
+
+ if (req->cryptlen == 0)
+ return 0;
+@@ -1208,15 +1212,11 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
+
+ qat_alg_set_req_iv(qat_req);
+
+- do {
+- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
+- } while (ret == -EAGAIN && ctr++ < 10);
+-
+- if (ret == -EAGAIN) {
++ ret = qat_alg_send_sym_message(qat_req, ctx->inst);
++ if (ret == -ENOSPC)
+ qat_alg_free_bufl(ctx->inst, qat_req);
+- return -EBUSY;
+- }
+- return -EINPROGRESS;
++
++ return ret;
+ }
+
+ static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
+@@ -1253,7 +1253,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_bulk_req *msg;
+- int ret, ctr = 0;
++ int ret;
+
+ if (req->cryptlen == 0)
+ return 0;
+@@ -1278,15 +1278,11 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
+ qat_alg_set_req_iv(qat_req);
+ qat_alg_update_iv(qat_req);
+
+- do {
+- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
+- } while (ret == -EAGAIN && ctr++ < 10);
+-
+- if (ret == -EAGAIN) {
++ ret = qat_alg_send_sym_message(qat_req, ctx->inst);
++ if (ret == -ENOSPC)
+ qat_alg_free_bufl(ctx->inst, qat_req);
+- return -EBUSY;
+- }
+- return -EINPROGRESS;
++
++ return ret;
+ }
+
+ static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
+diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/qat/qat_common/qat_algs_send.c
+new file mode 100644
+index 000000000000..78f1bb8c26c0
+--- /dev/null
++++ b/drivers/crypto/qat/qat_common/qat_algs_send.c
+@@ -0,0 +1,21 @@
++// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
++/* Copyright(c) 2022 Intel Corporation */
++#include "adf_transport.h"
++#include "qat_algs_send.h"
++#include "qat_crypto.h"
++
++#define ADF_MAX_RETRIES 20
++
++int qat_alg_send_message(struct qat_alg_req *req)
++{
++ int ret = 0, ctr = 0;
++
++ do {
++ ret = adf_send_message(req->tx_ring, req->fw_req);
++ } while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES);
++
++ if (ret == -EAGAIN)
++ return -ENOSPC;
++
++ return -EINPROGRESS;
++}
+diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.h b/drivers/crypto/qat/qat_common/qat_algs_send.h
+new file mode 100644
+index 000000000000..3fa685d0c293
+--- /dev/null
++++ b/drivers/crypto/qat/qat_common/qat_algs_send.h
+@@ -0,0 +1,10 @@
++/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
++/* Copyright(c) 2022 Intel Corporation */
++#ifndef QAT_ALGS_SEND_H
++#define QAT_ALGS_SEND_H
++
++#include "qat_crypto.h"
++
++int qat_alg_send_message(struct qat_alg_req *req);
++
++#endif
+diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+index 5633f9df3b6f..08b8d83e070a 100644
+--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+@@ -12,6 +12,7 @@
+ #include <crypto/scatterwalk.h>
+ #include "icp_qat_fw_pke.h"
+ #include "adf_accel_devices.h"
++#include "qat_algs_send.h"
+ #include "adf_transport.h"
+ #include "adf_common_drv.h"
+ #include "qat_crypto.h"
+@@ -137,6 +138,17 @@ struct qat_asym_request {
+ void (*cb)(struct icp_qat_fw_pke_resp *resp);
+ } __aligned(64);
+
++static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
++ struct qat_crypto_instance *inst)
++{
++ struct qat_alg_req req;
++
++ req.fw_req = (u32 *)&qat_req->req;
++ req.tx_ring = inst->pke_tx;
++
++ return qat_alg_send_message(&req);
++}
++
+ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
+ {
+ struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+@@ -213,7 +225,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
+ struct qat_asym_request *qat_req =
+ PTR_ALIGN(kpp_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+- int ret, ctr = 0;
++ int ret;
+ int n_input_params = 0;
+
+ if (unlikely(!ctx->xa))
+@@ -338,13 +350,13 @@ static int qat_dh_compute_value(struct kpp_request *req)
+ msg->input_param_count = n_input_params;
+ msg->output_param_count = 1;
+
+- do {
+- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
+- } while (ret == -EBUSY && ctr++ < 100);
++ ret = qat_alg_send_asym_message(qat_req, ctx->inst);
++ if (ret == -ENOSPC)
++ goto unmap_all;
+
+- if (!ret)
+- return -EINPROGRESS;
++ return ret;
+
++unmap_all:
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_dh_output_params),
+@@ -645,7 +657,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
+ struct qat_asym_request *qat_req =
+ PTR_ALIGN(akcipher_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+- int ret, ctr = 0;
++ int ret;
+
+ if (unlikely(!ctx->n || !ctx->e))
+ return -EINVAL;
+@@ -735,13 +747,14 @@ static int qat_rsa_enc(struct akcipher_request *req)
+ msg->pke_mid.opaque = (u64)(__force long)qat_req;
+ msg->input_param_count = 3;
+ msg->output_param_count = 1;
+- do {
+- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
+- } while (ret == -EBUSY && ctr++ < 100);
+
+- if (!ret)
+- return -EINPROGRESS;
++ ret = qat_alg_send_asym_message(qat_req, ctx->inst);
++ if (ret == -ENOSPC)
++ goto unmap_all;
++
++ return ret;
+
++unmap_all:
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+@@ -779,7 +792,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
+ struct qat_asym_request *qat_req =
+ PTR_ALIGN(akcipher_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+- int ret, ctr = 0;
++ int ret;
+
+ if (unlikely(!ctx->n || !ctx->d))
+ return -EINVAL;
+@@ -887,13 +900,14 @@ static int qat_rsa_dec(struct akcipher_request *req)
+ msg->input_param_count = 3;
+
+ msg->output_param_count = 1;
+- do {
+- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
+- } while (ret == -EBUSY && ctr++ < 100);
+
+- if (!ret)
+- return -EINPROGRESS;
++ ret = qat_alg_send_asym_message(qat_req, ctx->inst);
++ if (ret == -ENOSPC)
++ goto unmap_all;
++
++ return ret;
+
++unmap_all:
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
+index 0928f159ea99..0dcba6fc358c 100644
+--- a/drivers/crypto/qat/qat_common/qat_crypto.h
++++ b/drivers/crypto/qat/qat_common/qat_crypto.h
+@@ -9,6 +9,11 @@
+ #include "adf_accel_devices.h"
+ #include "icp_qat_fw_la.h"
+
++struct qat_alg_req {
++ u32 *fw_req;
++ struct adf_etr_ring_data *tx_ring;
++};
++
+ struct qat_crypto_instance {
+ struct adf_etr_ring_data *sym_tx;
+ struct adf_etr_ring_data *sym_rx;
+--
+2.35.1
+
--- /dev/null
+From 103aab27073c4d526e7a46fea8e9557bbb557776 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 14:34:13 +0100
+Subject: crypto: qat - remove dma_free_coherent() for DH
+
+From: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+
+[ Upstream commit 029aa4624a7fe35233bdd3d1354dc7be260380bf ]
+
+The function qat_dh_compute_value() allocates memory with
+dma_alloc_coherent() if the source or the destination buffers are made
+of multiple flat buffers or of a size that is not compatible with the
+hardware.
+This memory is then freed with dma_free_coherent() in the context of a
+tasklet invoked to handle the response for the corresponding request.
+
+According to Documentation/core-api/dma-api-howto.rst, the function
+dma_free_coherent() cannot be called in an interrupt context.
+
+Replace the dma_alloc_coherent() allocations in qat_dh_compute_value()
+with kmalloc() + dma_map_single().
+
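+The replacement pairs an ordinary allocation with a streaming DMA
+mapping; both have counterparts that are safe to call from the
+response tasklet. A sketch of the resulting shape (simplified from
+the driver):
+
+	buf = kzalloc(ctx->p_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	dma = dma_map_single(dev, buf, ctx->p_size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma)) {
+		kfree_sensitive(buf);
+		return -ENOMEM;
+	}
+
+	/* ...and later, in the response tasklet: */
+	dma_unmap_single(dev, dma, ctx->p_size, DMA_TO_DEVICE);
+	kfree_sensitive(buf);	/* zeroes key material before freeing */
+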
+Cc: stable@vger.kernel.org
+Fixes: c9839143ebbf ("crypto: qat - Add DH support")
+Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Reviewed-by: Adam Guerin <adam.guerin@intel.com>
+Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/qat/qat_common/qat_asym_algs.c | 83 ++++++++-----------
+ 1 file changed, 34 insertions(+), 49 deletions(-)
+
+diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+index b31372bddb96..25bbd22085c3 100644
+--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+@@ -164,26 +164,21 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
+ err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+ if (areq->src) {
+- if (req->src_align)
+- dma_free_coherent(dev, req->ctx.dh->p_size,
+- req->src_align, req->in.dh.in.b);
+- else
+- dma_unmap_single(dev, req->in.dh.in.b,
+- req->ctx.dh->p_size, DMA_TO_DEVICE);
++ dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
++ DMA_TO_DEVICE);
++ kfree_sensitive(req->src_align);
+ }
+
+ areq->dst_len = req->ctx.dh->p_size;
+ if (req->dst_align) {
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ areq->dst_len, 1);
+-
+- dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
+- req->out.dh.r);
+- } else {
+- dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+- DMA_FROM_DEVICE);
++ kfree_sensitive(req->dst_align);
+ }
+
++ dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
++ DMA_FROM_DEVICE);
++
+ dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, req->phy_out,
+@@ -231,6 +226,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ int ret;
+ int n_input_params = 0;
++ u8 *vaddr;
+
+ if (unlikely(!ctx->xa))
+ return -EINVAL;
+@@ -287,27 +283,24 @@ static int qat_dh_compute_value(struct kpp_request *req)
+ */
+ if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
+ qat_req->src_align = NULL;
+- qat_req->in.dh.in.b = dma_map_single(dev,
+- sg_virt(req->src),
+- req->src_len,
+- DMA_TO_DEVICE);
+- if (unlikely(dma_mapping_error(dev,
+- qat_req->in.dh.in.b)))
+- return ret;
+-
++ vaddr = sg_virt(req->src);
+ } else {
+ int shift = ctx->p_size - req->src_len;
+
+- qat_req->src_align = dma_alloc_coherent(dev,
+- ctx->p_size,
+- &qat_req->in.dh.in.b,
+- GFP_KERNEL);
++ qat_req->src_align = kzalloc(ctx->p_size, GFP_KERNEL);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ scatterwalk_map_and_copy(qat_req->src_align + shift,
+ req->src, 0, req->src_len, 0);
++
++ vaddr = qat_req->src_align;
+ }
++
++ qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
++ DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
++ goto unmap_src;
+ }
+ /*
+ * dst can be of any size in valid range, but HW expects it to be the
+@@ -318,20 +311,18 @@ static int qat_dh_compute_value(struct kpp_request *req)
+ */
+ if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
+ qat_req->dst_align = NULL;
+- qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
+- req->dst_len,
+- DMA_FROM_DEVICE);
+-
+- if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
+- goto unmap_src;
+-
++ vaddr = sg_virt(req->dst);
+ } else {
+- qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
+- &qat_req->out.dh.r,
+- GFP_KERNEL);
++ qat_req->dst_align = kzalloc(ctx->p_size, GFP_KERNEL);
+ if (unlikely(!qat_req->dst_align))
+ goto unmap_src;
++
++ vaddr = qat_req->dst_align;
+ }
++ qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
++ DMA_FROM_DEVICE);
++ if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
++ goto unmap_dst;
+
+ qat_req->in.dh.in_tab[n_input_params] = 0;
+ qat_req->out.dh.out_tab[1] = 0;
+@@ -371,23 +362,17 @@ static int qat_dh_compute_value(struct kpp_request *req)
+ sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+ unmap_dst:
+- if (qat_req->dst_align)
+- dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
+- qat_req->out.dh.r);
+- else
+- if (!dma_mapping_error(dev, qat_req->out.dh.r))
+- dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
+- DMA_FROM_DEVICE);
++ if (!dma_mapping_error(dev, qat_req->out.dh.r))
++ dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
++ DMA_FROM_DEVICE);
++ kfree_sensitive(qat_req->dst_align);
+ unmap_src:
+ if (req->src) {
+- if (qat_req->src_align)
+- dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
+- qat_req->in.dh.in.b);
+- else
+- if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
+- dma_unmap_single(dev, qat_req->in.dh.in.b,
+- ctx->p_size,
+- DMA_TO_DEVICE);
++ if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
++ dma_unmap_single(dev, qat_req->in.dh.in.b,
++ ctx->p_size,
++ DMA_TO_DEVICE);
++ kfree_sensitive(qat_req->src_align);
+ }
+ return ret;
+ }
+--
+2.35.1
+
--- /dev/null
+From 6a5b6e6f050d691770b2b28ca6b7a9c5911703ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 14:34:12 +0100
+Subject: crypto: qat - remove dma_free_coherent() for RSA
+
+From: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+
+[ Upstream commit 3dfaf0071ed74d7a9c6b3c9ea4df7a6f8e423c2a ]
+
+After commit f5ff79fddf0e ("dma-mapping: remove CONFIG_DMA_REMAP"), if
+the algorithms are enabled, the driver crashes with a BUG_ON while
+executing vunmap() in the context of a tasklet. This is due to the fact
+that the function dma_free_coherent() cannot be called in an interrupt
+context (see Documentation/core-api/dma-api-howto.rst).
+
+The functions qat_rsa_enc() and qat_rsa_dec() allocate memory with
+dma_alloc_coherent() if the source or the destination buffers are made
+of multiple flat buffers or of a size that is not compatible with the
+hardware.
+This memory is then freed with dma_free_coherent() in the context of a
+tasklet invoked to handle the response for the corresponding request.
+
+Replace the dma_alloc_coherent() allocations in the functions
+qat_rsa_enc() and qat_rsa_dec() with kmalloc() + dma_map_single().
+
+Cc: stable@vger.kernel.org
+Fixes: a990532023b9 ("crypto: qat - Add support for RSA algorithm")
+Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Reviewed-by: Adam Guerin <adam.guerin@intel.com>
+Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/qat/qat_common/qat_asym_algs.c | 137 ++++++++----------
+ 1 file changed, 60 insertions(+), 77 deletions(-)
+
+diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+index 2bc02c75398e..b31372bddb96 100644
+--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+@@ -529,25 +529,22 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+
+ err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+- if (req->src_align)
+- dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
+- req->in.rsa.enc.m);
+- else
+- dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+- DMA_TO_DEVICE);
++ kfree_sensitive(req->src_align);
++
++ dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
++ DMA_TO_DEVICE);
+
+ areq->dst_len = req->ctx.rsa->key_sz;
+ if (req->dst_align) {
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ areq->dst_len, 1);
+
+- dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
+- req->out.rsa.enc.c);
+- } else {
+- dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+- DMA_FROM_DEVICE);
++ kfree_sensitive(req->dst_align);
+ }
+
++ dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
++ DMA_FROM_DEVICE);
++
+ dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, req->phy_out,
+@@ -664,6 +661,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
+ struct qat_asym_request *qat_req =
+ PTR_ALIGN(akcipher_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
++ u8 *vaddr;
+ int ret;
+
+ if (unlikely(!ctx->n || !ctx->e))
+@@ -701,40 +699,39 @@ static int qat_rsa_enc(struct akcipher_request *req)
+ */
+ if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+ qat_req->src_align = NULL;
+- qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
+- req->src_len, DMA_TO_DEVICE);
+- if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
+- return ret;
+-
++ vaddr = sg_virt(req->src);
+ } else {
+ int shift = ctx->key_sz - req->src_len;
+
+- qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
+- &qat_req->in.rsa.enc.m,
+- GFP_KERNEL);
++ qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+ 0, req->src_len, 0);
++ vaddr = qat_req->src_align;
+ }
+- if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+- qat_req->dst_align = NULL;
+- qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
+- req->dst_len,
+- DMA_FROM_DEVICE);
+
+- if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
+- goto unmap_src;
++ qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
++ DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
++ goto unmap_src;
+
++ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
++ qat_req->dst_align = NULL;
++ vaddr = sg_virt(req->dst);
+ } else {
+- qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
+- &qat_req->out.rsa.enc.c,
+- GFP_KERNEL);
++ qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
+ if (unlikely(!qat_req->dst_align))
+ goto unmap_src;
+-
++ vaddr = qat_req->dst_align;
+ }
++
++ qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
++ DMA_FROM_DEVICE);
++ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
++ goto unmap_dst;
++
+ qat_req->in.rsa.in_tab[3] = 0;
+ qat_req->out.rsa.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
+@@ -772,21 +769,15 @@ static int qat_rsa_enc(struct akcipher_request *req)
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ unmap_dst:
+- if (qat_req->dst_align)
+- dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
+- qat_req->out.rsa.enc.c);
+- else
+- if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+- dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+- ctx->key_sz, DMA_FROM_DEVICE);
++ if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
++ dma_unmap_single(dev, qat_req->out.rsa.enc.c,
++ ctx->key_sz, DMA_FROM_DEVICE);
++ kfree_sensitive(qat_req->dst_align);
+ unmap_src:
+- if (qat_req->src_align)
+- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+- qat_req->in.rsa.enc.m);
+- else
+- if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+- dma_unmap_single(dev, qat_req->in.rsa.enc.m,
+- ctx->key_sz, DMA_TO_DEVICE);
++ if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
++ dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
++ DMA_TO_DEVICE);
++ kfree_sensitive(qat_req->src_align);
+ return ret;
+ }
+
+@@ -799,6 +790,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
+ struct qat_asym_request *qat_req =
+ PTR_ALIGN(akcipher_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
++ u8 *vaddr;
+ int ret;
+
+ if (unlikely(!ctx->n || !ctx->d))
+@@ -846,40 +838,37 @@ static int qat_rsa_dec(struct akcipher_request *req)
+ */
+ if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+ qat_req->src_align = NULL;
+- qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
+- req->dst_len, DMA_TO_DEVICE);
+- if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
+- return ret;
+-
++ vaddr = sg_virt(req->src);
+ } else {
+ int shift = ctx->key_sz - req->src_len;
+
+- qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
+- &qat_req->in.rsa.dec.c,
+- GFP_KERNEL);
++ qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+ 0, req->src_len, 0);
++ vaddr = qat_req->src_align;
+ }
+- if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+- qat_req->dst_align = NULL;
+- qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
+- req->dst_len,
+- DMA_FROM_DEVICE);
+
+- if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
+- goto unmap_src;
++ qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
++ DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
++ goto unmap_src;
+
++ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
++ qat_req->dst_align = NULL;
++ vaddr = sg_virt(req->dst);
+ } else {
+- qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
+- &qat_req->out.rsa.dec.m,
+- GFP_KERNEL);
++ qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
+ if (unlikely(!qat_req->dst_align))
+ goto unmap_src;
+-
++ vaddr = qat_req->dst_align;
+ }
++ qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
++ DMA_FROM_DEVICE);
++ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
++ goto unmap_dst;
+
+ if (ctx->crt_mode)
+ qat_req->in.rsa.in_tab[6] = 0;
+@@ -925,21 +914,15 @@ static int qat_rsa_dec(struct akcipher_request *req)
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ unmap_dst:
+- if (qat_req->dst_align)
+- dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
+- qat_req->out.rsa.dec.m);
+- else
+- if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+- dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+- ctx->key_sz, DMA_FROM_DEVICE);
++ if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
++ dma_unmap_single(dev, qat_req->out.rsa.dec.m,
++ ctx->key_sz, DMA_FROM_DEVICE);
++ kfree_sensitive(qat_req->dst_align);
+ unmap_src:
+- if (qat_req->src_align)
+- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+- qat_req->in.rsa.dec.c);
+- else
+- if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+- dma_unmap_single(dev, qat_req->in.rsa.dec.c,
+- ctx->key_sz, DMA_TO_DEVICE);
++ if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
++ dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
++ DMA_TO_DEVICE);
++ kfree_sensitive(qat_req->src_align);
+ return ret;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 3379a1479bbf0df56cf45af50c41261c708c2a5c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 14:19:27 +0100
+Subject: crypto: qat - set to zero DH parameters before free
+
+From: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+
+[ Upstream commit 1731160ff7c7bbb11bb1aacb14dd25e18d522779 ]
+
+Set to zero the context buffers containing the DH key before they are
+freed.
+This is a defense in depth measure that prevents keys from being
+recovered from memory in case the system is compromised between the
+buffer being freed and that area of memory (containing keys) being
+overwritten.
+
+Cc: stable@vger.kernel.org
+Fixes: c9839143ebbf ("crypto: qat - Add DH support")
+Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Reviewed-by: Adam Guerin <adam.guerin@intel.com>
+Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/qat/qat_common/qat_asym_algs.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+index b0b78445418b..5633f9df3b6f 100644
+--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+@@ -420,14 +420,17 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
+ static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
+ {
+ if (ctx->g) {
++ memset(ctx->g, 0, ctx->p_size);
+ dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
+ ctx->g = NULL;
+ }
+ if (ctx->xa) {
++ memset(ctx->xa, 0, ctx->p_size);
+ dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
+ ctx->xa = NULL;
+ }
+ if (ctx->p) {
++ memset(ctx->p, 0, ctx->p_size);
+ dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+ ctx->p = NULL;
+ }
+--
+2.35.1
+
--- /dev/null
+From 888c3dca05c0db4544e0c1f1165abcd22f27f3a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 14:34:08 +0100
+Subject: crypto: qat - use pre-allocated buffers in datapath
+
+From: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+
+[ Upstream commit e0831e7af4e03f2715de102e18e9179ec0a81562 ]
+
+In order to do DMAs, the QAT device requires that the scatterlist
+structures are mapped and translated into a format that the firmware can
+understand. This is defined as the composition of a scatter gather list
+(SGL) descriptor header, the struct qat_alg_buf_list, plus a variable
+number of flat buffer descriptors, the struct qat_alg_buf.
+
+The allocation and mapping of these data structures is done each time a
+request is received from the skcipher and aead APIs.
+In an OOM situation, this behaviour might lead to a deadlock if an
+allocation fails.
+
+Based on the conversation in [1], increase the size of the aead and
+skcipher request contexts to include an SGL descriptor that can handle
+a maximum of 4 flat buffers.
+If a request exceeds 4 entries, the buffers are allocated dynamically.
+
+[1] https://lore.kernel.org/linux-crypto/20200722072932.GA27544@gondor.apana.org.au/
+
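+For reference, the embedded descriptors are small: the SGL header (one
+u64 plus two u32 fields) and each flat buffer descriptor (two u32
+fields plus one u64) are 16 bytes apiece, so a fixed list with
+QAT_MAX_BUFF_DESC = 4 entries is 80 bytes of payload, rounded up to
+128 bytes by the 64-byte alignment; the source and destination lists
+together add 256 bytes to each request context, a modest price for
+avoiding an allocation in the datapath.
+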
+Cc: stable@vger.kernel.org
+Fixes: d370cec32194 ("crypto: qat - Intel(R) QAT crypto interface")
+Reported-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Reviewed-by: Marco Chiappero <marco.chiappero@intel.com>
+Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/qat/qat_common/qat_algs.c | 64 +++++++++++++---------
+ drivers/crypto/qat/qat_common/qat_crypto.h | 24 ++++++++
+ 2 files changed, 61 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
+index f998ed58457c..ec635fe44c1f 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -46,19 +46,6 @@
+ static DEFINE_MUTEX(algs_lock);
+ static unsigned int active_devs;
+
+-struct qat_alg_buf {
+- u32 len;
+- u32 resrvd;
+- u64 addr;
+-} __packed;
+-
+-struct qat_alg_buf_list {
+- u64 resrvd;
+- u32 num_bufs;
+- u32 num_mapped_bufs;
+- struct qat_alg_buf bufers[];
+-} __packed __aligned(64);
+-
+ /* Common content descriptor */
+ struct qat_alg_cd {
+ union {
+@@ -693,7 +680,10 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
+ bl->bufers[i].len, DMA_BIDIRECTIONAL);
+
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+- kfree(bl);
++
++ if (!qat_req->buf.sgl_src_valid)
++ kfree(bl);
++
+ if (blp != blpout) {
+ /* If out of place operation dma unmap only data */
+ int bufless = blout->num_bufs - blout->num_mapped_bufs;
+@@ -704,7 +694,9 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
+ DMA_BIDIRECTIONAL);
+ }
+ dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
+- kfree(blout);
++
++ if (!qat_req->buf.sgl_dst_valid)
++ kfree(blout);
+ }
+ }
+
+@@ -721,15 +713,24 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+ dma_addr_t blp = DMA_MAPPING_ERROR;
+ dma_addr_t bloutp = DMA_MAPPING_ERROR;
+ struct scatterlist *sg;
+- size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
++ size_t sz_out, sz = struct_size(bufl, bufers, n);
++ int node = dev_to_node(&GET_DEV(inst->accel_dev));
+
+ if (unlikely(!n))
+ return -EINVAL;
+
+- bufl = kzalloc_node(sz, GFP_ATOMIC,
+- dev_to_node(&GET_DEV(inst->accel_dev)));
+- if (unlikely(!bufl))
+- return -ENOMEM;
++ qat_req->buf.sgl_src_valid = false;
++ qat_req->buf.sgl_dst_valid = false;
++
++ if (n > QAT_MAX_BUFF_DESC) {
++ bufl = kzalloc_node(sz, GFP_ATOMIC, node);
++ if (unlikely(!bufl))
++ return -ENOMEM;
++ } else {
++ bufl = &qat_req->buf.sgl_src.sgl_hdr;
++ memset(bufl, 0, sizeof(struct qat_alg_buf_list));
++ qat_req->buf.sgl_src_valid = true;
++ }
+
+ for_each_sg(sgl, sg, n, i)
+ bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+@@ -760,12 +761,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+ struct qat_alg_buf *bufers;
+
+ n = sg_nents(sglout);
+- sz_out = struct_size(buflout, bufers, n + 1);
++ sz_out = struct_size(buflout, bufers, n);
+ sg_nctr = 0;
+- buflout = kzalloc_node(sz_out, GFP_ATOMIC,
+- dev_to_node(&GET_DEV(inst->accel_dev)));
+- if (unlikely(!buflout))
+- goto err_in;
++
++ if (n > QAT_MAX_BUFF_DESC) {
++ buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
++ if (unlikely(!buflout))
++ goto err_in;
++ } else {
++ buflout = &qat_req->buf.sgl_dst.sgl_hdr;
++ memset(buflout, 0, sizeof(struct qat_alg_buf_list));
++ qat_req->buf.sgl_dst_valid = true;
++ }
+
+ bufers = buflout->bufers;
+ for_each_sg(sglout, sg, n, i)
+@@ -810,7 +817,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+ dma_unmap_single(dev, buflout->bufers[i].addr,
+ buflout->bufers[i].len,
+ DMA_BIDIRECTIONAL);
+- kfree(buflout);
++
++ if (!qat_req->buf.sgl_dst_valid)
++ kfree(buflout);
+
+ err_in:
+ if (!dma_mapping_error(dev, blp))
+@@ -823,7 +832,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+ bufl->bufers[i].len,
+ DMA_BIDIRECTIONAL);
+
+- kfree(bufl);
++ if (!qat_req->buf.sgl_src_valid)
++ kfree(bufl);
+
+ dev_err(dev, "Failed to map buf for dma\n");
+ return -ENOMEM;
+diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
+index b6a4c95ae003..0928f159ea99 100644
+--- a/drivers/crypto/qat/qat_common/qat_crypto.h
++++ b/drivers/crypto/qat/qat_common/qat_crypto.h
+@@ -21,6 +21,26 @@ struct qat_crypto_instance {
+ atomic_t refctr;
+ };
+
++#define QAT_MAX_BUFF_DESC 4
++
++struct qat_alg_buf {
++ u32 len;
++ u32 resrvd;
++ u64 addr;
++} __packed;
++
++struct qat_alg_buf_list {
++ u64 resrvd;
++ u32 num_bufs;
++ u32 num_mapped_bufs;
++ struct qat_alg_buf bufers[];
++} __packed;
++
++struct qat_alg_fixed_buf_list {
++ struct qat_alg_buf_list sgl_hdr;
++ struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
++} __packed __aligned(64);
++
+ struct qat_crypto_request_buffs {
+ struct qat_alg_buf_list *bl;
+ dma_addr_t blp;
+@@ -28,6 +48,10 @@ struct qat_crypto_request_buffs {
+ dma_addr_t bloutp;
+ size_t sz;
+ size_t sz_out;
++ bool sgl_src_valid;
++ bool sgl_dst_valid;
++ struct qat_alg_fixed_buf_list sgl_src;
++ struct qat_alg_fixed_buf_list sgl_dst;
+ };
+
+ struct qat_crypto_request;
+--
+2.35.1
+
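For readers who want the gist of the change above without the diff context: the patch replaces a per-request kzalloc of the buffer descriptor list with a descriptor list embedded in the request structure whenever the scatterlist has at most QAT_MAX_BUFF_DESC entries, and teaches the free paths to kfree() only what was actually heap-allocated. Below is a minimal userspace sketch of that pattern, not the driver's code; every name in it (MAX_INLINE_DESC, struct buf_list, get_buf_list, ...) is invented for illustration, with calloc()/free() standing in for kzalloc_node()/kfree(). Embedding a struct that ends in a flexible array member relies on the same GNU C extension the driver itself uses.

/*
 * Sketch only: small requests reuse a fixed descriptor list embedded
 * in the request instead of a per-request heap allocation; a validity
 * flag records which branch was taken so teardown frees only what
 * was heap-allocated.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_INLINE_DESC 4

struct buf_desc {
	unsigned long addr;
	unsigned int len;
};

struct buf_list {
	unsigned int num_bufs;
	struct buf_desc bufs[];	/* flexible array member */
};

/* fixed-size variant providing inline storage for small requests */
struct fixed_buf_list {
	struct buf_list hdr;
	struct buf_desc descs[MAX_INLINE_DESC];
};

struct request_bufs {
	bool inline_valid;		/* plays the role of sgl_src_valid */
	struct fixed_buf_list fixed;	/* plays the role of sgl_src */
};

static struct buf_list *get_buf_list(struct request_bufs *req, size_t n)
{
	struct buf_list *bl;

	req->inline_valid = false;
	if (n > MAX_INLINE_DESC) {
		/* large request: fall back to a heap allocation */
		bl = calloc(1, sizeof(*bl) + n * sizeof(struct buf_desc));
	} else {
		/* small request: reuse the embedded descriptor list */
		bl = &req->fixed.hdr;
		/* clear only the header; callers fill the descriptors */
		memset(bl, 0, sizeof(*bl));
		req->inline_valid = true;
	}
	return bl;
}

static void put_buf_list(struct request_bufs *req, struct buf_list *bl)
{
	/* free only what was actually heap-allocated */
	if (!req->inline_valid)
		free(bl);
}

int main(void)
{
	struct request_bufs req;
	struct buf_list *bl;

	bl = get_buf_list(&req, 2);
	printf("2 entries -> inline storage: %d\n", req.inline_valid); /* 1 */
	put_buf_list(&req, bl);

	bl = get_buf_list(&req, 16);
	printf("16 entries -> inline storage: %d\n", req.inline_valid); /* 0 */
	put_buf_list(&req, bl);
	return 0;
}

Small requests thus avoid a GFP_ATOMIC allocation on the hot path entirely, and the two validity flags keep teardown unambiguous even when the source and destination lists take different branches.
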
--- /dev/null
+From 31887131855eb587f2a5092ce8f5ea05950ef187 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Apr 2022 13:34:16 -0400
+Subject: dlm: fix pending remove if msg allocation fails
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit ba58995909b5098ca4003af65b0ccd5a8d13dd25 ]
+
+This patch unsets ls_remove_len and ls_remove_name if the message
+allocation for a remove message fails. In that case we never send the
+remove message out, yet the per-lockspace ls_remove_len and
+ls_remove_name variables are left set for a pending remove. Unsetting
+those variables indicates to possible waiters in wait_pending_remove()
+that no pending remove is going on at this moment.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/lock.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index 5b485cd96c93..5298a3a43bc7 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -4085,13 +4085,14 @@ static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
+ rv = _create_message(ls, sizeof(struct dlm_message) + len,
+ dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
+ if (rv)
+- return;
++ goto out;
+
+ memcpy(ms->m_extra, name, len);
+ ms->m_hash = hash;
+
+ send_message(mh, ms);
+
++out:
+ spin_lock(&ls->ls_remove_spin);
+ ls->ls_remove_len = 0;
+ memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
+--
+2.35.1
+
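As a standalone illustration of the control-flow change (invented names, locking omitted, not fs/dlm code): on allocation failure the function now jumps to the same cleanup that clears the pending-remove state, where it previously returned early and left that state set for waiters.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX_LEN 64

struct lockspace {
	int remove_len;			/* nonzero: a remove is pending */
	char remove_name[NAME_MAX_LEN];
};

static void send_repeat_remove(struct lockspace *ls, const char *name)
{
	char *msg = malloc(128);	/* stands in for _create_message() */

	if (!msg)
		goto out;		/* before the fix: a bare "return" */

	snprintf(msg, 128, "REMOVE %s", name);
	/* ... send the message ... */
	free(msg);
out:
	/* always clear the pending state so waiters can make progress */
	ls->remove_len = 0;
	memset(ls->remove_name, 0, sizeof(ls->remove_name));
}

int main(void)
{
	struct lockspace ls = { .remove_len = 8 };

	strcpy(ls.remove_name, "resource");
	send_repeat_remove(&ls, "resource");
	printf("pending remove len after call: %d\n", ls.remove_len); /* 0 */
	return 0;
}
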
--- /dev/null
+From 74c3b4262458eebc8a495363109d5ffdac90ce1c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 11:58:06 +0900
+Subject: exfat: fix referencing wrong parent directory information after
+ renaming
+
+From: Yuezhang Mo <Yuezhang.Mo@sony.com>
+
+[ Upstream commit d8dad2588addd1d861ce19e7df3b702330f0c7e3 ]
+
+During renaming, the parent directory information may be
+updated, but the file/directory still references the
+old parent directory information.
+
+This bug will cause 2 problems.
+
+(1) The renamed file can not be written.
+
+ [10768.175172] exFAT-fs (sda1): error, failed to bmap (inode : 7afd50e4 iblock : 0, err : -5)
+ [10768.184285] exFAT-fs (sda1): Filesystem has been set read-only
+ ash: write error: Input/output error
+
+(2) Some dentries of the renamed file/directory are not set
+ to deleted after removing the file/directory.
+
+exfat_update_parent_info() is a workaround for the wrong parent
+directory information being used after renaming. Now that the bug is
+fixed, the workaround is no longer needed, so remove it.
+
+Fixes: 5f2aa075070c ("exfat: add inode operations")
+Cc: stable@vger.kernel.org # v5.7+
+Signed-off-by: Yuezhang Mo <Yuezhang.Mo@sony.com>
+Reviewed-by: Andy Wu <Andy.Wu@sony.com>
+Reviewed-by: Aoyama Wataru <wataru.aoyama@sony.com>
+Reviewed-by: Daniel Palmer <daniel.palmer@sony.com>
+Reviewed-by: Sungjong Seo <sj1557.seo@samsung.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/exfat/namei.c | 27 +--------------------------
+ 1 file changed, 1 insertion(+), 26 deletions(-)
+
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index a02a04a993bf..76acc3721951 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -1080,6 +1080,7 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
+
+ exfat_remove_entries(inode, p_dir, oldentry, 0,
+ num_old_entries);
++ ei->dir = *p_dir;
+ ei->entry = newentry;
+ } else {
+ if (exfat_get_entry_type(epold) == TYPE_FILE) {
+@@ -1167,28 +1168,6 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
+ return 0;
+ }
+
+-static void exfat_update_parent_info(struct exfat_inode_info *ei,
+- struct inode *parent_inode)
+-{
+- struct exfat_sb_info *sbi = EXFAT_SB(parent_inode->i_sb);
+- struct exfat_inode_info *parent_ei = EXFAT_I(parent_inode);
+- loff_t parent_isize = i_size_read(parent_inode);
+-
+- /*
+- * the problem that struct exfat_inode_info caches wrong parent info.
+- *
+- * because of flag-mismatch of ei->dir,
+- * there is abnormal traversing cluster chain.
+- */
+- if (unlikely(parent_ei->flags != ei->dir.flags ||
+- parent_isize != EXFAT_CLU_TO_B(ei->dir.size, sbi) ||
+- parent_ei->start_clu != ei->dir.dir)) {
+- exfat_chain_set(&ei->dir, parent_ei->start_clu,
+- EXFAT_B_TO_CLU_ROUND_UP(parent_isize, sbi),
+- parent_ei->flags);
+- }
+-}
+-
+ /* rename or move a old file into a new file */
+ static int __exfat_rename(struct inode *old_parent_inode,
+ struct exfat_inode_info *ei, struct inode *new_parent_inode,
+@@ -1219,8 +1198,6 @@ static int __exfat_rename(struct inode *old_parent_inode,
+ return -ENOENT;
+ }
+
+- exfat_update_parent_info(ei, old_parent_inode);
+-
+ exfat_chain_dup(&olddir, &ei->dir);
+ dentry = ei->entry;
+
+@@ -1241,8 +1218,6 @@ static int __exfat_rename(struct inode *old_parent_inode,
+ goto out;
+ }
+
+- exfat_update_parent_info(new_ei, new_parent_inode);
+-
+ p_dir = &(new_ei->dir);
+ new_entry = new_ei->entry;
+ ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh);
+--
+2.35.1
+
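A self-contained sketch of the invariant the one-line addition above restores (invented types, not fs/exfat code): once the rename path has rebuilt the directory entries, the inode's cached parent chain and entry index must be refreshed together, otherwise later operations traverse a stale cluster chain.

#include <stdio.h>

struct chain {
	unsigned int dir;	/* first cluster of the directory */
	unsigned int size;
	unsigned char flags;
};

struct inode_info {
	struct chain dir;	/* cached parent directory chain */
	int entry;		/* dentry index within that directory */
};

static void rename_commit(struct inode_info *ei,
			  const struct chain *p_dir, int newentry)
{
	ei->dir = *p_dir;	/* the added line: refresh cached parent */
	ei->entry = newentry;	/* always updated, even before the fix */
}

int main(void)
{
	struct inode_info ei = { .dir = { .dir = 10 }, .entry = 3 };
	struct chain new_parent = { .dir = 42, .size = 1, .flags = 1 };

	rename_commit(&ei, &new_parent, 0);
	printf("inode now cached under cluster %u, entry %d\n",
	       ei.dir.dir, ei.entry);	/* cluster 42, entry 0 */
	return 0;
}
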
perf-x86-intel-lbr-fix-unchecked-msr-access-error-on-hsw.patch
x86-bugs-warn-when-ibrs-mitigation-is-selected-on-enhanced-ibrs-parts.patch
clk-lan966x-fix-the-lan966x-clock-gate-register-address.patch
+dlm-fix-pending-remove-if-msg-allocation-fails.patch
+crypto-qat-set-to-zero-dh-parameters-before-free.patch
+crypto-qat-use-pre-allocated-buffers-in-datapath.patch
+crypto-qat-refactor-submission-logic.patch
+crypto-qat-add-backlog-mechanism.patch
+crypto-qat-fix-memory-leak-in-rsa.patch
+crypto-qat-remove-dma_free_coherent-for-rsa.patch
+crypto-qat-remove-dma_free_coherent-for-dh.patch
+crypto-qat-add-param-check-for-rsa.patch
+crypto-qat-add-param-check-for-dh.patch
+crypto-qat-re-enable-registration-of-algorithms.patch
+exfat-fix-referencing-wrong-parent-directory-informa.patch