--- /dev/null
+From 825f5abe123a576446efb669c403789f55d757e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Feb 2020 12:46:48 +0200
+Subject: crypto: caam - update xts sector size for large input length
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrei Botila <andrei.botila@nxp.com>
+
+[ Upstream commit 3f142b6a7b573bde6cff926f246da05652c61eb4 ]
+
+Since the software implementation of XTS-AES has no notion of
+sectors, every input length is processed the same way. The CAAM
+implementation does have a notion of sectors, which causes different
+results between the software implementation and the one in CAAM
+for input lengths bigger than 512 bytes.
+Increase the sector size to the maximum value representable on 16 bits.
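+
+For reference, the value being programmed (sketch of the hunks below;
+the arithmetic note is mine):
+
+        /* BIT(15) = 0x8000, i.e. 32768-byte sectors, large enough that,
+         * as the comment added below puts it, sector segmentation is
+         * practically disabled. */
+        __be64 sector_size = cpu_to_be64(BIT(15));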
+
+Fixes: c6415a6016bf ("crypto: caam - add support for acipher xts(aes)")
+Cc: <stable@vger.kernel.org> # v4.12+
+Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/caam/caamalg_desc.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
+index edacf9b39b638..ceb033930535f 100644
+--- a/drivers/crypto/caam/caamalg_desc.c
++++ b/drivers/crypto/caam/caamalg_desc.c
+@@ -1457,7 +1457,13 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+ */
+ void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+ {
+- __be64 sector_size = cpu_to_be64(512);
++ /*
++ * Set sector size to a big value, practically disabling
++ * sector size segmentation in xts implementation. We cannot
++ * take full advantage of this HW feature with existing
++ * crypto API / dm-crypt SW architecture.
++ */
++ __be64 sector_size = cpu_to_be64(BIT(15));
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+@@ -1509,7 +1515,13 @@ EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+ */
+ void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+ {
+- __be64 sector_size = cpu_to_be64(512);
++ /*
++ * Set sector size to a big value, practically disabling
++ * sector size segmentation in xts implementation. We cannot
++ * take full advantage of this HW feature with existing
++ * crypto API / dm-crypt SW architecture.
++ */
++ __be64 sector_size = cpu_to_be64(BIT(15));
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+--
+2.20.1
+
--- /dev/null
+From f31b955b9048e6dcbe7752da7dd52993af9c3016 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Feb 2020 18:19:14 +0200
+Subject: crypto: ccree - dec auth tag size from cryptlen map
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+[ Upstream commit 8962c6d2c2b8ca51b0f188109015b15fc5f4da44 ]
+
+Remove the auth tag size from cryptlen before mapping the destination
+in out-of-place AEAD decryption, thus resolving a crash with the
+extended testmgr tests.
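+
+For context, a sketch of the AEAD buffer layout involved (illustrative
+comment, not driver code):
+
+        /*
+         * Out-of-place AEAD decryption:
+         *   src: [ assoc | ciphertext | auth tag ]  req->cryptlen counts
+         *                                           ciphertext + tag
+         *   dst: [ assoc | plaintext ]              the tag is never
+         *                                           written back
+         * so the dst mapping needs authsize bytes less than cryptlen.
+         */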
+
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Reported-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Cc: stable@vger.kernel.org # v4.19+
+Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/ccree/cc_buffer_mgr.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
+index 6681d113c0d67..77e31191e408a 100644
+--- a/drivers/crypto/ccree/cc_buffer_mgr.c
++++ b/drivers/crypto/ccree/cc_buffer_mgr.c
+@@ -1021,8 +1021,12 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
+
+ if (req->src != req->dst) {
+ size_for_map = areq_ctx->assoclen + req->cryptlen;
+- size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+- authsize : 0;
++
++ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
++ size_for_map += authsize;
++ else
++ size_for_map -= authsize;
++
+ if (is_gcm4543)
+ size_for_map += crypto_aead_ivsize(tfm);
+
+--
+2.20.1
+
--- /dev/null
+From f8a1cd444d30ad2c53fabb9e075e5d0ea4a3382a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Apr 2019 16:38:59 +0300
+Subject: crypto: ccree - don't mangle the request assoclen
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+[ Upstream commit da3cf67f1bcf25b069a54ff70fd108860242c8f7 ]
+
+We were mangling the request struct assoclen field.
+Fix it by keeping an internal version and working on it.
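+
+Distilled from the hunks below, the pattern is now (sketch):
+
+        /* snapshot once per request, at each entry point */
+        areq_ctx->assoclen = req->assoclen;
+        ...
+        /* rfc4309/rfc4106 IV stripping now adjusts the copy only */
+        areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;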
+
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/ccree/cc_aead.c | 40 +++++++++++++++++-----------
+ drivers/crypto/ccree/cc_aead.h | 1 +
+ drivers/crypto/ccree/cc_buffer_mgr.c | 22 +++++++--------
+ 3 files changed, 37 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
+index c9233420fe421..57aac15a335f5 100644
+--- a/drivers/crypto/ccree/cc_aead.c
++++ b/drivers/crypto/ccree/cc_aead.c
+@@ -731,7 +731,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
+ dev_dbg(dev, "ASSOC buffer type DLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
+- areq->assoclen, NS_BIT);
++ areq_ctx->assoclen, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+ areq_ctx->cryptlen > 0)
+@@ -1080,9 +1080,11 @@ static void cc_proc_header_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+ {
++ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
++
+ /* Hash associated data */
+- if (req->assoclen > 0)
++ if (areq_ctx->assoclen > 0)
+ cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
+
+ /* Hash IV */
+@@ -1310,7 +1312,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx,
+ {
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+- unsigned int assoclen = req->assoclen;
++ unsigned int assoclen = areq_ctx->assoclen;
+ unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+ (req->cryptlen - ctx->authsize) : req->cryptlen;
+
+@@ -1469,7 +1471,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
+ idx++;
+
+ /* process assoc data */
+- if (req->assoclen > 0) {
++ if (req_ctx->assoclen > 0) {
+ cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
+ } else {
+ hw_desc_init(&desc[idx]);
+@@ -1561,7 +1563,7 @@ static int config_ccm_adata(struct aead_request *req)
+ * NIST Special Publication 800-38C
+ */
+ *b0 |= (8 * ((m - 2) / 2));
+- if (req->assoclen > 0)
++ if (req_ctx->assoclen > 0)
+ *b0 |= 64; /* Enable bit 6 if Adata exists. */
+
+ rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
+@@ -1572,7 +1574,7 @@ static int config_ccm_adata(struct aead_request *req)
+ /* END of "taken from crypto/ccm.c" */
+
+ /* l(a) - size of associated data. */
+- req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
++ req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
+
+ memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
+ req->iv[15] = 1;
+@@ -1604,7 +1606,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
+ memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
+ CCM_BLOCK_IV_SIZE);
+ req->iv = areq_ctx->ctr_iv;
+- req->assoclen -= CCM_BLOCK_IV_SIZE;
++ areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
+ }
+
+ static void cc_set_ghash_desc(struct aead_request *req,
+@@ -1812,7 +1814,7 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
+ // for gcm and rfc4106.
+ cc_set_ghash_desc(req, desc, seq_size);
+ /* process(ghash) assoc data */
+- if (req->assoclen > 0)
++ if (req_ctx->assoclen > 0)
+ cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+ cc_set_gctr_desc(req, desc, seq_size);
+ /* process(gctr+ghash) */
+@@ -1836,8 +1838,8 @@ static int config_gcm_context(struct aead_request *req)
+ (req->cryptlen - ctx->authsize);
+ __be32 counter = cpu_to_be32(2);
+
+- dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
+- __func__, cryptlen, req->assoclen, ctx->authsize);
++ dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
++ __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
+
+ memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
+
+@@ -1853,7 +1855,7 @@ static int config_gcm_context(struct aead_request *req)
+ if (!req_ctx->plaintext_authenticate_only) {
+ __be64 temp64;
+
+- temp64 = cpu_to_be64(req->assoclen * 8);
++ temp64 = cpu_to_be64(req_ctx->assoclen * 8);
+ memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
+ temp64 = cpu_to_be64(cryptlen * 8);
+ memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
+@@ -1863,8 +1865,8 @@ static int config_gcm_context(struct aead_request *req)
+ */
+ __be64 temp64;
+
+- temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
+- cryptlen) * 8);
++ temp64 = cpu_to_be64((req_ctx->assoclen +
++ GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
+ memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
+ temp64 = 0;
+ memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
+@@ -1884,7 +1886,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
+ memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
+ GCM_BLOCK_RFC4_IV_SIZE);
+ req->iv = areq_ctx->ctr_iv;
+- req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
++ areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+ }
+
+ static int cc_proc_aead(struct aead_request *req,
+@@ -1909,7 +1911,7 @@ static int cc_proc_aead(struct aead_request *req,
+ /* Check data length according to mode */
+ if (validate_data_size(ctx, direct, req)) {
+ dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
+- req->cryptlen, req->assoclen);
++ req->cryptlen, areq_ctx->assoclen);
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+ return -EINVAL;
+ }
+@@ -2062,6 +2064,7 @@ static int cc_aead_encrypt(struct aead_request *req)
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
++ areq_ctx->assoclen = req->assoclen;
+ areq_ctx->backup_giv = NULL;
+ areq_ctx->is_gcm4543 = false;
+
+@@ -2093,6 +2096,7 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
++ areq_ctx->assoclen = req->assoclen;
+ areq_ctx->backup_giv = NULL;
+ areq_ctx->is_gcm4543 = true;
+
+@@ -2114,6 +2118,7 @@ static int cc_aead_decrypt(struct aead_request *req)
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
++ areq_ctx->assoclen = req->assoclen;
+ areq_ctx->backup_giv = NULL;
+ areq_ctx->is_gcm4543 = false;
+
+@@ -2143,6 +2148,7 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
++ areq_ctx->assoclen = req->assoclen;
+ areq_ctx->backup_giv = NULL;
+
+ areq_ctx->is_gcm4543 = true;
+@@ -2262,6 +2268,7 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
++ areq_ctx->assoclen = req->assoclen;
+ areq_ctx->backup_giv = NULL;
+
+ areq_ctx->plaintext_authenticate_only = false;
+@@ -2290,6 +2297,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
++ areq_ctx->assoclen = req->assoclen;
+ areq_ctx->backup_giv = NULL;
+
+ cc_proc_rfc4_gcm(req);
+@@ -2321,6 +2329,7 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
++ areq_ctx->assoclen = req->assoclen;
+ areq_ctx->backup_giv = NULL;
+
+ areq_ctx->plaintext_authenticate_only = false;
+@@ -2349,6 +2358,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
++ areq_ctx->assoclen = req->assoclen;
+ areq_ctx->backup_giv = NULL;
+
+ cc_proc_rfc4_gcm(req);
+diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
+index 5edf3b351fa44..74bc99067f180 100644
+--- a/drivers/crypto/ccree/cc_aead.h
++++ b/drivers/crypto/ccree/cc_aead.h
+@@ -67,6 +67,7 @@ struct aead_req_ctx {
+ u8 backup_mac[MAX_MAC_SIZE];
+ u8 *backup_iv; /*store iv for generated IV flow*/
+ u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
++ u32 assoclen; /* internal assoclen */
+ dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
+ /* buffer for internal ccm configurations */
+ dma_addr_t ccm_iv0_dma_addr;
+diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
+index 7489887cc7802..6681d113c0d67 100644
+--- a/drivers/crypto/ccree/cc_buffer_mgr.c
++++ b/drivers/crypto/ccree/cc_buffer_mgr.c
+@@ -65,7 +65,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
+ {
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+- u32 skip = req->assoclen + req->cryptlen;
++ u32 skip = areq_ctx->assoclen + req->cryptlen;
+
+ if (areq_ctx->is_gcm4543)
+ skip += crypto_aead_ivsize(tfm);
+@@ -574,8 +574,8 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
+
+ dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
+ sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
+- req->assoclen, req->cryptlen);
+- size_to_unmap = req->assoclen + req->cryptlen;
++ areq_ctx->assoclen, req->cryptlen);
++ size_to_unmap = areq_ctx->assoclen + req->cryptlen;
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ size_to_unmap += areq_ctx->req_authsize;
+ if (areq_ctx->is_gcm4543)
+@@ -717,7 +717,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
+ struct scatterlist *current_sg = req->src;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int sg_index = 0;
+- u32 size_of_assoc = req->assoclen;
++ u32 size_of_assoc = areq_ctx->assoclen;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ if (areq_ctx->is_gcm4543)
+@@ -728,7 +728,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
+ goto chain_assoc_exit;
+ }
+
+- if (req->assoclen == 0) {
++ if (areq_ctx->assoclen == 0) {
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
+ areq_ctx->assoc.nents = 0;
+ areq_ctx->assoc.mlli_nents = 0;
+@@ -788,7 +788,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
+ cc_dma_buf_type(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
+- req->assoclen, 0, is_last,
++ areq_ctx->assoclen, 0, is_last,
+ &areq_ctx->assoc.mlli_nents);
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
+ }
+@@ -972,11 +972,11 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
+ u32 src_mapped_nents = 0, dst_mapped_nents = 0;
+ u32 offset = 0;
+ /* non-inplace mode */
+- unsigned int size_for_map = req->assoclen + req->cryptlen;
++ unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ u32 sg_index = 0;
+ bool is_gcm4543 = areq_ctx->is_gcm4543;
+- u32 size_to_skip = req->assoclen;
++ u32 size_to_skip = areq_ctx->assoclen;
+
+ if (is_gcm4543)
+ size_to_skip += crypto_aead_ivsize(tfm);
+@@ -1020,7 +1020,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
+ areq_ctx->src_offset = offset;
+
+ if (req->src != req->dst) {
+- size_for_map = req->assoclen + req->cryptlen;
++ size_for_map = areq_ctx->assoclen + req->cryptlen;
+ size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ authsize : 0;
+ if (is_gcm4543)
+@@ -1186,7 +1186,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
+ areq_ctx->ccm_iv0_dma_addr = dma_addr;
+
+ rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+- &sg_data, req->assoclen);
++ &sg_data, areq_ctx->assoclen);
+ if (rc)
+ goto aead_map_failure;
+ }
+@@ -1237,7 +1237,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
+ areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
+ }
+
+- size_to_map = req->cryptlen + req->assoclen;
++ size_to_map = req->cryptlen + areq_ctx->assoclen;
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ size_to_map += authsize;
+
+--
+2.20.1
+
--- /dev/null
+From b73d7886e989878759a7932a721c9004be624fe0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Jan 2019 15:43:11 +0200
+Subject: crypto: ccree - improve error handling
+
+From: Hadar Gat <hadar.gat@arm.com>
+
+[ Upstream commit ccba2f1112d4871982ae3f09d1984c0443c89a97 ]
+
+Pass the returned error code to the higher-level functions.
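+
+The recurring pattern, before and after (sketch, arguments elided):
+
+        /* before: every failure was flattened to -ENOMEM */
+        if (cc_map_sg(...)) {
+                rc = -ENOMEM;
+                goto exit;
+        }
+
+        /* after: whatever cc_map_sg() returned is passed up */
+        rc = cc_map_sg(...);
+        if (rc)
+                goto exit;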
+
+Signed-off-by: Hadar Gat <hadar.gat@arm.com>
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/ccree/cc_buffer_mgr.c | 74 +++++++++++++---------------
+ 1 file changed, 35 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
+index 90b4870078fb7..7489887cc7802 100644
+--- a/drivers/crypto/ccree/cc_buffer_mgr.c
++++ b/drivers/crypto/ccree/cc_buffer_mgr.c
+@@ -460,10 +460,8 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
+ /* Map the src SGL */
+ rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+- if (rc) {
+- rc = -ENOMEM;
++ if (rc)
+ goto cipher_exit;
+- }
+ if (mapped_nents > 1)
+ req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+@@ -477,12 +475,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
+ }
+ } else {
+ /* Map the dst sg */
+- if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+- &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+- &dummy, &mapped_nents)) {
+- rc = -ENOMEM;
++ rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
++ &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
++ &dummy, &mapped_nents);
++ if (rc)
+ goto cipher_exit;
+- }
+ if (mapped_nents > 1)
+ req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+@@ -1033,10 +1030,8 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
+ &areq_ctx->dst.nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
+ &dst_mapped_nents);
+- if (rc) {
+- rc = -ENOMEM;
++ if (rc)
+ goto chain_data_exit;
+- }
+ }
+
+ dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
+@@ -1190,11 +1185,10 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
+ }
+ areq_ctx->ccm_iv0_dma_addr = dma_addr;
+
+- if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+- &sg_data, req->assoclen)) {
+- rc = -ENOMEM;
++ rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
++ &sg_data, req->assoclen);
++ if (rc)
+ goto aead_map_failure;
+- }
+ }
+
+ if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
+@@ -1254,10 +1248,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
+ (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
+ LLI_MAX_NUM_OF_DATA_ENTRIES),
+ &dummy, &mapped_nents);
+- if (rc) {
+- rc = -ENOMEM;
++ if (rc)
+ goto aead_map_failure;
+- }
+
+ if (areq_ctx->is_single_pass) {
+ /*
+@@ -1341,6 +1333,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ struct buffer_array sg_data;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
++ int rc = 0;
+ u32 dummy = 0;
+ u32 mapped_nents = 0;
+
+@@ -1360,18 +1353,18 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ /*TODO: copy data in case that buffer is enough for operation */
+ /* map the previous buffer */
+ if (*curr_buff_cnt) {
+- if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+- &sg_data)) {
+- return -ENOMEM;
+- }
++ rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
++ &sg_data);
++ if (rc)
++ return rc;
+ }
+
+ if (src && nbytes > 0 && do_update) {
+- if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+- &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+- &dummy, &mapped_nents)) {
++ rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
++ &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
++ &dummy, &mapped_nents);
++ if (rc)
+ goto unmap_curr_buff;
+- }
+ if (src && mapped_nents == 1 &&
+ areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+ memcpy(areq_ctx->buff_sg, src,
+@@ -1390,7 +1383,8 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ /* add the src data to the sg_data */
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
+ 0, true, &areq_ctx->mlli_nents);
+- if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
++ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
++ if (rc)
+ goto fail_unmap_din;
+ }
+ /* change the buffer index for the unmap function */
+@@ -1406,7 +1400,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ if (*curr_buff_cnt)
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+
+- return -ENOMEM;
++ return rc;
+ }
+
+ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+@@ -1425,6 +1419,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ struct buffer_array sg_data;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ unsigned int swap_index = 0;
++ int rc = 0;
+ u32 dummy = 0;
+ u32 mapped_nents = 0;
+
+@@ -1469,21 +1464,21 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ }
+
+ if (*curr_buff_cnt) {
+- if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+- &sg_data)) {
+- return -ENOMEM;
+- }
++ rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
++ &sg_data);
++ if (rc)
++ return rc;
+ /* change the buffer index for next operation */
+ swap_index = 1;
+ }
+
+ if (update_data_len > *curr_buff_cnt) {
+- if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+- DMA_TO_DEVICE, &areq_ctx->in_nents,
+- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+- &mapped_nents)) {
++ rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
++ DMA_TO_DEVICE, &areq_ctx->in_nents,
++ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
++ &mapped_nents);
++ if (rc)
+ goto unmap_curr_buff;
+- }
+ if (mapped_nents == 1 &&
+ areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+ /* only one entry in the SG and no previous data */
+@@ -1503,7 +1498,8 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
+ (update_data_len - *curr_buff_cnt), 0, true,
+ &areq_ctx->mlli_nents);
+- if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
++ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
++ if (rc)
+ goto fail_unmap_din;
+ }
+ areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
+@@ -1517,7 +1513,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ if (*curr_buff_cnt)
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+
+- return -ENOMEM;
++ return rc;
+ }
+
+ void cc_unmap_hash_request(struct device *dev, void *ctx,
+--
+2.20.1
+
--- /dev/null
+From 815bf0e3f15554c1a176fa9db323ab022415ad06 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Jan 2020 16:37:55 +0200
+Subject: crypto: ccree - only try to map auth tag if needed
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+[ Upstream commit 504e84abec7a635b861afd8d7f92ecd13eaa2b09 ]
+
+Make sure to only add the size of the auth tag to the source mapping
+for encryption if it is an in-place operation. Failing to do this
+previously caused us to try to map authsize bytes from a NULL
+mapping, crashing if both the cryptlen and assoclen are zero.
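+
+Sketch of the resulting condition (simplified from the hunk below):
+
+        size_to_map = req->cryptlen + areq_ctx->assoclen;
+        /* the tag is produced into dst; it only overlaps the source
+         * mapping when the operation is in place */
+        if (encrypting && req->src == req->dst)
+                size_to_map += authsize;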
+
+Reported-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Cc: stable@vger.kernel.org # v4.19+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/ccree/cc_buffer_mgr.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
+index 77e31191e408a..630020255941a 100644
+--- a/drivers/crypto/ccree/cc_buffer_mgr.c
++++ b/drivers/crypto/ccree/cc_buffer_mgr.c
+@@ -1242,9 +1242,11 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
+ }
+
+ size_to_map = req->cryptlen + areq_ctx->assoclen;
+- if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
++ /* If we do in-place encryption, we also need the auth tag */
++ if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
++ (req->src == req->dst)) {
+ size_to_map += authsize;
+-
++ }
+ if (is_gcm4543)
+ size_to_map += crypto_aead_ivsize(tfm);
+ rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+--
+2.20.1
+
--- /dev/null
+From e918b1f9d5784a54e3ea88a2545cf24643cebf84 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Apr 2019 16:38:54 +0300
+Subject: crypto: ccree - zero out internal struct before use
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+[ Upstream commit 9f31eb6e08cc1b0eb3926eebf4c51467479a7722 ]
+
+We did not zero out the internal struct before use, causing problems
+in some rare error code paths.
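+
+The fix is simply to start each AEAD entry point from a known state
+(sketch mirroring the hunks below):
+
+        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+
+        memset(areq_ctx, 0, sizeof(*areq_ctx));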
+
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/ccree/cc_aead.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
+index aa6b45bc13b98..c9233420fe421 100644
+--- a/drivers/crypto/ccree/cc_aead.c
++++ b/drivers/crypto/ccree/cc_aead.c
+@@ -2058,6 +2058,8 @@ static int cc_aead_encrypt(struct aead_request *req)
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
++ memset(areq_ctx, 0, sizeof(*areq_ctx));
++
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+@@ -2087,6 +2089,8 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
+ goto out;
+ }
+
++ memset(areq_ctx, 0, sizeof(*areq_ctx));
++
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+@@ -2106,6 +2110,8 @@ static int cc_aead_decrypt(struct aead_request *req)
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
++ memset(areq_ctx, 0, sizeof(*areq_ctx));
++
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+@@ -2133,6 +2139,8 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
+ goto out;
+ }
+
++ memset(areq_ctx, 0, sizeof(*areq_ctx));
++
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+@@ -2250,6 +2258,8 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
+ goto out;
+ }
+
++ memset(areq_ctx, 0, sizeof(*areq_ctx));
++
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+@@ -2273,6 +2283,8 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
++ memset(areq_ctx, 0, sizeof(*areq_ctx));
++
+ //plaintext is not encryped with rfc4543
+ areq_ctx->plaintext_authenticate_only = true;
+
+@@ -2305,6 +2317,8 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
+ goto out;
+ }
+
++ memset(areq_ctx, 0, sizeof(*areq_ctx));
++
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+@@ -2328,6 +2342,8 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
++ memset(areq_ctx, 0, sizeof(*areq_ctx));
++
+ //plaintext is not decryped with rfc4543
+ areq_ctx->plaintext_authenticate_only = true;
+
+--
+2.20.1
+
--- /dev/null
+From 17d0a47fda2127bceabb4313c70276640b060682 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 21:22:45 +0800
+Subject: dm zoned: remove duplicate nr_rnd_zones increase in dmz_init_zone()
+
+From: Bob Liu <bob.liu@oracle.com>
+
+[ Upstream commit b8fdd090376a7a46d17db316638fe54b965c2fb0 ]
+
+zmd->nr_rnd_zones was increased twice by mistake. The other place it
+is increased in dmz_init_zone() is the only one needed:
+
+1131 zmd->nr_useable_zones++;
+1132 if (dmz_is_rnd(zone)) {
+1133 zmd->nr_rnd_zones++;
+ ^^^
+Fixes: 3b1a94c88b79 ("dm zoned: drive-managed zoned block device target")
+Cc: stable@vger.kernel.org
+Signed-off-by: Bob Liu <bob.liu@oracle.com>
+Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-zoned-metadata.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
+index 086a870087cff..53eb21343b11f 100644
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -1105,7 +1105,6 @@ static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
+
+ if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) {
+ set_bit(DMZ_RND, &zone->flags);
+- zmd->nr_rnd_zones++;
+ } else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ ||
+ blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) {
+ set_bit(DMZ_SEQ, &zone->flags);
+--
+2.20.1
+
--- /dev/null
+From f55a25a6ba2d0488e64bee8913cf2e6dcba75911 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jan 2020 14:43:20 -0500
+Subject: drm/dp_mst: Fix clearing payload state on topology disable
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lyude Paul <lyude@redhat.com>
+
+[ Upstream commit 8732fe46b20c951493bfc4dba0ad08efdf41de81 ]
+
+The issues caused by:
+
+commit 64e62bdf04ab ("drm/dp_mst: Remove VCPI while disabling topology
+mgr")
+
+Prompted me to take a closer look at how we clear the payload state in
+general when disabling the topology, and it turns out there's actually
+two subtle issues here.
+
+The first is that we're not grabbing &mgr.payload_lock when clearing the
+payloads in drm_dp_mst_topology_mgr_set_mst(). Seeing as the canonical
+lock order is &mgr.payload_lock -> &mgr.lock (because we always want
+&mgr.lock to be the inner-most lock so topology validation always
+works), this makes perfect sense. It also means that -technically- there
+could be a race between someone calling
+drm_dp_mst_topology_mgr_set_mst() to disable the topology and a
+modeset that's modifying the payload state at the same time.
+
+The second is the more obvious issue that Wayne Lin discovered, that
+we're not clearing proposed_payloads when disabling the topology.
+
+I actually can't see any obvious places where the racing caused by the
+first issue would break something, and it could be that some of our
+higher-level locks already prevent this by happenstance, but better safe
+than sorry. So, let's make it so that drm_dp_mst_topology_mgr_set_mst()
+first grabs &mgr.payload_lock followed by &mgr.lock so that we never
+race when modifying the payload state. Then, we also clear
+proposed_payloads to fix the original issue of enabling a new topology
+with a dirty payload state. This doesn't clear any of the drm_dp_vcpi
+structures, but those are getting destroyed along with the ports anyway.
+
+Changes since v1:
+* Use sizeof(mgr->payloads[0])/sizeof(mgr->proposed_vcpis[0]) instead -
+ vsyrjala
+
+Cc: Sean Paul <sean@poorly.run>
+Cc: Wayne Lin <Wayne.Lin@amd.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: stable@vger.kernel.org # v4.4+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200122194321.14953-1-lyude@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_dp_mst_topology.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index b4fd20062bb80..5f508ec321fef 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -2117,6 +2117,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ int ret = 0;
+ struct drm_dp_mst_branch *mstb = NULL;
+
++ mutex_lock(&mgr->payload_lock);
+ mutex_lock(&mgr->lock);
+ if (mst_state == mgr->mst_state)
+ goto out_unlock;
+@@ -2175,7 +2176,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ /* this can fail if the device is gone */
+ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ ret = 0;
+- memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
++ memset(mgr->payloads, 0,
++ mgr->max_payloads * sizeof(mgr->payloads[0]));
++ memset(mgr->proposed_vcpis, 0,
++ mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
+ mgr->payload_mask = 0;
+ set_bit(0, &mgr->payload_mask);
+ mgr->vcpi_mask = 0;
+@@ -2183,6 +2187,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+
+ out_unlock:
+ mutex_unlock(&mgr->lock);
++ mutex_unlock(&mgr->payload_lock);
+ if (mstb)
+ drm_dp_put_mst_branch_device(mstb);
+ return ret;
+--
+2.20.1
+
--- /dev/null
+From 87ce653790139befa381431dc939c8d10bb5c219 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Feb 2020 17:16:31 +0000
+Subject: drm: Remove PageReserved manipulation from drm_pci_alloc
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+[ Upstream commit ea36ec8623f56791c6ff6738d0509b7920f85220 ]
+
+drm_pci_alloc/drm_pci_free are very thin wrappers around the core dma
+facilities, and we have no special reason within the drm layer to behave
+differently. In particular, since
+
+commit de09d31dd38a50fdce106c15abd68432eebbd014
+Author: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Date: Fri Jan 15 16:51:42 2016 -0800
+
+ page-flags: define PG_reserved behavior on compound pages
+
+ As far as I can see there's no users of PG_reserved on compound pages.
+ Let's use PF_NO_COMPOUND here.
+
+it has been illegal to combine GFP_COMP with SetPageReserved, so let's
+stop doing both and leave the dma layer to its own devices.
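+
+After the change the allocation path is essentially just (sketch of the
+hunk below, error handling elided):
+
+        dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
+                                         &dmah->busaddr, GFP_KERNEL);
+        /* no __GFP_COMP, no manual memset, and no SetPageReserved /
+         * ClearPageReserved walk: all of that is left to the dma layer */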
+
+Reported-by: Taketo Kabe
+Bug: https://gitlab.freedesktop.org/drm/intel/issues/1027
+Fixes: de09d31dd38a ("page-flags: define PG_reserved behavior on compound pages")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: <stable@vger.kernel.org> # v4.5+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200202171635.4039044-1-chris@chris-wilson.co.uk
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_pci.c | 25 ++-----------------------
+ 1 file changed, 2 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
+index 896e42a34895d..d89a992829beb 100644
+--- a/drivers/gpu/drm/drm_pci.c
++++ b/drivers/gpu/drm/drm_pci.c
+@@ -46,8 +46,6 @@
+ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
+ {
+ drm_dma_handle_t *dmah;
+- unsigned long addr;
+- size_t sz;
+
+ /* pci_alloc_consistent only guarantees alignment to the smallest
+ * PAGE_SIZE order which is greater than or equal to the requested size.
+@@ -61,22 +59,13 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
+ return NULL;
+
+ dmah->size = size;
+- dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
++ dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL);
+
+ if (dmah->vaddr == NULL) {
+ kfree(dmah);
+ return NULL;
+ }
+
+- memset(dmah->vaddr, 0, size);
+-
+- /* XXX - Is virt_to_page() legal for consistent mem? */
+- /* Reserve */
+- for (addr = (unsigned long)dmah->vaddr, sz = size;
+- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+- SetPageReserved(virt_to_page((void *)addr));
+- }
+-
+ return dmah;
+ }
+
+@@ -89,19 +78,9 @@ EXPORT_SYMBOL(drm_pci_alloc);
+ */
+ void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+ {
+- unsigned long addr;
+- size_t sz;
+-
+- if (dmah->vaddr) {
+- /* XXX - Is virt_to_page() legal for consistent mem? */
+- /* Unreserve */
+- for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
+- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+- ClearPageReserved(virt_to_page((void *)addr));
+- }
++ if (dmah->vaddr)
+ dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+ dmah->busaddr);
+- }
+ }
+
+ /**
+--
+2.20.1
+
--- /dev/null
+From a1c440c1a8378145109a164169253114fd8fd27d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Jul 2019 23:30:34 +0200
+Subject: etnaviv: perfmon: fix total and idle HI cycles readout
+
+From: Christian Gmeiner <christian.gmeiner@gmail.com>
+
+[ Upstream commit 15ff4a7b584163b12b118a2c381529f05ff3a94d ]
+
+As seen at CodeAurora's linux-imx git repo in imx_4.19.35_1.0.0 branch.
+
+Signed-off-by: Christian Gmeiner <christian.gmeiner@gmail.com>
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/etnaviv/etnaviv_perfmon.c | 43 +++++++++++++++++------
+ 1 file changed, 32 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+index f86cb66a84b9c..3ce77cbad4ae3 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+@@ -37,13 +37,6 @@ struct etnaviv_pm_domain_meta {
+ u32 nr_domains;
+ };
+
+-static u32 simple_reg_read(struct etnaviv_gpu *gpu,
+- const struct etnaviv_pm_domain *domain,
+- const struct etnaviv_pm_signal *signal)
+-{
+- return gpu_read(gpu, signal->data);
+-}
+-
+ static u32 perf_reg_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+@@ -77,6 +70,34 @@ static u32 pipe_reg_read(struct etnaviv_gpu *gpu,
+ return value;
+ }
+
++static u32 hi_total_cycle_read(struct etnaviv_gpu *gpu,
++ const struct etnaviv_pm_domain *domain,
++ const struct etnaviv_pm_signal *signal)
++{
++ u32 reg = VIVS_HI_PROFILE_TOTAL_CYCLES;
++
++ if (gpu->identity.model == chipModel_GC880 ||
++ gpu->identity.model == chipModel_GC2000 ||
++ gpu->identity.model == chipModel_GC2100)
++ reg = VIVS_MC_PROFILE_CYCLE_COUNTER;
++
++ return gpu_read(gpu, reg);
++}
++
++static u32 hi_total_idle_cycle_read(struct etnaviv_gpu *gpu,
++ const struct etnaviv_pm_domain *domain,
++ const struct etnaviv_pm_signal *signal)
++{
++ u32 reg = VIVS_HI_PROFILE_IDLE_CYCLES;
++
++ if (gpu->identity.model == chipModel_GC880 ||
++ gpu->identity.model == chipModel_GC2000 ||
++ gpu->identity.model == chipModel_GC2100)
++ reg = VIVS_HI_PROFILE_TOTAL_CYCLES;
++
++ return gpu_read(gpu, reg);
++}
++
+ static const struct etnaviv_pm_domain doms_3d[] = {
+ {
+ .name = "HI",
+@@ -86,13 +107,13 @@ static const struct etnaviv_pm_domain doms_3d[] = {
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "TOTAL_CYCLES",
+- VIVS_HI_PROFILE_TOTAL_CYCLES,
+- &simple_reg_read
++ 0,
++ &hi_total_cycle_read
+ },
+ {
+ "IDLE_CYCLES",
+- VIVS_HI_PROFILE_IDLE_CYCLES,
+- &simple_reg_read
++ 0,
++ &hi_total_idle_cycle_read
+ },
+ {
+ "AXI_CYCLES_READ_REQUEST_STALLED",
+--
+2.20.1
+
--- /dev/null
+From c88904ba8f81998e595913b86b21ec62acd397fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 16:34:48 +0900
+Subject: ftrace/kprobe: Show the maxactive number on kprobe_events
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+[ Upstream commit 6a13a0d7b4d1171ef9b80ad69abc37e1daa941b3 ]
+
+Show the maxactive parameter on kprobe_events.
+This allows the user to save the current configuration and
+restore it without losing the maxactive parameter.
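+
+For example (probe name and symbol are illustrative; run from the
+tracefs directory), a kretprobe defined with a maxactive of 16:
+
+        # echo 'r16:myretprobe do_sys_open' > kprobe_events
+        # cat kprobe_events
+        r16:kprobes/myretprobe do_sys_open
+
+Before this patch the read-back line started with plain "r:", silently
+dropping the 16.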
+
+Link: http://lkml.kernel.org/r/4762764a-6df7-bc93-ed60-e336146dce1f@gmail.com
+Link: http://lkml.kernel.org/r/158503528846.22706.5549974121212526020.stgit@devnote2
+
+Cc: stable@vger.kernel.org
+Fixes: 696ced4fb1d76 ("tracing/kprobes: expose maxactive for kretprobe in kprobe_events")
+Reported-by: Taeung Song <treeze.taeung@gmail.com>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_kprobe.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index c61b2b0a99e9c..65b4e28ff425f 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -975,6 +975,8 @@ static int probes_seq_show(struct seq_file *m, void *v)
+ int i;
+
+ seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
++ if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
++ seq_printf(m, "%d", tk->rp.maxactive);
+ seq_printf(m, ":%s/%s", tk->tp.call.class->system,
+ trace_event_name(&tk->tp.call));
+
+--
+2.20.1
+
--- /dev/null
+From 5eb6aae6acad5299b740847df8d4cd9bf92f1b9c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Sep 2018 23:43:37 -0700
+Subject: misc: echo: Remove unnecessary parentheses and simplify check for
+ zero
+
+From: Nathan Chancellor <natechancellor@gmail.com>
+
+[ Upstream commit 85dc2c65e6c975baaf36ea30f2ccc0a36a8c8add ]
+
+Clang warns when multiple pairs of parentheses are used for a single
+conditional statement.
+
+drivers/misc/echo/echo.c:384:27: warning: equality comparison with
+extraneous parentheses [-Wparentheses-equality]
+ if ((ec->nonupdate_dwell == 0)) {
+ ~~~~~~~~~~~~~~~~~~~~^~~~
+drivers/misc/echo/echo.c:384:27: note: remove extraneous parentheses
+around the comparison to silence this warning
+ if ((ec->nonupdate_dwell == 0)) {
+ ~ ^ ~
+drivers/misc/echo/echo.c:384:27: note: use '=' to turn this equality
+comparison into an assignment
+ if ((ec->nonupdate_dwell == 0)) {
+ ^~
+ =
+1 warning generated.
+
+Remove them and, while we're at it, simplify the zero check, as '!var'
+is more commonly used than 'var == 0'.
+
+Reported-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/misc/echo/echo.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c
+index 8a5adc0d2e887..3ebe5d75ad6a2 100644
+--- a/drivers/misc/echo/echo.c
++++ b/drivers/misc/echo/echo.c
+@@ -381,7 +381,7 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
+ */
+ ec->factor = 0;
+ ec->shift = 0;
+- if ((ec->nonupdate_dwell == 0)) {
++ if (!ec->nonupdate_dwell) {
+ int p, logp, shift;
+
+ /* Determine:
+--
+2.20.1
+
--- /dev/null
+From bcb46d40ad0fd164a89109b8e9b6567003d8c4dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jan 2020 11:19:25 +0000
+Subject: powerpc/fsl_booke: Avoid creating duplicate tlb1 entry
+
+From: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+
+[ Upstream commit aa4113340ae6c2811e046f08c2bc21011d20a072 ]
+
+In the current implementation, the call to loadcam_multi() is wrapped
+between switch_to_as1() and restore_to_as0() calls so, when it tries
+to create its own temporary AS=1 TLB1 entry, it ends up duplicating
+the existing one created by switch_to_as1(). Add a check to skip
+creating the temporary entry if already running in AS=1.
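+
+Expressed as C-like pseudocode (the actual change below is assembly):
+
+        if (mfmsr() & MSR_IS)            /* already running in AS=1? */
+                goto skip_temp_entry;    /* reuse switch_to_as1()'s entry */
+        /* otherwise set up the temporary AS=1 TLB1 entry as before */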
+
+Fixes: d9e1831a4202 ("powerpc/85xx: Load all early TLB entries at once")
+Cc: stable@vger.kernel.org # v4.4+
+Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+Acked-by: Scott Wood <oss@buserror.net>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200123111914.2565-1-laurentiu.tudor@nxp.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/mm/tlb_nohash_low.S | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
+index e066a658acac6..56f58a362ea56 100644
+--- a/arch/powerpc/mm/tlb_nohash_low.S
++++ b/arch/powerpc/mm/tlb_nohash_low.S
+@@ -402,7 +402,7 @@ _GLOBAL(set_context)
+ * extern void loadcam_entry(unsigned int index)
+ *
+ * Load TLBCAM[index] entry in to the L2 CAM MMU
+- * Must preserve r7, r8, r9, and r10
++ * Must preserve r7, r8, r9, r10 and r11
+ */
+ _GLOBAL(loadcam_entry)
+ mflr r5
+@@ -438,6 +438,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
+ */
+ _GLOBAL(loadcam_multi)
+ mflr r8
++ /* Don't switch to AS=1 if already there */
++ mfmsr r11
++ andi. r11,r11,MSR_IS
++ bne 10f
+
+ /*
+ * Set up temporary TLB entry that is the same as what we're
+@@ -463,6 +467,7 @@ _GLOBAL(loadcam_multi)
+ mtmsr r6
+ isync
+
++10:
+ mr r9,r3
+ add r10,r3,r4
+ 2: bl loadcam_entry
+@@ -471,6 +476,10 @@ _GLOBAL(loadcam_multi)
+ mr r3,r9
+ blt 2b
+
++ /* Don't return to AS=0 if we were in AS=1 at function start */
++ andi. r11,r11,MSR_IS
++ bne 3f
++
+ /* Return to AS=0 and clear the temporary entry */
+ mfmsr r6
+ rlwinm. r6,r6,0,~(MSR_IS|MSR_DS)
+@@ -486,6 +495,7 @@ _GLOBAL(loadcam_multi)
+ tlbwe
+ isync
+
++3:
+ mtlr r8
+ blr
+ #endif
+--
+2.20.1
+
--- /dev/null
+From 85c49aaf82e1c3d8d6410abbfed2d3543d144f1d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Apr 2020 22:08:22 -0400
+Subject: Revert "drm/dp_mst: Remove VCPI while disabling topology mgr"
+
+[ Upstream commit a86675968e2300fb567994459da3dbc4cd1b322a ]
+
+This reverts commit 64e62bdf04ab8529f45ed0a85122c703035dec3a.
+
+This commit ends up causing some lockdep splats due to trying to grab the
+payload lock while holding the mgr's lock:
+
+[ 54.010099]
+[ 54.011765] ======================================================
+[ 54.018670] WARNING: possible circular locking dependency detected
+[ 54.025577] 5.5.0-rc6-02274-g77381c23ee63 #47 Not tainted
+[ 54.031610] ------------------------------------------------------
+[ 54.038516] kworker/1:6/1040 is trying to acquire lock:
+[ 54.044354] ffff888272af3228 (&mgr->payload_lock){+.+.}, at:
+drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.054957]
+[ 54.054957] but task is already holding lock:
+[ 54.061473] ffff888272af3060 (&mgr->lock){+.+.}, at:
+drm_dp_mst_topology_mgr_set_mst+0x3c/0x2e4
+[ 54.071193]
+[ 54.071193] which lock already depends on the new lock.
+[ 54.071193]
+[ 54.080334]
+[ 54.080334] the existing dependency chain (in reverse order) is:
+[ 54.088697]
+[ 54.088697] -> #1 (&mgr->lock){+.+.}:
+[ 54.094440] __mutex_lock+0xc3/0x498
+[ 54.099015] drm_dp_mst_topology_get_port_validated+0x25/0x80
+[ 54.106018] drm_dp_update_payload_part1+0xa2/0x2e2
+[ 54.112051] intel_mst_pre_enable_dp+0x144/0x18f
+[ 54.117791] intel_encoders_pre_enable+0x63/0x70
+[ 54.123532] hsw_crtc_enable+0xa1/0x722
+[ 54.128396] intel_update_crtc+0x50/0x194
+[ 54.133455] skl_commit_modeset_enables+0x40c/0x540
+[ 54.139485] intel_atomic_commit_tail+0x5f7/0x130d
+[ 54.145418] intel_atomic_commit+0x2c8/0x2d8
+[ 54.150770] drm_atomic_helper_set_config+0x5a/0x70
+[ 54.156801] drm_mode_setcrtc+0x2ab/0x833
+[ 54.161862] drm_ioctl+0x2e5/0x424
+[ 54.166242] vfs_ioctl+0x21/0x2f
+[ 54.170426] do_vfs_ioctl+0x5fb/0x61e
+[ 54.175096] ksys_ioctl+0x55/0x75
+[ 54.179377] __x64_sys_ioctl+0x1a/0x1e
+[ 54.184146] do_syscall_64+0x5c/0x6d
+[ 54.188721] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+[ 54.194946]
+[ 54.194946] -> #0 (&mgr->payload_lock){+.+.}:
+[ 54.201463]
+[ 54.201463] other info that might help us debug this:
+[ 54.201463]
+[ 54.210410] Possible unsafe locking scenario:
+[ 54.210410]
+[ 54.217025] CPU0 CPU1
+[ 54.222082] ---- ----
+[ 54.227138] lock(&mgr->lock);
+[ 54.230643] lock(&mgr->payload_lock);
+[ 54.237742] lock(&mgr->lock);
+[ 54.244062] lock(&mgr->payload_lock);
+[ 54.248346]
+[ 54.248346] *** DEADLOCK ***
+[ 54.248346]
+[ 54.254959] 7 locks held by kworker/1:6/1040:
+[ 54.259822] #0: ffff888275c4f528 ((wq_completion)events){+.+.},
+at: worker_thread+0x455/0x6e2
+[ 54.269451] #1: ffffc9000119beb0
+((work_completion)(&(&dev_priv->hotplug.hotplug_work)->work)){+.+.},
+at: worker_thread+0x455/0x6e2
+[ 54.282768] #2: ffff888272a403f0 (&dev->mode_config.mutex){+.+.},
+at: i915_hotplug_work_func+0x4b/0x2be
+[ 54.293368] #3: ffffffff824fc6c0 (drm_connector_list_iter){.+.+},
+at: i915_hotplug_work_func+0x17e/0x2be
+[ 54.304061] #4: ffffc9000119bc58 (crtc_ww_class_acquire){+.+.},
+at: drm_helper_probe_detect_ctx+0x40/0xfd
+[ 54.314855] #5: ffff888272a40470 (crtc_ww_class_mutex){+.+.}, at:
+drm_modeset_lock+0x74/0xe2
+[ 54.324385] #6: ffff888272af3060 (&mgr->lock){+.+.}, at:
+drm_dp_mst_topology_mgr_set_mst+0x3c/0x2e4
+[ 54.334597]
+[ 54.334597] stack backtrace:
+[ 54.339464] CPU: 1 PID: 1040 Comm: kworker/1:6 Not tainted
+5.5.0-rc6-02274-g77381c23ee63 #47
+[ 54.348893] Hardware name: Google Fizz/Fizz, BIOS
+Google_Fizz.10139.39.0 01/04/2018
+[ 54.357451] Workqueue: events i915_hotplug_work_func
+[ 54.362995] Call Trace:
+[ 54.365724] dump_stack+0x71/0x9c
+[ 54.369427] check_noncircular+0x91/0xbc
+[ 54.373809] ? __lock_acquire+0xc9e/0xf66
+[ 54.378286] ? __lock_acquire+0xc9e/0xf66
+[ 54.382763] ? lock_acquire+0x175/0x1ac
+[ 54.387048] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.393177] ? __mutex_lock+0xc3/0x498
+[ 54.397362] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.403492] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.409620] ? drm_dp_dpcd_access+0xd9/0x101
+[ 54.414390] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.420517] ? drm_dp_mst_topology_mgr_set_mst+0x218/0x2e4
+[ 54.426645] ? intel_digital_port_connected+0x34d/0x35c
+[ 54.432482] ? intel_dp_detect+0x227/0x44e
+[ 54.437056] ? ww_mutex_lock+0x49/0x9a
+[ 54.441242] ? drm_helper_probe_detect_ctx+0x75/0xfd
+[ 54.446789] ? intel_encoder_hotplug+0x4b/0x97
+[ 54.451752] ? intel_ddi_hotplug+0x61/0x2e0
+[ 54.456423] ? mark_held_locks+0x53/0x68
+[ 54.460803] ? _raw_spin_unlock_irqrestore+0x3a/0x51
+[ 54.466347] ? lockdep_hardirqs_on+0x187/0x1a4
+[ 54.471310] ? drm_connector_list_iter_next+0x89/0x9a
+[ 54.476953] ? i915_hotplug_work_func+0x206/0x2be
+[ 54.482208] ? worker_thread+0x4d5/0x6e2
+[ 54.486587] ? worker_thread+0x455/0x6e2
+[ 54.490966] ? queue_work_on+0x64/0x64
+[ 54.495151] ? kthread+0x1e9/0x1f1
+[ 54.498946] ? queue_work_on+0x64/0x64
+[ 54.503130] ? kthread_unpark+0x5e/0x5e
+[ 54.507413] ? ret_from_fork+0x3a/0x50
+
+The proper fix for this is probably to clean up the VCPI allocations when we're
+enabling the topology, or on the first payload allocation. For now though,
+let's just revert.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Fixes: 64e62bdf04ab ("drm/dp_mst: Remove VCPI while disabling topology mgr")
+Cc: Sean Paul <sean@poorly.run>
+Cc: Wayne Lin <Wayne.Lin@amd.com>
+Reviewed-by: Sean Paul <sean@poorly.run>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200117205149.97262-1-lyude@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_dp_mst_topology.c | 12 ------------
+ 1 file changed, 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 7c3c323773d3d..b4fd20062bb80 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -2115,7 +2115,6 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
+ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
+ {
+ int ret = 0;
+- int i = 0;
+ struct drm_dp_mst_branch *mstb = NULL;
+
+ mutex_lock(&mgr->lock);
+@@ -2176,21 +2175,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ /* this can fail if the device is gone */
+ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ ret = 0;
+- mutex_lock(&mgr->payload_lock);
+ memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+ mgr->payload_mask = 0;
+ set_bit(0, &mgr->payload_mask);
+- for (i = 0; i < mgr->max_payloads; i++) {
+- struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+-
+- if (vcpi) {
+- vcpi->vcpi = 0;
+- vcpi->num_slots = 0;
+- }
+- mgr->proposed_vcpis[i] = NULL;
+- }
+ mgr->vcpi_mask = 0;
+- mutex_unlock(&mgr->payload_lock);
+ }
+
+ out_unlock:
+--
+2.20.1
+
powerpc-add-attributes-for-setjmp-longjmp.patch
powerpc-make-setjmp-longjmp-signature-standard.patch
btrfs-use-nofs-allocations-for-running-delayed-items.patch
+dm-zoned-remove-duplicate-nr_rnd_zones-increase-in-d.patch
+crypto-caam-update-xts-sector-size-for-large-input-l.patch
+crypto-ccree-improve-error-handling.patch
+crypto-ccree-zero-out-internal-struct-before-use.patch
+crypto-ccree-don-t-mangle-the-request-assoclen.patch
+crypto-ccree-dec-auth-tag-size-from-cryptlen-map.patch
+crypto-ccree-only-try-to-map-auth-tag-if-needed.patch
+revert-drm-dp_mst-remove-vcpi-while-disabling-topolo.patch
+drm-dp_mst-fix-clearing-payload-state-on-topology-di.patch
+drm-remove-pagereserved-manipulation-from-drm_pci_al.patch
+ftrace-kprobe-show-the-maxactive-number-on-kprobe_ev.patch
+powerpc-fsl_booke-avoid-creating-duplicate-tlb1-entr.patch
+misc-echo-remove-unnecessary-parentheses-and-simplif.patch
+etnaviv-perfmon-fix-total-and-idle-hi-cyleces-readou.patch