--- /dev/null
+From 1d4aaf16defa86d2665ae7db0259d6cb07e2091f Mon Sep 17 00:00:00 2001
+From: Elena Petrova <lenaptr@google.com>
+Date: Tue, 28 May 2019 13:41:52 +0100
+Subject: crypto: arm64/sha1-ce - correct digest for empty data in finup
+
+From: Elena Petrova <lenaptr@google.com>
+
+commit 1d4aaf16defa86d2665ae7db0259d6cb07e2091f upstream.
+
+The sha1-ce finup implementation for ARM64 produces wrong digest
+for empty input (len=0). Expected: da39a3ee..., result: 67452301...
+(initial value of SHA internal state). The error is in sha1_ce_finup:
+for empty data `finalize` will be 1, so the code is relying on
+sha1_ce_transform to make the final round. However, in
+sha1_base_do_update, the block function will not be called when
+len == 0.
+
+Fix it by setting finalize to 0 if data is empty.
+
+Fixes: 07eb54d306f4 ("crypto: arm64/sha1-ce - move SHA-1 ARMv8 implementation to base layer")
+Cc: stable@vger.kernel.org
+Signed-off-by: Elena Petrova <lenaptr@google.com>
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/crypto/sha1-ce-glue.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/crypto/sha1-ce-glue.c
++++ b/arch/arm64/crypto/sha1-ce-glue.c
+@@ -54,7 +54,7 @@ static int sha1_ce_finup(struct shash_de
+ unsigned int len, u8 *out)
+ {
+ struct sha1_ce_state *sctx = shash_desc_ctx(desc);
+- bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
++ bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
+
+ if (!may_use_simd())
+ return crypto_sha1_finup(desc, data, len, out);
--- /dev/null
+From 6bd934de1e393466b319d29c4427598fda096c57 Mon Sep 17 00:00:00 2001
+From: Elena Petrova <lenaptr@google.com>
+Date: Tue, 28 May 2019 15:35:06 +0100
+Subject: crypto: arm64/sha2-ce - correct digest for empty data in finup
+
+From: Elena Petrova <lenaptr@google.com>
+
+commit 6bd934de1e393466b319d29c4427598fda096c57 upstream.
+
+The sha256-ce finup implementation for ARM64 produces wrong digest
+for empty input (len=0). Expected: the actual digest, result: initial
+value of SHA internal state. The error is in sha256_ce_finup:
+for empty data `finalize` will be 1, so the code is relying on
+sha2_ce_transform to make the final round. However, in
+sha256_base_do_update, the block function will not be called when
+len == 0.
+
+Fix it by setting finalize to 0 if data is empty.
+
+Fixes: 03802f6a80b3a ("crypto: arm64/sha2-ce - move SHA-224/256 ARMv8 implementation to base layer")
+Cc: stable@vger.kernel.org
+Signed-off-by: Elena Petrova <lenaptr@google.com>
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/crypto/sha2-ce-glue.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/crypto/sha2-ce-glue.c
++++ b/arch/arm64/crypto/sha2-ce-glue.c
+@@ -59,7 +59,7 @@ static int sha256_ce_finup(struct shash_
+ unsigned int len, u8 *out)
+ {
+ struct sha256_ce_state *sctx = shash_desc_ctx(desc);
+- bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
++ bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
+
+ if (!may_use_simd()) {
+ if (len)
--- /dev/null
+From ed527b13d800dd515a9e6c582f0a73eca65b2e1b Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Fri, 31 May 2019 10:13:06 +0200
+Subject: crypto: caam - limit output IV to CBC to work around CTR mode DMA issue
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit ed527b13d800dd515a9e6c582f0a73eca65b2e1b upstream.
+
+The CAAM driver currently violates an undocumented and slightly
+controversial requirement imposed by the crypto stack that a buffer
+referred to by the request structure via its virtual address may not
+be modified while any scatterlists passed via the same request
+structure are mapped for inbound DMA.
+
+This may result in errors like
+
+ alg: aead: decryption failed on test 1 for gcm_base(ctr-aes-caam,ghash-generic): ret=74
+ alg: aead: Failed to load transform for gcm(aes): -2
+
+on non-cache coherent systems, due to the fact that the GCM driver
+passes an IV buffer by virtual address which shares a cacheline with
+the auth_tag buffer passed via a scatterlist, resulting in corruption
+of the auth_tag when the IV is updated while the DMA mapping is live.
+
+Since the IV that is returned to the caller is only valid for CBC mode,
+and given that the in-kernel users of CBC (such as CTS) don't trigger the
+same issue as the GCM driver, let's just disable the output IV generation
+for all modes except CBC for the time being.
+
+Fixes: 854b06f76879 ("crypto: caam - properly set IV after {en,de}crypt")
+Cc: Horia Geanta <horia.geanta@nxp.com>
+Cc: Iuliana Prodan <iuliana.prodan@nxp.com>
+Reported-by: Sascha Hauer <s.hauer@pengutronix.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Horia Geanta <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -965,6 +965,7 @@ static void skcipher_encrypt_done(struct
+ struct skcipher_request *req = context;
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ #ifdef DEBUG
+@@ -989,9 +990,9 @@ static void skcipher_encrypt_done(struct
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+- * ciphertext block. This is used e.g. by the CTS mode.
++ * ciphertext block when running in CBC mode.
+ */
+- if (ivsize)
++ if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
+ ivsize, ivsize, 0);
+
+@@ -1809,9 +1810,9 @@ static int skcipher_decrypt(struct skcip
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+- * ciphertext block.
++ * ciphertext block when running in CBC mode.
+ */
+- if (ivsize)
++ if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
+ ivsize, ivsize, 0);
+
--- /dev/null
+From 538a5a072e6ef04377b180ee9b3ce5bae0a85da4 Mon Sep 17 00:00:00 2001
+From: Cfir Cohen <cfir@google.com>
+Date: Tue, 2 Jul 2019 10:32:56 -0700
+Subject: crypto: ccp/gcm - use const time tag comparison.
+
+From: Cfir Cohen <cfir@google.com>
+
+commit 538a5a072e6ef04377b180ee9b3ce5bae0a85da4 upstream.
+
+Avoid leaking GCM tag through timing side channel.
+
+Fixes: 36cf515b9bbe ("crypto: ccp - Enable support for AES GCM on v5 CCPs")
+Cc: <stable@vger.kernel.org> # v4.12+
+Signed-off-by: Cfir Cohen <cfir@google.com>
+Acked-by: Gary R Hook <ghook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/ccp-ops.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -853,7 +853,8 @@ static int ccp_run_aes_gcm_cmd(struct cc
+ if (ret)
+ goto e_tag;
+
+- ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
++ ret = crypto_memneq(tag.address, final_wa.address,
++ AES_BLOCK_SIZE) ? -EBADMSG : 0;
+ ccp_dm_free(&tag);
+ }
+
--- /dev/null
+From 20e833dc36355ed642d00067641a679c618303fa Mon Sep 17 00:00:00 2001
+From: "Hook, Gary" <Gary.Hook@amd.com>
+Date: Wed, 10 Jul 2019 00:09:22 +0000
+Subject: crypto: ccp - memset structure fields to zero before reuse
+
+From: Hook, Gary <Gary.Hook@amd.com>
+
+commit 20e833dc36355ed642d00067641a679c618303fa upstream.
+
+The AES GCM function reuses an 'op' data structure, which members
+contain values that must be cleared for each (re)use.
+
+This fix resolves a crypto self-test failure:
+alg: aead: gcm-aes-ccp encryption test failed (wrong result) on test vector 2, cfg="two even aligned splits"
+
+Fixes: 36cf515b9bbe ("crypto: ccp - Enable support for AES GCM on v5 CCPs")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/ccp-ops.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -625,6 +625,7 @@ static int ccp_run_aes_gcm_cmd(struct cc
+
+ unsigned long long *final;
+ unsigned int dm_offset;
++ unsigned int jobid;
+ unsigned int ilen;
+ bool in_place = true; /* Default value */
+ int ret;
+@@ -663,9 +664,11 @@ static int ccp_run_aes_gcm_cmd(struct cc
+ p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
+ }
+
++ jobid = CCP_NEW_JOBID(cmd_q->ccp);
++
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+- op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
++ op.jobid = jobid;
+ op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ op.init = 1;
+@@ -816,6 +819,13 @@ static int ccp_run_aes_gcm_cmd(struct cc
+ final[0] = cpu_to_be64(aes->aad_len * 8);
+ final[1] = cpu_to_be64(ilen * 8);
+
++ memset(&op, 0, sizeof(op));
++ op.cmd_q = cmd_q;
++ op.jobid = jobid;
++ op.sb_key = cmd_q->sb_key; /* Pre-allocated */
++ op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
++ op.init = 1;
++ op.u.aes.type = aes->type;
+ op.u.aes.mode = CCP_AES_MODE_GHASH;
+ op.u.aes.action = CCP_AES_GHASHFINAL;
+ op.src.type = CCP_MEMTYPE_SYSTEM;
--- /dev/null
+From 52393d617af7b554f03531e6756facf2ea687d2e Mon Sep 17 00:00:00 2001
+From: "Hook, Gary" <Gary.Hook@amd.com>
+Date: Thu, 27 Jun 2019 16:16:23 +0000
+Subject: crypto: ccp - Validate the error value used to index error messages
+
+From: Hook, Gary <Gary.Hook@amd.com>
+
+commit 52393d617af7b554f03531e6756facf2ea687d2e upstream.
+
+The error code read from the queue status register is only 6 bits wide,
+but we need to verify its value is within range before indexing the error
+messages.
+
+Fixes: 81422badb3907 ("crypto: ccp - Make syslog errors human-readable")
+Cc: <stable@vger.kernel.org>
+Reported-by: Cfir Cohen <cfir@google.com>
+Signed-off-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/ccp-dev.c | 96 ++++++++++++++++++++++---------------------
+ drivers/crypto/ccp/ccp-dev.h | 2
+ 2 files changed, 52 insertions(+), 46 deletions(-)
+
+--- a/drivers/crypto/ccp/ccp-dev.c
++++ b/drivers/crypto/ccp/ccp-dev.c
+@@ -35,56 +35,62 @@ struct ccp_tasklet_data {
+ };
+
+ /* Human-readable error strings */
++#define CCP_MAX_ERROR_CODE 64
+ static char *ccp_error_codes[] = {
+ "",
+- "ERR 01: ILLEGAL_ENGINE",
+- "ERR 02: ILLEGAL_KEY_ID",
+- "ERR 03: ILLEGAL_FUNCTION_TYPE",
+- "ERR 04: ILLEGAL_FUNCTION_MODE",
+- "ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
+- "ERR 06: ILLEGAL_FUNCTION_SIZE",
+- "ERR 07: Zlib_MISSING_INIT_EOM",
+- "ERR 08: ILLEGAL_FUNCTION_RSVD",
+- "ERR 09: ILLEGAL_BUFFER_LENGTH",
+- "ERR 10: VLSB_FAULT",
+- "ERR 11: ILLEGAL_MEM_ADDR",
+- "ERR 12: ILLEGAL_MEM_SEL",
+- "ERR 13: ILLEGAL_CONTEXT_ID",
+- "ERR 14: ILLEGAL_KEY_ADDR",
+- "ERR 15: 0xF Reserved",
+- "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
+- "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
+- "ERR 18: CMD_TIMEOUT",
+- "ERR 19: IDMA0_AXI_SLVERR",
+- "ERR 20: IDMA0_AXI_DECERR",
+- "ERR 21: 0x15 Reserved",
+- "ERR 22: IDMA1_AXI_SLAVE_FAULT",
+- "ERR 23: IDMA1_AIXI_DECERR",
+- "ERR 24: 0x18 Reserved",
+- "ERR 25: ZLIBVHB_AXI_SLVERR",
+- "ERR 26: ZLIBVHB_AXI_DECERR",
+- "ERR 27: 0x1B Reserved",
+- "ERR 27: ZLIB_UNEXPECTED_EOM",
+- "ERR 27: ZLIB_EXTRA_DATA",
+- "ERR 30: ZLIB_BTYPE",
+- "ERR 31: ZLIB_UNDEFINED_SYMBOL",
+- "ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
+- "ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
+- "ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
+- "ERR 35: ZLIB_UNCOMPRESSED_LEN",
+- "ERR 36: ZLIB_LIMIT_REACHED",
+- "ERR 37: ZLIB_CHECKSUM_MISMATCH0",
+- "ERR 38: ODMA0_AXI_SLVERR",
+- "ERR 39: ODMA0_AXI_DECERR",
+- "ERR 40: 0x28 Reserved",
+- "ERR 41: ODMA1_AXI_SLVERR",
+- "ERR 42: ODMA1_AXI_DECERR",
+- "ERR 43: LSB_PARITY_ERR",
++ "ILLEGAL_ENGINE",
++ "ILLEGAL_KEY_ID",
++ "ILLEGAL_FUNCTION_TYPE",
++ "ILLEGAL_FUNCTION_MODE",
++ "ILLEGAL_FUNCTION_ENCRYPT",
++ "ILLEGAL_FUNCTION_SIZE",
++ "Zlib_MISSING_INIT_EOM",
++ "ILLEGAL_FUNCTION_RSVD",
++ "ILLEGAL_BUFFER_LENGTH",
++ "VLSB_FAULT",
++ "ILLEGAL_MEM_ADDR",
++ "ILLEGAL_MEM_SEL",
++ "ILLEGAL_CONTEXT_ID",
++ "ILLEGAL_KEY_ADDR",
++ "0xF Reserved",
++ "Zlib_ILLEGAL_MULTI_QUEUE",
++ "Zlib_ILLEGAL_JOBID_CHANGE",
++ "CMD_TIMEOUT",
++ "IDMA0_AXI_SLVERR",
++ "IDMA0_AXI_DECERR",
++ "0x15 Reserved",
++ "IDMA1_AXI_SLAVE_FAULT",
++ "IDMA1_AIXI_DECERR",
++ "0x18 Reserved",
++ "ZLIBVHB_AXI_SLVERR",
++ "ZLIBVHB_AXI_DECERR",
++ "0x1B Reserved",
++ "ZLIB_UNEXPECTED_EOM",
++ "ZLIB_EXTRA_DATA",
++ "ZLIB_BTYPE",
++ "ZLIB_UNDEFINED_SYMBOL",
++ "ZLIB_UNDEFINED_DISTANCE_S",
++ "ZLIB_CODE_LENGTH_SYMBOL",
++ "ZLIB _VHB_ILLEGAL_FETCH",
++ "ZLIB_UNCOMPRESSED_LEN",
++ "ZLIB_LIMIT_REACHED",
++ "ZLIB_CHECKSUM_MISMATCH0",
++ "ODMA0_AXI_SLVERR",
++ "ODMA0_AXI_DECERR",
++ "0x28 Reserved",
++ "ODMA1_AXI_SLVERR",
++ "ODMA1_AXI_DECERR",
+ };
+
+-void ccp_log_error(struct ccp_device *d, int e)
++void ccp_log_error(struct ccp_device *d, unsigned int e)
+ {
+- dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
++ if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
++ return;
++
++ if (e < ARRAY_SIZE(ccp_error_codes))
++ dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
++ else
++ dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
+ }
+
+ /* List of CCPs, CCP count, read-write access lock, and access functions
+--- a/drivers/crypto/ccp/ccp-dev.h
++++ b/drivers/crypto/ccp/ccp-dev.h
+@@ -632,7 +632,7 @@ struct ccp5_desc {
+ void ccp_add_device(struct ccp_device *ccp);
+ void ccp_del_device(struct ccp_device *ccp);
+
+-extern void ccp_log_error(struct ccp_device *, int);
++extern void ccp_log_error(struct ccp_device *, unsigned int);
+
+ struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
+ bool ccp_queues_suspended(struct ccp_device *ccp);
--- /dev/null
+From 7545b6c2087f4ef0287c8c9b7eba6a728c67ff8e Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Fri, 31 May 2019 11:12:30 -0700
+Subject: crypto: chacha20poly1305 - fix atomic sleep when using async algorithm
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 7545b6c2087f4ef0287c8c9b7eba6a728c67ff8e upstream.
+
+Clear the CRYPTO_TFM_REQ_MAY_SLEEP flag when the chacha20poly1305
+operation is being continued from an async completion callback, since
+sleeping may not be allowed in that context.
+
+This is basically the same bug that was recently fixed in the xts and
+lrw templates. But, it's always been broken in chacha20poly1305 too.
+This was found using syzkaller in combination with the updated crypto
+self-tests which actually test the MAY_SLEEP flag now.
+
+Reproducer:
+
+ python -c 'import socket; socket.socket(socket.AF_ALG, 5, 0).bind(
+ ("aead", "rfc7539(cryptd(chacha20-generic),poly1305-generic)"))'
+
+Kernel output:
+
+ BUG: sleeping function called from invalid context at include/crypto/algapi.h:426
+ in_atomic(): 1, irqs_disabled(): 0, pid: 1001, name: kworker/2:2
+ [...]
+ CPU: 2 PID: 1001 Comm: kworker/2:2 Not tainted 5.2.0-rc2 #5
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-20181126_142135-anatol 04/01/2014
+ Workqueue: crypto cryptd_queue_worker
+ Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x4d/0x6a lib/dump_stack.c:113
+ ___might_sleep kernel/sched/core.c:6138 [inline]
+ ___might_sleep.cold.19+0x8e/0x9f kernel/sched/core.c:6095
+ crypto_yield include/crypto/algapi.h:426 [inline]
+ crypto_hash_walk_done+0xd6/0x100 crypto/ahash.c:113
+ shash_ahash_update+0x41/0x60 crypto/shash.c:251
+ shash_async_update+0xd/0x10 crypto/shash.c:260
+ crypto_ahash_update include/crypto/hash.h:539 [inline]
+ poly_setkey+0xf6/0x130 crypto/chacha20poly1305.c:337
+ poly_init+0x51/0x60 crypto/chacha20poly1305.c:364
+ async_done_continue crypto/chacha20poly1305.c:78 [inline]
+ poly_genkey_done+0x15/0x30 crypto/chacha20poly1305.c:369
+ cryptd_skcipher_complete+0x29/0x70 crypto/cryptd.c:279
+ cryptd_skcipher_decrypt+0xcd/0x110 crypto/cryptd.c:339
+ cryptd_queue_worker+0x70/0xa0 crypto/cryptd.c:184
+ process_one_work+0x1ed/0x420 kernel/workqueue.c:2269
+ worker_thread+0x3e/0x3a0 kernel/workqueue.c:2415
+ kthread+0x11f/0x140 kernel/kthread.c:255
+ ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:352
+
+Fixes: 71ebc4d1b27d ("crypto: chacha20poly1305 - Add a ChaCha20-Poly1305 AEAD construction, RFC7539")
+Cc: <stable@vger.kernel.org> # v4.2+
+Cc: Martin Willi <martin@strongswan.org>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/chacha20poly1305.c | 30 +++++++++++++++++++-----------
+ 1 file changed, 19 insertions(+), 11 deletions(-)
+
+--- a/crypto/chacha20poly1305.c
++++ b/crypto/chacha20poly1305.c
+@@ -65,6 +65,8 @@ struct chachapoly_req_ctx {
+ unsigned int cryptlen;
+ /* Actual AD, excluding IV */
+ unsigned int assoclen;
++ /* request flags, with MAY_SLEEP cleared if needed */
++ u32 flags;
+ union {
+ struct poly_req poly;
+ struct chacha_req chacha;
+@@ -74,8 +76,12 @@ struct chachapoly_req_ctx {
+ static inline void async_done_continue(struct aead_request *req, int err,
+ int (*cont)(struct aead_request *))
+ {
+- if (!err)
++ if (!err) {
++ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
++
++ rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = cont(req);
++ }
+
+ if (err != -EINPROGRESS && err != -EBUSY)
+ aead_request_complete(req, err);
+@@ -142,7 +148,7 @@ static int chacha_decrypt(struct aead_re
+ dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
+ }
+
+- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
++ skcipher_request_set_callback(&creq->req, rctx->flags,
+ chacha_decrypt_done, req);
+ skcipher_request_set_tfm(&creq->req, ctx->chacha);
+ skcipher_request_set_crypt(&creq->req, src, dst,
+@@ -186,7 +192,7 @@ static int poly_tail(struct aead_request
+ memcpy(&preq->tail.cryptlen, &len, sizeof(len));
+ sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail));
+
+- ahash_request_set_callback(&preq->req, aead_request_flags(req),
++ ahash_request_set_callback(&preq->req, rctx->flags,
+ poly_tail_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, preq->src,
+@@ -217,7 +223,7 @@ static int poly_cipherpad(struct aead_re
+ sg_init_table(preq->src, 1);
+ sg_set_buf(preq->src, &preq->pad, padlen);
+
+- ahash_request_set_callback(&preq->req, aead_request_flags(req),
++ ahash_request_set_callback(&preq->req, rctx->flags,
+ poly_cipherpad_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
+@@ -248,7 +254,7 @@ static int poly_cipher(struct aead_reque
+ sg_init_table(rctx->src, 2);
+ crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
+
+- ahash_request_set_callback(&preq->req, aead_request_flags(req),
++ ahash_request_set_callback(&preq->req, rctx->flags,
+ poly_cipher_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
+@@ -278,7 +284,7 @@ static int poly_adpad(struct aead_reques
+ sg_init_table(preq->src, 1);
+ sg_set_buf(preq->src, preq->pad, padlen);
+
+- ahash_request_set_callback(&preq->req, aead_request_flags(req),
++ ahash_request_set_callback(&preq->req, rctx->flags,
+ poly_adpad_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
+@@ -302,7 +308,7 @@ static int poly_ad(struct aead_request *
+ struct poly_req *preq = &rctx->u.poly;
+ int err;
+
+- ahash_request_set_callback(&preq->req, aead_request_flags(req),
++ ahash_request_set_callback(&preq->req, rctx->flags,
+ poly_ad_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
+@@ -329,7 +335,7 @@ static int poly_setkey(struct aead_reque
+ sg_init_table(preq->src, 1);
+ sg_set_buf(preq->src, rctx->key, sizeof(rctx->key));
+
+- ahash_request_set_callback(&preq->req, aead_request_flags(req),
++ ahash_request_set_callback(&preq->req, rctx->flags,
+ poly_setkey_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
+@@ -353,7 +359,7 @@ static int poly_init(struct aead_request
+ struct poly_req *preq = &rctx->u.poly;
+ int err;
+
+- ahash_request_set_callback(&preq->req, aead_request_flags(req),
++ ahash_request_set_callback(&preq->req, rctx->flags,
+ poly_init_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+
+@@ -391,7 +397,7 @@ static int poly_genkey(struct aead_reque
+
+ chacha_iv(creq->iv, req, 0);
+
+- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
++ skcipher_request_set_callback(&creq->req, rctx->flags,
+ poly_genkey_done, req);
+ skcipher_request_set_tfm(&creq->req, ctx->chacha);
+ skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
+@@ -431,7 +437,7 @@ static int chacha_encrypt(struct aead_re
+ dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
+ }
+
+- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
++ skcipher_request_set_callback(&creq->req, rctx->flags,
+ chacha_encrypt_done, req);
+ skcipher_request_set_tfm(&creq->req, ctx->chacha);
+ skcipher_request_set_crypt(&creq->req, src, dst,
+@@ -449,6 +455,7 @@ static int chachapoly_encrypt(struct aea
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+
+ rctx->cryptlen = req->cryptlen;
++ rctx->flags = aead_request_flags(req);
+
+ /* encrypt call chain:
+ * - chacha_encrypt/done()
+@@ -470,6 +477,7 @@ static int chachapoly_decrypt(struct aea
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+
+ rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
++ rctx->flags = aead_request_flags(req);
+
+ /* decrypt call chain:
+ * - poly_genkey/done()
--- /dev/null
+From 0f7a81374060828280fcfdfbaa162cb559017f9f Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Sat, 18 May 2019 23:28:12 +0200
+Subject: crypto: crypto4xx - block ciphers should only accept complete blocks
+
+From: Christian Lamparter <chunkeey@gmail.com>
+
+commit 0f7a81374060828280fcfdfbaa162cb559017f9f upstream.
+
+The hardware automatically zero-pads incomplete block cipher
+blocks without raising any errors. This is a screw-up. This
+was noticed by CONFIG_CRYPTO_MANAGER_EXTRA_TESTS tests that
+sent incomplete blocks and expected them to fail.
+
+This fixes:
+cbc-aes-ppc4xx encryption unexpectedly succeeded on test vector
+"random: len=2409 klen=32"; expected_error=-22, cfg="random:
+may_sleep use_digest src_divs=[96.90%@+2295, 2.34%@+4066,
+0.32%@alignmask+12, 0.34%@+4087, 0.9%@alignmask+1787, 0.1%@+3767]
+iv_offset=6"
+
+ecb-aes-ppc4xx encryption unexpectedly succeeded on test vector
+"random: len=1011 klen=32"; expected_error=-22, cfg="random:
+may_sleep use_digest src_divs=[100.0%@alignmask+20]
+dst_divs=[3.12%@+3001, 96.88%@+4070]"
+
+Cc: Eric Biggers <ebiggers@kernel.org>
+Cc: stable@vger.kernel.org [4.19, 5.0 and 5.1]
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/amcc/crypto4xx_alg.c | 36 ++++++++++++++++++++++++-----------
+ drivers/crypto/amcc/crypto4xx_core.c | 16 +++++++--------
+ drivers/crypto/amcc/crypto4xx_core.h | 10 +++++----
+ 3 files changed, 39 insertions(+), 23 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -76,12 +76,16 @@ static void set_dynamic_sa_command_1(str
+ }
+
+ static inline int crypto4xx_crypt(struct skcipher_request *req,
+- const unsigned int ivlen, bool decrypt)
++ const unsigned int ivlen, bool decrypt,
++ bool check_blocksize)
+ {
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+ __le32 iv[AES_IV_SIZE];
+
++ if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
++ return -EINVAL;
++
+ if (ivlen)
+ crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
+
+@@ -90,24 +94,34 @@ static inline int crypto4xx_crypt(struct
+ ctx->sa_len, 0, NULL);
+ }
+
+-int crypto4xx_encrypt_noiv(struct skcipher_request *req)
++int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
++{
++ return crypto4xx_crypt(req, 0, false, true);
++}
++
++int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
++{
++ return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
++}
++
++int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
+ {
+- return crypto4xx_crypt(req, 0, false);
++ return crypto4xx_crypt(req, 0, true, true);
+ }
+
+-int crypto4xx_encrypt_iv(struct skcipher_request *req)
++int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
+ {
+- return crypto4xx_crypt(req, AES_IV_SIZE, false);
++ return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
+ }
+
+-int crypto4xx_decrypt_noiv(struct skcipher_request *req)
++int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
+ {
+- return crypto4xx_crypt(req, 0, true);
++ return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
+ }
+
+-int crypto4xx_decrypt_iv(struct skcipher_request *req)
++int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
+ {
+- return crypto4xx_crypt(req, AES_IV_SIZE, true);
++ return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
+ }
+
+ /**
+@@ -278,8 +292,8 @@ crypto4xx_ctr_crypt(struct skcipher_requ
+ return ret;
+ }
+
+- return encrypt ? crypto4xx_encrypt_iv(req)
+- : crypto4xx_decrypt_iv(req);
++ return encrypt ? crypto4xx_encrypt_iv_stream(req)
++ : crypto4xx_decrypt_iv_stream(req);
+ }
+
+ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -1226,8 +1226,8 @@ static struct crypto4xx_alg_common crypt
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cbc,
+- .encrypt = crypto4xx_encrypt_iv,
+- .decrypt = crypto4xx_decrypt_iv,
++ .encrypt = crypto4xx_encrypt_iv_block,
++ .decrypt = crypto4xx_decrypt_iv_block,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+@@ -1246,8 +1246,8 @@ static struct crypto4xx_alg_common crypt
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cfb,
+- .encrypt = crypto4xx_encrypt_iv,
+- .decrypt = crypto4xx_decrypt_iv,
++ .encrypt = crypto4xx_encrypt_iv_stream,
++ .decrypt = crypto4xx_decrypt_iv_stream,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+@@ -1306,8 +1306,8 @@ static struct crypto4xx_alg_common crypt
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = crypto4xx_setkey_aes_ecb,
+- .encrypt = crypto4xx_encrypt_noiv,
+- .decrypt = crypto4xx_decrypt_noiv,
++ .encrypt = crypto4xx_encrypt_noiv_block,
++ .decrypt = crypto4xx_decrypt_noiv_block,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+@@ -1326,8 +1326,8 @@ static struct crypto4xx_alg_common crypt
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_ofb,
+- .encrypt = crypto4xx_encrypt_iv,
+- .decrypt = crypto4xx_decrypt_iv,
++ .encrypt = crypto4xx_encrypt_iv_stream,
++ .decrypt = crypto4xx_decrypt_iv_stream,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -183,10 +183,12 @@ int crypto4xx_setkey_rfc3686(struct cryp
+ const u8 *key, unsigned int keylen);
+ int crypto4xx_encrypt_ctr(struct skcipher_request *req);
+ int crypto4xx_decrypt_ctr(struct skcipher_request *req);
+-int crypto4xx_encrypt_iv(struct skcipher_request *req);
+-int crypto4xx_decrypt_iv(struct skcipher_request *req);
+-int crypto4xx_encrypt_noiv(struct skcipher_request *req);
+-int crypto4xx_decrypt_noiv(struct skcipher_request *req);
++int crypto4xx_encrypt_iv_stream(struct skcipher_request *req);
++int crypto4xx_decrypt_iv_stream(struct skcipher_request *req);
++int crypto4xx_encrypt_iv_block(struct skcipher_request *req);
++int crypto4xx_decrypt_iv_block(struct skcipher_request *req);
++int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
++int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
+ int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
+ int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
+ int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
--- /dev/null
+From 95566aa75cd6b3b404502c06f66956b5481194b3 Mon Sep 17 00:00:00 2001
+From: Wen Yang <wen.yang99@zte.com.cn>
+Date: Mon, 8 Jul 2019 14:19:03 +0800
+Subject: crypto: crypto4xx - fix a potential double free in ppc4xx_trng_probe
+
+From: Wen Yang <wen.yang99@zte.com.cn>
+
+commit 95566aa75cd6b3b404502c06f66956b5481194b3 upstream.
+
+There is a possible double free issue in ppc4xx_trng_probe():
+
+85: dev->trng_base = of_iomap(trng, 0);
+86: of_node_put(trng); ---> released here
+87: if (!dev->trng_base)
+88: goto err_out;
+...
+110: err_out:
+111: of_node_put(trng); ---> double released here
+...
+
+This issue was detected by using the Coccinelle software.
+We fix it by removing the unnecessary of_node_put().
+
+Fixes: 5343e674f32f ("crypto4xx: integrate ppc4xx-rng into crypto4xx")
+Signed-off-by: Wen Yang <wen.yang99@zte.com.cn>
+Cc: <stable@vger.kernel.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Allison Randal <allison@lohutok.net>
+Cc: Armijn Hemel <armijn@tjaldur.nl>
+Cc: Julia Lawall <Julia.Lawall@lip6.fr>
+Cc: linux-crypto@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Acked-by: Julia Lawall <julia.lawall@lip6.fr>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/amcc/crypto4xx_trng.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_trng.c
++++ b/drivers/crypto/amcc/crypto4xx_trng.c
+@@ -111,7 +111,6 @@ void ppc4xx_trng_probe(struct crypto4xx_
+ return;
+
+ err_out:
+- of_node_put(trng);
+ iounmap(dev->trng_base);
+ kfree(rng);
+ dev->trng_base = NULL;
--- /dev/null
+From bfa2ba7d9e6b20aca82b99e6842fe18842ae3a0f Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Fri, 17 May 2019 23:15:57 +0200
+Subject: crypto: crypto4xx - fix AES CTR blocksize value
+
+From: Christian Lamparter <chunkeey@gmail.com>
+
+commit bfa2ba7d9e6b20aca82b99e6842fe18842ae3a0f upstream.
+
+This patch fixes an issue with crypto4xx's ctr(aes) that was
+discovered by libkcapi's kcapi-enc-test.sh test.
+
+Some of the ctr(aes) encryption tests were failing on the
+non-power-of-two test:
+
+kcapi-enc - Error: encryption failed with error 0
+kcapi-enc - Error: decryption failed with error 0
+[FAILED: 32-bit - 5.1.0-rc1+] 15 bytes: STDIN / STDOUT enc test (128 bits):
+original file (1d100e..cc96184c) and generated file (e3b0c442..1b7852b855)
+[FAILED: 32-bit - 5.1.0-rc1+] 15 bytes: STDIN / STDOUT enc test (128 bits)
+(openssl generated CT): original file (e3b0..5) and generated file (3..8e)
+[PASSED: 32-bit - 5.1.0-rc1+] 15 bytes: STDIN / STDOUT enc test (128 bits)
+(openssl generated PT)
+[FAILED: 32-bit - 5.1.0-rc1+] 15 bytes: STDIN / STDOUT enc test (password):
+original file (1d1..84c) and generated file (e3b..852b855)
+
+But the 16, 32, 512, 65536 tests always worked.
+
+Thankfully, this isn't a hidden hardware problem like previously,
+instead this turned out to be a copy and paste issue.
+
+With this patch, all the tests are passing with and
+kcapi-enc-test.sh gives crypto4xx's a clean bill of health:
+ "Number of failures: 0" :).
+
+Cc: stable@vger.kernel.org
+Fixes: 98e87e3d933b ("crypto: crypto4xx - add aes-ctr support")
+Fixes: f2a13e7cba9e ("crypto: crypto4xx - enable AES RFC3686, ECB, CFB and OFB offloads")
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/amcc/crypto4xx_core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -1259,7 +1259,7 @@ static struct crypto4xx_alg_common crypt
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+- .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+@@ -1279,7 +1279,7 @@ static struct crypto4xx_alg_common crypt
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+- .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
--- /dev/null
+From 70c4997f34b6c6888b3ac157adec49e01d0df2d5 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Sat, 18 May 2019 23:28:11 +0200
+Subject: crypto: crypto4xx - fix blocksize for cfb and ofb
+
+From: Christian Lamparter <chunkeey@gmail.com>
+
+commit 70c4997f34b6c6888b3ac157adec49e01d0df2d5 upstream.
+
+While the hardware consider them to be blockciphers, the
+reference implementation defines them as streamciphers.
+
+Do the right thing and set the blocksize to 1. This
+was found by CONFIG_CRYPTO_MANAGER_EXTRA_TESTS.
+
+This fixes the following issues:
+skcipher: blocksize for ofb-aes-ppc4xx (16) doesn't match generic impl (1)
+skcipher: blocksize for cfb-aes-ppc4xx (16) doesn't match generic impl (1)
+
+Cc: Eric Biggers <ebiggers@kernel.org>
+Cc: stable@vger.kernel.org
+Fixes: f2a13e7cba9e ("crypto: crypto4xx - enable AES RFC3686, ECB, CFB and OFB offloads")
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/amcc/crypto4xx_core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -1238,7 +1238,7 @@ static struct crypto4xx_alg_common crypt
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+- .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+@@ -1318,7 +1318,7 @@ static struct crypto4xx_alg_common crypt
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+- .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
--- /dev/null
+From 5c6bc4dfa515738149998bb0db2481a4fdead979 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 30 May 2019 10:50:39 -0700
+Subject: crypto: ghash - fix unaligned memory access in ghash_setkey()
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 5c6bc4dfa515738149998bb0db2481a4fdead979 upstream.
+
+Changing ghash_mod_init() to be subsys_initcall made it start running
+before the alignment fault handler has been installed on ARM. In kernel
+builds where the keys in the ghash test vectors happened to be
+misaligned in the kernel image, this exposed the longstanding bug that
+ghash_setkey() is incorrectly casting the key buffer (which can have any
+alignment) to be128 for passing to gf128mul_init_4k_lle().
+
+Fix this by memcpy()ing the key to a temporary buffer.
+
+Don't fix it by setting an alignmask on the algorithm instead because
+that would unnecessarily force alignment of the data too.
+
+Fixes: 2cdc6899a88e ("crypto: ghash - Add GHASH digest algorithm for GCM")
+Reported-by: Peter Robinson <pbrobinson@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Tested-by: Peter Robinson <pbrobinson@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/ghash-generic.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/crypto/ghash-generic.c
++++ b/crypto/ghash-generic.c
+@@ -34,6 +34,7 @@ static int ghash_setkey(struct crypto_sh
+ const u8 *key, unsigned int keylen)
+ {
+ struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
++ be128 k;
+
+ if (keylen != GHASH_BLOCK_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+@@ -42,7 +43,12 @@ static int ghash_setkey(struct crypto_sh
+
+ if (ctx->gf128)
+ gf128mul_free_4k(ctx->gf128);
+- ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
++
++ BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE);
++ memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */
++ ctx->gf128 = gf128mul_init_4k_lle(&k);
++ memzero_explicit(&k, GHASH_BLOCK_SIZE);
++
+ if (!ctx->gf128)
+ return -ENOMEM;
+
--- /dev/null
+From 25fcf94a2fa89dd3e73e965ebb0b38a2a4f72aa4 Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Sun, 9 Jun 2019 11:19:11 +1000
+Subject: Revert "scsi: ncr5380: Increase register polling limit"
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 25fcf94a2fa89dd3e73e965ebb0b38a2a4f72aa4 upstream.
+
+This reverts commit 4822827a69d7cd3bc5a07b7637484ebd2cf88db6.
+
+The purpose of that commit was to suppress a timeout warning message which
+appeared to be caused by target latency. But suppressing the warning is
+undesirable as the warning may indicate a messed up transfer count.
+
+Another problem with that commit is that 15 ms is too long to keep
+interrupts disabled as interrupt latency can cause system clock drift and
+other problems.
+
+Cc: Michael Schmitz <schmitzmic@gmail.com>
+Cc: stable@vger.kernel.org
+Fixes: 4822827a69d7 ("scsi: ncr5380: Increase register polling limit")
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Tested-by: Michael Schmitz <schmitzmic@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/NCR5380.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/NCR5380.h
++++ b/drivers/scsi/NCR5380.h
+@@ -235,7 +235,7 @@ struct NCR5380_cmd {
+ #define NCR5380_PIO_CHUNK_SIZE 256
+
+ /* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
+-#define NCR5380_REG_POLL_TIME 15
++#define NCR5380_REG_POLL_TIME 10
+
+ static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
+ {
--- /dev/null
+From f9b0530fa02e0c73f31a49ef743e8f44eb8e32cc Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Fri, 12 Jul 2019 10:08:19 +0800
+Subject: scsi: core: Fix race on creating sense cache
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit f9b0530fa02e0c73f31a49ef743e8f44eb8e32cc upstream.
+
+When scsi_init_sense_cache(host) is called concurrently from different
+hosts, each code path may find that no cache has been created and
+allocate a new one. The lack of locking can lead to potentially
+overriding a cache allocated by a different host.
+
+Fix the issue by moving 'mutex_lock(&scsi_sense_cache_mutex)' before
+scsi_select_sense_cache().
+
+Fixes: 0a6ac4ee7c21 ("scsi: respect unchecked_isa_dma for blk-mq")
+Cc: Stable <stable@vger.kernel.org>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Hannes Reinecke <hare@suse.com>
+Cc: Ewan D. Milne <emilne@redhat.com>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/scsi_lib.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -71,11 +71,11 @@ int scsi_init_sense_cache(struct Scsi_Ho
+ struct kmem_cache *cache;
+ int ret = 0;
+
++ mutex_lock(&scsi_sense_cache_mutex);
+ cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
+ if (cache)
+- return 0;
++ goto exit;
+
+- mutex_lock(&scsi_sense_cache_mutex);
+ if (shost->unchecked_isa_dma) {
+ scsi_sense_isadma_cache =
+ kmem_cache_create("scsi_sense_cache(DMA)",
+@@ -91,7 +91,7 @@ int scsi_init_sense_cache(struct Scsi_Ho
+ if (!scsi_sense_cache)
+ ret = -ENOMEM;
+ }
+-
++ exit:
+ mutex_unlock(&scsi_sense_cache_mutex);
+ return ret;
+ }
--- /dev/null
+From 78ff751f8e6a9446e9fb26b2bff0b8d3f8974cbd Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Sun, 9 Jun 2019 11:19:11 +1000
+Subject: scsi: mac_scsi: Fix pseudo DMA implementation, take 2
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 78ff751f8e6a9446e9fb26b2bff0b8d3f8974cbd upstream.
+
+A system bus error during a PDMA transfer can mess up the calculation of
+the transfer residual (the PDMA handshaking hardware lacks a byte
+counter). This results in data corruption.
+
+The algorithm in this patch anticipates a bus error by starting each
+transfer with a MOVE.B instruction. If a bus error is caught the transfer
+will be retried. If a bus error is caught later in the transfer (for a
+MOVE.W instruction) the transfer gets failed and subsequent requests for
+that target will use PIO instead of PDMA.
+
+This avoids the "!REQ and !ACK" error so the severity level of that message
+is reduced to KERN_DEBUG.
+
+Cc: Michael Schmitz <schmitzmic@gmail.com>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: stable@vger.kernel.org # v4.14+
+Fixes: 3a0f64bfa907 ("mac_scsi: Fix pseudo DMA implementation")
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Reported-by: Chris Jones <chris@martin-jones.com>
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Tested-by: Michael Schmitz <schmitzmic@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/mac_scsi.c | 369 ++++++++++++++++++++++++++++--------------------
+ 1 file changed, 217 insertions(+), 152 deletions(-)
+
+--- a/drivers/scsi/mac_scsi.c
++++ b/drivers/scsi/mac_scsi.c
+@@ -3,6 +3,8 @@
+ *
+ * Copyright 1998, Michael Schmitz <mschmitz@lbl.gov>
+ *
++ * Copyright 2019 Finn Thain
++ *
+ * derived in part from:
+ */
+ /*
+@@ -11,6 +13,7 @@
+ * Copyright 1995, Russell King
+ */
+
++#include <linux/delay.h>
+ #include <linux/types.h>
+ #include <linux/module.h>
+ #include <linux/ioport.h>
+@@ -89,101 +92,217 @@ static int __init mac_scsi_setup(char *s
+ __setup("mac5380=", mac_scsi_setup);
+ #endif /* !MODULE */
+
+-/* Pseudo DMA asm originally by Ove Edlund */
++/*
++ * According to "Inside Macintosh: Devices", Mac OS requires disk drivers to
++ * specify the number of bytes between the delays expected from a SCSI target.
++ * This allows the operating system to "prevent bus errors when a target fails
++ * to deliver the next byte within the processor bus error timeout period."
++ * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets
++ * so bus errors are unavoidable.
++ *
++ * If a MOVE.B instruction faults, we assume that zero bytes were transferred
++ * and simply retry. That assumption probably depends on target behaviour but
++ * seems to hold up okay. The NOP provides synchronization: without it the
++ * fault can sometimes occur after the program counter has moved past the
++ * offending instruction. Post-increment addressing can't be used.
++ */
++
++#define MOVE_BYTE(operands) \
++ asm volatile ( \
++ "1: moveb " operands " \n" \
++ "11: nop \n" \
++ " addq #1,%0 \n" \
++ " subq #1,%1 \n" \
++ "40: \n" \
++ " \n" \
++ ".section .fixup,\"ax\" \n" \
++ ".even \n" \
++ "90: movel #1, %2 \n" \
++ " jra 40b \n" \
++ ".previous \n" \
++ " \n" \
++ ".section __ex_table,\"a\" \n" \
++ ".align 4 \n" \
++ ".long 1b,90b \n" \
++ ".long 11b,90b \n" \
++ ".previous \n" \
++ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
+
+-#define CP_IO_TO_MEM(s,d,n) \
+-__asm__ __volatile__ \
+- (" cmp.w #4,%2\n" \
+- " bls 8f\n" \
+- " move.w %1,%%d0\n" \
+- " neg.b %%d0\n" \
+- " and.w #3,%%d0\n" \
+- " sub.w %%d0,%2\n" \
+- " bra 2f\n" \
+- " 1: move.b (%0),(%1)+\n" \
+- " 2: dbf %%d0,1b\n" \
+- " move.w %2,%%d0\n" \
+- " lsr.w #5,%%d0\n" \
+- " bra 4f\n" \
+- " 3: move.l (%0),(%1)+\n" \
+- "31: move.l (%0),(%1)+\n" \
+- "32: move.l (%0),(%1)+\n" \
+- "33: move.l (%0),(%1)+\n" \
+- "34: move.l (%0),(%1)+\n" \
+- "35: move.l (%0),(%1)+\n" \
+- "36: move.l (%0),(%1)+\n" \
+- "37: move.l (%0),(%1)+\n" \
+- " 4: dbf %%d0,3b\n" \
+- " move.w %2,%%d0\n" \
+- " lsr.w #2,%%d0\n" \
+- " and.w #7,%%d0\n" \
+- " bra 6f\n" \
+- " 5: move.l (%0),(%1)+\n" \
+- " 6: dbf %%d0,5b\n" \
+- " and.w #3,%2\n" \
+- " bra 8f\n" \
+- " 7: move.b (%0),(%1)+\n" \
+- " 8: dbf %2,7b\n" \
+- " moveq.l #0, %2\n" \
+- " 9: \n" \
+- ".section .fixup,\"ax\"\n" \
+- " .even\n" \
+- "91: moveq.l #1, %2\n" \
+- " jra 9b\n" \
+- "94: moveq.l #4, %2\n" \
+- " jra 9b\n" \
+- ".previous\n" \
+- ".section __ex_table,\"a\"\n" \
+- " .align 4\n" \
+- " .long 1b,91b\n" \
+- " .long 3b,94b\n" \
+- " .long 31b,94b\n" \
+- " .long 32b,94b\n" \
+- " .long 33b,94b\n" \
+- " .long 34b,94b\n" \
+- " .long 35b,94b\n" \
+- " .long 36b,94b\n" \
+- " .long 37b,94b\n" \
+- " .long 5b,94b\n" \
+- " .long 7b,91b\n" \
+- ".previous" \
+- : "=a"(s), "=a"(d), "=d"(n) \
+- : "0"(s), "1"(d), "2"(n) \
+- : "d0")
++/*
++ * If a MOVE.W (or MOVE.L) instruction faults, it cannot be retried because
++ * the residual byte count would be uncertain. In that situation the MOVE_WORD
++ * macro clears n in the fixup section to abort the transfer.
++ */
++
++#define MOVE_WORD(operands) \
++ asm volatile ( \
++ "1: movew " operands " \n" \
++ "11: nop \n" \
++ " subq #2,%1 \n" \
++ "40: \n" \
++ " \n" \
++ ".section .fixup,\"ax\" \n" \
++ ".even \n" \
++ "90: movel #0, %1 \n" \
++ " movel #2, %2 \n" \
++ " jra 40b \n" \
++ ".previous \n" \
++ " \n" \
++ ".section __ex_table,\"a\" \n" \
++ ".align 4 \n" \
++ ".long 1b,90b \n" \
++ ".long 11b,90b \n" \
++ ".previous \n" \
++ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
++
++#define MOVE_16_WORDS(operands) \
++ asm volatile ( \
++ "1: movew " operands " \n" \
++ "2: movew " operands " \n" \
++ "3: movew " operands " \n" \
++ "4: movew " operands " \n" \
++ "5: movew " operands " \n" \
++ "6: movew " operands " \n" \
++ "7: movew " operands " \n" \
++ "8: movew " operands " \n" \
++ "9: movew " operands " \n" \
++ "10: movew " operands " \n" \
++ "11: movew " operands " \n" \
++ "12: movew " operands " \n" \
++ "13: movew " operands " \n" \
++ "14: movew " operands " \n" \
++ "15: movew " operands " \n" \
++ "16: movew " operands " \n" \
++ "17: nop \n" \
++ " subl #32,%1 \n" \
++ "40: \n" \
++ " \n" \
++ ".section .fixup,\"ax\" \n" \
++ ".even \n" \
++ "90: movel #0, %1 \n" \
++ " movel #2, %2 \n" \
++ " jra 40b \n" \
++ ".previous \n" \
++ " \n" \
++ ".section __ex_table,\"a\" \n" \
++ ".align 4 \n" \
++ ".long 1b,90b \n" \
++ ".long 2b,90b \n" \
++ ".long 3b,90b \n" \
++ ".long 4b,90b \n" \
++ ".long 5b,90b \n" \
++ ".long 6b,90b \n" \
++ ".long 7b,90b \n" \
++ ".long 8b,90b \n" \
++ ".long 9b,90b \n" \
++ ".long 10b,90b \n" \
++ ".long 11b,90b \n" \
++ ".long 12b,90b \n" \
++ ".long 13b,90b \n" \
++ ".long 14b,90b \n" \
++ ".long 15b,90b \n" \
++ ".long 16b,90b \n" \
++ ".long 17b,90b \n" \
++ ".previous \n" \
++ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
++
++#define MAC_PDMA_DELAY 32
++
++static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n)
++{
++ unsigned char *addr = start;
++ int result = 0;
++
++ if (n >= 1) {
++ MOVE_BYTE("%3@,%0@");
++ if (result)
++ goto out;
++ }
++ if (n >= 1 && ((unsigned long)addr & 1)) {
++ MOVE_BYTE("%3@,%0@");
++ if (result)
++ goto out;
++ }
++ while (n >= 32)
++ MOVE_16_WORDS("%3@,%0@+");
++ while (n >= 2)
++ MOVE_WORD("%3@,%0@+");
++ if (result)
++ return start - addr; /* Negated to indicate uncertain length */
++ if (n == 1)
++ MOVE_BYTE("%3@,%0@");
++out:
++ return addr - start;
++}
++
++static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n)
++{
++ unsigned char *addr = start;
++ int result = 0;
++
++ if (n >= 1) {
++ MOVE_BYTE("%0@,%3@");
++ if (result)
++ goto out;
++ }
++ if (n >= 1 && ((unsigned long)addr & 1)) {
++ MOVE_BYTE("%0@,%3@");
++ if (result)
++ goto out;
++ }
++ while (n >= 32)
++ MOVE_16_WORDS("%0@+,%3@");
++ while (n >= 2)
++ MOVE_WORD("%0@+,%3@");
++ if (result)
++ return start - addr; /* Negated to indicate uncertain length */
++ if (n == 1)
++ MOVE_BYTE("%0@,%3@");
++out:
++ return addr - start;
++}
+
+ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
+ unsigned char *dst, int len)
+ {
+ u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
+ unsigned char *d = dst;
+- int n = len;
+- int transferred;
++
++ hostdata->pdma_residual = len;
+
+ while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+ BASR_DRQ | BASR_PHASE_MATCH,
+ BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
+- CP_IO_TO_MEM(s, d, n);
++ int bytes;
++
++ bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512));
+
+- transferred = d - dst - n;
+- hostdata->pdma_residual = len - transferred;
++ if (bytes > 0) {
++ d += bytes;
++ hostdata->pdma_residual -= bytes;
++ }
+
+- /* No bus error. */
+- if (n == 0)
++ if (hostdata->pdma_residual == 0)
+ return 0;
+
+- /* Target changed phase early? */
+ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
+- BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
+- scmd_printk(KERN_ERR, hostdata->connected,
++ BUS_AND_STATUS_REG, BASR_ACK,
++ BASR_ACK, HZ / 64) < 0)
++ scmd_printk(KERN_DEBUG, hostdata->connected,
+ "%s: !REQ and !ACK\n", __func__);
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
+ return 0;
+
++ if (bytes == 0)
++ udelay(MAC_PDMA_DELAY);
++
++ if (bytes >= 0)
++ continue;
++
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+- "%s: bus error (%d/%d)\n", __func__, transferred, len);
++ "%s: bus error (%d/%d)\n", __func__, d - dst, len);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+- d = dst + transferred;
+- n = len - transferred;
++ return -1;
+ }
+
+ scmd_printk(KERN_ERR, hostdata->connected,
+@@ -192,93 +311,27 @@ static inline int macscsi_pread(struct N
+ return -1;
+ }
+
+-
+-#define CP_MEM_TO_IO(s,d,n) \
+-__asm__ __volatile__ \
+- (" cmp.w #4,%2\n" \
+- " bls 8f\n" \
+- " move.w %0,%%d0\n" \
+- " neg.b %%d0\n" \
+- " and.w #3,%%d0\n" \
+- " sub.w %%d0,%2\n" \
+- " bra 2f\n" \
+- " 1: move.b (%0)+,(%1)\n" \
+- " 2: dbf %%d0,1b\n" \
+- " move.w %2,%%d0\n" \
+- " lsr.w #5,%%d0\n" \
+- " bra 4f\n" \
+- " 3: move.l (%0)+,(%1)\n" \
+- "31: move.l (%0)+,(%1)\n" \
+- "32: move.l (%0)+,(%1)\n" \
+- "33: move.l (%0)+,(%1)\n" \
+- "34: move.l (%0)+,(%1)\n" \
+- "35: move.l (%0)+,(%1)\n" \
+- "36: move.l (%0)+,(%1)\n" \
+- "37: move.l (%0)+,(%1)\n" \
+- " 4: dbf %%d0,3b\n" \
+- " move.w %2,%%d0\n" \
+- " lsr.w #2,%%d0\n" \
+- " and.w #7,%%d0\n" \
+- " bra 6f\n" \
+- " 5: move.l (%0)+,(%1)\n" \
+- " 6: dbf %%d0,5b\n" \
+- " and.w #3,%2\n" \
+- " bra 8f\n" \
+- " 7: move.b (%0)+,(%1)\n" \
+- " 8: dbf %2,7b\n" \
+- " moveq.l #0, %2\n" \
+- " 9: \n" \
+- ".section .fixup,\"ax\"\n" \
+- " .even\n" \
+- "91: moveq.l #1, %2\n" \
+- " jra 9b\n" \
+- "94: moveq.l #4, %2\n" \
+- " jra 9b\n" \
+- ".previous\n" \
+- ".section __ex_table,\"a\"\n" \
+- " .align 4\n" \
+- " .long 1b,91b\n" \
+- " .long 3b,94b\n" \
+- " .long 31b,94b\n" \
+- " .long 32b,94b\n" \
+- " .long 33b,94b\n" \
+- " .long 34b,94b\n" \
+- " .long 35b,94b\n" \
+- " .long 36b,94b\n" \
+- " .long 37b,94b\n" \
+- " .long 5b,94b\n" \
+- " .long 7b,91b\n" \
+- ".previous" \
+- : "=a"(s), "=a"(d), "=d"(n) \
+- : "0"(s), "1"(d), "2"(n) \
+- : "d0")
+-
+ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
+ unsigned char *src, int len)
+ {
+ unsigned char *s = src;
+ u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
+- int n = len;
+- int transferred;
++
++ hostdata->pdma_residual = len;
+
+ while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+ BASR_DRQ | BASR_PHASE_MATCH,
+ BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
+- CP_MEM_TO_IO(s, d, n);
++ int bytes;
+
+- transferred = s - src - n;
+- hostdata->pdma_residual = len - transferred;
++ bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512));
+
+- /* Target changed phase early? */
+- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
+- BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
+- scmd_printk(KERN_ERR, hostdata->connected,
+- "%s: !REQ and !ACK\n", __func__);
+- if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
+- return 0;
++ if (bytes > 0) {
++ s += bytes;
++ hostdata->pdma_residual -= bytes;
++ }
+
+- /* No bus error. */
+- if (n == 0) {
++ if (hostdata->pdma_residual == 0) {
+ if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
+ TCR_LAST_BYTE_SENT,
+ TCR_LAST_BYTE_SENT, HZ / 64) < 0)
+@@ -287,17 +340,29 @@ static inline int macscsi_pwrite(struct
+ return 0;
+ }
+
++ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
++ BUS_AND_STATUS_REG, BASR_ACK,
++ BASR_ACK, HZ / 64) < 0)
++ scmd_printk(KERN_DEBUG, hostdata->connected,
++ "%s: !REQ and !ACK\n", __func__);
++ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
++ return 0;
++
++ if (bytes == 0)
++ udelay(MAC_PDMA_DELAY);
++
++ if (bytes >= 0)
++ continue;
++
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+- "%s: bus error (%d/%d)\n", __func__, transferred, len);
++ "%s: bus error (%d/%d)\n", __func__, s - src, len);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+- s = src + transferred;
+- n = len - transferred;
++ return -1;
+ }
+
+ scmd_printk(KERN_ERR, hostdata->connected,
+ "%s: phase mismatch or !DRQ\n", __func__);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+-
+ return -1;
+ }
+
--- /dev/null
+From 7398cee4c3e6aea1ba07a6449e5533ecd0b92cdd Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Sun, 9 Jun 2019 11:19:11 +1000
+Subject: scsi: mac_scsi: Increase PIO/PDMA transfer length threshold
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 7398cee4c3e6aea1ba07a6449e5533ecd0b92cdd upstream.
+
+Some targets introduce delays when handshaking the response to certain
+commands. For example, a disk may send a 96-byte response to an INQUIRY
+command (or a 24-byte response to a MODE SENSE command) too slowly.
+
+Apparently the first 12 or 14 bytes are handshaked okay but then the system
+bus error timeout is reached while transferring the next word.
+
+Since the scsi bus phase hasn't changed, the driver then sets the target
+borken flag to prevent further PDMA transfers. The driver also logs the
+warning, "switching to slow handshake".
+
+Raise the PDMA threshold to 512 bytes so that PIO transfers will be used
+for these commands. This default is sufficiently low that PDMA will still
+be used for READ and WRITE commands.
+
+The existing threshold (16 bytes) was chosen more or less at random.
+However, best performance requires the threshold to be as low as possible.
+Those systems that don't need the PIO workaround at all may benefit from
+mac_scsi.setup_use_pdma=1
+
+Cc: Michael Schmitz <schmitzmic@gmail.com>
+Cc: stable@vger.kernel.org # v4.14+
+Fixes: 3a0f64bfa907 ("mac_scsi: Fix pseudo DMA implementation")
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Tested-by: Michael Schmitz <schmitzmic@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/mac_scsi.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/mac_scsi.c
++++ b/drivers/scsi/mac_scsi.c
+@@ -52,7 +52,7 @@ static int setup_cmd_per_lun = -1;
+ module_param(setup_cmd_per_lun, int, 0);
+ static int setup_sg_tablesize = -1;
+ module_param(setup_sg_tablesize, int, 0);
+-static int setup_use_pdma = -1;
++static int setup_use_pdma = 512;
+ module_param(setup_use_pdma, int, 0);
+ static int setup_hostid = -1;
+ module_param(setup_hostid, int, 0);
+@@ -305,7 +305,7 @@ static int macscsi_dma_xfer_len(struct N
+ struct scsi_cmnd *cmd)
+ {
+ if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
+- cmd->SCp.this_residual < 16)
++ cmd->SCp.this_residual < setup_use_pdma)
+ return 0;
+
+ return cmd->SCp.this_residual;
--- /dev/null
+From c8f96df5b8e633056b7ebf5d52a9d6fb1b156ce3 Mon Sep 17 00:00:00 2001
+From: Shivasharan S <shivasharan.srikanteshwara@broadcom.com>
+Date: Fri, 28 Jun 2019 18:02:12 -0700
+Subject: scsi: megaraid_sas: Fix calculation of target ID
+
+From: Shivasharan S <shivasharan.srikanteshwara@broadcom.com>
+
+commit c8f96df5b8e633056b7ebf5d52a9d6fb1b156ce3 upstream.
+
+In megasas_get_target_prop(), driver is incorrectly calculating the target
+ID for devices with channel 1 and 3. Due to this, firmware will either
+fail the command (if there is no device with the target id sent from
+driver) or could return the properties for a target which was not
+intended. Devices could end up with the wrong queue depth due to this.
+
+Fix target id calculation for channel 1 and 3.
+
+Fixes: 96188a89cc6d ("scsi: megaraid_sas: NVME interface target prop added")
+Cc: stable@vger.kernel.org
+Signed-off-by: Shivasharan S <shivasharan.srikanteshwara@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/megaraid/megaraid_sas_base.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -6168,7 +6168,8 @@ megasas_get_target_prop(struct megasas_i
+ int ret;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+- u16 targetId = (sdev->channel % 2) + sdev->id;
++ u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
++ sdev->id;
+
+ cmd = megasas_get_cmd(instance);
+
--- /dev/null
+From 57f31326518e98ee4cabf9a04efe00ed57c54147 Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Sun, 9 Jun 2019 11:19:11 +1000
+Subject: scsi: NCR5380: Always re-enable reselection interrupt
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 57f31326518e98ee4cabf9a04efe00ed57c54147 upstream.
+
+The reselection interrupt gets disabled during selection and must be
+re-enabled when hostdata->connected becomes NULL. If it isn't re-enabled a
+disconnected command may time-out or the target may wedge the bus while
+trying to reselect the host. This can happen after a command is aborted.
+
+Fix this by enabling the reselection interrupt in NCR5380_main() after
+calls to NCR5380_select() and NCR5380_information_transfer() return.
+
+Cc: Michael Schmitz <schmitzmic@gmail.com>
+Cc: stable@vger.kernel.org # v4.9+
+Fixes: 8b00c3d5d40d ("ncr5380: Implement new eh_abort_handler")
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Tested-by: Michael Schmitz <schmitzmic@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/NCR5380.c | 12 ++----------
+ 1 file changed, 2 insertions(+), 10 deletions(-)
+
+--- a/drivers/scsi/NCR5380.c
++++ b/drivers/scsi/NCR5380.c
+@@ -710,6 +710,8 @@ static void NCR5380_main(struct work_str
+ NCR5380_information_transfer(instance);
+ done = 0;
+ }
++ if (!hostdata->connected)
++ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ spin_unlock_irq(&hostdata->lock);
+ if (!done)
+ cond_resched();
+@@ -1111,8 +1113,6 @@ static bool NCR5380_select(struct Scsi_H
+ spin_lock_irq(&hostdata->lock);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_reselect(instance);
+- if (!hostdata->connected)
+- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n");
+ goto out;
+ }
+@@ -1120,7 +1120,6 @@ static bool NCR5380_select(struct Scsi_H
+ if (err < 0) {
+ spin_lock_irq(&hostdata->lock);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+
+ /* Can't touch cmd if it has been reclaimed by the scsi ML */
+ if (!hostdata->selecting)
+@@ -1158,7 +1157,6 @@ static bool NCR5380_select(struct Scsi_H
+ if (err < 0) {
+ shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ goto out;
+ }
+ if (!hostdata->selecting) {
+@@ -1827,9 +1825,6 @@ static void NCR5380_information_transfer
+ */
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+- /* Enable reselect interrupts */
+- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+-
+ maybe_release_dma_irq(instance);
+ return;
+ case MESSAGE_REJECT:
+@@ -1861,8 +1856,6 @@ static void NCR5380_information_transfer
+ */
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+- /* Enable reselect interrupts */
+- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ #ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_DMA_ENABLE;
+ #endif
+@@ -1965,7 +1958,6 @@ static void NCR5380_information_transfer
+ cmd->result = DID_ERROR << 16;
+ complete_cmd(instance, cmd);
+ maybe_release_dma_irq(instance);
+- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return;
+ }
+ msgout = NOP;
--- /dev/null
+From f9dfed1c785734b95b08d67600e05d2092508ab0 Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Sun, 9 Jun 2019 11:19:11 +1000
+Subject: scsi: NCR5380: Handle PDMA failure reliably
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit f9dfed1c785734b95b08d67600e05d2092508ab0 upstream.
+
+A PDMA error is handled in the core driver by setting the device's 'borken'
+flag and aborting the command. Unfortunately, do_abort() is not
+dependable. Perform a SCSI bus reset instead, to make sure that the command
+fails and gets retried.
+
+Cc: Michael Schmitz <schmitzmic@gmail.com>
+Cc: stable@vger.kernel.org # v4.20+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Tested-by: Michael Schmitz <schmitzmic@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/NCR5380.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/scsi/NCR5380.c
++++ b/drivers/scsi/NCR5380.c
+@@ -1762,10 +1762,8 @@ static void NCR5380_information_transfer
+ scmd_printk(KERN_INFO, cmd,
+ "switching to slow handshake\n");
+ cmd->device->borken = 1;
+- sink = 1;
+- do_abort(instance);
+- cmd->result = DID_ERROR << 16;
+- /* XXX - need to source or sink data here, as appropriate */
++ do_reset(instance);
++ bus_reset_cleanup(instance);
+ }
+ } else {
+ /* Transfer a small chunk so that the
--- /dev/null
+From 0cdc58580b37a160fac4b884266b8b7cb096f539 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@wdc.com>
+Date: Wed, 17 Jul 2019 10:51:49 +0900
+Subject: scsi: sd_zbc: Fix compilation warning
+
+From: Damien Le Moal <damien.lemoal@wdc.com>
+
+commit 0cdc58580b37a160fac4b884266b8b7cb096f539 upstream.
+
+kbuild test robot gets the following compilation warning using gcc 7.4
+cross compilation for c6x (GCC_VERSION=7.4.0 make.cross ARCH=c6x).
+
+ In file included from include/asm-generic/bug.h:18:0,
+ from arch/c6x/include/asm/bug.h:12,
+ from include/linux/bug.h:5,
+ from include/linux/thread_info.h:12,
+ from include/asm-generic/current.h:5,
+ from ./arch/c6x/include/generated/asm/current.h:1,
+ from include/linux/sched.h:12,
+ from include/linux/blkdev.h:5,
+ from drivers//scsi/sd_zbc.c:11:
+ drivers//scsi/sd_zbc.c: In function 'sd_zbc_read_zones':
+>> include/linux/kernel.h:62:48: warning: 'zone_blocks' may be used
+ uninitialized in this function [-Wmaybe-uninitialized]
+ #define __round_mask(x, y) ((__typeof__(x))((y)-1))
+ ^
+ drivers//scsi/sd_zbc.c:464:6: note: 'zone_blocks' was declared here
+ u32 zone_blocks;
+ ^~~~~~~~~~~
+
+This is a false-positive report. The variable zone_blocks is always
+initialized in sd_zbc_check_zones() before use. It is left uninitialized
+if and only if sd_zbc_check_zones() fails.
+
+Avoid this warning by initializing the zone_blocks variable to 0.
+
+Fixes: 5f832a395859 ("scsi: sd_zbc: Fix sd_zbc_check_zones() error checks")
+Cc: Stable <stable@vger.kernel.org>
+Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/sd_zbc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/sd_zbc.c
++++ b/drivers/scsi/sd_zbc.c
+@@ -431,7 +431,7 @@ int sd_zbc_read_zones(struct scsi_disk *
+ {
+ struct gendisk *disk = sdkp->disk;
+ unsigned int nr_zones;
+- u32 zone_blocks;
++ u32 zone_blocks = 0;
+ int ret;
+
+ if (!sd_is_zoned(sdkp))
--- /dev/null
+From b76becde2b84137faa29bbc9a3b20953b5980e48 Mon Sep 17 00:00:00 2001
+From: Benjamin Block <bblock@linux.ibm.com>
+Date: Tue, 2 Jul 2019 23:02:00 +0200
+Subject: scsi: zfcp: fix request object use-after-free in send path causing seqno errors
+
+From: Benjamin Block <bblock@linux.ibm.com>
+
+commit b76becde2b84137faa29bbc9a3b20953b5980e48 upstream.
+
+With a recent change to our send path for FSF commands we introduced a
+possible use-after-free of request-objects, that might further lead to
+zfcp crafting bad requests, which the FCP channel correctly complains
+about with an error (FSF_PROT_SEQ_NUMB_ERROR). This error is then handled
+by an adapter-wide recovery.
+
+The following sequence illustrates the possible use-after-free:
+
+ Send Path:
+
+ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
+ {
+ struct zfcp_fsf_req *req;
+ ...
+ spin_lock_irq(&qdio->req_q_lock);
+ // ^^^^^^^^^^^^^^^^
+ // protects QDIO queue during sending
+ ...
+ req = zfcp_fsf_req_create(qdio,
+ FSF_QTCB_OPEN_PORT_WITH_DID,
+ SBAL_SFLAGS0_TYPE_READ,
+ qdio->adapter->pool.erp_req);
+ // ^^^^^^^^^^^^^^^^^^^
+ // allocation of the request-object
+ ...
+ retval = zfcp_fsf_req_send(req);
+ ...
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+ }
+
+ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
+ {
+ struct zfcp_adapter *adapter = req->adapter;
+ struct zfcp_qdio *qdio = adapter->qdio;
+ ...
+ zfcp_reqlist_add(adapter->req_list, req);
+ // ^^^^^^^^^^^^^^^^
+ // add request to our driver-internal hash-table for tracking
+ // (protected by separate lock req_list->lock)
+ ...
+ if (zfcp_qdio_send(qdio, &req->qdio_req)) {
+ // ^^^^^^^^^^^^^^
+ // hand-off the request to FCP channel;
+ // the request can complete at any point now
+ ...
+ }
+
+ /* Don't increase for unsolicited status */
+ if (!zfcp_fsf_req_is_status_read_buffer(req))
+ // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ // possible use-after-free
+ adapter->fsf_req_seq_no++;
+ // ^^^^^^^^^^^^^^^^
+ // because of the use-after-free we might
+ // miss this accounting, and as follow-up
+ // this results in the FCP channel error
+ // FSF_PROT_SEQ_NUMB_ERROR
+ adapter->req_no++;
+
+ return 0;
+ }
+
+ static inline bool
+ zfcp_fsf_req_is_status_read_buffer(struct zfcp_fsf_req *req)
+ {
+ return req->qtcb == NULL;
+ // ^^^^^^^^^
+ // possible use-after-free
+ }
+
+ Response Path:
+
+ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
+ {
+ ...
+ struct zfcp_fsf_req *fsf_req;
+ ...
+ for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
+ ...
+ fsf_req = zfcp_reqlist_find_rm(adapter->req_list,
+ req_id);
+ // ^^^^^^^^^^^^^^^^^^^^
+ // remove request from our driver-internal
+ // hash-table (lock req_list->lock)
+ ...
+ zfcp_fsf_req_complete(fsf_req);
+ }
+ }
+
+ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
+ {
+ ...
+ if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
+ zfcp_fsf_req_free(req);
+ // ^^^^^^^^^^^^^^^^^
+ // free memory for request-object
+ else
+ complete(&req->completion);
+ // ^^^^^^^^
+ // completion notification for code-paths that wait
+ // synchronous for the completion of the request; in
+ // those the memory is freed separately
+ }
+
+The result of the use-after-free only affects the send path, and can not
+lead to any data corruption. In case we miss the sequence-number
+accounting, because the memory was already re-purposed, the next FSF
+command will fail with said FCP channel error, and we will recover the
+whole adapter. This causes no additional errors, but it slows down
+traffic. There is a slight chance of the same thing happening again
+recursively after the adapter recovery, but so far this has not been seen.
+
+This was seen under z/VM, where the send path might run on a virtual CPU
+that gets scheduled away by z/VM, while the return path might still run,
+and so create the necessary timing. Running with KASAN can also slow down
+the kernel sufficiently to run into this use-after-free, and then see the
+report by KASAN.
+
+To fix this, simply pull the test for the sequence-number accounting in
+front of the hand-off to the FCP channel (this information doesn't change
+during hand-off), but leave the sequence-number accounting itself where it
+is.
+
+To make future regressions of the same kind less likely, add comments to
+all closely related code-paths.
+
+Signed-off-by: Benjamin Block <bblock@linux.ibm.com>
+Fixes: f9eca0227600 ("scsi: zfcp: drop duplicate fsf_command from zfcp_fsf_req which is also in QTCB header")
+Cc: <stable@vger.kernel.org> #5.0+
+Reviewed-by: Steffen Maier <maier@linux.ibm.com>
+Reviewed-by: Jens Remus <jremus@linux.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/scsi/zfcp_fsf.c | 45 ++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 40 insertions(+), 5 deletions(-)
+
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -11,6 +11,7 @@
+ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+ #include <linux/blktrace_api.h>
++#include <linux/types.h>
+ #include <linux/slab.h>
+ #include <scsi/fc/fc_els.h>
+ #include "zfcp_ext.h"
+@@ -741,6 +742,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req
+
+ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
+ {
++ const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
+ struct zfcp_adapter *adapter = req->adapter;
+ struct zfcp_qdio *qdio = adapter->qdio;
+ int req_id = req->req_id;
+@@ -757,8 +759,20 @@ static int zfcp_fsf_req_send(struct zfcp
+ return -EIO;
+ }
+
++ /*
++ * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
++ * ONLY TOUCH SYNC req AGAIN ON req->completion.
++ *
++ * The request might complete and be freed concurrently at any point
++ * now. This is not protected by the QDIO-lock (req_q_lock). So any
++ * uncontrolled access after this might result in an use-after-free bug.
++ * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
++ * when it is completed via req->completion, is it safe to use req
++ * again.
++ */
++
+ /* Don't increase for unsolicited status */
+- if (!zfcp_fsf_req_is_status_read_buffer(req))
++ if (!is_srb)
+ adapter->fsf_req_seq_no++;
+ adapter->req_no++;
+
+@@ -805,6 +819,7 @@ int zfcp_fsf_status_read(struct zfcp_qdi
+ retval = zfcp_fsf_req_send(req);
+ if (retval)
+ goto failed_req_send;
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+
+ goto out;
+
+@@ -914,8 +929,10 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_
+ req->qtcb->bottom.support.req_handle = (u64) old_req_id;
+
+ zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
+- if (!zfcp_fsf_req_send(req))
++ if (!zfcp_fsf_req_send(req)) {
++ /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
+ goto out;
++ }
+
+ out_error_free:
+ zfcp_fsf_req_free(req);
+@@ -1098,6 +1115,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_
+ ret = zfcp_fsf_req_send(req);
+ if (ret)
+ goto failed_send;
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+
+ goto out;
+
+@@ -1198,6 +1216,7 @@ int zfcp_fsf_send_els(struct zfcp_adapte
+ ret = zfcp_fsf_req_send(req);
+ if (ret)
+ goto failed_send;
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+
+ goto out;
+
+@@ -1243,6 +1262,7 @@ int zfcp_fsf_exchange_config_data(struct
+ zfcp_fsf_req_free(req);
+ erp_action->fsf_req_id = 0;
+ }
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+@@ -1279,8 +1299,10 @@ int zfcp_fsf_exchange_config_data_sync(s
+ zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+ retval = zfcp_fsf_req_send(req);
+ spin_unlock_irq(&qdio->req_q_lock);
+- if (!retval)
++ if (!retval) {
++ /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
+ wait_for_completion(&req->completion);
++ }
+
+ zfcp_fsf_req_free(req);
+ return retval;
+@@ -1330,6 +1352,7 @@ int zfcp_fsf_exchange_port_data(struct z
+ zfcp_fsf_req_free(req);
+ erp_action->fsf_req_id = 0;
+ }
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+@@ -1372,8 +1395,10 @@ int zfcp_fsf_exchange_port_data_sync(str
+ retval = zfcp_fsf_req_send(req);
+ spin_unlock_irq(&qdio->req_q_lock);
+
+- if (!retval)
++ if (!retval) {
++ /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
+ wait_for_completion(&req->completion);
++ }
+
+ zfcp_fsf_req_free(req);
+
+@@ -1493,6 +1518,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_a
+ erp_action->fsf_req_id = 0;
+ put_device(&port->dev);
+ }
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+@@ -1557,6 +1583,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_
+ zfcp_fsf_req_free(req);
+ erp_action->fsf_req_id = 0;
+ }
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+@@ -1626,6 +1653,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_f
+ retval = zfcp_fsf_req_send(req);
+ if (retval)
+ zfcp_fsf_req_free(req);
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ if (!retval)
+@@ -1681,6 +1709,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_
+ retval = zfcp_fsf_req_send(req);
+ if (retval)
+ zfcp_fsf_req_free(req);
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ if (!retval)
+@@ -1776,6 +1805,7 @@ int zfcp_fsf_close_physical_port(struct
+ zfcp_fsf_req_free(req);
+ erp_action->fsf_req_id = 0;
+ }
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+@@ -1899,6 +1929,7 @@ int zfcp_fsf_open_lun(struct zfcp_erp_ac
+ zfcp_fsf_req_free(req);
+ erp_action->fsf_req_id = 0;
+ }
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+@@ -1987,6 +2018,7 @@ int zfcp_fsf_close_lun(struct zfcp_erp_a
+ zfcp_fsf_req_free(req);
+ erp_action->fsf_req_id = 0;
+ }
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+@@ -2299,6 +2331,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *
+ retval = zfcp_fsf_req_send(req);
+ if (unlikely(retval))
+ goto failed_scsi_cmnd;
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+
+ goto out;
+
+@@ -2373,8 +2406,10 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_m
+ zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
+
+ zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
+- if (!zfcp_fsf_req_send(req))
++ if (!zfcp_fsf_req_send(req)) {
++ /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
+ goto out;
++ }
+
+ zfcp_fsf_req_free(req);
+ req = NULL;
--- /dev/null
+From 106d45f350c7cac876844dc685845cba4ffdb70b Mon Sep 17 00:00:00 2001
+From: Benjamin Block <bblock@linux.ibm.com>
+Date: Tue, 2 Jul 2019 23:02:01 +0200
+Subject: scsi: zfcp: fix request object use-after-free in send path causing wrong traces
+
+From: Benjamin Block <bblock@linux.ibm.com>
+
+commit 106d45f350c7cac876844dc685845cba4ffdb70b upstream.
+
+When tracing instances where we open and close WKA ports, we also pass the
+request-ID of the respective FSF command.
+
+But after successfully sending the FSF command we must not use the
+request-object anymore, as this might result in an use-after-free (see
+"zfcp: fix request object use-after-free in send path causing seqno
+errors" ).
+
+To fix this add a new variable that caches the request-ID before sending
+the request. This won't change during the hand-off to the FCP channel,
+and so it's safe to trace this cached request-ID later, instead of using
+the request object.
+
+Signed-off-by: Benjamin Block <bblock@linux.ibm.com>
+Fixes: d27a7cb91960 ("zfcp: trace on request for open and close of WKA port")
+Cc: <stable@vger.kernel.org> #2.6.38+
+Reviewed-by: Steffen Maier <maier@linux.ibm.com>
+Reviewed-by: Jens Remus <jremus@linux.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/scsi/zfcp_fsf.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -1627,6 +1627,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_f
+ {
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+ struct zfcp_fsf_req *req;
++ unsigned long req_id = 0;
+ int retval = -EIO;
+
+ spin_lock_irq(&qdio->req_q_lock);
+@@ -1649,6 +1650,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_f
+ hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
+ req->data = wka_port;
+
++ req_id = req->req_id;
++
+ zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+ retval = zfcp_fsf_req_send(req);
+ if (retval)
+@@ -1657,7 +1660,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_f
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ if (!retval)
+- zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
++ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
+ return retval;
+ }
+
+@@ -1683,6 +1686,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_
+ {
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+ struct zfcp_fsf_req *req;
++ unsigned long req_id = 0;
+ int retval = -EIO;
+
+ spin_lock_irq(&qdio->req_q_lock);
+@@ -1705,6 +1709,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_
+ req->data = wka_port;
+ req->qtcb->header.port_handle = wka_port->handle;
+
++ req_id = req->req_id;
++
+ zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+ retval = zfcp_fsf_req_send(req);
+ if (retval)
+@@ -1713,7 +1719,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ if (!retval)
+- zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
++ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
+ return retval;
+ }
+
floppy-fix-invalid-pointer-dereference-in-drive_name.patch
floppy-fix-out-of-bounds-read-in-copy_buffer.patch
xen-let-alloc_xenballooned_pages-fail-if-not-enough-memory-free.patch
+scsi-ncr5380-always-re-enable-reselection-interrupt.patch
+scsi-ncr5380-handle-pdma-failure-reliably.patch
+revert-scsi-ncr5380-increase-register-polling-limit.patch
+scsi-core-fix-race-on-creating-sense-cache.patch
+scsi-sd_zbc-fix-compilation-warning.patch
+scsi-zfcp-fix-request-object-use-after-free-in-send-path-causing-seqno-errors.patch
+scsi-zfcp-fix-request-object-use-after-free-in-send-path-causing-wrong-traces.patch
+scsi-megaraid_sas-fix-calculation-of-target-id.patch
+scsi-mac_scsi-increase-pio-pdma-transfer-length-threshold.patch
+scsi-mac_scsi-fix-pseudo-dma-implementation-take-2.patch
+crypto-ghash-fix-unaligned-memory-access-in-ghash_setkey.patch
+crypto-caam-limit-output-iv-to-cbc-to-work-around-ctr-mode-dma-issue.patch
+crypto-ccp-validate-the-the-error-value-used-to-index-error-messages.patch
+crypto-arm64-sha1-ce-correct-digest-for-empty-data-in-finup.patch
+crypto-arm64-sha2-ce-correct-digest-for-empty-data-in-finup.patch
+crypto-chacha20poly1305-fix-atomic-sleep-when-using-async-algorithm.patch
+crypto-crypto4xx-fix-aes-ctr-blocksize-value.patch
+crypto-crypto4xx-fix-blocksize-for-cfb-and-ofb.patch
+crypto-crypto4xx-block-ciphers-should-only-accept-complete-blocks.patch
+crypto-ccp-memset-structure-fields-to-zero-before-reuse.patch
+crypto-ccp-gcm-use-const-time-tag-comparison.patch
+crypto-crypto4xx-fix-a-potential-double-free-in-ppc4xx_trng_probe.patch