git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 23 Jul 2019 09:00:09 +0000 (11:00 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 23 Jul 2019 09:00:09 +0000 (11:00 +0200)
added patches:
crypto-arm64-sha1-ce-correct-digest-for-empty-data-in-finup.patch
crypto-arm64-sha2-ce-correct-digest-for-empty-data-in-finup.patch
crypto-chacha20poly1305-fix-atomic-sleep-when-using-async-algorithm.patch
crypto-crypto4xx-fix-a-potential-double-free-in-ppc4xx_trng_probe.patch
crypto-ghash-fix-unaligned-memory-access-in-ghash_setkey.patch
scsi-mac_scsi-increase-pio-pdma-transfer-length-threshold.patch
scsi-ncr5380-always-re-enable-reselection-interrupt.patch
scsi-ncr5380-reduce-goto-statements-in-ncr5380_select.patch

queue-4.9/crypto-arm64-sha1-ce-correct-digest-for-empty-data-in-finup.patch [new file with mode: 0644]
queue-4.9/crypto-arm64-sha2-ce-correct-digest-for-empty-data-in-finup.patch [new file with mode: 0644]
queue-4.9/crypto-chacha20poly1305-fix-atomic-sleep-when-using-async-algorithm.patch [new file with mode: 0644]
queue-4.9/crypto-crypto4xx-fix-a-potential-double-free-in-ppc4xx_trng_probe.patch [new file with mode: 0644]
queue-4.9/crypto-ghash-fix-unaligned-memory-access-in-ghash_setkey.patch [new file with mode: 0644]
queue-4.9/scsi-mac_scsi-increase-pio-pdma-transfer-length-threshold.patch [new file with mode: 0644]
queue-4.9/scsi-ncr5380-always-re-enable-reselection-interrupt.patch [new file with mode: 0644]
queue-4.9/scsi-ncr5380-reduce-goto-statements-in-ncr5380_select.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/crypto-arm64-sha1-ce-correct-digest-for-empty-data-in-finup.patch b/queue-4.9/crypto-arm64-sha1-ce-correct-digest-for-empty-data-in-finup.patch
new file mode 100644
index 0000000..32f1720
--- /dev/null
@@ -0,0 +1,41 @@
+From 1d4aaf16defa86d2665ae7db0259d6cb07e2091f Mon Sep 17 00:00:00 2001
+From: Elena Petrova <lenaptr@google.com>
+Date: Tue, 28 May 2019 13:41:52 +0100
+Subject: crypto: arm64/sha1-ce - correct digest for empty data in finup
+
+From: Elena Petrova <lenaptr@google.com>
+
+commit 1d4aaf16defa86d2665ae7db0259d6cb07e2091f upstream.
+
+The sha1-ce finup implementation for ARM64 produces wrong digest
+for empty input (len=0). Expected: da39a3ee..., result: 67452301...
+(initial value of SHA internal state). The error is in sha1_ce_finup:
+for empty data `finalize` will be 1, so the code is relying on
+sha1_ce_transform to make the final round. However, in
+sha1_base_do_update, the block function will not be called when
+len == 0.
+
+Fix it by setting finalize to 0 if data is empty.
+
+Fixes: 07eb54d306f4 ("crypto: arm64/sha1-ce - move SHA-1 ARMv8 implementation to base layer")
+Cc: stable@vger.kernel.org
+Signed-off-by: Elena Petrova <lenaptr@google.com>
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/crypto/sha1-ce-glue.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/crypto/sha1-ce-glue.c
++++ b/arch/arm64/crypto/sha1-ce-glue.c
+@@ -50,7 +50,7 @@ static int sha1_ce_finup(struct shash_de
+                        unsigned int len, u8 *out)
+ {
+       struct sha1_ce_state *sctx = shash_desc_ctx(desc);
+-      bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
++      bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
+       /*
+        * Allow the asm code to perform the finalization if there is no
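The fix above hinges entirely on the "finalize" predicate: for empty input the block function is never invoked, so the asm code must not be asked to produce the final digest. The following plain-C sketch (userspace, hypothetical helper names, not kernel code) evaluates the old and new predicates side by side; the same reasoning applies verbatim to the sha2-ce patch below.

#include <stdbool.h>
#include <stdio.h>

#define SHA1_BLOCK_SIZE 64

/* Old predicate: true for count == 0 and len == 0, so the asm path was
 * asked to finalize even though no block was ever processed and the
 * state still held the SHA-1 initial value. */
static bool finalize_old(unsigned long long count, unsigned int len)
{
        return !count && !(len % SHA1_BLOCK_SIZE);
}

/* Fixed predicate: empty input now falls back to the generic finalization. */
static bool finalize_new(unsigned long long count, unsigned int len)
{
        return !count && !(len % SHA1_BLOCK_SIZE) && len;
}

int main(void)
{
        printf("len=0:  old=%d new=%d\n", finalize_old(0, 0), finalize_new(0, 0));
        printf("len=64: old=%d new=%d\n", finalize_old(0, 64), finalize_new(0, 64));
        printf("len=65: old=%d new=%d\n", finalize_old(0, 65), finalize_new(0, 65));
        return 0;
}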
diff --git a/queue-4.9/crypto-arm64-sha2-ce-correct-digest-for-empty-data-in-finup.patch b/queue-4.9/crypto-arm64-sha2-ce-correct-digest-for-empty-data-in-finup.patch
new file mode 100644
index 0000000..4abb445
--- /dev/null
@@ -0,0 +1,41 @@
+From 6bd934de1e393466b319d29c4427598fda096c57 Mon Sep 17 00:00:00 2001
+From: Elena Petrova <lenaptr@google.com>
+Date: Tue, 28 May 2019 15:35:06 +0100
+Subject: crypto: arm64/sha2-ce - correct digest for empty data in finup
+
+From: Elena Petrova <lenaptr@google.com>
+
+commit 6bd934de1e393466b319d29c4427598fda096c57 upstream.
+
+The sha256-ce finup implementation for ARM64 produces wrong digest
+for empty input (len=0). Expected: the actual digest, result: initial
+value of SHA internal state. The error is in sha256_ce_finup:
+for empty data `finalize` will be 1, so the code is relying on
+sha2_ce_transform to make the final round. However, in
+sha256_base_do_update, the block function will not be called when
+len == 0.
+
+Fix it by setting finalize to 0 if data is empty.
+
+Fixes: 03802f6a80b3a ("crypto: arm64/sha2-ce - move SHA-224/256 ARMv8 implementation to base layer")
+Cc: stable@vger.kernel.org
+Signed-off-by: Elena Petrova <lenaptr@google.com>
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/crypto/sha2-ce-glue.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/crypto/sha2-ce-glue.c
++++ b/arch/arm64/crypto/sha2-ce-glue.c
+@@ -52,7 +52,7 @@ static int sha256_ce_finup(struct shash_
+                          unsigned int len, u8 *out)
+ {
+       struct sha256_ce_state *sctx = shash_desc_ctx(desc);
+-      bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
++      bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
+       /*
+        * Allow the asm code to perform the finalization if there is no
diff --git a/queue-4.9/crypto-chacha20poly1305-fix-atomic-sleep-when-using-async-algorithm.patch b/queue-4.9/crypto-chacha20poly1305-fix-atomic-sleep-when-using-async-algorithm.patch
new file mode 100644
index 0000000..2cce6b7
--- /dev/null
@@ -0,0 +1,195 @@
+From 7545b6c2087f4ef0287c8c9b7eba6a728c67ff8e Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Fri, 31 May 2019 11:12:30 -0700
+Subject: crypto: chacha20poly1305 - fix atomic sleep when using async algorithm
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 7545b6c2087f4ef0287c8c9b7eba6a728c67ff8e upstream.
+
+Clear the CRYPTO_TFM_REQ_MAY_SLEEP flag when the chacha20poly1305
+operation is being continued from an async completion callback, since
+sleeping may not be allowed in that context.
+
+This is basically the same bug that was recently fixed in the xts and
+lrw templates.  But, it's always been broken in chacha20poly1305 too.
+This was found using syzkaller in combination with the updated crypto
+self-tests which actually test the MAY_SLEEP flag now.
+
+Reproducer:
+
+    python -c 'import socket; socket.socket(socket.AF_ALG, 5, 0).bind(
+              ("aead", "rfc7539(cryptd(chacha20-generic),poly1305-generic)"))'
+
+Kernel output:
+
+    BUG: sleeping function called from invalid context at include/crypto/algapi.h:426
+    in_atomic(): 1, irqs_disabled(): 0, pid: 1001, name: kworker/2:2
+    [...]
+    CPU: 2 PID: 1001 Comm: kworker/2:2 Not tainted 5.2.0-rc2 #5
+    Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-20181126_142135-anatol 04/01/2014
+    Workqueue: crypto cryptd_queue_worker
+    Call Trace:
+     __dump_stack lib/dump_stack.c:77 [inline]
+     dump_stack+0x4d/0x6a lib/dump_stack.c:113
+     ___might_sleep kernel/sched/core.c:6138 [inline]
+     ___might_sleep.cold.19+0x8e/0x9f kernel/sched/core.c:6095
+     crypto_yield include/crypto/algapi.h:426 [inline]
+     crypto_hash_walk_done+0xd6/0x100 crypto/ahash.c:113
+     shash_ahash_update+0x41/0x60 crypto/shash.c:251
+     shash_async_update+0xd/0x10 crypto/shash.c:260
+     crypto_ahash_update include/crypto/hash.h:539 [inline]
+     poly_setkey+0xf6/0x130 crypto/chacha20poly1305.c:337
+     poly_init+0x51/0x60 crypto/chacha20poly1305.c:364
+     async_done_continue crypto/chacha20poly1305.c:78 [inline]
+     poly_genkey_done+0x15/0x30 crypto/chacha20poly1305.c:369
+     cryptd_skcipher_complete+0x29/0x70 crypto/cryptd.c:279
+     cryptd_skcipher_decrypt+0xcd/0x110 crypto/cryptd.c:339
+     cryptd_queue_worker+0x70/0xa0 crypto/cryptd.c:184
+     process_one_work+0x1ed/0x420 kernel/workqueue.c:2269
+     worker_thread+0x3e/0x3a0 kernel/workqueue.c:2415
+     kthread+0x11f/0x140 kernel/kthread.c:255
+     ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:352
+
+Fixes: 71ebc4d1b27d ("crypto: chacha20poly1305 - Add a ChaCha20-Poly1305 AEAD construction, RFC7539")
+Cc: <stable@vger.kernel.org> # v4.2+
+Cc: Martin Willi <martin@strongswan.org>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/chacha20poly1305.c |   30 +++++++++++++++++++-----------
+ 1 file changed, 19 insertions(+), 11 deletions(-)
+
+--- a/crypto/chacha20poly1305.c
++++ b/crypto/chacha20poly1305.c
+@@ -67,6 +67,8 @@ struct chachapoly_req_ctx {
+       unsigned int cryptlen;
+       /* Actual AD, excluding IV */
+       unsigned int assoclen;
++      /* request flags, with MAY_SLEEP cleared if needed */
++      u32 flags;
+       union {
+               struct poly_req poly;
+               struct chacha_req chacha;
+@@ -76,8 +78,12 @@ struct chachapoly_req_ctx {
+ static inline void async_done_continue(struct aead_request *req, int err,
+                                      int (*cont)(struct aead_request *))
+ {
+-      if (!err)
++      if (!err) {
++              struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
++
++              rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+               err = cont(req);
++      }
+       if (err != -EINPROGRESS && err != -EBUSY)
+               aead_request_complete(req, err);
+@@ -144,7 +150,7 @@ static int chacha_decrypt(struct aead_re
+               dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
+       }
+-      skcipher_request_set_callback(&creq->req, aead_request_flags(req),
++      skcipher_request_set_callback(&creq->req, rctx->flags,
+                                     chacha_decrypt_done, req);
+       skcipher_request_set_tfm(&creq->req, ctx->chacha);
+       skcipher_request_set_crypt(&creq->req, src, dst,
+@@ -188,7 +194,7 @@ static int poly_tail(struct aead_request
+       memcpy(&preq->tail.cryptlen, &len, sizeof(len));
+       sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail));
+-      ahash_request_set_callback(&preq->req, aead_request_flags(req),
++      ahash_request_set_callback(&preq->req, rctx->flags,
+                                  poly_tail_done, req);
+       ahash_request_set_tfm(&preq->req, ctx->poly);
+       ahash_request_set_crypt(&preq->req, preq->src,
+@@ -219,7 +225,7 @@ static int poly_cipherpad(struct aead_re
+       sg_init_table(preq->src, 1);
+       sg_set_buf(preq->src, &preq->pad, padlen);
+-      ahash_request_set_callback(&preq->req, aead_request_flags(req),
++      ahash_request_set_callback(&preq->req, rctx->flags,
+                                  poly_cipherpad_done, req);
+       ahash_request_set_tfm(&preq->req, ctx->poly);
+       ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
+@@ -250,7 +256,7 @@ static int poly_cipher(struct aead_reque
+       sg_init_table(rctx->src, 2);
+       crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
+-      ahash_request_set_callback(&preq->req, aead_request_flags(req),
++      ahash_request_set_callback(&preq->req, rctx->flags,
+                                  poly_cipher_done, req);
+       ahash_request_set_tfm(&preq->req, ctx->poly);
+       ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
+@@ -280,7 +286,7 @@ static int poly_adpad(struct aead_reques
+       sg_init_table(preq->src, 1);
+       sg_set_buf(preq->src, preq->pad, padlen);
+-      ahash_request_set_callback(&preq->req, aead_request_flags(req),
++      ahash_request_set_callback(&preq->req, rctx->flags,
+                                  poly_adpad_done, req);
+       ahash_request_set_tfm(&preq->req, ctx->poly);
+       ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
+@@ -304,7 +310,7 @@ static int poly_ad(struct aead_request *
+       struct poly_req *preq = &rctx->u.poly;
+       int err;
+-      ahash_request_set_callback(&preq->req, aead_request_flags(req),
++      ahash_request_set_callback(&preq->req, rctx->flags,
+                                  poly_ad_done, req);
+       ahash_request_set_tfm(&preq->req, ctx->poly);
+       ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
+@@ -331,7 +337,7 @@ static int poly_setkey(struct aead_reque
+       sg_init_table(preq->src, 1);
+       sg_set_buf(preq->src, rctx->key, sizeof(rctx->key));
+-      ahash_request_set_callback(&preq->req, aead_request_flags(req),
++      ahash_request_set_callback(&preq->req, rctx->flags,
+                                  poly_setkey_done, req);
+       ahash_request_set_tfm(&preq->req, ctx->poly);
+       ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
+@@ -355,7 +361,7 @@ static int poly_init(struct aead_request
+       struct poly_req *preq = &rctx->u.poly;
+       int err;
+-      ahash_request_set_callback(&preq->req, aead_request_flags(req),
++      ahash_request_set_callback(&preq->req, rctx->flags,
+                                  poly_init_done, req);
+       ahash_request_set_tfm(&preq->req, ctx->poly);
+@@ -393,7 +399,7 @@ static int poly_genkey(struct aead_reque
+       chacha_iv(creq->iv, req, 0);
+-      skcipher_request_set_callback(&creq->req, aead_request_flags(req),
++      skcipher_request_set_callback(&creq->req, rctx->flags,
+                                     poly_genkey_done, req);
+       skcipher_request_set_tfm(&creq->req, ctx->chacha);
+       skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
+@@ -433,7 +439,7 @@ static int chacha_encrypt(struct aead_re
+               dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
+       }
+-      skcipher_request_set_callback(&creq->req, aead_request_flags(req),
++      skcipher_request_set_callback(&creq->req, rctx->flags,
+                                     chacha_encrypt_done, req);
+       skcipher_request_set_tfm(&creq->req, ctx->chacha);
+       skcipher_request_set_crypt(&creq->req, src, dst,
+@@ -451,6 +457,7 @@ static int chachapoly_encrypt(struct aea
+       struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+       rctx->cryptlen = req->cryptlen;
++      rctx->flags = aead_request_flags(req);
+       /* encrypt call chain:
+        * - chacha_encrypt/done()
+@@ -472,6 +479,7 @@ static int chachapoly_decrypt(struct aea
+       struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+       rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
++      rctx->flags = aead_request_flags(req);
+       /* decrypt call chain:
+        * - poly_genkey/done()
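The change above follows one pattern: capture the request flags once in the top-level encrypt/decrypt entry points, clear MAY_SLEEP when execution resumes from an async completion callback, and pass the stashed flags to every sub-request instead of re-reading them from the original request. A minimal sketch of that pattern, with made-up types and flag names rather than the kernel crypto API:

#include <stdio.h>

#define REQ_MAY_SLEEP 0x1u

struct req_ctx {
        unsigned int flags;     /* request flags, MAY_SLEEP cleared if needed */
};

/* Every step of the operation issues its sub-requests with ctx->flags,
 * never with the flags of the original request. */
static int continue_op(struct req_ctx *ctx)
{
        printf("continuing with flags=%#x (may sleep: %s)\n",
               ctx->flags, (ctx->flags & REQ_MAY_SLEEP) ? "yes" : "no");
        return 0;
}

/* Runs from an async completion callback, i.e. possibly atomic context. */
static void async_done(struct req_ctx *ctx, int err)
{
        if (!err) {
                ctx->flags &= ~REQ_MAY_SLEEP;   /* no sleeping from here on */
                err = continue_op(ctx);
        }
        if (err)
                fprintf(stderr, "request failed: %d\n", err);
}

int main(void)
{
        struct req_ctx ctx = { .flags = REQ_MAY_SLEEP };

        continue_op(&ctx);      /* synchronous path: sleeping still allowed */
        async_done(&ctx, 0);    /* async continuation: MAY_SLEEP cleared */
        return 0;
}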
diff --git a/queue-4.9/crypto-crypto4xx-fix-a-potential-double-free-in-ppc4xx_trng_probe.patch b/queue-4.9/crypto-crypto4xx-fix-a-potential-double-free-in-ppc4xx_trng_probe.patch
new file mode 100644
index 0000000..af6e3b5
--- /dev/null
@@ -0,0 +1,52 @@
+From 95566aa75cd6b3b404502c06f66956b5481194b3 Mon Sep 17 00:00:00 2001
+From: Wen Yang <wen.yang99@zte.com.cn>
+Date: Mon, 8 Jul 2019 14:19:03 +0800
+Subject: crypto: crypto4xx - fix a potential double free in ppc4xx_trng_probe
+
+From: Wen Yang <wen.yang99@zte.com.cn>
+
+commit 95566aa75cd6b3b404502c06f66956b5481194b3 upstream.
+
+There is a possible double free issue in ppc4xx_trng_probe():
+
+85:    dev->trng_base = of_iomap(trng, 0);
+86:    of_node_put(trng);          ---> released here
+87:    if (!dev->trng_base)
+88:            goto err_out;
+...
+110:   err_out:
+111:           of_node_put(trng);  ---> double released here
+...
+
+This issue was detected by using the Coccinelle software.
+We fix it by removing the unnecessary of_node_put().
+
+Fixes: 5343e674f32f ("crypto4xx: integrate ppc4xx-rng into crypto4xx")
+Signed-off-by: Wen Yang <wen.yang99@zte.com.cn>
+Cc: <stable@vger.kernel.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Allison Randal <allison@lohutok.net>
+Cc: Armijn Hemel <armijn@tjaldur.nl>
+Cc: Julia Lawall <Julia.Lawall@lip6.fr>
+Cc: linux-crypto@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Acked-by: Julia Lawall <julia.lawall@lip6.fr>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/amcc/crypto4xx_trng.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_trng.c
++++ b/drivers/crypto/amcc/crypto4xx_trng.c
+@@ -111,7 +111,6 @@ void ppc4xx_trng_probe(struct crypto4xx_
+       return;
+ err_out:
+-      of_node_put(trng);
+       iounmap(dev->trng_base);
+       kfree(rng);
+       dev->trng_base = NULL;
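The rule behind the fix is that a reference taken on a device-tree node must be dropped exactly once; the node is already put right after of_iomap(), so the additional put on the error path released it a second time. A toy refcount sketch of the same shape (hypothetical names, not of_node_put() itself):

#include <assert.h>
#include <stdio.h>

struct node {
        int refcount;
};

static void node_put(struct node *n)
{
        assert(n->refcount > 0 && "double put");
        n->refcount--;
}

static int probe(struct node *n, int fail_later)
{
        node_put(n);            /* reference dropped right after use */

        if (fail_later)
                goto err_out;
        return 0;

err_out:
        /* The buggy version called node_put(n) again here, releasing the
         * same reference twice; the fix is simply not to. */
        return -1;
}

int main(void)
{
        struct node n = { .refcount = 1 };

        probe(&n, 1);
        printf("refcount after error path: %d\n", n.refcount);  /* 0, not -1 */
        return 0;
}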
diff --git a/queue-4.9/crypto-ghash-fix-unaligned-memory-access-in-ghash_setkey.patch b/queue-4.9/crypto-ghash-fix-unaligned-memory-access-in-ghash_setkey.patch
new file mode 100644
index 0000000..0305b7f
--- /dev/null
@@ -0,0 +1,57 @@
+From 5c6bc4dfa515738149998bb0db2481a4fdead979 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 30 May 2019 10:50:39 -0700
+Subject: crypto: ghash - fix unaligned memory access in ghash_setkey()
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 5c6bc4dfa515738149998bb0db2481a4fdead979 upstream.
+
+Changing ghash_mod_init() to be subsys_initcall made it start running
+before the alignment fault handler has been installed on ARM.  In kernel
+builds where the keys in the ghash test vectors happened to be
+misaligned in the kernel image, this exposed the longstanding bug that
+ghash_setkey() is incorrectly casting the key buffer (which can have any
+alignment) to be128 for passing to gf128mul_init_4k_lle().
+
+Fix this by memcpy()ing the key to a temporary buffer.
+
+Don't fix it by setting an alignmask on the algorithm instead because
+that would unnecessarily force alignment of the data too.
+
+Fixes: 2cdc6899a88e ("crypto: ghash - Add GHASH digest algorithm for GCM")
+Reported-by: Peter Robinson <pbrobinson@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Tested-by: Peter Robinson <pbrobinson@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/ghash-generic.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/crypto/ghash-generic.c
++++ b/crypto/ghash-generic.c
+@@ -34,6 +34,7 @@ static int ghash_setkey(struct crypto_sh
+                       const u8 *key, unsigned int keylen)
+ {
+       struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
++      be128 k;
+       if (keylen != GHASH_BLOCK_SIZE) {
+               crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+@@ -42,7 +43,12 @@ static int ghash_setkey(struct crypto_sh
+       if (ctx->gf128)
+               gf128mul_free_4k(ctx->gf128);
+-      ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
++
++      BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE);
++      memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */
++      ctx->gf128 = gf128mul_init_4k_lle(&k);
++      memzero_explicit(&k, GHASH_BLOCK_SIZE);
++
+       if (!ctx->gf128)
+               return -ENOMEM;
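The fix is the standard remedy for casting a caller-supplied buffer to a wider type: copy the bytes into a suitably aligned local, use that, then wipe it. A standalone sketch of the pattern, with a plain struct standing in for the kernel's be128 and hypothetical function names (not the crypto API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GHASH_BLOCK_SIZE 16

struct be128 {
        uint64_t a, b;
};

static void init_from_key(const uint8_t *key)
{
        struct be128 k;

        /* The buggy code cast key straight to a be128 pointer, which can
         * fault on strict-alignment CPUs; copying into an aligned local is
         * legal for any caller alignment. */
        memcpy(&k, key, GHASH_BLOCK_SIZE);
        printf("key words: %016llx %016llx\n",
               (unsigned long long)k.a, (unsigned long long)k.b);
        /* The kernel uses memzero_explicit() so the wipe cannot be optimized
         * away; plain memset() is only good enough for a sketch. */
        memset(&k, 0, sizeof(k));
}

int main(void)
{
        uint8_t buf[GHASH_BLOCK_SIZE + 1] = { 1, 2, 3 };

        init_from_key(buf + 1);         /* deliberately misaligned source */
        return 0;
}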
diff --git a/queue-4.9/scsi-mac_scsi-increase-pio-pdma-transfer-length-threshold.patch b/queue-4.9/scsi-mac_scsi-increase-pio-pdma-transfer-length-threshold.patch
new file mode 100644
index 0000000..4c69913
--- /dev/null
@@ -0,0 +1,62 @@
+From 7398cee4c3e6aea1ba07a6449e5533ecd0b92cdd Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Sun, 9 Jun 2019 11:19:11 +1000
+Subject: scsi: mac_scsi: Increase PIO/PDMA transfer length threshold
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 7398cee4c3e6aea1ba07a6449e5533ecd0b92cdd upstream.
+
+Some targets introduce delays when handshaking the response to certain
+commands. For example, a disk may send a 96-byte response to an INQUIRY
+command (or a 24-byte response to a MODE SENSE command) too slowly.
+
+Apparently the first 12 or 14 bytes are handshaked okay but then the system
+bus error timeout is reached while transferring the next word.
+
+Since the scsi bus phase hasn't changed, the driver then sets the target
+borken flag to prevent further PDMA transfers. The driver also logs the
+warning, "switching to slow handshake".
+
+Raise the PDMA threshold to 512 bytes so that PIO transfers will be used
+for these commands. This default is sufficiently low that PDMA will still
+be used for READ and WRITE commands.
+
+The existing threshold (16 bytes) was chosen more or less at random.
+However, best performance requires the threshold to be as low as possible.
+Those systems that don't need the PIO workaround at all may benefit from
+mac_scsi.setup_use_pdma=1
+
+Cc: Michael Schmitz <schmitzmic@gmail.com>
+Cc: stable@vger.kernel.org # v4.14+
+Fixes: 3a0f64bfa907 ("mac_scsi: Fix pseudo DMA implementation")
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Tested-by: Michael Schmitz <schmitzmic@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/mac_scsi.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/mac_scsi.c
++++ b/drivers/scsi/mac_scsi.c
+@@ -54,7 +54,7 @@ static int setup_cmd_per_lun = -1;
+ module_param(setup_cmd_per_lun, int, 0);
+ static int setup_sg_tablesize = -1;
+ module_param(setup_sg_tablesize, int, 0);
+-static int setup_use_pdma = -1;
++static int setup_use_pdma = 512;
+ module_param(setup_use_pdma, int, 0);
+ static int setup_hostid = -1;
+ module_param(setup_hostid, int, 0);
+@@ -325,7 +325,7 @@ static int macscsi_dma_xfer_len(struct S
+       struct NCR5380_hostdata *hostdata = shost_priv(instance);
+       if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
+-          cmd->SCp.this_residual < 16)
++          cmd->SCp.this_residual < setup_use_pdma)
+               return 0;
+       return cmd->SCp.this_residual;
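The tunable the patch changes feeds a simple threshold check in the driver's transfer-length hook: return 0 to force PIO for short residuals, or the full residual to allow pseudo DMA. A small sketch of that decision with hypothetical names (the real hook reads the residual from the scsi_cmnd and the no-PDMA flag from hostdata):

#include <stdbool.h>
#include <stdio.h>

static int use_pdma_threshold = 512;    /* effectively 16 before the patch */

/* Returns how many bytes to move by pseudo DMA; 0 means "fall back to PIO". */
static int dma_xfer_len(bool no_pseudo_dma, int residual)
{
        if (no_pseudo_dma || residual < use_pdma_threshold)
                return 0;
        return residual;
}

int main(void)
{
        /* A 96-byte INQUIRY response is below the new threshold, so PIO is
         * used and the slow handshake no longer hits the bus error timeout. */
        printf("INQUIRY (96 bytes): %d\n", dma_xfer_len(false, 96));
        /* Large READ/WRITE transfers still go through pseudo DMA. */
        printf("READ (4096 bytes):  %d\n", dma_xfer_len(false, 4096));
        return 0;
}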
diff --git a/queue-4.9/scsi-ncr5380-always-re-enable-reselection-interrupt.patch b/queue-4.9/scsi-ncr5380-always-re-enable-reselection-interrupt.patch
new file mode 100644
index 0000000..cf82616
--- /dev/null
@@ -0,0 +1,93 @@
+From 57f31326518e98ee4cabf9a04efe00ed57c54147 Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Sun, 9 Jun 2019 11:19:11 +1000
+Subject: scsi: NCR5380: Always re-enable reselection interrupt
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 57f31326518e98ee4cabf9a04efe00ed57c54147 upstream.
+
+The reselection interrupt gets disabled during selection and must be
+re-enabled when hostdata->connected becomes NULL. If it isn't re-enabled a
+disconnected command may time-out or the target may wedge the bus while
+trying to reselect the host. This can happen after a command is aborted.
+
+Fix this by enabling the reselection interrupt in NCR5380_main() after
+calls to NCR5380_select() and NCR5380_information_transfer() return.
+
+Cc: Michael Schmitz <schmitzmic@gmail.com>
+Cc: stable@vger.kernel.org # v4.9+
+Fixes: 8b00c3d5d40d ("ncr5380: Implement new eh_abort_handler")
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Tested-by: Stan Johnson <userm57@yahoo.com>
+Tested-by: Michael Schmitz <schmitzmic@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/NCR5380.c |   12 ++----------
+ 1 file changed, 2 insertions(+), 10 deletions(-)
+
+--- a/drivers/scsi/NCR5380.c
++++ b/drivers/scsi/NCR5380.c
+@@ -813,6 +813,8 @@ static void NCR5380_main(struct work_str
+                       NCR5380_information_transfer(instance);
+                       done = 0;
+               }
++              if (!hostdata->connected)
++                      NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+               spin_unlock_irq(&hostdata->lock);
+               if (!done)
+                       cond_resched();
+@@ -1208,8 +1210,6 @@ static struct scsi_cmnd *NCR5380_select(
+               spin_lock_irq(&hostdata->lock);
+               NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+               NCR5380_reselect(instance);
+-              if (!hostdata->connected)
+-                      NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+               shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n");
+               goto out;
+       }
+@@ -1217,7 +1217,6 @@ static struct scsi_cmnd *NCR5380_select(
+       if (err < 0) {
+               spin_lock_irq(&hostdata->lock);
+               NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+-              NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+               /* Can't touch cmd if it has been reclaimed by the scsi ML */
+               if (!hostdata->selecting)
+@@ -1255,7 +1254,6 @@ static struct scsi_cmnd *NCR5380_select(
+       if (err < 0) {
+               shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
+               NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+-              NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+               goto out;
+       }
+       if (!hostdata->selecting) {
+@@ -1906,9 +1904,6 @@ static void NCR5380_information_transfer
+                                        */
+                                       NCR5380_write(TARGET_COMMAND_REG, 0);
+-                                      /* Enable reselect interrupts */
+-                                      NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+-
+                                       maybe_release_dma_irq(instance);
+                                       return;
+                               case MESSAGE_REJECT:
+@@ -1940,8 +1935,6 @@ static void NCR5380_information_transfer
+                                        */
+                                       NCR5380_write(TARGET_COMMAND_REG, 0);
+-                                      /* Enable reselect interrupts */
+-                                      NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ #ifdef SUN3_SCSI_VME
+                                       dregs->csr |= CSR_DMA_ENABLE;
+ #endif
+@@ -2049,7 +2042,6 @@ static void NCR5380_information_transfer
+                                       cmd->result = DID_ERROR << 16;
+                                       complete_cmd(instance, cmd);
+                                       maybe_release_dma_irq(instance);
+-                                      NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+                                       return;
+                               }
+                               msgout = NOP;
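Structurally, the patch replaces the scattered per-exit-path writes to SELECT_ENABLE_REG with a single write in the main loop whenever no command is connected. A toy sketch of that "restore it in one place" shape, with a boolean standing in for the register write (hypothetical names, not driver code):

#include <stdbool.h>
#include <stdio.h>

static bool connected;
static bool reselect_irq_enabled;

/* Stands in for the SELECT_ENABLE_REG write that re-arms reselection. */
static void enable_reselect_irq(void)
{
        reselect_irq_enabled = true;
}

static void run_command(bool disconnects)
{
        reselect_irq_enabled = false;   /* disabled while selecting */
        connected = !disconnects;       /* command finished or disconnected */
}

/* Main loop: one place restores the interrupt whenever the bus is free,
 * instead of duplicating the register write on every exit path. */
static void main_loop(void)
{
        run_command(true);
        if (!connected)
                enable_reselect_irq();
        printf("reselection irq enabled: %s\n",
               reselect_irq_enabled ? "yes" : "no");
}

int main(void)
{
        main_loop();
        return 0;
}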
diff --git a/queue-4.9/scsi-ncr5380-reduce-goto-statements-in-ncr5380_select.patch b/queue-4.9/scsi-ncr5380-reduce-goto-statements-in-ncr5380_select.patch
new file mode 100644
index 0000000..f92d7fe
--- /dev/null
@@ -0,0 +1,73 @@
+From 6a162836997c10bbefb7c7ca772201cc45c0e4a6 Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Thu, 27 Sep 2018 11:17:11 +1000
+Subject: scsi: NCR5380: Reduce goto statements in NCR5380_select()
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+commit 6a162836997c10bbefb7c7ca772201cc45c0e4a6 upstream.
+
+Replace a 'goto' statement with a simple 'return' where possible.  This
+improves readability. No functional change.
+
+Tested-by: Michael Schmitz <schmitzmic@gmail.com>
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/NCR5380.c |   21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+--- a/drivers/scsi/NCR5380.c
++++ b/drivers/scsi/NCR5380.c
+@@ -1086,7 +1086,7 @@ static struct scsi_cmnd *NCR5380_select(
+       if (!hostdata->selecting) {
+               /* Command was aborted */
+               NCR5380_write(MODE_REG, MR_BASE);
+-              goto out;
++              return NULL;
+       }
+       if (err < 0) {
+               NCR5380_write(MODE_REG, MR_BASE);
+@@ -1135,7 +1135,7 @@ static struct scsi_cmnd *NCR5380_select(
+       if (!hostdata->selecting) {
+               NCR5380_write(MODE_REG, MR_BASE);
+               NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+-              goto out;
++              return NULL;
+       }
+       dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n");
+@@ -1218,13 +1218,16 @@ static struct scsi_cmnd *NCR5380_select(
+               spin_lock_irq(&hostdata->lock);
+               NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+               NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
++
+               /* Can't touch cmd if it has been reclaimed by the scsi ML */
+-              if (hostdata->selecting) {
+-                      cmd->result = DID_BAD_TARGET << 16;
+-                      complete_cmd(instance, cmd);
+-                      dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n");
+-                      cmd = NULL;
+-              }
++              if (!hostdata->selecting)
++                      return NULL;
++
++              cmd->result = DID_BAD_TARGET << 16;
++              complete_cmd(instance, cmd);
++              dsprintk(NDEBUG_SELECTION, instance,
++                      "target did not respond within 250ms\n");
++              cmd = NULL;
+               goto out;
+       }
+@@ -1257,7 +1260,7 @@ static struct scsi_cmnd *NCR5380_select(
+       }
+       if (!hostdata->selecting) {
+               do_abort(instance);
+-              goto out;
++              return NULL;
+       }
+       dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n",
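The refactor simply prefers an early return over a jump to the shared label wherever the exit path has nothing to unwind. A trivial illustration of the two styles (hypothetical names, not driver code):

#include <stdio.h>

static int with_goto(int aborted)
{
        int ret = 0;

        if (aborted)
                goto out;
        ret = 1;
out:
        return ret;
}

static int with_return(int aborted)
{
        if (aborted)
                return 0;       /* nothing to unwind, so just return */
        return 1;
}

int main(void)
{
        printf("%d %d\n", with_goto(1), with_return(1));
        return 0;
}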
diff --git a/queue-4.9/series b/queue-4.9/series
index 552e5443c51666f3c2f1394e32b95a68c240aed4..4b72ca491abbf84a1fa71ead813b836ec108c1c4 100644
@@ -77,3 +77,11 @@ floppy-fix-out-of-bounds-read-in-next_valid_format.patch
 floppy-fix-invalid-pointer-dereference-in-drive_name.patch
 floppy-fix-out-of-bounds-read-in-copy_buffer.patch
 xen-let-alloc_xenballooned_pages-fail-if-not-enough-memory-free.patch
+scsi-ncr5380-reduce-goto-statements-in-ncr5380_select.patch
+scsi-ncr5380-always-re-enable-reselection-interrupt.patch
+scsi-mac_scsi-increase-pio-pdma-transfer-length-threshold.patch
+crypto-ghash-fix-unaligned-memory-access-in-ghash_setkey.patch
+crypto-arm64-sha1-ce-correct-digest-for-empty-data-in-finup.patch
+crypto-arm64-sha2-ce-correct-digest-for-empty-data-in-finup.patch
+crypto-chacha20poly1305-fix-atomic-sleep-when-using-async-algorithm.patch
+crypto-crypto4xx-fix-a-potential-double-free-in-ppc4xx_trng_probe.patch