4.9-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 19 Jul 2017 08:01:51 +0000 (10:01 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 19 Jul 2017 08:01:51 +0000 (10:01 +0200)
added patches:
crypto-atmel-only-treat-ebusy-as-transient-if-backlog.patch
crypto-caam-fix-signals-handling.patch
crypto-caam-properly-set-iv-after-en-de-crypt.patch
crypto-sha1-ssse3-disable-avx2.patch
crypto-talitos-extend-max-key-length-for-sha384-512-hmac-and-aead.patch
revert-sched-core-optimize-sched_smt.patch

queue-4.9/crypto-atmel-only-treat-ebusy-as-transient-if-backlog.patch [new file with mode: 0644]
queue-4.9/crypto-caam-fix-signals-handling.patch [new file with mode: 0644]
queue-4.9/crypto-caam-properly-set-iv-after-en-de-crypt.patch [new file with mode: 0644]
queue-4.9/crypto-sha1-ssse3-disable-avx2.patch [new file with mode: 0644]
queue-4.9/crypto-talitos-extend-max-key-length-for-sha384-512-hmac-and-aead.patch [new file with mode: 0644]
queue-4.9/revert-sched-core-optimize-sched_smt.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/crypto-atmel-only-treat-ebusy-as-transient-if-backlog.patch b/queue-4.9/crypto-atmel-only-treat-ebusy-as-transient-if-backlog.patch
new file mode 100644 (file)
index 0000000..fd6afde
--- /dev/null
@@ -0,0 +1,35 @@
+From 1606043f214f912a52195293614935811a6e3e53 Mon Sep 17 00:00:00 2001
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+Date: Wed, 28 Jun 2017 10:22:03 +0300
+Subject: crypto: atmel - only treat EBUSY as transient if backlog
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+commit 1606043f214f912a52195293614935811a6e3e53 upstream.
+
+The Atmel SHA driver was treating -EBUSY as an indication of queueing
+to the backlog without checking that backlogging is enabled for the request.
+
+Fix it by checking request flags.
+
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/atmel-sha.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/atmel-sha.c
++++ b/drivers/crypto/atmel-sha.c
+@@ -1000,7 +1000,9 @@ static int atmel_sha_finup(struct ahash_
+       ctx->flags |= SHA_FLAGS_FINUP;
+       err1 = atmel_sha_update(req);
+-      if (err1 == -EINPROGRESS || err1 == -EBUSY)
++      if (err1 == -EINPROGRESS ||
++          (err1 == -EBUSY && (ahash_request_flags(req) &
++                              CRYPTO_TFM_REQ_MAY_BACKLOG)))
+               return err1;
+       /*
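
A note on the convention the fix enforces (sketch only; the helper name below is hypothetical and not part of the patch): in the crypto API, -EINPROGRESS always means "request queued, the completion callback will fire later", while -EBUSY means the same only when the caller opted in with CRYPTO_TFM_REQ_MAY_BACKLOG. Without that flag, -EBUSY is a hard "queue full" failure and no callback will come.

#include <crypto/hash.h>
#include <linux/errno.h>

/* Hypothetical helper illustrating the contract; not in the patch. */
static bool crypto_req_transient(struct ahash_request *req, int err)
{
	if (err == -EINPROGRESS)
		return true;
	/* -EBUSY is transient only if backlogging was requested. */
	return err == -EBUSY &&
	       (ahash_request_flags(req) & CRYPTO_TFM_REQ_MAY_BACKLOG);
}
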
diff --git a/queue-4.9/crypto-caam-fix-signals-handling.patch b/queue-4.9/crypto-caam-fix-signals-handling.patch
new file mode 100644 (file)
index 0000000..4136139
--- /dev/null
@@ -0,0 +1,59 @@
+From 7459e1d25ffefa2b1be799477fcc1f6c62f6cec7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Fri, 7 Jul 2017 16:57:06 +0300
+Subject: crypto: caam - fix signals handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Horia Geantă <horia.geanta@nxp.com>
+
+commit 7459e1d25ffefa2b1be799477fcc1f6c62f6cec7 upstream.
+
+The driver does not properly handle the case where signals interrupt
+wait_for_completion_interruptible():
+- it does not check the return value
+- the completion structure is allocated on the stack; if a signal
+interrupts the sleep, it goes out of scope, causing the worker thread
+(caam_jr_dequeue) to fail when it accesses it
+
+wait_for_completion_interruptible() is replaced with the uninterruptible
+wait_for_completion().
+We choose to block all signals while waiting for I/O (device executing
+the split key generation job descriptor) since the alternative - in
+order to have a deterministic device state - would be to flush the job
+ring (aborting *all* in-progress jobs).
+
+Fixes: 045e36780f115 ("crypto: caam - ahash hmac support")
+Fixes: 4c1ec1f930154 ("crypto: caam - refactor key_gen, sg")
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamhash.c |    2 +-
+ drivers/crypto/caam/key_gen.c  |    2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -491,7 +491,7 @@ static int hash_digest_key(struct caam_h
+       ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+       if (!ret) {
+               /* in progress */
+-              wait_for_completion_interruptible(&result.completion);
++              wait_for_completion(&result.completion);
+               ret = result.err;
+ #ifdef DEBUG
+               print_hex_dump(KERN_ERR,
+--- a/drivers/crypto/caam/key_gen.c
++++ b/drivers/crypto/caam/key_gen.c
+@@ -103,7 +103,7 @@ int gen_split_key(struct device *jrdev,
+       ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+       if (!ret) {
+               /* in progress */
+-              wait_for_completion_interruptible(&result.completion);
++              wait_for_completion(&result.completion);
+               ret = result.err;
+ #ifdef DEBUG
+               print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
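
The lifetime hazard described above is worth a sketch (simplified from the two call sites; struct split_key_result, caam_jr_enqueue() and split_key_done() are assumed to follow the driver's own headers): with an interruptible wait, a signal hands control back to the caller while the on-stack completion is still registered with the job ring.

#include <linux/completion.h>
#include <linux/device.h>

struct split_key_result {
	struct completion completion;
	int err;
};

static int run_split_key_job(struct device *jrdev, u32 *desc)
{
	struct split_key_result result;	/* lives on this stack frame */
	int ret;

	init_completion(&result.completion);
	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret)
		return ret;

	/*
	 * wait_for_completion_interruptible() could return -ERESTARTSYS
	 * here on a signal; this frame (and result) would then be reused
	 * while caam_jr_dequeue() still holds a pointer to it.  The
	 * uninterruptible wait cannot return before split_key_done()
	 * has run, so the completion stays valid.
	 */
	wait_for_completion(&result.completion);
	return result.err;
}
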
diff --git a/queue-4.9/crypto-caam-properly-set-iv-after-en-de-crypt.patch b/queue-4.9/crypto-caam-properly-set-iv-after-en-de-crypt.patch
new file mode 100644 (file)
index 0000000..b208b83
--- /dev/null
@@ -0,0 +1,89 @@
+From 854b06f768794cd664886ec3ba3a5b1c58d42167 Mon Sep 17 00:00:00 2001
+From: David Gstir <david@sigma-star.at>
+Date: Wed, 28 Jun 2017 15:27:10 +0200
+Subject: crypto: caam - properly set IV after {en,de}crypt
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: David Gstir <david@sigma-star.at>
+
+commit 854b06f768794cd664886ec3ba3a5b1c58d42167 upstream.
+
+Certain cipher modes like CTS expect the IV (req->info) of
+ablkcipher_request (or equivalently req->iv of skcipher_request) to
+contain the last ciphertext block when the {en,de}crypt operation is done.
+This is currently not the case for the CAAM driver which in turn breaks
+e.g. cts(cbc(aes)) when the CAAM driver is enabled.
+
+This patch fixes the CAAM driver to properly set the IV after the
+{en,de}crypt operation of ablkcipher finishes.
+
+This issue was revealed by the changes in the SW CTS mode in commit
+0605c41cc53ca ("crypto: cts - Convert to skcipher")
+
+Signed-off-by: David Gstir <david@sigma-star.at>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg.c |   20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -2014,10 +2014,10 @@ static void ablkcipher_encrypt_done(stru
+ {
+       struct ablkcipher_request *req = context;
+       struct ablkcipher_edesc *edesc;
+-#ifdef DEBUG
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++#ifdef DEBUG
+       dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+ #endif
+@@ -2037,6 +2037,14 @@ static void ablkcipher_encrypt_done(stru
+ #endif
+       ablkcipher_unmap(jrdev, edesc, req);
++
++      /*
++       * The crypto API expects us to set the IV (req->info) to the last
++       * ciphertext block. This is used e.g. by the CTS mode.
++       */
++      scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
++                               ivsize, 0);
++
+       kfree(edesc);
+       ablkcipher_request_complete(req, err);
+@@ -2047,10 +2055,10 @@ static void ablkcipher_decrypt_done(stru
+ {
+       struct ablkcipher_request *req = context;
+       struct ablkcipher_edesc *edesc;
+-#ifdef DEBUG
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++#ifdef DEBUG
+       dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+ #endif
+@@ -2069,6 +2077,14 @@ static void ablkcipher_decrypt_done(stru
+ #endif
+       ablkcipher_unmap(jrdev, edesc, req);
++
++      /*
++       * The crypto API expects us to set the IV (req->info) to the last
++       * ciphertext block.
++       */
++      scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
++                               ivsize, 0);
++
+       kfree(edesc);
+       ablkcipher_request_complete(req, err);
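
For context on the contract being restored (assumed caller pattern, 4.9-era ablkcipher API; the helper below is illustrative, not from the patch): callers may reuse the IV buffer across chained requests, so the driver must leave the last ciphertext block in req->info when each call completes.

#include <linux/crypto.h>

/*
 * Encrypt two buffers as one continuous CBC chain.  The second call's
 * IV is whatever the driver wrote back into @iv after the first call,
 * which is exactly the write-back this patch adds to CAAM.  Synchronous
 * completion is assumed for brevity; a real caller also handles
 * -EINPROGRESS/-EBUSY.
 */
static int cbc_encrypt_chained(struct ablkcipher_request *req,
			       struct scatterlist *src1, struct scatterlist *dst1,
			       unsigned int len1,
			       struct scatterlist *src2, struct scatterlist *dst2,
			       unsigned int len2, u8 *iv)
{
	int err;

	ablkcipher_request_set_crypt(req, src1, dst1, len1, iv);
	err = crypto_ablkcipher_encrypt(req); /* must leave last ciphertext block in iv */
	if (err)
		return err;

	ablkcipher_request_set_crypt(req, src2, dst2, len2, iv);
	return crypto_ablkcipher_encrypt(req); /* continues the same chain */
}
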
diff --git a/queue-4.9/crypto-sha1-ssse3-disable-avx2.patch b/queue-4.9/crypto-sha1-ssse3-disable-avx2.patch
new file mode 100644 (file)
index 0000000..5cde188
--- /dev/null
@@ -0,0 +1,33 @@
+From b82ce24426a4071da9529d726057e4e642948667 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Tue, 4 Jul 2017 12:21:12 +0800
+Subject: crypto: sha1-ssse3 - Disable avx2
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit b82ce24426a4071da9529d726057e4e642948667 upstream.
+
+It has been reported that sha1-avx2 can cause page faults by reading
+beyond the end of the input.  This patch disables it until it can be
+fixed.
+
+Fixes: 7c1da8d0d046 ("crypto: sha - SHA1 transform x86_64 AVX2")
+Reported-by: Jan Stancek <jstancek@redhat.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/sha1_ssse3_glue.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/crypto/sha1_ssse3_glue.c
++++ b/arch/x86/crypto/sha1_ssse3_glue.c
+@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32
+ static bool avx2_usable(void)
+ {
+-      if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
++      if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+               && boot_cpu_has(X86_FEATURE_BMI1)
+               && boot_cpu_has(X86_FEATURE_BMI2))
+               return true;
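
Why `false &&` instead of deleting the code: the compiler constant-folds the condition, so avx2_usable() always returns false, every boot_cpu_has() test becomes dead code, and sha1-avx2 is simply never registered, while re-enabling it later stays a one-line revert. A sketch of the gating (assumed structure of the glue file; names may differ):

/* The AVX2 variant is registered only if its probe succeeds, so a
 * probe forced to false drops it from the candidate list and the
 * next-best implementation (AVX, then SSSE3) serves instead. */
static int register_sha1_avx2(void)
{
	if (avx2_usable())	/* always false after this patch */
		return crypto_register_shash(&sha1_avx2_alg);
	return 0;
}
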
diff --git a/queue-4.9/crypto-talitos-extend-max-key-length-for-sha384-512-hmac-and-aead.patch b/queue-4.9/crypto-talitos-extend-max-key-length-for-sha384-512-hmac-and-aead.patch
new file mode 100644 (file)
index 0000000..ccf25ae
--- /dev/null
@@ -0,0 +1,52 @@
+From 03d2c5114c95797c0aa7d9f463348b171a274fd4 Mon Sep 17 00:00:00 2001
+From: Martin Hicks <mort@bork.org>
+Date: Tue, 2 May 2017 09:38:35 -0400
+Subject: crypto: talitos - Extend max key length for SHA384/512-HMAC and AEAD
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Martin Hicks <mort@bork.org>
+
+commit 03d2c5114c95797c0aa7d9f463348b171a274fd4 upstream.
+
+This is an updated patch that also handles the additional key length
+requirements for the AEAD algorithms.
+
+The max keysize is not 96.  For SHA384/512 it's 128, and for the AEAD
+algorithms it's longer still.  Extend the max keysize to cover the
+largest AEAD case, AES256 + HMAC(SHA512).
+
+Fixes: 357fb60502ede ("crypto: talitos - add sha224, sha384 and sha512 to existing AEAD algorithms")
+Signed-off-by: Martin Hicks <mort@bork.org>
+Acked-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/talitos.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -816,7 +816,7 @@ static void talitos_unregister_rng(struc
+  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
+  */
+ #define TALITOS_CRA_PRIORITY_AEAD_HSNA        (TALITOS_CRA_PRIORITY - 1)
+-#define TALITOS_MAX_KEY_SIZE          96
++#define TALITOS_MAX_KEY_SIZE          (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
+ #define TALITOS_MAX_IV_LENGTH         16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+ struct talitos_ctx {
+@@ -1495,6 +1495,11 @@ static int ablkcipher_setkey(struct cryp
+ {
+       struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
++      if (keylen > TALITOS_MAX_KEY_SIZE) {
++              crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++              return -EINVAL;
++      }
++
+       memcpy(&ctx->key, key, keylen);
+       ctx->keylen = keylen;
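
The arithmetic behind the new bound (constants from <crypto/aes.h> and <crypto/sha.h>; the sanity check below is illustrative, not part of the patch): AES-256 needs a 32-byte key, and an HMAC(SHA512) key can be up to one block, 128 bytes, so the largest supported AEAD key is 160 bytes, well beyond the old fixed 96.

#include <linux/bug.h>
#include <crypto/aes.h>		/* AES_MAX_KEY_SIZE == 32 */
#include <crypto/sha.h>		/* SHA512_BLOCK_SIZE == 128 */

static inline void talitos_keysize_sanity(void)	/* hypothetical helper */
{
	/* New bound: 32 + 128 = 160 bytes; the old bound was a bare 96. */
	BUILD_BUG_ON(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE != 160);
}
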
diff --git a/queue-4.9/revert-sched-core-optimize-sched_smt.patch b/queue-4.9/revert-sched-core-optimize-sched_smt.patch
new file mode 100644 (file)
index 0000000..70899a2
--- /dev/null
@@ -0,0 +1,134 @@
+From 9393c14486259dfbe11c5b9a22142162bae9ce54 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Wed, 19 Jul 2017 09:58:49 +0200
+Subject: Revert "sched/core: Optimize SCHED_SMT"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit 1b568f0aabf280555125bc7cefc08321ff0ebaba.
+
+For the 4.9 kernel tree, this patch causes scheduler regressions.  It is
+fixed in newer kernels with a large number of individual patches, the
+sum of which is too big for the stable kernel tree.
+
+Ingo recommended just reverting the single patch for this tree, as it's
+much simpler.
+
+Reported-by: Ben Guthro <ben@guthro.net>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/core.c  |   19 -------------------
+ kernel/sched/fair.c  |    8 +-------
+ kernel/sched/sched.h |   23 ++++++-----------------
+ 3 files changed, 7 insertions(+), 43 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7422,22 +7422,6 @@ int sched_cpu_dying(unsigned int cpu)
+ }
+ #endif
+-#ifdef CONFIG_SCHED_SMT
+-DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+-
+-static void sched_init_smt(void)
+-{
+-      /*
+-       * We've enumerated all CPUs and will assume that if any CPU
+-       * has SMT siblings, CPU0 will too.
+-       */
+-      if (cpumask_weight(cpu_smt_mask(0)) > 1)
+-              static_branch_enable(&sched_smt_present);
+-}
+-#else
+-static inline void sched_init_smt(void) { }
+-#endif
+-
+ void __init sched_init_smp(void)
+ {
+       cpumask_var_t non_isolated_cpus;
+@@ -7467,9 +7451,6 @@ void __init sched_init_smp(void)
+       init_sched_rt_class();
+       init_sched_dl_class();
+-
+-      sched_init_smt();
+-
+       sched_smp_initialized = true;
+ }
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5376,7 +5376,7 @@ static inline bool test_idle_cores(int c
+  * Since SMT siblings share all cache levels, inspecting this limited remote
+  * state should be fairly cheap.
+  */
+-void __update_idle_core(struct rq *rq)
++void update_idle_core(struct rq *rq)
+ {
+       int core = cpu_of(rq);
+       int cpu;
+@@ -5408,9 +5408,6 @@ static int select_idle_core(struct task_
+       struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
+       int core, cpu, wrap;
+-      if (!static_branch_likely(&sched_smt_present))
+-              return -1;
+-
+       if (!test_idle_cores(target, false))
+               return -1;
+@@ -5444,9 +5441,6 @@ static int select_idle_smt(struct task_s
+ {
+       int cpu;
+-      if (!static_branch_likely(&sched_smt_present))
+-              return -1;
+-
+       for_each_cpu(cpu, cpu_smt_mask(target)) {
+               if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
+                       continue;
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -43,6 +43,12 @@ extern void cpu_load_update_active(struc
+ static inline void cpu_load_update_active(struct rq *this_rq) { }
+ #endif
++#ifdef CONFIG_SCHED_SMT
++extern void update_idle_core(struct rq *rq);
++#else
++static inline void update_idle_core(struct rq *rq) { }
++#endif
++
+ /*
+  * Helpers for converting nanosecond timing to jiffy resolution
+  */
+@@ -731,23 +737,6 @@ static inline int cpu_of(struct rq *rq)
+ #endif
+ }
+-
+-#ifdef CONFIG_SCHED_SMT
+-
+-extern struct static_key_false sched_smt_present;
+-
+-extern void __update_idle_core(struct rq *rq);
+-
+-static inline void update_idle_core(struct rq *rq)
+-{
+-      if (static_branch_unlikely(&sched_smt_present))
+-              __update_idle_core(rq);
+-}
+-
+-#else
+-static inline void update_idle_core(struct rq *rq) { }
+-#endif
+-
+ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+ #define cpu_rq(cpu)           (&per_cpu(runqueues, (cpu)))
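
The pattern the revert removes, for reference (taken directly from the sched.h hunks above): update_idle_core() was gated behind a static branch so that machines without SMT siblings patch the call out at runtime; after the revert it is an ordinary call on every CONFIG_SCHED_SMT kernel.

#include <linux/jump_label.h>

#ifdef CONFIG_SCHED_SMT
DEFINE_STATIC_KEY_FALSE(sched_smt_present);	/* enabled at boot if CPU0 has SMT siblings */

extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	/* Compiles to a patched NOP until sched_init_smt() enables the
	 * key, so non-SMT machines never pay for the call. */
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}
#else
static inline void update_idle_core(struct rq *rq) { }
#endif
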
diff --git a/queue-4.9/series b/queue-4.9/series
index ec2f8c0a937a9f27c563b9d194b9b1e9101743da..265fe32a0eff28d22338a3716478c49691c1ae36 100644 (file)
--- a/queue-4.9/series
@@ -53,3 +53,9 @@ mnt-in-propgate_umount-handle-visiting-mounts-in-any-order.patch
 mnt-make-propagate_umount-less-slow-for-overlapping-mount-propagation-trees.patch
 selftests-capabilities-fix-the-test_execve-test.patch
 mm-fix-overflow-check-in-expand_upwards.patch
+crypto-talitos-extend-max-key-length-for-sha384-512-hmac-and-aead.patch
+crypto-atmel-only-treat-ebusy-as-transient-if-backlog.patch
+crypto-sha1-ssse3-disable-avx2.patch
+crypto-caam-properly-set-iv-after-en-de-crypt.patch
+crypto-caam-fix-signals-handling.patch
+revert-sched-core-optimize-sched_smt.patch