git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.18-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 19 Jul 2017 09:43:09 +0000 (11:43 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 19 Jul 2017 09:43:09 +0000 (11:43 +0200)
added patches:
crypto-atmel-only-treat-ebusy-as-transient-if-backlog.patch
crypto-caam-fix-signals-handling.patch
crypto-sha1-ssse3-disable-avx2.patch
crypto-talitos-extend-max-key-length-for-sha384-512-hmac-and-aead.patch
pm-qos-return-einval-for-bogus-strings.patch
sched-topology-fix-overlapping-sched_group_mask.patch
sched-topology-optimize-build_group_mask.patch
tracing-use-softirq_offset-for-softirq-dectection-for-more-accurate-results.patch

queue-3.18/crypto-atmel-only-treat-ebusy-as-transient-if-backlog.patch [new file with mode: 0644]
queue-3.18/crypto-caam-fix-signals-handling.patch [new file with mode: 0644]
queue-3.18/crypto-sha1-ssse3-disable-avx2.patch [new file with mode: 0644]
queue-3.18/crypto-talitos-extend-max-key-length-for-sha384-512-hmac-and-aead.patch [new file with mode: 0644]
queue-3.18/pm-qos-return-einval-for-bogus-strings.patch [new file with mode: 0644]
queue-3.18/sched-topology-fix-overlapping-sched_group_mask.patch [new file with mode: 0644]
queue-3.18/sched-topology-optimize-build_group_mask.patch [new file with mode: 0644]
queue-3.18/series
queue-3.18/tracing-use-softirq_offset-for-softirq-dectection-for-more-accurate-results.patch [new file with mode: 0644]

diff --git a/queue-3.18/crypto-atmel-only-treat-ebusy-as-transient-if-backlog.patch b/queue-3.18/crypto-atmel-only-treat-ebusy-as-transient-if-backlog.patch
new file mode 100644 (file)
index 0000000..fc88251
--- /dev/null
@@ -0,0 +1,35 @@
+From 1606043f214f912a52195293614935811a6e3e53 Mon Sep 17 00:00:00 2001
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+Date: Wed, 28 Jun 2017 10:22:03 +0300
+Subject: crypto: atmel - only treat EBUSY as transient if backlog
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+commit 1606043f214f912a52195293614935811a6e3e53 upstream.
+
+The Atmel SHA driver was treating -EBUSY as indication of queueing
+to backlog without checking that backlog is enabled for the request.
+
+Fix it by checking request flags.
+
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/atmel-sha.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/atmel-sha.c
++++ b/drivers/crypto/atmel-sha.c
+@@ -957,7 +957,9 @@ static int atmel_sha_finup(struct ahash_
+       ctx->flags |= SHA_FLAGS_FINUP;
+       err1 = atmel_sha_update(req);
+-      if (err1 == -EINPROGRESS || err1 == -EBUSY)
++      if (err1 == -EINPROGRESS ||
++          (err1 == -EBUSY && (ahash_request_flags(req) &
++                              CRYPTO_TFM_REQ_MAY_BACKLOG)))
+               return err1;
+       /*
diff --git a/queue-3.18/crypto-caam-fix-signals-handling.patch b/queue-3.18/crypto-caam-fix-signals-handling.patch
new file mode 100644 (file)
index 0000000..88b136c
--- /dev/null
@@ -0,0 +1,59 @@
+From 7459e1d25ffefa2b1be799477fcc1f6c62f6cec7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Fri, 7 Jul 2017 16:57:06 +0300
+Subject: crypto: caam - fix signals handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Horia Geantă <horia.geanta@nxp.com>
+
+commit 7459e1d25ffefa2b1be799477fcc1f6c62f6cec7 upstream.
+
+Driver does not properly handle the case when signals interrupt
+wait_for_completion_interruptible():
+-it does not check for return value
+-completion structure is allocated on stack; in case a signal interrupts
+the sleep, it will go out of scope, causing the worker thread
+(caam_jr_dequeue) to fail when it accesses it
+
+wait_for_completion_interruptible() is replaced with uninterruptable
+wait_for_completion().
+We choose to block all signals while waiting for I/O (device executing
+the split key generation job descriptor) since the alternative - in
+order to have a deterministic device state - would be to flush the job
+ring (aborting *all* in-progress jobs).
+
+Fixes: 045e36780f115 ("crypto: caam - ahash hmac support")
+Fixes: 4c1ec1f930154 ("crypto: caam - refactor key_gen, sg")
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamhash.c |    2 +-
+ drivers/crypto/caam/key_gen.c  |    2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -490,7 +490,7 @@ static int hash_digest_key(struct caam_h
+       ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+       if (!ret) {
+               /* in progress */
+-              wait_for_completion_interruptible(&result.completion);
++              wait_for_completion(&result.completion);
+               ret = result.err;
+ #ifdef DEBUG
+               print_hex_dump(KERN_ERR,
+--- a/drivers/crypto/caam/key_gen.c
++++ b/drivers/crypto/caam/key_gen.c
+@@ -103,7 +103,7 @@ int gen_split_key(struct device *jrdev,
+       ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+       if (!ret) {
+               /* in progress */
+-              wait_for_completion_interruptible(&result.completion);
++              wait_for_completion(&result.completion);
+               ret = result.err;
+ #ifdef DEBUG
+               print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
diff --git a/queue-3.18/crypto-sha1-ssse3-disable-avx2.patch b/queue-3.18/crypto-sha1-ssse3-disable-avx2.patch
new file mode 100644 (file)
index 0000000..7af2b39
--- /dev/null
@@ -0,0 +1,33 @@
+From b82ce24426a4071da9529d726057e4e642948667 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Tue, 4 Jul 2017 12:21:12 +0800
+Subject: crypto: sha1-ssse3 - Disable avx2
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit b82ce24426a4071da9529d726057e4e642948667 upstream.
+
+It has been reported that sha1-avx2 can cause page faults by reading
+beyond the end of the input.  This patch disables it until it can be
+fixed.
+
+Fixes: 7c1da8d0d046 ("crypto: sha - SHA1 transform x86_64 AVX2")
+Reported-by: Jan Stancek <jstancek@redhat.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/sha1_ssse3_glue.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/crypto/sha1_ssse3_glue.c
++++ b/arch/x86/crypto/sha1_ssse3_glue.c
+@@ -224,7 +224,7 @@ static bool __init avx_usable(void)
+ #ifdef CONFIG_AS_AVX2
+ static bool __init avx2_usable(void)
+ {
+-      if (avx_usable() && cpu_has_avx2 && boot_cpu_has(X86_FEATURE_BMI1) &&
++      if (false && avx_usable() && cpu_has_avx2 && boot_cpu_has(X86_FEATURE_BMI1) &&
+           boot_cpu_has(X86_FEATURE_BMI2))
+               return true;
diff --git a/queue-3.18/crypto-talitos-extend-max-key-length-for-sha384-512-hmac-and-aead.patch b/queue-3.18/crypto-talitos-extend-max-key-length-for-sha384-512-hmac-and-aead.patch
new file mode 100644 (file)
index 0000000..830134b
--- /dev/null
@@ -0,0 +1,52 @@
+From 03d2c5114c95797c0aa7d9f463348b171a274fd4 Mon Sep 17 00:00:00 2001
+From: Martin Hicks <mort@bork.org>
+Date: Tue, 2 May 2017 09:38:35 -0400
+Subject: crypto: talitos - Extend max key length for SHA384/512-HMAC and AEAD
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Martin Hicks <mort@bork.org>
+
+commit 03d2c5114c95797c0aa7d9f463348b171a274fd4 upstream.
+
+An updated patch that also handles the additional key length requirements
+for the AEAD algorithms.
+
+The max keysize is not 96.  For SHA384/512 it's 128, and for the AEAD
+algorithms it's longer still.  Extend the max keysize for the
+AEAD size for AES256 + HMAC(SHA512).
+
+Fixes: 357fb60502ede ("crypto: talitos - add sha224, sha384 and sha512 to existing AEAD algorithms")
+Signed-off-by: Martin Hicks <mort@bork.org>
+Acked-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/talitos.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -634,7 +634,7 @@ static void talitos_unregister_rng(struc
+  * crypto alg
+  */
+ #define TALITOS_CRA_PRIORITY          3000
+-#define TALITOS_MAX_KEY_SIZE          96
++#define TALITOS_MAX_KEY_SIZE          (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
+ #define TALITOS_MAX_IV_LENGTH         16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+ #define MD5_BLOCK_SIZE    64
+@@ -1324,6 +1324,11 @@ static int ablkcipher_setkey(struct cryp
+ {
+       struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
++      if (keylen > TALITOS_MAX_KEY_SIZE) {
++              crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++              return -EINVAL;
++      }
++
+       memcpy(&ctx->key, key, keylen);
+       ctx->keylen = keylen;
diff --git a/queue-3.18/pm-qos-return-einval-for-bogus-strings.patch b/queue-3.18/pm-qos-return-einval-for-bogus-strings.patch
new file mode 100644 (file)
index 0000000..28698dc
--- /dev/null
@@ -0,0 +1,34 @@
+From 2ca30331c156ca9e97643ad05dd8930b8fe78b01 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Mon, 10 Jul 2017 10:21:40 +0300
+Subject: PM / QoS: return -EINVAL for bogus strings
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 2ca30331c156ca9e97643ad05dd8930b8fe78b01 upstream.
+
+In the current code, if the user accidentally writes a bogus command to
+this sysfs file, then we set the latency tolerance to an uninitialized
+variable.
+
+Fixes: 2d984ad132a8 (PM / QoS: Introcuce latency tolerance device PM QoS type)
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Pavel Machek <pavel@ucw.cz>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/power/sysfs.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/base/power/sysfs.c
++++ b/drivers/base/power/sysfs.c
+@@ -269,6 +269,8 @@ static ssize_t pm_qos_latency_tolerance_
+                       value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+               else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+                       value = PM_QOS_LATENCY_ANY;
++              else
++                      return -EINVAL;
+       }
+       ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
+       return ret < 0 ? ret : n;
diff --git a/queue-3.18/sched-topology-fix-overlapping-sched_group_mask.patch b/queue-3.18/sched-topology-fix-overlapping-sched_group_mask.patch
new file mode 100644 (file)
index 0000000..2ac2a83
--- /dev/null
@@ -0,0 +1,99 @@
+From 73bb059f9b8a00c5e1bf2f7ca83138c05d05e600 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 25 Apr 2017 14:00:49 +0200
+Subject: sched/topology: Fix overlapping sched_group_mask
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 73bb059f9b8a00c5e1bf2f7ca83138c05d05e600 upstream.
+
+The point of sched_group_mask is to select those CPUs from
+sched_group_cpus that can actually arrive at this balance domain.
+
+The current code gets it wrong, as can be readily demonstrated with a
+topology like:
+
+  node   0   1   2   3
+    0:  10  20  30  20
+    1:  20  10  20  30
+    2:  30  20  10  20
+    3:  20  30  20  10
+
+Where (for example) domain 1 on CPU1 ends up with a mask that includes
+CPU0:
+
+  [] CPU1 attaching sched-domain:
+  []  domain 0: span 0-2 level NUMA
+  []   groups: 1 (mask: 1), 2, 0
+  []   domain 1: span 0-3 level NUMA
+  []    groups: 0-2 (mask: 0-2) (cpu_capacity: 3072), 0,2-3 (cpu_capacity: 3072)
+
+This causes sched_balance_cpu() to compute the wrong CPU and
+consequently should_we_balance() will terminate early resulting in
+missed load-balance opportunities.
+
+The fixed topology looks like:
+
+  [] CPU1 attaching sched-domain:
+  []  domain 0: span 0-2 level NUMA
+  []   groups: 1 (mask: 1), 2, 0
+  []   domain 1: span 0-3 level NUMA
+  []    groups: 0-2 (mask: 1) (cpu_capacity: 3072), 0,2-3 (cpu_capacity: 3072)
+
+(note: this relies on OVERLAP domains to always have children, this is
+ true because the regular topology domains are still here -- this is
+ before degenerate trimming)
+
+Debugged-by: Lauro Ramos Venancio <lvenanci@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-kernel@vger.kernel.org
+Fixes: e3589f6c81e4 ("sched: Allow for overlapping sched_domain spans")
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/core.c |   18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5852,6 +5852,9 @@ enum s_alloc {
+  * Build an iteration mask that can exclude certain CPUs from the upwards
+  * domain traversal.
+  *
++ * Only CPUs that can arrive at this group should be considered to continue
++ * balancing.
++ *
+  * Asymmetric node setups can result in situations where the domain tree is of
+  * unequal depth, make sure to skip domains that already cover the entire
+  * range.
+@@ -5870,11 +5873,24 @@ static void build_group_mask(struct sche
+       for_each_cpu(i, sg_span) {
+               sibling = *per_cpu_ptr(sdd->sd, i);
+-              if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
++
++              /*
++               * Can happen in the asymmetric case, where these siblings are
++               * unused. The mask will not be empty because those CPUs that
++               * do have the top domain _should_ span the domain.
++               */
++              if (!sibling->child)
++                      continue;
++
++              /* If we would not end up here, we can't continue from here */
++              if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
+                       continue;
+               cpumask_set_cpu(i, sched_group_mask(sg));
+       }
++
++      /* We must not have empty masks here */
++      WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
+ }
+ /*
diff --git a/queue-3.18/sched-topology-optimize-build_group_mask.patch b/queue-3.18/sched-topology-optimize-build_group_mask.patch
new file mode 100644 (file)
index 0000000..9223b4b
--- /dev/null
@@ -0,0 +1,46 @@
+From f32d782e31bf079f600dcec126ed117b0577e85c Mon Sep 17 00:00:00 2001
+From: Lauro Ramos Venancio <lvenanci@redhat.com>
+Date: Thu, 20 Apr 2017 16:51:40 -0300
+Subject: sched/topology: Optimize build_group_mask()
+
+From: Lauro Ramos Venancio <lvenanci@redhat.com>
+
+commit f32d782e31bf079f600dcec126ed117b0577e85c upstream.
+
+The group mask is always used in intersection with the group CPUs. So,
+when building the group mask, we don't have to care about CPUs that are
+not part of the group.
+
+Signed-off-by: Lauro Ramos Venancio <lvenanci@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: lwang@redhat.com
+Cc: riel@redhat.com
+Link: http://lkml.kernel.org/r/1492717903-5195-2-git-send-email-lvenanci@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/core.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5863,12 +5863,12 @@ enum s_alloc {
+  */
+ static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
+ {
+-      const struct cpumask *span = sched_domain_span(sd);
++      const struct cpumask *sg_span = sched_group_cpus(sg);
+       struct sd_data *sdd = sd->private;
+       struct sched_domain *sibling;
+       int i;
+-      for_each_cpu(i, span) {
++      for_each_cpu(i, sg_span) {
+               sibling = *per_cpu_ptr(sdd->sd, i);
+               if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+                       continue;
index 5f3ccb7d7a1a66bc2e92e3c404ba2e4e6a233c37..e044440ae4f35436733c606174f98525adc75f05 100644 (file)
@@ -18,3 +18,11 @@ checkpatch-silence-perl-5.26.0-unescaped-left-brace-warnings.patch
 exec-limit-arg-stack-to-at-most-75-of-_stk_lim.patch
 vt-fix-unchecked-__put_user-in-tioclinux-ioctls.patch
 mm-fix-overflow-check-in-expand_upwards.patch
+crypto-talitos-extend-max-key-length-for-sha384-512-hmac-and-aead.patch
+crypto-atmel-only-treat-ebusy-as-transient-if-backlog.patch
+crypto-sha1-ssse3-disable-avx2.patch
+crypto-caam-fix-signals-handling.patch
+sched-topology-optimize-build_group_mask.patch
+sched-topology-fix-overlapping-sched_group_mask.patch
+pm-qos-return-einval-for-bogus-strings.patch
+tracing-use-softirq_offset-for-softirq-dectection-for-more-accurate-results.patch
diff --git a/queue-3.18/tracing-use-softirq_offset-for-softirq-dectection-for-more-accurate-results.patch b/queue-3.18/tracing-use-softirq_offset-for-softirq-dectection-for-more-accurate-results.patch
new file mode 100644 (file)
index 0000000..a11c201
--- /dev/null
@@ -0,0 +1,38 @@
+From c59f29cb144a6a0dfac16ede9dc8eafc02dc56ca Mon Sep 17 00:00:00 2001
+From: Pavankumar Kondeti <pkondeti@codeaurora.org>
+Date: Fri, 9 Dec 2016 21:50:17 +0530
+Subject: tracing: Use SOFTIRQ_OFFSET for softirq dectection for more accurate results
+
+From: Pavankumar Kondeti <pkondeti@codeaurora.org>
+
+commit c59f29cb144a6a0dfac16ede9dc8eafc02dc56ca upstream.
+
+The 's' flag is supposed to indicate that a softirq is running. This
+can be detected by testing the preempt_count with SOFTIRQ_OFFSET.
+
+The current code tests the preempt_count with SOFTIRQ_MASK, which
+would be true even when softirqs are disabled but not serving a
+softirq.
+
+Link: http://lkml.kernel.org/r/1481300417-3564-1-git-send-email-pkondeti@codeaurora.org
+
+Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1587,7 +1587,7 @@ tracing_generic_entry_update(struct trac
+               TRACE_FLAG_IRQS_NOSUPPORT |
+ #endif
+               ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+-              ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
++              ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
+               (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
+               (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
+ }