git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 10 Mar 2017 07:49:54 +0000 (08:49 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 10 Mar 2017 07:49:54 +0000 (08:49 +0100)
added patches:
arm-arm64-kvm-enforce-unconditional-flush-to-poc-when-mapping-to-stage-2.patch
arm64-dma-mapping-fix-dma_mapping_error-when-bypassing-swiotlb.patch
arm64-fix-erroneous-__raw_read_system_reg-cases.patch
crypto-api-add-crypto_requires_off-helper.patch
crypto-testmgr-pad-aes_ccm_enc_tv_template-vector.patch
crypto-vmx-use-skcipher-for-cbc-fallback.patch
crypto-vmx-use-skcipher-for-xts-fallback.patch
crypto-xts-add-ecb-dependency.patch
crypto-xts-propagate-need_fallback-bit.patch
fuse-add-missing-fr_force.patch
kvm-arm-arm64-vgic-stop-injecting-the-msi-occurrence-twice.patch
revert-arm64-mm-set-the-contiguous-bit-for-kernel-mappings-where-appropriate.patch
x86-pkeys-check-against-max-pkey-to-avoid-overflows.patch

14 files changed:
queue-4.10/arm-arm64-kvm-enforce-unconditional-flush-to-poc-when-mapping-to-stage-2.patch [new file with mode: 0644]
queue-4.10/arm64-dma-mapping-fix-dma_mapping_error-when-bypassing-swiotlb.patch [new file with mode: 0644]
queue-4.10/arm64-fix-erroneous-__raw_read_system_reg-cases.patch [new file with mode: 0644]
queue-4.10/crypto-api-add-crypto_requires_off-helper.patch [new file with mode: 0644]
queue-4.10/crypto-testmgr-pad-aes_ccm_enc_tv_template-vector.patch [new file with mode: 0644]
queue-4.10/crypto-vmx-use-skcipher-for-cbc-fallback.patch [new file with mode: 0644]
queue-4.10/crypto-vmx-use-skcipher-for-xts-fallback.patch [new file with mode: 0644]
queue-4.10/crypto-xts-add-ecb-dependency.patch [new file with mode: 0644]
queue-4.10/crypto-xts-propagate-need_fallback-bit.patch [new file with mode: 0644]
queue-4.10/fuse-add-missing-fr_force.patch [new file with mode: 0644]
queue-4.10/kvm-arm-arm64-vgic-stop-injecting-the-msi-occurrence-twice.patch [new file with mode: 0644]
queue-4.10/revert-arm64-mm-set-the-contiguous-bit-for-kernel-mappings-where-appropriate.patch [new file with mode: 0644]
queue-4.10/series
queue-4.10/x86-pkeys-check-against-max-pkey-to-avoid-overflows.patch [new file with mode: 0644]

diff --git a/queue-4.10/arm-arm64-kvm-enforce-unconditional-flush-to-poc-when-mapping-to-stage-2.patch b/queue-4.10/arm-arm64-kvm-enforce-unconditional-flush-to-poc-when-mapping-to-stage-2.patch
new file mode 100644 (file)
index 0000000..8ce1e78
--- /dev/null
@@ -0,0 +1,73 @@
+From 8f36ebaf21fdae99c091c67e8b6fab33969f2667 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Wed, 25 Jan 2017 12:29:59 +0000
+Subject: arm/arm64: KVM: Enforce unconditional flush to PoC when mapping to stage-2
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 8f36ebaf21fdae99c091c67e8b6fab33969f2667 upstream.
+
+When we fault in a page, we flush it to the PoC (Point of Coherency)
+if the faulting vcpu has its own caches off, so that it can observe
+the page we just brought in.
+
+But if the vcpu has its caches on, we skip that step. Bad things
+happen when *another* vcpu tries to access that page with its own
+caches disabled. At that point, there is no guarantee that the
+data has made it to the PoC, and we access stale data.
+
+The obvious fix is to always flush to PoC when a page is faulted
+in, no matter what the state of the vcpu is.
+
+Fixes: 2d58b733c876 ("arm64: KVM: force cache clean on page fault when caches are off")
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/kvm_mmu.h   |    9 +--------
+ arch/arm64/include/asm/kvm_mmu.h |    3 +--
+ 2 files changed, 2 insertions(+), 10 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -150,18 +150,12 @@ static inline void __coherent_cache_gues
+        * and iterate over the range.
+        */
+-      bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+-
+       VM_BUG_ON(size & ~PAGE_MASK);
+-      if (!need_flush && !icache_is_pipt())
+-              goto vipt_cache;
+-
+       while (size) {
+               void *va = kmap_atomic_pfn(pfn);
+-              if (need_flush)
+-                      kvm_flush_dcache_to_poc(va, PAGE_SIZE);
++              kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+               if (icache_is_pipt())
+                       __cpuc_coherent_user_range((unsigned long)va,
+@@ -173,7 +167,6 @@ static inline void __coherent_cache_gues
+               kunmap_atomic(va);
+       }
+-vipt_cache:
+       if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
+               /* any kind of VIPT cache */
+               __flush_icache_all();
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -241,8 +241,7 @@ static inline void __coherent_cache_gues
+ {
+       void *va = page_address(pfn_to_page(pfn));
+-      if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
+-              kvm_flush_dcache_to_poc(va, size);
++      kvm_flush_dcache_to_poc(va, size);
+       if (!icache_is_aliasing()) {            /* PIPT */
+               flush_icache_range((unsigned long)va,
diff --git a/queue-4.10/arm64-dma-mapping-fix-dma_mapping_error-when-bypassing-swiotlb.patch b/queue-4.10/arm64-dma-mapping-fix-dma_mapping_error-when-bypassing-swiotlb.patch
new file mode 100644 (file)
index 0000000..8331d1a
--- /dev/null
@@ -0,0 +1,54 @@
+From adbe7e26f4257f72817495b9bce114284060b0d7 Mon Sep 17 00:00:00 2001
+From: Robin Murphy <robin.murphy@arm.com>
+Date: Wed, 25 Jan 2017 18:31:31 +0000
+Subject: arm64: dma-mapping: Fix dma_mapping_error() when bypassing SWIOTLB
+
+From: Robin Murphy <robin.murphy@arm.com>
+
+commit adbe7e26f4257f72817495b9bce114284060b0d7 upstream.
+
+When bypassing SWIOTLB on small-memory systems, we need to avoid calling
+into swiotlb_dma_mapping_error() in exactly the same way as we avoid
+swiotlb_dma_supported(), because the former also relies on SWIOTLB state
+being initialised.
+
+Under the assumptions for which we skip SWIOTLB, dma_map_{single,page}()
+will only ever return the DMA-offset-adjusted physical address of the
+page passed in, thus we can report success unconditionally.
+
+Fixes: b67a8b29df7e ("arm64: mm: only initialize swiotlb when necessary")
+CC: Jisheng Zhang <jszhang@marvell.com>
+Reported-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Signed-off-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/dma-mapping.c |    9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -352,6 +352,13 @@ static int __swiotlb_dma_supported(struc
+       return 1;
+ }
++static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
++{
++      if (swiotlb)
++              return swiotlb_dma_mapping_error(hwdev, addr);
++      return 0;
++}
++
+ static struct dma_map_ops swiotlb_dma_ops = {
+       .alloc = __dma_alloc,
+       .free = __dma_free,
+@@ -366,7 +373,7 @@ static struct dma_map_ops swiotlb_dma_op
+       .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
+       .sync_sg_for_device = __swiotlb_sync_sg_for_device,
+       .dma_supported = __swiotlb_dma_supported,
+-      .mapping_error = swiotlb_dma_mapping_error,
++      .mapping_error = __swiotlb_dma_mapping_error,
+ };
+ static int __init atomic_pool_init(void)
diff --git a/queue-4.10/arm64-fix-erroneous-__raw_read_system_reg-cases.patch b/queue-4.10/arm64-fix-erroneous-__raw_read_system_reg-cases.patch
new file mode 100644 (file)
index 0000000..d6b9c45
--- /dev/null
@@ -0,0 +1,53 @@
+From 7d0928f18bf890d2853281f59aba0dd5a46b34f9 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Thu, 2 Feb 2017 17:32:14 +0000
+Subject: arm64: fix erroneous __raw_read_system_reg() cases
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 7d0928f18bf890d2853281f59aba0dd5a46b34f9 upstream.
+
+Since it was introduced in commit da8d02d19ffdd201 ("arm64/capabilities:
+Make use of system wide safe value"), __raw_read_system_reg() has
+erroneously mapped some sysreg IDs to other registers.
+
+For the fields in ID_ISAR5_EL1, our local feature detection will be
+erroneous. We may spuriously detect that a feature is uniformly
+supported, or may fail to detect when it actually is, meaning some
+compat hwcaps may be erroneous (or not enforced upon hotplug).
+
+This patch corrects the erroneous entries.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Fixes: da8d02d19ffdd201 ("arm64/capabilities: Make use of system wide safe value")
+Reported-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/cpufeature.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -654,15 +654,15 @@ static u64 __raw_read_system_reg(u32 sys
+       case SYS_ID_ISAR2_EL1:          return read_cpuid(ID_ISAR2_EL1);
+       case SYS_ID_ISAR3_EL1:          return read_cpuid(ID_ISAR3_EL1);
+       case SYS_ID_ISAR4_EL1:          return read_cpuid(ID_ISAR4_EL1);
+-      case SYS_ID_ISAR5_EL1:          return read_cpuid(ID_ISAR4_EL1);
++      case SYS_ID_ISAR5_EL1:          return read_cpuid(ID_ISAR5_EL1);
+       case SYS_MVFR0_EL1:             return read_cpuid(MVFR0_EL1);
+       case SYS_MVFR1_EL1:             return read_cpuid(MVFR1_EL1);
+       case SYS_MVFR2_EL1:             return read_cpuid(MVFR2_EL1);
+       case SYS_ID_AA64PFR0_EL1:       return read_cpuid(ID_AA64PFR0_EL1);
+-      case SYS_ID_AA64PFR1_EL1:       return read_cpuid(ID_AA64PFR0_EL1);
++      case SYS_ID_AA64PFR1_EL1:       return read_cpuid(ID_AA64PFR1_EL1);
+       case SYS_ID_AA64DFR0_EL1:       return read_cpuid(ID_AA64DFR0_EL1);
+-      case SYS_ID_AA64DFR1_EL1:       return read_cpuid(ID_AA64DFR0_EL1);
++      case SYS_ID_AA64DFR1_EL1:       return read_cpuid(ID_AA64DFR1_EL1);
+       case SYS_ID_AA64MMFR0_EL1:      return read_cpuid(ID_AA64MMFR0_EL1);
+       case SYS_ID_AA64MMFR1_EL1:      return read_cpuid(ID_AA64MMFR1_EL1);
+       case SYS_ID_AA64MMFR2_EL1:      return read_cpuid(ID_AA64MMFR2_EL1);
diff --git a/queue-4.10/crypto-api-add-crypto_requires_off-helper.patch b/queue-4.10/crypto-api-add-crypto_requires_off-helper.patch
new file mode 100644 (file)
index 0000000..97e4e6b
--- /dev/null
@@ -0,0 +1,42 @@
+From 016df0abc56ec06d0c63c5318ef53e40738dea8b Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Sun, 26 Feb 2017 12:22:35 +0800
+Subject: crypto: api - Add crypto_requires_off helper
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 016df0abc56ec06d0c63c5318ef53e40738dea8b upstream.
+
+This patch adds crypto_requires_off which is an extension of
+crypto_requires_sync for similar bits such as NEED_FALLBACK.
+
+Suggested-by: Marcelo Cerri <marcelo.cerri@canonical.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/crypto/algapi.h |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/include/crypto/algapi.h
++++ b/include/crypto/algapi.h
+@@ -344,13 +344,18 @@ static inline struct crypto_alg *crypto_
+       return crypto_attr_alg(tb[1], type, mask);
+ }
++static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
++{
++      return (type ^ off) & mask & off;
++}
++
+ /*
+  * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
+  * Otherwise returns zero.
+  */
+ static inline int crypto_requires_sync(u32 type, u32 mask)
+ {
+-      return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
++      return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
+ }
+ noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
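
A minimal standalone sketch of how the new helper behaves, assuming the usual
values of the kernel's algorithm flags (CRYPTO_ALG_ASYNC = 0x80,
CRYPTO_ALG_NEED_FALLBACK = 0x100); the harness below is illustrative only and
is not part of the patch:

    #include <stdio.h>

    #define CRYPTO_ALG_ASYNC         0x00000080
    #define CRYPTO_ALG_NEED_FALLBACK 0x00000100

    /* Same expression as the helper added above: the result is non-zero
     * only when the bit in 'off' is clear in 'type' and set in 'mask',
     * i.e. the caller required that bit to be absent. */
    static int crypto_requires_off(unsigned int type, unsigned int mask,
                                   unsigned int off)
    {
            return (type ^ off) & mask & off;
    }

    int main(void)
    {
            /* Caller wants a synchronous algorithm: ASYNC clear in type but
             * set in mask, so the helper returns CRYPTO_ALG_ASYNC. */
            printf("%#x\n", crypto_requires_off(0, CRYPTO_ALG_ASYNC,
                                                CRYPTO_ALG_ASYNC));

            /* Caller did not constrain NEED_FALLBACK at all (bit clear in
             * mask), so nothing is required and the helper returns 0. */
            printf("%#x\n", crypto_requires_off(0, 0,
                                                CRYPTO_ALG_NEED_FALLBACK));
            return 0;
    }
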
diff --git a/queue-4.10/crypto-testmgr-pad-aes_ccm_enc_tv_template-vector.patch b/queue-4.10/crypto-testmgr-pad-aes_ccm_enc_tv_template-vector.patch
new file mode 100644 (file)
index 0000000..d1b082a
--- /dev/null
@@ -0,0 +1,83 @@
+From 1c68bb0f62bf8de8bb30123ea840d5168f25abea Mon Sep 17 00:00:00 2001
+From: Laura Abbott <labbott@redhat.com>
+Date: Tue, 28 Feb 2017 14:07:25 -0800
+Subject: crypto: testmgr - Pad aes_ccm_enc_tv_template vector
+
+From: Laura Abbott <labbott@redhat.com>
+
+commit 1c68bb0f62bf8de8bb30123ea840d5168f25abea upstream.
+
+Running with KASAN and crypto tests currently gives
+
+ BUG: KASAN: global-out-of-bounds in __test_aead+0x9d9/0x2200 at addr ffffffff8212fca0
+ Read of size 16 by task cryptomgr_test/1107
+ Address belongs to variable 0xffffffff8212fca0
+ CPU: 0 PID: 1107 Comm: cryptomgr_test Not tainted 4.10.0+ #45
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.9.1-1.fc24 04/01/2014
+ Call Trace:
+  dump_stack+0x63/0x8a
+  kasan_report.part.1+0x4a7/0x4e0
+  ? __test_aead+0x9d9/0x2200
+  ? crypto_ccm_init_crypt+0x218/0x3c0 [ccm]
+  kasan_report+0x20/0x30
+  check_memory_region+0x13c/0x1a0
+  memcpy+0x23/0x50
+  __test_aead+0x9d9/0x2200
+  ? kasan_unpoison_shadow+0x35/0x50
+  ? alg_test_akcipher+0xf0/0xf0
+  ? crypto_skcipher_init_tfm+0x2e3/0x310
+  ? crypto_spawn_tfm2+0x37/0x60
+  ? crypto_ccm_init_tfm+0xa9/0xd0 [ccm]
+  ? crypto_aead_init_tfm+0x7b/0x90
+  ? crypto_alloc_tfm+0xc4/0x190
+  test_aead+0x28/0xc0
+  alg_test_aead+0x54/0xd0
+  alg_test+0x1eb/0x3d0
+  ? alg_find_test+0x90/0x90
+  ? __sched_text_start+0x8/0x8
+  ? __wake_up_common+0x70/0xb0
+  cryptomgr_test+0x4d/0x60
+  kthread+0x173/0x1c0
+  ? crypto_acomp_scomp_free_ctx+0x60/0x60
+  ? kthread_create_on_node+0xa0/0xa0
+  ret_from_fork+0x2c/0x40
+ Memory state around the buggy address:
+  ffffffff8212fb80: 00 00 00 00 01 fa fa fa fa fa fa fa 00 00 00 00
+  ffffffff8212fc00: 00 01 fa fa fa fa fa fa 00 00 00 00 01 fa fa fa
+ >ffffffff8212fc80: fa fa fa fa 00 05 fa fa fa fa fa fa 00 00 00 00
+                                   ^
+  ffffffff8212fd00: 01 fa fa fa fa fa fa fa 00 00 00 00 01 fa fa fa
+  ffffffff8212fd80: fa fa fa fa 00 00 00 00 00 05 fa fa fa fa fa fa
+
+This always happens on the same IV which is less than 16 bytes.
+
+Per Ard,
+
+"CCM IVs are 16 bytes, but due to the way they are constructed
+internally, the final couple of bytes of input IV are dont-cares.
+
+Apparently, we do read all 16 bytes, which triggers the KASAN errors."
+
+Fix this by padding the IV with null bytes to be at least 16 bytes.
+
+Fixes: 0bc5a6c5c79a ("crypto: testmgr - Disable rfc4309 test and convert test vectors")
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/testmgr.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/crypto/testmgr.h
++++ b/crypto/testmgr.h
+@@ -22827,7 +22827,7 @@ static struct aead_testvec aes_ccm_enc_t
+                         "\x09\x75\x9a\x9b\x3c\x9b\x27\x39",
+               .klen   = 32,
+               .iv     = "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d"
+-                        "\x43\xf6\x1e\x50",
++                        "\x43\xf6\x1e\x50\0\0\0\0",
+               .assoc  = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b"
+                         "\x13\x02\x01\x0c\x83\x4c\x96\x35"
+                         "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94"
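
A hedged illustration (not taken from testmgr.h; names and sizes are invented
for the demo) of why the short vector over-reads: the 12-byte string literal
backing .iv occupies 13 bytes including its NUL terminator, while the CCM code
consumes a full 16-byte IV, so the tail of the read falls outside the object.
Padding with NULs makes the backing object large enough:

    #include <stdio.h>
    #include <string.h>

    #define AES_BLOCK_SIZE 16

    int main(void)
    {
            /* Unpadded vector: only 12 IV bytes (13 with the implicit NUL). */
            static const char short_iv[] = "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d"
                                           "\x43\xf6\x1e\x50";

            /* Padded vector as in the fix: at least 16 bytes are backed. */
            static const char padded_iv[] = "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d"
                                            "\x43\xf6\x1e\x50\0\0\0\0";

            unsigned char iv[AES_BLOCK_SIZE];

            /* Copying 16 bytes from padded_iv stays inside the object;
             * the same copy from short_iv would read past its end, which
             * is the global-out-of-bounds access KASAN reports. */
            memcpy(iv, padded_iv, AES_BLOCK_SIZE);

            printf("short_iv backs %zu bytes, padded_iv backs %zu bytes, "
                   "IV read is %d bytes\n",
                   sizeof(short_iv), sizeof(padded_iv), AES_BLOCK_SIZE);
            return 0;
    }
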
diff --git a/queue-4.10/crypto-vmx-use-skcipher-for-cbc-fallback.patch b/queue-4.10/crypto-vmx-use-skcipher-for-cbc-fallback.patch
new file mode 100644 (file)
index 0000000..9c21069
--- /dev/null
@@ -0,0 +1,132 @@
+From c96d0a1c47abd5c4fa544dcedb5fac4d020ac58b Mon Sep 17 00:00:00 2001
+From: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+Date: Wed, 1 Mar 2017 10:58:20 -0300
+Subject: crypto: vmx - Use skcipher for cbc fallback
+
+From: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+
+commit c96d0a1c47abd5c4fa544dcedb5fac4d020ac58b upstream.
+
+Signed-off-by: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/vmx/aes_cbc.c |   47 +++++++++++++++++++++----------------------
+ 1 file changed, 24 insertions(+), 23 deletions(-)
+
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -27,11 +27,12 @@
+ #include <asm/switch_to.h>
+ #include <crypto/aes.h>
+ #include <crypto/scatterwalk.h>
++#include <crypto/skcipher.h>
+ #include "aesp8-ppc.h"
+ struct p8_aes_cbc_ctx {
+-      struct crypto_blkcipher *fallback;
++      struct crypto_skcipher *fallback;
+       struct aes_key enc_key;
+       struct aes_key dec_key;
+ };
+@@ -39,7 +40,7 @@ struct p8_aes_cbc_ctx {
+ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
+ {
+       const char *alg;
+-      struct crypto_blkcipher *fallback;
++      struct crypto_skcipher *fallback;
+       struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+       if (!(alg = crypto_tfm_alg_name(tfm))) {
+@@ -47,8 +48,9 @@ static int p8_aes_cbc_init(struct crypto
+               return -ENOENT;
+       }
+-      fallback =
+-          crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
++      fallback = crypto_alloc_skcipher(alg, 0,
++                      CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
++
+       if (IS_ERR(fallback)) {
+               printk(KERN_ERR
+                      "Failed to allocate transformation for '%s': %ld\n",
+@@ -56,11 +58,12 @@ static int p8_aes_cbc_init(struct crypto
+               return PTR_ERR(fallback);
+       }
+       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-             crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
++              crypto_skcipher_driver_name(fallback));
++
+-      crypto_blkcipher_set_flags(
++      crypto_skcipher_set_flags(
+               fallback,
+-              crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm));
++              crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+       ctx->fallback = fallback;
+       return 0;
+@@ -71,7 +74,7 @@ static void p8_aes_cbc_exit(struct crypt
+       struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+       if (ctx->fallback) {
+-              crypto_free_blkcipher(ctx->fallback);
++              crypto_free_skcipher(ctx->fallback);
+               ctx->fallback = NULL;
+       }
+ }
+@@ -91,7 +94,7 @@ static int p8_aes_cbc_setkey(struct cryp
+       pagefault_enable();
+       preempt_enable();
+-      ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
++      ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+       return ret;
+ }
+@@ -103,15 +106,14 @@ static int p8_aes_cbc_encrypt(struct blk
+       struct blkcipher_walk walk;
+       struct p8_aes_cbc_ctx *ctx =
+               crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+-      struct blkcipher_desc fallback_desc = {
+-              .tfm = ctx->fallback,
+-              .info = desc->info,
+-              .flags = desc->flags
+-      };
+       if (in_interrupt()) {
+-              ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
+-                                             nbytes);
++              SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
++              skcipher_request_set_tfm(req, ctx->fallback);
++              skcipher_request_set_callback(req, desc->flags, NULL, NULL);
++              skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
++              ret = crypto_skcipher_encrypt(req);
++              skcipher_request_zero(req);
+       } else {
+               preempt_disable();
+               pagefault_disable();
+@@ -144,15 +146,14 @@ static int p8_aes_cbc_decrypt(struct blk
+       struct blkcipher_walk walk;
+       struct p8_aes_cbc_ctx *ctx =
+               crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+-      struct blkcipher_desc fallback_desc = {
+-              .tfm = ctx->fallback,
+-              .info = desc->info,
+-              .flags = desc->flags
+-      };
+       if (in_interrupt()) {
+-              ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src,
+-                                             nbytes);
++              SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
++              skcipher_request_set_tfm(req, ctx->fallback);
++              skcipher_request_set_callback(req, desc->flags, NULL, NULL);
++              skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
++              ret = crypto_skcipher_decrypt(req);
++              skcipher_request_zero(req);
+       } else {
+               preempt_disable();
+               pagefault_disable();
diff --git a/queue-4.10/crypto-vmx-use-skcipher-for-xts-fallback.patch b/queue-4.10/crypto-vmx-use-skcipher-for-xts-fallback.patch
new file mode 100644 (file)
index 0000000..c744da8
--- /dev/null
@@ -0,0 +1,106 @@
+From 5839f555fa576be57371686265206398d9ea1480 Mon Sep 17 00:00:00 2001
+From: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+Date: Wed, 1 Mar 2017 11:00:00 -0300
+Subject: crypto: vmx - Use skcipher for xts fallback
+
+From: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+
+commit 5839f555fa576be57371686265206398d9ea1480 upstream.
+
+Signed-off-by: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
+index 24353ec336c5..6adc9290557a 100644
+--- a/drivers/crypto/vmx/aes_xts.c
++++ b/drivers/crypto/vmx/aes_xts.c
+@@ -28,11 +28,12 @@
+ #include <crypto/aes.h>
+ #include <crypto/scatterwalk.h>
+ #include <crypto/xts.h>
++#include <crypto/skcipher.h>
+ #include "aesp8-ppc.h"
+ struct p8_aes_xts_ctx {
+-      struct crypto_blkcipher *fallback;
++      struct crypto_skcipher *fallback;
+       struct aes_key enc_key;
+       struct aes_key dec_key;
+       struct aes_key tweak_key;
+@@ -41,7 +42,7 @@ struct p8_aes_xts_ctx {
+ static int p8_aes_xts_init(struct crypto_tfm *tfm)
+ {
+       const char *alg;
+-      struct crypto_blkcipher *fallback;
++      struct crypto_skcipher *fallback;
+       struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+       if (!(alg = crypto_tfm_alg_name(tfm))) {
+@@ -49,8 +50,8 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm)
+               return -ENOENT;
+       }
+-      fallback =
+-              crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
++      fallback = crypto_alloc_skcipher(alg, 0,
++                      CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(fallback)) {
+               printk(KERN_ERR
+                       "Failed to allocate transformation for '%s': %ld\n",
+@@ -58,11 +59,11 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm)
+               return PTR_ERR(fallback);
+       }
+       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-              crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
++              crypto_skcipher_driver_name(fallback));
+-      crypto_blkcipher_set_flags(
++      crypto_skcipher_set_flags(
+               fallback,
+-              crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm));
++              crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+       ctx->fallback = fallback;
+       return 0;
+@@ -73,7 +74,7 @@ static void p8_aes_xts_exit(struct crypto_tfm *tfm)
+       struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+       if (ctx->fallback) {
+-              crypto_free_blkcipher(ctx->fallback);
++              crypto_free_skcipher(ctx->fallback);
+               ctx->fallback = NULL;
+       }
+ }
+@@ -98,7 +99,7 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
+       pagefault_enable();
+       preempt_enable();
+-      ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
++      ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+       return ret;
+ }
+@@ -113,15 +114,14 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
+       struct blkcipher_walk walk;
+       struct p8_aes_xts_ctx *ctx =
+               crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+-      struct blkcipher_desc fallback_desc = {
+-              .tfm = ctx->fallback,
+-              .info = desc->info,
+-              .flags = desc->flags
+-      };
+       if (in_interrupt()) {
+-              ret = enc ? crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes) :
+-                            crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
++              SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
++              skcipher_request_set_tfm(req, ctx->fallback);
++              skcipher_request_set_callback(req, desc->flags, NULL, NULL);
++              skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
++              ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
++              skcipher_request_zero(req);
+       } else {
+               preempt_disable();
+               pagefault_disable();
diff --git a/queue-4.10/crypto-xts-add-ecb-dependency.patch b/queue-4.10/crypto-xts-add-ecb-dependency.patch
new file mode 100644 (file)
index 0000000..891b4f8
--- /dev/null
@@ -0,0 +1,33 @@
+From 12cb3a1c4184f891d965d1f39f8cfcc9ef617647 Mon Sep 17 00:00:00 2001
+From: Milan Broz <gmazyland@gmail.com>
+Date: Thu, 23 Feb 2017 08:38:26 +0100
+Subject: crypto: xts - Add ECB dependency
+
+From: Milan Broz <gmazyland@gmail.com>
+
+commit 12cb3a1c4184f891d965d1f39f8cfcc9ef617647 upstream.
+
+Since the
+   commit f1c131b45410a202eb45cc55980a7a9e4e4b4f40
+   crypto: xts - Convert to skcipher
+the XTS mode is based on ECB, so the mode must select
+ECB, otherwise it can fail to initialize.
+
+Signed-off-by: Milan Broz <gmazyland@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/Kconfig |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -374,6 +374,7 @@ config CRYPTO_XTS
+       select CRYPTO_BLKCIPHER
+       select CRYPTO_MANAGER
+       select CRYPTO_GF128MUL
++      select CRYPTO_ECB
+       help
+         XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain,
+         key size 256, 384 or 512 bits. This implementation currently
diff --git a/queue-4.10/crypto-xts-propagate-need_fallback-bit.patch b/queue-4.10/crypto-xts-propagate-need_fallback-bit.patch
new file mode 100644 (file)
index 0000000..a1a008e
--- /dev/null
@@ -0,0 +1,60 @@
+From 89027579bc6c2febbcc9c2f9d5069adf71539e4b Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Sun, 26 Feb 2017 12:24:10 +0800
+Subject: crypto: xts - Propagate NEED_FALLBACK bit
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 89027579bc6c2febbcc9c2f9d5069adf71539e4b upstream.
+
+When we're used as a fallback algorithm, we should propagate
+the NEED_FALLBACK bit when searching for the underlying ECB mode.
+
+This just happens to fix a hang too because otherwise the search
+may end up loading the same module that triggered this XTS creation.
+
+Fixes: f1c131b45410 ("crypto: xts - Convert to skcipher")
+Reported-by: Harald Freudenberger <freude@linux.vnet.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/xts.c |   14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/crypto/xts.c
++++ b/crypto/xts.c
+@@ -463,6 +463,7 @@ static int create(struct crypto_template
+       struct xts_instance_ctx *ctx;
+       struct skcipher_alg *alg;
+       const char *cipher_name;
++      u32 mask;
+       int err;
+       algt = crypto_get_attr_type(tb);
+@@ -483,18 +484,19 @@ static int create(struct crypto_template
+       ctx = skcipher_instance_ctx(inst);
+       crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
+-      err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0,
+-                                 crypto_requires_sync(algt->type,
+-                                                      algt->mask));
++
++      mask = crypto_requires_off(algt->type, algt->mask,
++                                 CRYPTO_ALG_NEED_FALLBACK |
++                                 CRYPTO_ALG_ASYNC);
++
++      err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask);
+       if (err == -ENOENT) {
+               err = -ENAMETOOLONG;
+               if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+                            cipher_name) >= CRYPTO_MAX_ALG_NAME)
+                       goto err_free_inst;
+-              err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0,
+-                                         crypto_requires_sync(algt->type,
+-                                                              algt->mask));
++              err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask);
+       }
+       if (err)
diff --git a/queue-4.10/fuse-add-missing-fr_force.patch b/queue-4.10/fuse-add-missing-fr_force.patch
new file mode 100644 (file)
index 0000000..0683b89
--- /dev/null
@@ -0,0 +1,34 @@
+From 2e38bea99a80eab408adee27f873a188d57b76cb Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Wed, 22 Feb 2017 20:08:25 +0100
+Subject: fuse: add missing FR_FORCE
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 2e38bea99a80eab408adee27f873a188d57b76cb upstream.
+
+fuse_file_put() was missing the "force" flag for the RELEASE request when
+sending synchronously (fuseblk).
+
+If this flag is not set, then a sync request may be interrupted before it
+is dequeued by the userspace filesystem.  In this case the OPEN won't be
+balanced with a RELEASE.
+
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Fixes: 5a18ec176c93 ("fuse: fix hang of single threaded fuseblk filesystem")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/file.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -100,6 +100,7 @@ static void fuse_file_put(struct fuse_fi
+                       iput(req->misc.release.inode);
+                       fuse_put_request(ff->fc, req);
+               } else if (sync) {
++                      __set_bit(FR_FORCE, &req->flags);
+                       __clear_bit(FR_BACKGROUND, &req->flags);
+                       fuse_request_send(ff->fc, req);
+                       iput(req->misc.release.inode);
diff --git a/queue-4.10/kvm-arm-arm64-vgic-stop-injecting-the-msi-occurrence-twice.patch b/queue-4.10/kvm-arm-arm64-vgic-stop-injecting-the-msi-occurrence-twice.patch
new file mode 100644 (file)
index 0000000..feb72f8
--- /dev/null
@@ -0,0 +1,42 @@
+From 0bdbf3b071986ba80731203683cf623d5c0cacb1 Mon Sep 17 00:00:00 2001
+From: Shanker Donthineni <shankerd@codeaurora.org>
+Date: Thu, 2 Feb 2017 20:30:03 -0600
+Subject: KVM: arm/arm64: vgic: Stop injecting the MSI occurrence twice
+
+From: Shanker Donthineni <shankerd@codeaurora.org>
+
+commit 0bdbf3b071986ba80731203683cf623d5c0cacb1 upstream.
+
+The IRQFD framework calls the architecture dependent function
+twice if the corresponding GSI type is edge triggered. For ARM,
+the function kvm_set_msi() is getting called twice whenever the
+IRQFD receives the event signal. The rest of the code path is
+trying to inject the MSI without any validation checks. There is no
+need to call vgic_its_inject_msi() a second time; skipping it avoids
+unnecessary overhead in the IRQ queue logic and also avoids the
+possibility of the VM seeing the MSI twice.
+
+Simple fix, return -1 if the argument 'level' value is zero.
+
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/vgic/vgic-irqfd.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/virt/kvm/arm/vgic/vgic-irqfd.c
++++ b/virt/kvm/arm/vgic/vgic-irqfd.c
+@@ -99,6 +99,9 @@ int kvm_set_msi(struct kvm_kernel_irq_ro
+       if (!vgic_has_its(kvm))
+               return -ENODEV;
++      if (!level)
++              return -1;
++
+       return vgic_its_inject_msi(kvm, &msi);
+ }
diff --git a/queue-4.10/revert-arm64-mm-set-the-contiguous-bit-for-kernel-mappings-where-appropriate.patch b/queue-4.10/revert-arm64-mm-set-the-contiguous-bit-for-kernel-mappings-where-appropriate.patch
new file mode 100644 (file)
index 0000000..6e34aba
--- /dev/null
@@ -0,0 +1,124 @@
+From d81bbe6d882461dec4b71dbe2aa85565fcca4187 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Thu, 23 Feb 2017 16:22:55 +0000
+Subject: Revert "arm64: mm: set the contiguous bit for kernel mappings where appropriate"
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit d81bbe6d882461dec4b71dbe2aa85565fcca4187 upstream.
+
+This reverts commit 0bfc445dec9dd8130d22c9f4476eed7598524129.
+
+When we change the permissions of regions mapped using contiguous
+entries, the architecture requires us to follow a Break-Before-Make
+strategy, breaking *all* associated entries before we can change any of
+the following properties from the entries:
+
+ - presence of the contiguous bit
+ - output address
+ - attributes
+ - permissions
+
+Failure to do so can result in a number of problems (e.g. TLB conflict
+aborts and/or erroneous results from TLB lookups).
+
+See ARM DDI 0487A.k_iss10775, "Misprogramming of the Contiguous bit",
+page D4-1762.
+
+We do not take this into account when altering the permissions of kernel
+segments in mark_rodata_ro(), where we change the permissions of live
+contiguous entries one-by-one, leaving them transiently inconsistent.
+This has been observed to result in failures on some fast model
+configurations.
+
+Unfortunately, we cannot follow Break-Before-Make here as we'd have to
+unmap kernel text and data used to perform the sequence.
+
+For the time being, revert commit 0bfc445dec9dd813 so as to avoid issues
+resulting from this misuse of the contiguous bit.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reported-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <Will.Deacon@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/mmu.c |   34 ++++------------------------------
+ 1 file changed, 4 insertions(+), 30 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -108,10 +108,8 @@ static bool pgattr_change_is_safe(u64 ol
+ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
+                                 unsigned long end, unsigned long pfn,
+                                 pgprot_t prot,
+-                                phys_addr_t (*pgtable_alloc)(void),
+-                                bool page_mappings_only)
++                                phys_addr_t (*pgtable_alloc)(void))
+ {
+-      pgprot_t __prot = prot;
+       pte_t *pte;
+       BUG_ON(pmd_sect(*pmd));
+@@ -129,18 +127,7 @@ static void alloc_init_pte(pmd_t *pmd, u
+       do {
+               pte_t old_pte = *pte;
+-              /*
+-               * Set the contiguous bit for the subsequent group of PTEs if
+-               * its size and alignment are appropriate.
+-               */
+-              if (((addr | PFN_PHYS(pfn)) & ~CONT_PTE_MASK) == 0) {
+-                      if (end - addr >= CONT_PTE_SIZE && !page_mappings_only)
+-                              __prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+-                      else
+-                              __prot = prot;
+-              }
+-
+-              set_pte(pte, pfn_pte(pfn, __prot));
++              set_pte(pte, pfn_pte(pfn, prot));
+               pfn++;
+               /*
+@@ -159,7 +146,6 @@ static void alloc_init_pmd(pud_t *pud, u
+                                 phys_addr_t (*pgtable_alloc)(void),
+                                 bool page_mappings_only)
+ {
+-      pgprot_t __prot = prot;
+       pmd_t *pmd;
+       unsigned long next;
+@@ -186,18 +172,7 @@ static void alloc_init_pmd(pud_t *pud, u
+               /* try section mapping first */
+               if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+                     !page_mappings_only) {
+-                      /*
+-                       * Set the contiguous bit for the subsequent group of
+-                       * PMDs if its size and alignment are appropriate.
+-                       */
+-                      if (((addr | phys) & ~CONT_PMD_MASK) == 0) {
+-                              if (end - addr >= CONT_PMD_SIZE)
+-                                      __prot = __pgprot(pgprot_val(prot) |
+-                                                        PTE_CONT);
+-                              else
+-                                      __prot = prot;
+-                      }
+-                      pmd_set_huge(pmd, phys, __prot);
++                      pmd_set_huge(pmd, phys, prot);
+                       /*
+                        * After the PMD entry has been populated once, we
+@@ -207,8 +182,7 @@ static void alloc_init_pmd(pud_t *pud, u
+                                                     pmd_val(*pmd)));
+               } else {
+                       alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
+-                                     prot, pgtable_alloc,
+-                                     page_mappings_only);
++                                     prot, pgtable_alloc);
+                       BUG_ON(pmd_val(old_pmd) != 0 &&
+                              pmd_val(old_pmd) != pmd_val(*pmd));
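
As background for the Break-Before-Make requirement described above, here is a
rough, compilable sketch of the expected ordering when changing a contiguous
group of entries. The barrier and TLB helpers are empty stand-ins (real code
issues DSB/TLBI instructions), and the names, values and layout are invented
for illustration only:

    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t pte_t;

    /* Empty stand-ins for the real barrier and TLB maintenance primitives. */
    static void dsb(void) { }
    static void tlbi_range(uintptr_t va, size_t size) { (void)va; (void)size; }

    /*
     * Break-Before-Make over a contiguous group: every entry in the group is
     * invalidated and stale translations are flushed before any entry is
     * rewritten with the new contiguous-bit/attribute/permission settings.
     * Changing live contiguous entries one by one, as in mark_rodata_ro()
     * with the reverted patch applied, skips the break and invalidate steps.
     */
    static void bbm_update_group(pte_t *ptes, int nr, uintptr_t va,
                                 size_t granule, pte_t newval)
    {
            int i;

            for (i = 0; i < nr; i++)        /* 1. break: write invalid entries */
                    ptes[i] = 0;
            dsb();                          /* make the breaks visible          */
            tlbi_range(va, (size_t)nr * granule);   /* 2. flush stale entries   */
            dsb();
            for (i = 0; i < nr; i++)        /* 3. make: install the new entries */
                    ptes[i] = newval + (pte_t)i * granule;
            dsb();
    }

    int main(void)
    {
            pte_t group[16] = { 0 };

            bbm_update_group(group, 16, 0x40000000, 4096, 0x0000000040000703ull);
            return 0;
    }
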
diff --git a/queue-4.10/series b/queue-4.10/series
index a9c1c5ad71ab75d43849a940a8ea09edbb0ba871..8b77ba445850e79e93c6b2d32d3ed5e17c9c3017 100644 (file)
--- a/queue-4.10/series
@@ -90,3 +90,16 @@ pci-hv-fix-wslot_to_devfn-to-fix-warnings-on-device-removal.patch
 pci-hotplug-pnv-php-disable-msi-and-pci-device-properly.patch
 pci-altera-fix-tlp_cfg_dw0-for-tlp-write.patch
 drivers-hv-vmbus-raise-retry-wait-limits-in-vmbus_post_msg.patch
+crypto-xts-add-ecb-dependency.patch
+crypto-testmgr-pad-aes_ccm_enc_tv_template-vector.patch
+crypto-xts-propagate-need_fallback-bit.patch
+crypto-api-add-crypto_requires_off-helper.patch
+crypto-vmx-use-skcipher-for-cbc-fallback.patch
+crypto-vmx-use-skcipher-for-xts-fallback.patch
+fuse-add-missing-fr_force.patch
+x86-pkeys-check-against-max-pkey-to-avoid-overflows.patch
+arm-arm64-kvm-enforce-unconditional-flush-to-poc-when-mapping-to-stage-2.patch
+arm64-dma-mapping-fix-dma_mapping_error-when-bypassing-swiotlb.patch
+arm64-fix-erroneous-__raw_read_system_reg-cases.patch
+kvm-arm-arm64-vgic-stop-injecting-the-msi-occurrence-twice.patch
+revert-arm64-mm-set-the-contiguous-bit-for-kernel-mappings-where-appropriate.patch
diff --git a/queue-4.10/x86-pkeys-check-against-max-pkey-to-avoid-overflows.patch b/queue-4.10/x86-pkeys-check-against-max-pkey-to-avoid-overflows.patch
new file mode 100644 (file)
index 0000000..1a450cc
--- /dev/null
@@ -0,0 +1,73 @@
+From 58ab9a088ddac4efe823471275859d64f735577e Mon Sep 17 00:00:00 2001
+From: Dave Hansen <dave.hansen@linux.intel.com>
+Date: Thu, 23 Feb 2017 14:26:03 -0800
+Subject: x86/pkeys: Check against max pkey to avoid overflows
+
+From: Dave Hansen <dave.hansen@linux.intel.com>
+
+commit 58ab9a088ddac4efe823471275859d64f735577e upstream.
+
+Kirill reported a warning from UBSAN about undefined behavior when using
+protection keys.  He is running on hardware that actually has support for
+it, which is not widely available.
+
+The warning triggers because of very large shifts of integers when doing a
+pkey_free() of a large, invalid value. This happens because we never check
+that the pkey "fits" into the mm_pkey_allocation_map().
+
+I do not believe there is any danger here of anything bad happening
+other than some aliasing issues where somebody could do:
+
+       pkey_free(35);
+
+and the kernel would effectively execute:
+
+       pkey_free(8);
+
+While this might be confusing to an app that was doing something stupid, it
+has to do something stupid and the effects are limited to the app shooting
+itself in the foot.
+
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: linux-kselftest@vger.kernel.org
+Cc: shuah@kernel.org
+Cc: kirill.shutemov@linux.intel.com
+Link: http://lkml.kernel.org/r/20170223222603.A022ED65@viggo.jf.intel.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/pkeys.h |   15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/pkeys.h
++++ b/arch/x86/include/asm/pkeys.h
+@@ -46,6 +46,15 @@ extern int __arch_set_user_pkey_access(s
+ static inline
+ bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
+ {
++      /*
++       * "Allocated" pkeys are those that have been returned
++       * from pkey_alloc().  pkey 0 is special, and never
++       * returned from pkey_alloc().
++       */
++      if (pkey <= 0)
++              return false;
++      if (pkey >= arch_max_pkey())
++              return false;
+       return mm_pkey_allocation_map(mm) & (1U << pkey);
+ }
+@@ -82,12 +91,6 @@ int mm_pkey_alloc(struct mm_struct *mm)
+ static inline
+ int mm_pkey_free(struct mm_struct *mm, int pkey)
+ {
+-      /*
+-       * pkey 0 is special, always allocated and can never
+-       * be freed.
+-       */
+-      if (!pkey)
+-              return -EINVAL;
+       if (!mm_pkey_is_allocated(mm, pkey))
+               return -EINVAL;
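
A small userspace illustration of why the added range checks matter (not
kernel code; the bitmap and the max-pkey value of 16 are assumptions based on
x86 having a 4-bit protection key field): without the checks, an invalid pkey
such as 35 feeds straight into a 32-bit shift, which is undefined behaviour
and is exactly what UBSAN flagged.

    #include <stdbool.h>
    #include <stdio.h>

    #define ARCH_MAX_PKEY 16        /* assumed: x86 has 16 protection keys */

    /* Illustrative allocation bitmap; in the kernel this lives in the mm. */
    static unsigned int pkey_allocation_map = 1U << 3;   /* pkey 3 allocated */

    static bool pkey_is_allocated(int pkey)
    {
            /*
             * Mirror the fix: reject out-of-range keys before shifting.
             * Without these checks, (1U << 35) is undefined behaviour and
             * can silently alias another key's bit.
             */
            if (pkey <= 0)
                    return false;
            if (pkey >= ARCH_MAX_PKEY)
                    return false;
            return pkey_allocation_map & (1U << pkey);
    }

    int main(void)
    {
            printf("pkey 3:  %d\n", pkey_is_allocated(3));   /* 1 */
            printf("pkey 35: %d\n", pkey_is_allocated(35));  /* 0, rejected */
            return 0;
    }
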