git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.0-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 3 Jun 2019 08:01:23 +0000 (10:01 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 3 Jun 2019 08:01:23 +0000 (10:01 +0200)
added patches:
crypto-vmx-ghash-do-nosimd-fallback-manually.patch
xen-pciback-don-t-disable-pci_command-on-pci-device-reset.patch

queue-5.0/crypto-vmx-ghash-do-nosimd-fallback-manually.patch [new file with mode: 0644]
queue-5.0/series
queue-5.0/xen-pciback-don-t-disable-pci_command-on-pci-device-reset.patch [new file with mode: 0644]

diff --git a/queue-5.0/crypto-vmx-ghash-do-nosimd-fallback-manually.patch b/queue-5.0/crypto-vmx-ghash-do-nosimd-fallback-manually.patch
new file mode 100644
index 0000000..295b820
--- /dev/null
+++ b/queue-5.0/crypto-vmx-ghash-do-nosimd-fallback-manually.patch
@@ -0,0 +1,313 @@
+From 357d065a44cdd77ed5ff35155a989f2a763e96ef Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 17 May 2019 01:40:02 +1000
+Subject: crypto: vmx - ghash: do nosimd fallback manually
+
+From: Daniel Axtens <dja@axtens.net>
+
+commit 357d065a44cdd77ed5ff35155a989f2a763e96ef upstream.
+
+VMX ghash was using a fallback that did not support interleaving simd
+and nosimd operations, leading to failures in the extended test suite.
+
+If I understood correctly, Eric's suggestion was to use the same
+data format that the generic code uses, allowing us to call into it
+with the same contexts. I wasn't able to get that to work - I think
+there's a very different key structure and data layout being used.
+
+So instead steal the arm64 approach and perform the fallback
+operations directly if required.
+
+Fixes: cc333cd68dfa ("crypto: vmx - Adding GHASH routines for VMX module")
+Cc: stable@vger.kernel.org # v4.1+
+Reported-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Tested-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/vmx/ghash.c |  212 ++++++++++++++++++---------------------------
+ 1 file changed, 86 insertions(+), 126 deletions(-)
+
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -1,22 +1,14 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+  * GHASH routines supporting VMX instructions on the Power 8
+  *
+- * Copyright (C) 2015 International Business Machines Inc.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; version 2 only.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ * Copyright (C) 2015, 2019 International Business Machines Inc.
+  *
+  * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
++ *
++ * Extended by Daniel Axtens <dja@axtens.net> to replace the fallback
++ * mechanism. The new approach is based on arm64 code, which is:
++ *   Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
+  */
+ #include <linux/types.h>
+@@ -39,71 +31,25 @@ void gcm_ghash_p8(u64 Xi[2], const u128
+                 const u8 *in, size_t len);
+ struct p8_ghash_ctx {
++      /* key used by vector asm */
+       u128 htable[16];
+-      struct crypto_shash *fallback;
++      /* key used by software fallback */
++      be128 key;
+ };
+ struct p8_ghash_desc_ctx {
+       u64 shash[2];
+       u8 buffer[GHASH_DIGEST_SIZE];
+       int bytes;
+-      struct shash_desc fallback_desc;
+ };
+-static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
+-{
+-      const char *alg = "ghash-generic";
+-      struct crypto_shash *fallback;
+-      struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
+-      struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+-
+-      fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+-      if (IS_ERR(fallback)) {
+-              printk(KERN_ERR
+-                     "Failed to allocate transformation for '%s': %ld\n",
+-                     alg, PTR_ERR(fallback));
+-              return PTR_ERR(fallback);
+-      }
+-
+-      crypto_shash_set_flags(fallback,
+-                             crypto_shash_get_flags((struct crypto_shash
+-                                                     *) tfm));
+-
+-      /* Check if the descsize defined in the algorithm is still enough. */
+-      if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
+-          + crypto_shash_descsize(fallback)) {
+-              printk(KERN_ERR
+-                     "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
+-                     alg,
+-                     shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
+-                     crypto_shash_descsize(fallback));
+-              return -EINVAL;
+-      }
+-      ctx->fallback = fallback;
+-
+-      return 0;
+-}
+-
+-static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
+-{
+-      struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+-
+-      if (ctx->fallback) {
+-              crypto_free_shash(ctx->fallback);
+-              ctx->fallback = NULL;
+-      }
+-}
+-
+ static int p8_ghash_init(struct shash_desc *desc)
+ {
+-      struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+       dctx->bytes = 0;
+       memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
+-      dctx->fallback_desc.tfm = ctx->fallback;
+-      dctx->fallback_desc.flags = desc->flags;
+-      return crypto_shash_init(&dctx->fallback_desc);
++      return 0;
+ }
+ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+@@ -121,7 +67,51 @@ static int p8_ghash_setkey(struct crypto
+       disable_kernel_vsx();
+       pagefault_enable();
+       preempt_enable();
+-      return crypto_shash_setkey(ctx->fallback, key, keylen);
++
++      memcpy(&ctx->key, key, GHASH_BLOCK_SIZE);
++
++      return 0;
++}
++
++static inline void __ghash_block(struct p8_ghash_ctx *ctx,
++                               struct p8_ghash_desc_ctx *dctx)
++{
++      if (!IN_INTERRUPT) {
++              preempt_disable();
++              pagefault_disable();
++              enable_kernel_vsx();
++              gcm_ghash_p8(dctx->shash, ctx->htable,
++                              dctx->buffer, GHASH_DIGEST_SIZE);
++              disable_kernel_vsx();
++              pagefault_enable();
++              preempt_enable();
++      } else {
++              crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
++              gf128mul_lle((be128 *)dctx->shash, &ctx->key);
++      }
++}
++
++static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
++                                struct p8_ghash_desc_ctx *dctx,
++                                const u8 *src, unsigned int srclen)
++{
++      if (!IN_INTERRUPT) {
++              preempt_disable();
++              pagefault_disable();
++              enable_kernel_vsx();
++              gcm_ghash_p8(dctx->shash, ctx->htable,
++                              src, srclen);
++              disable_kernel_vsx();
++              pagefault_enable();
++              preempt_enable();
++      } else {
++              while (srclen >= GHASH_BLOCK_SIZE) {
++                      crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
++                      gf128mul_lle((be128 *)dctx->shash, &ctx->key);
++                      srclen -= GHASH_BLOCK_SIZE;
++                      src += GHASH_BLOCK_SIZE;
++              }
++      }
+ }
+ static int p8_ghash_update(struct shash_desc *desc,
+@@ -131,49 +121,33 @@ static int p8_ghash_update(struct shash_
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+-      if (IN_INTERRUPT) {
+-              return crypto_shash_update(&dctx->fallback_desc, src,
+-                                         srclen);
+-      } else {
+-              if (dctx->bytes) {
+-                      if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+-                              memcpy(dctx->buffer + dctx->bytes, src,
+-                                     srclen);
+-                              dctx->bytes += srclen;
+-                              return 0;
+-                      }
++      if (dctx->bytes) {
++              if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+                       memcpy(dctx->buffer + dctx->bytes, src,
+-                             GHASH_DIGEST_SIZE - dctx->bytes);
+-                      preempt_disable();
+-                      pagefault_disable();
+-                      enable_kernel_vsx();
+-                      gcm_ghash_p8(dctx->shash, ctx->htable,
+-                                   dctx->buffer, GHASH_DIGEST_SIZE);
+-                      disable_kernel_vsx();
+-                      pagefault_enable();
+-                      preempt_enable();
+-                      src += GHASH_DIGEST_SIZE - dctx->bytes;
+-                      srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+-                      dctx->bytes = 0;
++                              srclen);
++                      dctx->bytes += srclen;
++                      return 0;
+               }
+-              len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+-              if (len) {
+-                      preempt_disable();
+-                      pagefault_disable();
+-                      enable_kernel_vsx();
+-                      gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+-                      disable_kernel_vsx();
+-                      pagefault_enable();
+-                      preempt_enable();
+-                      src += len;
+-                      srclen -= len;
+-              }
+-              if (srclen) {
+-                      memcpy(dctx->buffer, src, srclen);
+-                      dctx->bytes = srclen;
+-              }
+-              return 0;
++              memcpy(dctx->buffer + dctx->bytes, src,
++                      GHASH_DIGEST_SIZE - dctx->bytes);
++
++              __ghash_block(ctx, dctx);
++
++              src += GHASH_DIGEST_SIZE - dctx->bytes;
++              srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
++              dctx->bytes = 0;
++      }
++      len = srclen & ~(GHASH_DIGEST_SIZE - 1);
++      if (len) {
++              __ghash_blocks(ctx, dctx, src, len);
++              src += len;
++              srclen -= len;
+       }
++      if (srclen) {
++              memcpy(dctx->buffer, src, srclen);
++              dctx->bytes = srclen;
++      }
++      return 0;
+ }
+ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+@@ -182,25 +156,14 @@ static int p8_ghash_final(struct shash_d
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+-      if (IN_INTERRUPT) {
+-              return crypto_shash_final(&dctx->fallback_desc, out);
+-      } else {
+-              if (dctx->bytes) {
+-                      for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+-                              dctx->buffer[i] = 0;
+-                      preempt_disable();
+-                      pagefault_disable();
+-                      enable_kernel_vsx();
+-                      gcm_ghash_p8(dctx->shash, ctx->htable,
+-                                   dctx->buffer, GHASH_DIGEST_SIZE);
+-                      disable_kernel_vsx();
+-                      pagefault_enable();
+-                      preempt_enable();
+-                      dctx->bytes = 0;
+-              }
+-              memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+-              return 0;
++      if (dctx->bytes) {
++              for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
++                      dctx->buffer[i] = 0;
++              __ghash_block(ctx, dctx);
++              dctx->bytes = 0;
+       }
++      memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
++      return 0;
+ }
+ struct shash_alg p8_ghash_alg = {
+@@ -215,11 +178,8 @@ struct shash_alg p8_ghash_alg = {
+                .cra_name = "ghash",
+                .cra_driver_name = "p8_ghash",
+                .cra_priority = 1000,
+-               .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+                .cra_blocksize = GHASH_BLOCK_SIZE,
+                .cra_ctxsize = sizeof(struct p8_ghash_ctx),
+                .cra_module = THIS_MODULE,
+-               .cra_init = p8_ghash_init_tfm,
+-               .cra_exit = p8_ghash_exit_tfm,
+       },
+ };
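
The core of the patch above is __ghash_block()/__ghash_blocks(): when VSX is unavailable (IN_INTERRUPT), the digest update is computed in software as Y = (Y xor block) * H in GF(2^128), via crypto_xor() and gf128mul_lle(). As a rough aid to review, here is a minimal standalone userspace sketch of that step. It is not the kernel code: gf128_mul_ghash() and ghash_block() are illustrative names, and the multiply follows the bit-reflected shift-and-reduce algorithm from NIST SP 800-38D, which is the convention gf128mul_lle() implements.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * GF(2^128) multiply in GHASH's bit-reflected convention (bit 0 of the
 * field element is the most significant bit of byte 0), per the
 * shift-and-reduce algorithm in NIST SP 800-38D.
 */
static void gf128_mul_ghash(uint8_t z[16], const uint8_t x[16],
                            const uint8_t y[16])
{
        uint8_t v[16], r[16] = { 0 };
        int i, j;

        memcpy(v, y, 16);
        for (i = 0; i < 128; i++) {
                if (x[i / 8] & (0x80u >> (i % 8)))   /* bit i of x set? */
                        for (j = 0; j < 16; j++)
                                r[j] ^= v[j];

                /* v >>= 1, reducing by x^128 + x^7 + x^2 + x + 1 on carry-out */
                int carry = v[15] & 1;
                for (j = 15; j > 0; j--)
                        v[j] = (uint8_t)((v[j] >> 1) | (v[j - 1] << 7));
                v[0] >>= 1;
                if (carry)
                        v[0] ^= 0xe1;
        }
        memcpy(z, r, 16);
}

/* One fallback step, mirroring __ghash_block(): shash = (shash ^ blk) * key */
static void ghash_block(uint8_t shash[16], const uint8_t key[16],
                        const uint8_t blk[16])
{
        uint8_t t[16];
        int i;

        for (i = 0; i < 16; i++)         /* what crypto_xor() does */
                shash[i] ^= blk[i];
        gf128_mul_ghash(t, shash, key);  /* what gf128mul_lle() does */
        memcpy(shash, t, 16);
}

int main(void)
{
        uint8_t h[16] = { 0 }, y[16] = { 0 }, blk[16] = { 0 };
        int i;

        h[15] = 0x01;  /* arbitrary toy key */
        blk[0] = 0x80; /* encodes the field element "1" in this convention */

        /* Since blk is the element 1 and y starts at 0, this prints h. */
        ghash_block(y, h, blk);
        for (i = 0; i < 16; i++)
                printf("%02x", y[i]);
        printf("\n");
        return 0;
}

The built-in check is cheap but useful: multiplying by the field element 1 must return the key unchanged, which catches most bit-ordering mistakes in a reimplementation.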
diff --git a/queue-5.0/series b/queue-5.0/series
index 14691fd95ca4177af197a621ef65247fa0461e79..f8febaf53745c01aa37248159951bcd17c4f4871 100644
--- a/queue-5.0/series
+++ b/queue-5.0/series
@@ -30,3 +30,5 @@ net-tls-fix-state-removal-with-feature-flags-off.patch
 net-tls-don-t-ignore-netdev-notifications-if-no-tls-features.patch
 cxgb4-revert-cxgb4-remove-sge_host_page_size-dependency-on-page-size.patch
 net-correct-zerocopy-refcnt-with-udp-msg_more.patch
+crypto-vmx-ghash-do-nosimd-fallback-manually.patch
+xen-pciback-don-t-disable-pci_command-on-pci-device-reset.patch
diff --git a/queue-5.0/xen-pciback-don-t-disable-pci_command-on-pci-device-reset.patch b/queue-5.0/xen-pciback-don-t-disable-pci_command-on-pci-device-reset.patch
new file mode 100644
index 0000000..600a9fc
--- /dev/null
+++ b/queue-5.0/xen-pciback-don-t-disable-pci_command-on-pci-device-reset.patch
@@ -0,0 +1,56 @@
+From 7681f31ec9cdacab4fd10570be924f2cef6669ba Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 13 Feb 2019 18:21:31 -0500
+Subject: xen/pciback: Don't disable PCI_COMMAND on PCI device reset.
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 7681f31ec9cdacab4fd10570be924f2cef6669ba upstream.
+
+There is no need for this at all. At worst it means that if
+the guest tries to write to BARs it could lead (on certain
+platforms) to PCI SERR errors.
+
+Please note that with af6fc858a35b90e89ea7a7ee58e66628c55c776b
+"xen-pciback: limit guest control of command register"
+a guest is still allowed to enable those control bits (safely),
+but is not allowed to disable them; a well-behaved frontend
+which enables things before using them will therefore still
+function correctly.
+
+This is done via a write to the configuration register 0x4 which
+triggers on the backend side:
+command_write
+  \- pci_enable_device
+     \- pci_enable_device_flags
+        \- do_pci_enable_device
+           \- pcibios_enable_device
+              \- pci_enable_resources
+                 [which enables PCI_COMMAND_MEMORY|PCI_COMMAND_IO]
+
+However guests (and drivers) which don't do this could cause
+problems, including the security issues which XSA-120 sought
+to address.
+
+Reported-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Reviewed-by: Prarit Bhargava <prarit@redhat.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/xen-pciback/pciback_ops.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -127,8 +127,6 @@ void xen_pcibk_reset_device(struct pci_d
+               if (pci_is_enabled(dev))
+                       pci_disable_device(dev);
+-              pci_write_config_word(dev, PCI_COMMAND, 0);
+-
+               dev->is_busmaster = 0;
+       } else {
+               pci_read_config_word(dev, PCI_COMMAND, &cmd);
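
To make the call chain above concrete: at the config-space level, "enabling those control bits" is a read-modify-write of the 16-bit command register at offset 0x4. The sketch below is a toy userspace model: cfg[] is a stand-in for the device's config space and cfg_read16()/cfg_write16() are hypothetical accessors, whereas in a real Xen guest the write is trapped and filtered by xen-pciback's command_write(). The register offset and bit values are the standard PCI ones.

#include <stdint.h>
#include <stdio.h>

#define PCI_COMMAND        0x04    /* the "configuration register 0x4" */
#define PCI_COMMAND_IO     0x0001  /* enable I/O-space decoding */
#define PCI_COMMAND_MEMORY 0x0002  /* enable memory-space decoding */

/* Toy config space; PCI config registers are little-endian. */
static uint8_t cfg[256];

static uint16_t cfg_read16(uint8_t off)
{
        return (uint16_t)(cfg[off] | (cfg[off + 1] << 8));
}

static void cfg_write16(uint8_t off, uint16_t val)
{
        cfg[off] = (uint8_t)(val & 0xff);
        cfg[off + 1] = (uint8_t)(val >> 8);
}

int main(void)
{
        /*
         * A well-behaved frontend sets the decode bits before touching
         * BARs. With the patch applied, a device reset no longer zeroes
         * PCI_COMMAND behind the frontend's back, and per af6fc858a35b
         * the guest may set these bits but not clear them.
         */
        uint16_t cmd = cfg_read16(PCI_COMMAND);

        cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
        cfg_write16(PCI_COMMAND, cmd);

        printf("PCI_COMMAND = 0x%04x\n", cfg_read16(PCI_COMMAND));
        return 0;
}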