4.19-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 3 Jun 2019 08:01:07 +0000 (10:01 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 3 Jun 2019 08:01:07 +0000 (10:01 +0200)
added patches:
compiler.h-give-up-__compiletime_assert_fallback.patch
crypto-vmx-ghash-do-nosimd-fallback-manually.patch
include-linux-compiler-.h-define-asm_volatile_goto.patch
jump_label-move-asm-goto-support-test-to-kconfig.patch
xen-pciback-don-t-disable-pci_command-on-pci-device-reset.patch

queue-4.19/compiler.h-give-up-__compiletime_assert_fallback.patch [new file with mode: 0644]
queue-4.19/crypto-vmx-ghash-do-nosimd-fallback-manually.patch [new file with mode: 0644]
queue-4.19/include-linux-compiler-.h-define-asm_volatile_goto.patch [new file with mode: 0644]
queue-4.19/jump_label-move-asm-goto-support-test-to-kconfig.patch [new file with mode: 0644]
queue-4.19/series
queue-4.19/xen-pciback-don-t-disable-pci_command-on-pci-device-reset.patch [new file with mode: 0644]

diff --git a/queue-4.19/compiler.h-give-up-__compiletime_assert_fallback.patch b/queue-4.19/compiler.h-give-up-__compiletime_assert_fallback.patch
new file mode 100644 (file)
index 0000000..31a5837
--- /dev/null
@@ -0,0 +1,85 @@
+From 81b45683487a51b0f4d3b29d37f20d6d078544e4 Mon Sep 17 00:00:00 2001
+From: Masahiro Yamada <yamada.masahiro@socionext.com>
+Date: Sun, 26 Aug 2018 03:16:29 +0900
+Subject: compiler.h: give up __compiletime_assert_fallback()
+
+From: Masahiro Yamada <yamada.masahiro@socionext.com>
+
+commit 81b45683487a51b0f4d3b29d37f20d6d078544e4 upstream.
+
+__compiletime_assert_fallback() is supposed to stop the build earlier
+by using the negative-array-size method in case the compiler does not
+support the "error" attribute, but it has never worked that way.
+
+You can simply try:
+
+    BUILD_BUG_ON(1);
+
+GCC immediately terminates the build, but Clang does not report
+anything because Clang does not currently support the "error"
+attribute. The build will eventually fail at link time, but
+__compiletime_assert_fallback() clearly is not doing its job.
+
+The root cause is commit 1d6a0d19c855 ("bug.h: prevent double evaluation
+of `condition' in BUILD_BUG_ON").  Prior to that commit, BUILD_BUG_ON()
+was checked by the negative-array-size method *and* the link-time trick.
+Since that commit, the negative-array-size check is not effective because
+'__cond' is no longer constant.  As the comment in <linux/build_bug.h>
+says, GCC (and Clang as well) only emits the error for obvious cases.
+
+When '__cond' is a variable,
+
+    ((void)sizeof(char[1 - 2 * __cond]))
+
+... it is not obvious to the compiler that the array size is negative.
+
+Reverting that commit would break BUILD_BUG() because the negative-size
+array is evaluated before the code is optimized out.
+
+Let's give up __compiletime_assert_fallback().  This commit does not
+change the current behavior since it just rips out the useless code.
+
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/compiler.h |   17 +----------------
+ 1 file changed, 1 insertion(+), 16 deletions(-)
+
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -319,29 +319,14 @@ static inline void *offset_to_ptr(const
+ #endif
+ #ifndef __compiletime_error
+ # define __compiletime_error(message)
+-/*
+- * Sparse complains of variable sized arrays due to the temporary variable in
+- * __compiletime_assert. Unfortunately we can't just expand it out to make
+- * sparse see a constant array size without breaking compiletime_assert on old
+- * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
+- */
+-# ifndef __CHECKER__
+-#  define __compiletime_error_fallback(condition) \
+-      do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
+-# endif
+-#endif
+-#ifndef __compiletime_error_fallback
+-# define __compiletime_error_fallback(condition) do { } while (0)
+ #endif
+ #ifdef __OPTIMIZE__
+ # define __compiletime_assert(condition, msg, prefix, suffix)         \
+       do {                                                            \
+-              int __cond = !(condition);                              \
+               extern void prefix ## suffix(void) __compiletime_error(msg); \
+-              if (__cond)                                             \
++              if (!(condition))                                       \
+                       prefix ## suffix();                             \
+-              __compiletime_error_fallback(__cond);                   \
+       } while (0)
+ #else
+ # define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
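For illustration, here is a standalone sketch (not kernel code; the file and
variable names are hypothetical) of why the fallback never fired once the
condition was routed through a variable:

    /* sketch.c - compile with gcc or clang */
    int main(void)
    {
            int cond = 1;

            /*
             * With a constant expression the array size is obviously
             * negative and the build stops immediately:
             *
             *     (void)sizeof(char[1 - 2 * 1]);  // error: negative size
             */

            /*
             * Routed through a variable, as commit 1d6a0d19c855 did with
             * '__cond', the array becomes a variable-length array and both
             * GCC and Clang accept it without complaint:
             */
            (void)sizeof(char[1 - 2 * cond]);

            return 0;
    }

This is exactly the situation inside __compiletime_assert(): '__cond' was a
local variable, so the negative-array-size fallback compiled silently.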
diff --git a/queue-4.19/crypto-vmx-ghash-do-nosimd-fallback-manually.patch b/queue-4.19/crypto-vmx-ghash-do-nosimd-fallback-manually.patch
new file mode 100644 (file)
index 0000000..295b820
--- /dev/null
@@ -0,0 +1,313 @@
+From 357d065a44cdd77ed5ff35155a989f2a763e96ef Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 17 May 2019 01:40:02 +1000
+Subject: crypto: vmx - ghash: do nosimd fallback manually
+
+From: Daniel Axtens <dja@axtens.net>
+
+commit 357d065a44cdd77ed5ff35155a989f2a763e96ef upstream.
+
+VMX ghash was using a fallback that did not support interleaving simd
+and nosimd operations, leading to failures in the extended test suite.
+
+If I understood correctly, Eric's suggestion was to use the same
+data format that the generic code uses, allowing us to call into it
+with the same contexts. I wasn't able to get that to work - I think
+there's a very different key structure and data layout being used.
+
+So instead, steal the arm64 approach and perform the fallback
+operations directly when required.
+
+Fixes: cc333cd68dfa ("crypto: vmx - Adding GHASH routines for VMX module")
+Cc: stable@vger.kernel.org # v4.1+
+Reported-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Tested-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/vmx/ghash.c |  212 ++++++++++++++++++---------------------------
+ 1 file changed, 86 insertions(+), 126 deletions(-)
+
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -1,22 +1,14 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+  * GHASH routines supporting VMX instructions on the Power 8
+  *
+- * Copyright (C) 2015 International Business Machines Inc.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; version 2 only.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ * Copyright (C) 2015, 2019 International Business Machines Inc.
+  *
+  * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
++ *
++ * Extended by Daniel Axtens <dja@axtens.net> to replace the fallback
++ * mechanism. The new approach is based on arm64 code, which is:
++ *   Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
+  */
+ #include <linux/types.h>
+@@ -39,71 +31,25 @@ void gcm_ghash_p8(u64 Xi[2], const u128
+                 const u8 *in, size_t len);
+ struct p8_ghash_ctx {
++      /* key used by vector asm */
+       u128 htable[16];
+-      struct crypto_shash *fallback;
++      /* key used by software fallback */
++      be128 key;
+ };
+ struct p8_ghash_desc_ctx {
+       u64 shash[2];
+       u8 buffer[GHASH_DIGEST_SIZE];
+       int bytes;
+-      struct shash_desc fallback_desc;
+ };
+-static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
+-{
+-      const char *alg = "ghash-generic";
+-      struct crypto_shash *fallback;
+-      struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
+-      struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+-
+-      fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+-      if (IS_ERR(fallback)) {
+-              printk(KERN_ERR
+-                     "Failed to allocate transformation for '%s': %ld\n",
+-                     alg, PTR_ERR(fallback));
+-              return PTR_ERR(fallback);
+-      }
+-
+-      crypto_shash_set_flags(fallback,
+-                             crypto_shash_get_flags((struct crypto_shash
+-                                                     *) tfm));
+-
+-      /* Check if the descsize defined in the algorithm is still enough. */
+-      if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
+-          + crypto_shash_descsize(fallback)) {
+-              printk(KERN_ERR
+-                     "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
+-                     alg,
+-                     shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
+-                     crypto_shash_descsize(fallback));
+-              return -EINVAL;
+-      }
+-      ctx->fallback = fallback;
+-
+-      return 0;
+-}
+-
+-static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
+-{
+-      struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+-
+-      if (ctx->fallback) {
+-              crypto_free_shash(ctx->fallback);
+-              ctx->fallback = NULL;
+-      }
+-}
+-
+ static int p8_ghash_init(struct shash_desc *desc)
+ {
+-      struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+       dctx->bytes = 0;
+       memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
+-      dctx->fallback_desc.tfm = ctx->fallback;
+-      dctx->fallback_desc.flags = desc->flags;
+-      return crypto_shash_init(&dctx->fallback_desc);
++      return 0;
+ }
+ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+@@ -121,7 +67,51 @@ static int p8_ghash_setkey(struct crypto
+       disable_kernel_vsx();
+       pagefault_enable();
+       preempt_enable();
+-      return crypto_shash_setkey(ctx->fallback, key, keylen);
++
++      memcpy(&ctx->key, key, GHASH_BLOCK_SIZE);
++
++      return 0;
++}
++
++static inline void __ghash_block(struct p8_ghash_ctx *ctx,
++                               struct p8_ghash_desc_ctx *dctx)
++{
++      if (!IN_INTERRUPT) {
++              preempt_disable();
++              pagefault_disable();
++              enable_kernel_vsx();
++              gcm_ghash_p8(dctx->shash, ctx->htable,
++                              dctx->buffer, GHASH_DIGEST_SIZE);
++              disable_kernel_vsx();
++              pagefault_enable();
++              preempt_enable();
++      } else {
++              crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
++              gf128mul_lle((be128 *)dctx->shash, &ctx->key);
++      }
++}
++
++static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
++                                struct p8_ghash_desc_ctx *dctx,
++                                const u8 *src, unsigned int srclen)
++{
++      if (!IN_INTERRUPT) {
++              preempt_disable();
++              pagefault_disable();
++              enable_kernel_vsx();
++              gcm_ghash_p8(dctx->shash, ctx->htable,
++                              src, srclen);
++              disable_kernel_vsx();
++              pagefault_enable();
++              preempt_enable();
++      } else {
++              while (srclen >= GHASH_BLOCK_SIZE) {
++                      crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
++                      gf128mul_lle((be128 *)dctx->shash, &ctx->key);
++                      srclen -= GHASH_BLOCK_SIZE;
++                      src += GHASH_BLOCK_SIZE;
++              }
++      }
+ }
+ static int p8_ghash_update(struct shash_desc *desc,
+@@ -131,49 +121,33 @@ static int p8_ghash_update(struct shash_
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+-      if (IN_INTERRUPT) {
+-              return crypto_shash_update(&dctx->fallback_desc, src,
+-                                         srclen);
+-      } else {
+-              if (dctx->bytes) {
+-                      if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+-                              memcpy(dctx->buffer + dctx->bytes, src,
+-                                     srclen);
+-                              dctx->bytes += srclen;
+-                              return 0;
+-                      }
++      if (dctx->bytes) {
++              if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+                       memcpy(dctx->buffer + dctx->bytes, src,
+-                             GHASH_DIGEST_SIZE - dctx->bytes);
+-                      preempt_disable();
+-                      pagefault_disable();
+-                      enable_kernel_vsx();
+-                      gcm_ghash_p8(dctx->shash, ctx->htable,
+-                                   dctx->buffer, GHASH_DIGEST_SIZE);
+-                      disable_kernel_vsx();
+-                      pagefault_enable();
+-                      preempt_enable();
+-                      src += GHASH_DIGEST_SIZE - dctx->bytes;
+-                      srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+-                      dctx->bytes = 0;
++                              srclen);
++                      dctx->bytes += srclen;
++                      return 0;
+               }
+-              len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+-              if (len) {
+-                      preempt_disable();
+-                      pagefault_disable();
+-                      enable_kernel_vsx();
+-                      gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+-                      disable_kernel_vsx();
+-                      pagefault_enable();
+-                      preempt_enable();
+-                      src += len;
+-                      srclen -= len;
+-              }
+-              if (srclen) {
+-                      memcpy(dctx->buffer, src, srclen);
+-                      dctx->bytes = srclen;
+-              }
+-              return 0;
++              memcpy(dctx->buffer + dctx->bytes, src,
++                      GHASH_DIGEST_SIZE - dctx->bytes);
++
++              __ghash_block(ctx, dctx);
++
++              src += GHASH_DIGEST_SIZE - dctx->bytes;
++              srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
++              dctx->bytes = 0;
++      }
++      len = srclen & ~(GHASH_DIGEST_SIZE - 1);
++      if (len) {
++              __ghash_blocks(ctx, dctx, src, len);
++              src += len;
++              srclen -= len;
+       }
++      if (srclen) {
++              memcpy(dctx->buffer, src, srclen);
++              dctx->bytes = srclen;
++      }
++      return 0;
+ }
+ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+@@ -182,25 +156,14 @@ static int p8_ghash_final(struct shash_d
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+-      if (IN_INTERRUPT) {
+-              return crypto_shash_final(&dctx->fallback_desc, out);
+-      } else {
+-              if (dctx->bytes) {
+-                      for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+-                              dctx->buffer[i] = 0;
+-                      preempt_disable();
+-                      pagefault_disable();
+-                      enable_kernel_vsx();
+-                      gcm_ghash_p8(dctx->shash, ctx->htable,
+-                                   dctx->buffer, GHASH_DIGEST_SIZE);
+-                      disable_kernel_vsx();
+-                      pagefault_enable();
+-                      preempt_enable();
+-                      dctx->bytes = 0;
+-              }
+-              memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+-              return 0;
++      if (dctx->bytes) {
++              for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
++                      dctx->buffer[i] = 0;
++              __ghash_block(ctx, dctx);
++              dctx->bytes = 0;
+       }
++      memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
++      return 0;
+ }
+ struct shash_alg p8_ghash_alg = {
+@@ -215,11 +178,8 @@ struct shash_alg p8_ghash_alg = {
+                .cra_name = "ghash",
+                .cra_driver_name = "p8_ghash",
+                .cra_priority = 1000,
+-               .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+                .cra_blocksize = GHASH_BLOCK_SIZE,
+                .cra_ctxsize = sizeof(struct p8_ghash_ctx),
+                .cra_module = THIS_MODULE,
+-               .cra_init = p8_ghash_init_tfm,
+-               .cra_exit = p8_ghash_exit_tfm,
+       },
+ };
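To see the failure mode the extended tests exercised, consider this
hypothetical interleaving (pseudo-usage only; 'a', 'b' and 'out' stand for
caller buffers). The old code kept two divergent states, dctx->shash for the
VSX path and a separate fallback_desc for the nosimd path, so mixing contexts
split the digest across incompatible states:

    p8_ghash_init(desc);           /* zeroes dctx->shash                  */
    p8_ghash_update(desc, a, 16);  /* process context: VSX path -> shash  */
    /* ... digest continued from softirq context ... */
    p8_ghash_update(desc, b, 16);  /* old: went to fallback_desc;
                                      new: gf128mul_lle() folds into the
                                      same dctx->shash                    */
    p8_ghash_final(desc, out);     /* old: two half-digests; new: correct */

With a single state plus __ghash_block()/__ghash_blocks() choosing the
implementation per call, any mix of simd and nosimd updates produces the
same result.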
diff --git a/queue-4.19/include-linux-compiler-.h-define-asm_volatile_goto.patch b/queue-4.19/include-linux-compiler-.h-define-asm_volatile_goto.patch
new file mode 100644 (file)
index 0000000..8909a53
--- /dev/null
@@ -0,0 +1,37 @@
+From 8bd66d147c88bd441178c7b4c774ae5a185f19b8 Mon Sep 17 00:00:00 2001
+From: "ndesaulniers@google.com" <ndesaulniers@google.com>
+Date: Wed, 31 Oct 2018 12:39:01 -0700
+Subject: include/linux/compiler*.h: define asm_volatile_goto
+
+From: ndesaulniers@google.com <ndesaulniers@google.com>
+
+commit 8bd66d147c88bd441178c7b4c774ae5a185f19b8 upstream.
+
+asm_volatile_goto should also be defined for other compilers that support
+asm goto.
+
+Fixes commit 815f0ddb346c ("include/linux/compiler*.h: make compiler-*.h
+mutually exclusive").
+
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/compiler_types.h |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/include/linux/compiler_types.h
++++ b/include/linux/compiler_types.h
+@@ -151,6 +151,10 @@ struct ftrace_likely_data {
+ #define __assume_aligned(a, ...)
+ #endif
++#ifndef asm_volatile_goto
++#define asm_volatile_goto(x...) asm goto(x)
++#endif
++
+ /* Are two types/vars the same type (ignoring qualifiers)? */
+ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
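For context, 'asm goto' lets an inline asm statement jump to C labels, which
is the primitive jump labels are built on. A minimal sketch of a user,
assuming only the default definition added above (x86 flavour; the kernel's
real static-key asm carries more annotations):

    /* sketch: a branch implemented with asm goto */
    static inline int probe(void)
    {
            asm_volatile_goto("jmp %l[taken]" : : : : taken);
            return 0;       /* fall-through path */
    taken:
            return 1;       /* in real jump-label use, the call site is
                               patched between a nop and this jump */
    }

GCC (4.5+) previously got the macro via compiler-gcc.h; after 815f0ddb346c
made the compiler-*.h headers mutually exclusive, the shared default belongs
in compiler_types.h so any compiler supporting the construct picks it up.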
diff --git a/queue-4.19/jump_label-move-asm-goto-support-test-to-kconfig.patch b/queue-4.19/jump_label-move-asm-goto-support-test-to-kconfig.patch
new file mode 100644 (file)
index 0000000..7ce258e
--- /dev/null
@@ -0,0 +1,864 @@
+From e9666d10a5677a494260d60d1fa0b73cc7646eb3 Mon Sep 17 00:00:00 2001
+From: Masahiro Yamada <yamada.masahiro@socionext.com>
+Date: Mon, 31 Dec 2018 00:14:15 +0900
+Subject: jump_label: move 'asm goto' support test to Kconfig
+
+From: Masahiro Yamada <yamada.masahiro@socionext.com>
+
+commit e9666d10a5677a494260d60d1fa0b73cc7646eb3 upstream.
+
+Currently, CONFIG_JUMP_LABEL just means "I _want_ to use jump label".
+
+The jump label is controlled by HAVE_JUMP_LABEL, which is defined
+like this:
+
+  #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+  # define HAVE_JUMP_LABEL
+  #endif
+
+We can improve this by testing 'asm goto' support in Kconfig, then
+making JUMP_LABEL depend on CC_HAS_ASM_GOTO.
+
+The ugly #ifdef HAVE_JUMP_LABEL blocks go away, and CONFIG_JUMP_LABEL
+will match the real kernel capability.
+
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
+Tested-by: Sedat Dilek <sedat.dilek@gmail.com>
+[nc: Fix trivial conflicts in 4.19
+     arch/xtensa/kernel/jump_label.c doesn't exist yet
+     Ensured CC_HAVE_ASM_GOTO and HAVE_JUMP_LABEL were sufficiently
+     eliminated]
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Makefile                                          |    7 -------
+ arch/Kconfig                                      |    1 +
+ arch/arm/kernel/jump_label.c                      |    4 ----
+ arch/arm64/kernel/jump_label.c                    |    4 ----
+ arch/mips/kernel/jump_label.c                     |    4 ----
+ arch/powerpc/include/asm/asm-prototypes.h         |    2 +-
+ arch/powerpc/kernel/jump_label.c                  |    2 --
+ arch/powerpc/platforms/powernv/opal-tracepoints.c |    2 +-
+ arch/powerpc/platforms/powernv/opal-wrappers.S    |    2 +-
+ arch/powerpc/platforms/pseries/hvCall.S           |    4 ++--
+ arch/powerpc/platforms/pseries/lpar.c             |    2 +-
+ arch/s390/kernel/Makefile                         |    3 ++-
+ arch/s390/kernel/jump_label.c                     |    4 ----
+ arch/sparc/kernel/Makefile                        |    2 +-
+ arch/sparc/kernel/jump_label.c                    |    4 ----
+ arch/x86/Makefile                                 |    2 +-
+ arch/x86/entry/calling.h                          |    2 +-
+ arch/x86/include/asm/cpufeature.h                 |    2 +-
+ arch/x86/include/asm/jump_label.h                 |   13 -------------
+ arch/x86/include/asm/rmwcc.h                      |    6 +++---
+ arch/x86/kernel/Makefile                          |    3 ++-
+ arch/x86/kernel/jump_label.c                      |    4 ----
+ arch/x86/kvm/emulate.c                            |    2 +-
+ include/linux/dynamic_debug.h                     |    6 +++---
+ include/linux/jump_label.h                        |   22 +++++++++-------------
+ include/linux/jump_label_ratelimit.h              |    8 +++-----
+ include/linux/module.h                            |    2 +-
+ include/linux/netfilter.h                         |    4 ++--
+ include/linux/netfilter_ingress.h                 |    2 +-
+ init/Kconfig                                      |    3 +++
+ kernel/jump_label.c                               |   10 +++-------
+ kernel/module.c                                   |    2 +-
+ kernel/sched/core.c                               |    2 +-
+ kernel/sched/debug.c                              |    4 ++--
+ kernel/sched/fair.c                               |    6 +++---
+ kernel/sched/sched.h                              |    6 +++---
+ lib/dynamic_debug.c                               |    2 +-
+ net/core/dev.c                                    |    6 +++---
+ net/netfilter/core.c                              |    6 +++---
+ scripts/gcc-goto.sh                               |    2 +-
+ tools/arch/x86/include/asm/rmwcc.h                |    6 +++---
+ 41 files changed, 65 insertions(+), 115 deletions(-)
+
+--- a/Makefile
++++ b/Makefile
+@@ -508,13 +508,6 @@ export RETPOLINE_VDSO_CFLAGS
+ KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+ KBUILD_AFLAGS += $(call cc-option,-fno-PIE)
+-# check for 'asm goto'
+-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+-  CC_HAVE_ASM_GOTO := 1
+-  KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+-  KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+-endif
+-
+ # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
+ # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
+ # CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -71,6 +71,7 @@ config KPROBES
+ config JUMP_LABEL
+        bool "Optimize very unlikely/likely branches"
+        depends on HAVE_ARCH_JUMP_LABEL
++       depends on CC_HAS_ASM_GOTO
+        help
+          This option enables a transparent branch optimization that
+        makes certain almost-always-true or almost-always-false branch
+--- a/arch/arm/kernel/jump_label.c
++++ b/arch/arm/kernel/jump_label.c
+@@ -4,8 +4,6 @@
+ #include <asm/patch.h>
+ #include <asm/insn.h>
+-#ifdef HAVE_JUMP_LABEL
+-
+ static void __arch_jump_label_transform(struct jump_entry *entry,
+                                       enum jump_label_type type,
+                                       bool is_static)
+@@ -35,5 +33,3 @@ void arch_jump_label_transform_static(st
+ {
+       __arch_jump_label_transform(entry, type, true);
+ }
+-
+-#endif
+--- a/arch/arm64/kernel/jump_label.c
++++ b/arch/arm64/kernel/jump_label.c
+@@ -20,8 +20,6 @@
+ #include <linux/jump_label.h>
+ #include <asm/insn.h>
+-#ifdef HAVE_JUMP_LABEL
+-
+ void arch_jump_label_transform(struct jump_entry *entry,
+                              enum jump_label_type type)
+ {
+@@ -49,5 +47,3 @@ void arch_jump_label_transform_static(st
+        * NOP needs to be replaced by a branch.
+        */
+ }
+-
+-#endif        /* HAVE_JUMP_LABEL */
+--- a/arch/mips/kernel/jump_label.c
++++ b/arch/mips/kernel/jump_label.c
+@@ -16,8 +16,6 @@
+ #include <asm/cacheflush.h>
+ #include <asm/inst.h>
+-#ifdef HAVE_JUMP_LABEL
+-
+ /*
+  * Define parameters for the standard MIPS and the microMIPS jump
+  * instruction encoding respectively:
+@@ -70,5 +68,3 @@ void arch_jump_label_transform(struct ju
+       mutex_unlock(&text_mutex);
+ }
+-
+-#endif /* HAVE_JUMP_LABEL */
+--- a/arch/powerpc/include/asm/asm-prototypes.h
++++ b/arch/powerpc/include/asm/asm-prototypes.h
+@@ -38,7 +38,7 @@ extern struct static_key hcall_tracepoin
+ void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
+ void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
+ /* OPAL tracing */
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ extern struct static_key opal_tracepoint_key;
+ #endif
+--- a/arch/powerpc/kernel/jump_label.c
++++ b/arch/powerpc/kernel/jump_label.c
+@@ -11,7 +11,6 @@
+ #include <linux/jump_label.h>
+ #include <asm/code-patching.h>
+-#ifdef HAVE_JUMP_LABEL
+ void arch_jump_label_transform(struct jump_entry *entry,
+                              enum jump_label_type type)
+ {
+@@ -22,4 +21,3 @@ void arch_jump_label_transform(struct ju
+       else
+               patch_instruction(addr, PPC_INST_NOP);
+ }
+-#endif
+--- a/arch/powerpc/platforms/powernv/opal-tracepoints.c
++++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c
+@@ -4,7 +4,7 @@
+ #include <asm/trace.h>
+ #include <asm/asm-prototypes.h>
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ struct static_key opal_tracepoint_key = STATIC_KEY_INIT;
+ int opal_tracepoint_regfunc(void)
+--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
++++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
+@@ -20,7 +20,7 @@
+       .section        ".text"
+ #ifdef CONFIG_TRACEPOINTS
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ #define OPAL_BRANCH(LABEL)                                    \
+       ARCH_STATIC_BRANCH(LABEL, opal_tracepoint_key)
+ #else
+--- a/arch/powerpc/platforms/pseries/hvCall.S
++++ b/arch/powerpc/platforms/pseries/hvCall.S
+@@ -19,7 +19,7 @@
+       
+ #ifdef CONFIG_TRACEPOINTS
+-#ifndef HAVE_JUMP_LABEL
++#ifndef CONFIG_JUMP_LABEL
+       .section        ".toc","aw"
+       .globl hcall_tracepoint_refcount
+@@ -79,7 +79,7 @@ hcall_tracepoint_refcount:
+       mr      r5,BUFREG;                                      \
+       __HCALL_INST_POSTCALL
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ #define HCALL_BRANCH(LABEL)                                   \
+       ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key)
+ #else
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -828,7 +828,7 @@ EXPORT_SYMBOL(arch_free_page);
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+ #ifdef CONFIG_TRACEPOINTS
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;
+ int hcall_tracepoint_regfunc(void)
+--- a/arch/s390/kernel/Makefile
++++ b/arch/s390/kernel/Makefile
+@@ -44,7 +44,7 @@ CFLAGS_ptrace.o              += -DUTS_MACHINE='"$(UT
+ obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
+ obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
+ obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o
+-obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
++obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o
+ obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
+ obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
+ obj-y += nospec-branch.o
+@@ -68,6 +68,7 @@ obj-$(CONFIG_KPROBES)                += kprobes.o
+ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
+ obj-$(CONFIG_CRASH_DUMP)      += crash_dump.o
+ obj-$(CONFIG_UPROBES)         += uprobes.o
++obj-$(CONFIG_JUMP_LABEL)      += jump_label.o
+ obj-$(CONFIG_KEXEC_FILE)      += machine_kexec_file.o kexec_image.o
+ obj-$(CONFIG_KEXEC_FILE)      += kexec_elf.o
+--- a/arch/s390/kernel/jump_label.c
++++ b/arch/s390/kernel/jump_label.c
+@@ -10,8 +10,6 @@
+ #include <linux/jump_label.h>
+ #include <asm/ipl.h>
+-#ifdef HAVE_JUMP_LABEL
+-
+ struct insn {
+       u16 opcode;
+       s32 offset;
+@@ -102,5 +100,3 @@ void arch_jump_label_transform_static(st
+ {
+       __jump_label_transform(entry, type, 1);
+ }
+-
+-#endif
+--- a/arch/sparc/kernel/Makefile
++++ b/arch/sparc/kernel/Makefile
+@@ -118,4 +118,4 @@ pc--$(CONFIG_PERF_EVENTS) := perf_event.
+ obj-$(CONFIG_SPARC64) += $(pc--y)
+ obj-$(CONFIG_UPROBES) += uprobes.o
+-obj-$(CONFIG_SPARC64) += jump_label.o
++obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+--- a/arch/sparc/kernel/jump_label.c
++++ b/arch/sparc/kernel/jump_label.c
+@@ -9,8 +9,6 @@
+ #include <asm/cacheflush.h>
+-#ifdef HAVE_JUMP_LABEL
+-
+ void arch_jump_label_transform(struct jump_entry *entry,
+                              enum jump_label_type type)
+ {
+@@ -47,5 +45,3 @@ void arch_jump_label_transform(struct ju
+       flushi(insn);
+       mutex_unlock(&text_mutex);
+ }
+-
+-#endif
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -305,7 +305,7 @@ vdso_install:
+ archprepare: checkbin
+ checkbin:
+-ifndef CC_HAVE_ASM_GOTO
++ifndef CONFIG_CC_HAS_ASM_GOTO
+       @echo Compiler lacks asm-goto support.
+       @exit 1
+ endif
+--- a/arch/x86/entry/calling.h
++++ b/arch/x86/entry/calling.h
+@@ -337,7 +337,7 @@ For 32-bit we have the following convent
+  */
+ .macro CALL_enter_from_user_mode
+ #ifdef CONFIG_CONTEXT_TRACKING
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+       STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
+ #endif
+       call enter_from_user_mode
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -140,7 +140,7 @@ extern void clear_cpu_cap(struct cpuinfo
+ #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
+-#if defined(__clang__) && !defined(CC_HAVE_ASM_GOTO)
++#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)
+ /*
+  * Workaround for the sake of BPF compilation which utilizes kernel
+--- a/arch/x86/include/asm/jump_label.h
++++ b/arch/x86/include/asm/jump_label.h
+@@ -2,19 +2,6 @@
+ #ifndef _ASM_X86_JUMP_LABEL_H
+ #define _ASM_X86_JUMP_LABEL_H
+-#ifndef HAVE_JUMP_LABEL
+-/*
+- * For better or for worse, if jump labels (the gcc extension) are missing,
+- * then the entire static branch patching infrastructure is compiled out.
+- * If that happens, the code in here will malfunction.  Raise a compiler
+- * error instead.
+- *
+- * In theory, jump labels and the static branch patching infrastructure
+- * could be decoupled to fix this.
+- */
+-#error asm/jump_label.h included on a non-jump-label kernel
+-#endif
+-
+ #define JUMP_LABEL_NOP_SIZE 5
+ #ifdef CONFIG_X86_64
+--- a/arch/x86/include/asm/rmwcc.h
++++ b/arch/x86/include/asm/rmwcc.h
+@@ -4,7 +4,7 @@
+ #define __CLOBBERS_MEM(clb...)        "memory", ## clb
+-#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)
++#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO)
+ /* Use asm goto */
+@@ -21,7 +21,7 @@ cc_label:                                                            \
+ #define __BINARY_RMWcc_ARG    " %1, "
+-#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
++#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
+ /* Use flags output or a set instruction */
+@@ -36,7 +36,7 @@ do {                                                                 \
+ #define __BINARY_RMWcc_ARG    " %2, "
+-#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
++#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
+ #define GEN_UNARY_RMWcc(op, var, arg0, cc)                            \
+       __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -49,7 +49,8 @@ obj-$(CONFIG_COMPAT) += signal_compat.o
+ obj-y                 += traps.o idt.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
+ obj-y                 += time.o ioport.o dumpstack.o nmi.o
+ obj-$(CONFIG_MODIFY_LDT_SYSCALL)      += ldt.o
+-obj-y                 += setup.o x86_init.o i8259.o irqinit.o jump_label.o
++obj-y                 += setup.o x86_init.o i8259.o irqinit.o
++obj-$(CONFIG_JUMP_LABEL)      += jump_label.o
+ obj-$(CONFIG_IRQ_WORK)  += irq_work.o
+ obj-y                 += probe_roms.o
+ obj-$(CONFIG_X86_64)  += sys_x86_64.o
+--- a/arch/x86/kernel/jump_label.c
++++ b/arch/x86/kernel/jump_label.c
+@@ -16,8 +16,6 @@
+ #include <asm/alternative.h>
+ #include <asm/text-patching.h>
+-#ifdef HAVE_JUMP_LABEL
+-
+ union jump_code_union {
+       char code[JUMP_LABEL_NOP_SIZE];
+       struct {
+@@ -142,5 +140,3 @@ __init_or_module void arch_jump_label_tr
+       if (jlstate == JL_STATE_UPDATE)
+               __jump_label_transform(entry, type, text_poke_early, 1);
+ }
+-
+-#endif
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -456,7 +456,7 @@ FOP_END;
+ /*
+  * XXX: inoutclob user must know where the argument is being expanded.
+- *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
++ *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
+  */
+ #define asm_safe(insn, inoutclob...) \
+ ({ \
+--- a/include/linux/dynamic_debug.h
++++ b/include/linux/dynamic_debug.h
+@@ -2,7 +2,7 @@
+ #ifndef _DYNAMIC_DEBUG_H
+ #define _DYNAMIC_DEBUG_H
+-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
++#if defined(CONFIG_JUMP_LABEL)
+ #include <linux/jump_label.h>
+ #endif
+@@ -38,7 +38,7 @@ struct _ddebug {
+ #define _DPRINTK_FLAGS_DEFAULT 0
+ #endif
+       unsigned int flags:8;
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+       union {
+               struct static_key_true dd_key_true;
+               struct static_key_false dd_key_false;
+@@ -83,7 +83,7 @@ void __dynamic_netdev_dbg(struct _ddebug
+               dd_key_init(key, init)                          \
+       }
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ #define dd_key_init(key, init) key = (init)
+--- a/include/linux/jump_label.h
++++ b/include/linux/jump_label.h
+@@ -71,10 +71,6 @@
+  * Additional babbling in: Documentation/static-keys.txt
+  */
+-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+-# define HAVE_JUMP_LABEL
+-#endif
+-
+ #ifndef __ASSEMBLY__
+ #include <linux/types.h>
+@@ -86,7 +82,7 @@ extern bool static_key_initialized;
+                                   "%s(): static key '%pS' used before call to jump_label_init()", \
+                                   __func__, (key))
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ struct static_key {
+       atomic_t enabled;
+@@ -114,10 +110,10 @@ struct static_key {
+ struct static_key {
+       atomic_t enabled;
+ };
+-#endif        /* HAVE_JUMP_LABEL */
++#endif        /* CONFIG_JUMP_LABEL */
+ #endif /* __ASSEMBLY__ */
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ #include <asm/jump_label.h>
+ #endif
+@@ -130,7 +126,7 @@ enum jump_label_type {
+ struct module;
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ #define JUMP_TYPE_FALSE               0UL
+ #define JUMP_TYPE_TRUE                1UL
+@@ -184,7 +180,7 @@ extern void static_key_disable_cpuslocke
+       { .enabled = { 0 },                                     \
+         { .entries = (void *)JUMP_TYPE_FALSE } }
+-#else  /* !HAVE_JUMP_LABEL */
++#else  /* !CONFIG_JUMP_LABEL */
+ #include <linux/atomic.h>
+ #include <linux/bug.h>
+@@ -271,7 +267,7 @@ static inline void static_key_disable(st
+ #define STATIC_KEY_INIT_TRUE  { .enabled = ATOMIC_INIT(1) }
+ #define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
+-#endif        /* HAVE_JUMP_LABEL */
++#endif        /* CONFIG_JUMP_LABEL */
+ #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+ #define jump_label_enabled static_key_enabled
+@@ -335,7 +331,7 @@ extern bool ____wrong_branch_error(void)
+       static_key_count((struct static_key *)x) > 0;                           \
+ })
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ /*
+  * Combine the right initial value (type) with the right branch order
+@@ -417,12 +413,12 @@ extern bool ____wrong_branch_error(void)
+       unlikely(branch);                                                       \
+ })
+-#else /* !HAVE_JUMP_LABEL */
++#else /* !CONFIG_JUMP_LABEL */
+ #define static_branch_likely(x)               likely(static_key_enabled(&(x)->key))
+ #define static_branch_unlikely(x)     unlikely(static_key_enabled(&(x)->key))
+-#endif /* HAVE_JUMP_LABEL */
++#endif /* CONFIG_JUMP_LABEL */
+ /*
+  * Advanced usage; refcount, branch is enabled when: count != 0
+--- a/include/linux/jump_label_ratelimit.h
++++ b/include/linux/jump_label_ratelimit.h
+@@ -5,21 +5,19 @@
+ #include <linux/jump_label.h>
+ #include <linux/workqueue.h>
+-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
++#if defined(CONFIG_JUMP_LABEL)
+ struct static_key_deferred {
+       struct static_key key;
+       unsigned long timeout;
+       struct delayed_work work;
+ };
+-#endif
+-#ifdef HAVE_JUMP_LABEL
+ extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+ extern void static_key_deferred_flush(struct static_key_deferred *key);
+ extern void
+ jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+-#else /* !HAVE_JUMP_LABEL */
++#else /* !CONFIG_JUMP_LABEL */
+ struct static_key_deferred {
+       struct static_key  key;
+ };
+@@ -38,5 +36,5 @@ jump_label_rate_limit(struct static_key_
+ {
+       STATIC_KEY_CHECK_USE(key);
+ }
+-#endif        /* HAVE_JUMP_LABEL */
++#endif        /* CONFIG_JUMP_LABEL */
+ #endif        /* _LINUX_JUMP_LABEL_RATELIMIT_H */
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -433,7 +433,7 @@ struct module {
+       unsigned int num_tracepoints;
+       tracepoint_ptr_t *tracepoints_ptrs;
+ #endif
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+       struct jump_entry *jump_entries;
+       unsigned int num_jump_entries;
+ #endif
+--- a/include/linux/netfilter.h
++++ b/include/linux/netfilter.h
+@@ -176,7 +176,7 @@ void nf_unregister_net_hooks(struct net
+ int nf_register_sockopt(struct nf_sockopt_ops *reg);
+ void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+ #endif
+@@ -198,7 +198,7 @@ static inline int nf_hook(u_int8_t pf, u
+       struct nf_hook_entries *hook_head = NULL;
+       int ret = 1;
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+       if (__builtin_constant_p(pf) &&
+           __builtin_constant_p(hook) &&
+           !static_key_false(&nf_hooks_needed[pf][hook]))
+--- a/include/linux/netfilter_ingress.h
++++ b/include/linux/netfilter_ingress.h
+@@ -8,7 +8,7 @@
+ #ifdef CONFIG_NETFILTER_INGRESS
+ static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
+ {
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+       if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
+               return false;
+ #endif
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -23,6 +23,9 @@ config CLANG_VERSION
+       int
+       default $(shell,$(srctree)/scripts/clang-version.sh $(CC))
++config CC_HAS_ASM_GOTO
++      def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
++
+ config CONSTRUCTORS
+       bool
+       depends on !UML
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -18,8 +18,6 @@
+ #include <linux/cpu.h>
+ #include <asm/sections.h>
+-#ifdef HAVE_JUMP_LABEL
+-
+ /* mutex to protect coming/going of the the jump_label table */
+ static DEFINE_MUTEX(jump_label_mutex);
+@@ -60,13 +58,13 @@ jump_label_sort_entries(struct jump_entr
+ static void jump_label_update(struct static_key *key);
+ /*
+- * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
++ * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
+  * The use of 'atomic_read()' requires atomic.h and its problematic for some
+  * kernel headers such as kernel.h and others. Since static_key_count() is not
+- * used in the branch statements as it is for the !HAVE_JUMP_LABEL case its ok
++ * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case its ok
+  * to have it be a function here. Similarly, for 'static_key_enable()' and
+  * 'static_key_disable()', which require bug.h. This should allow jump_label.h
+- * to be included from most/all places for HAVE_JUMP_LABEL.
++ * to be included from most/all places for CONFIG_JUMP_LABEL.
+  */
+ int static_key_count(struct static_key *key)
+ {
+@@ -796,5 +794,3 @@ static __init int jump_label_test(void)
+ }
+ early_initcall(jump_label_test);
+ #endif /* STATIC_KEYS_SELFTEST */
+-
+-#endif /* HAVE_JUMP_LABEL */
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -3100,7 +3100,7 @@ static int find_module_sections(struct m
+                                            sizeof(*mod->tracepoints_ptrs),
+                                            &mod->num_tracepoints);
+ #endif
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+       mod->jump_entries = section_objs(info, "__jump_table",
+                                       sizeof(*mod->jump_entries),
+                                       &mod->num_jump_entries);
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -24,7 +24,7 @@
+ DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+-#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
+ /*
+  * Debugging: various feature bits
+  *
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -73,7 +73,7 @@ static int sched_feat_show(struct seq_fi
+       return 0;
+ }
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ #define jump_label_key__true  STATIC_KEY_INIT_TRUE
+ #define jump_label_key__false STATIC_KEY_INIT_FALSE
+@@ -99,7 +99,7 @@ static void sched_feat_enable(int i)
+ #else
+ static void sched_feat_disable(int i) { };
+ static void sched_feat_enable(int i) { };
+-#endif /* HAVE_JUMP_LABEL */
++#endif /* CONFIG_JUMP_LABEL */
+ static int sched_feat_set(char *cmp)
+ {
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4209,7 +4209,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+ #ifdef CONFIG_CFS_BANDWIDTH
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ static struct static_key __cfs_bandwidth_used;
+ static inline bool cfs_bandwidth_used(void)
+@@ -4226,7 +4226,7 @@ void cfs_bandwidth_usage_dec(void)
+ {
+       static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
+ }
+-#else /* HAVE_JUMP_LABEL */
++#else /* CONFIG_JUMP_LABEL */
+ static bool cfs_bandwidth_used(void)
+ {
+       return true;
+@@ -4234,7 +4234,7 @@ static bool cfs_bandwidth_used(void)
+ void cfs_bandwidth_usage_inc(void) {}
+ void cfs_bandwidth_usage_dec(void) {}
+-#endif /* HAVE_JUMP_LABEL */
++#endif /* CONFIG_JUMP_LABEL */
+ /*
+  * default period for cfs group bandwidth.
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1359,7 +1359,7 @@ enum {
+ #undef SCHED_FEAT
+-#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
+ /*
+  * To support run-time toggling of sched features, all the translation units
+@@ -1379,7 +1379,7 @@ static __always_inline bool static_branc
+ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
+ #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
+-#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
++#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */
+ /*
+  * Each translation unit has its own copy of sysctl_sched_features to allow
+@@ -1395,7 +1395,7 @@ static const_debug __maybe_unused unsign
+ #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+-#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
++#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */
+ extern struct static_key_false sched_numa_balancing;
+ extern struct static_key_false sched_schedstats;
+--- a/lib/dynamic_debug.c
++++ b/lib/dynamic_debug.c
+@@ -188,7 +188,7 @@ static int ddebug_change(const struct dd
+                       newflags = (dp->flags & mask) | flags;
+                       if (newflags == dp->flags)
+                               continue;
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+                       if (dp->flags & _DPRINTK_FLAGS_PRINT) {
+                               if (!(flags & _DPRINTK_FLAGS_PRINT))
+                                       static_branch_disable(&dp->key.dd_key_true);
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1821,7 +1821,7 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
+ #endif
+ static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ static atomic_t netstamp_needed_deferred;
+ static atomic_t netstamp_wanted;
+ static void netstamp_clear(struct work_struct *work)
+@@ -1840,7 +1840,7 @@ static DECLARE_WORK(netstamp_work, netst
+ void net_enable_timestamp(void)
+ {
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+       int wanted;
+       while (1) {
+@@ -1860,7 +1860,7 @@ EXPORT_SYMBOL(net_enable_timestamp);
+ void net_disable_timestamp(void)
+ {
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+       int wanted;
+       while (1) {
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
+@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(nf_ipv6_ops);
+ DEFINE_PER_CPU(bool, nf_skb_duplicated);
+ EXPORT_SYMBOL_GPL(nf_skb_duplicated);
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+ struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+ EXPORT_SYMBOL(nf_hooks_needed);
+ #endif
+@@ -347,7 +347,7 @@ static int __nf_register_net_hook(struct
+       if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+               net_inc_ingress_queue();
+ #endif
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+       static_key_slow_inc(&nf_hooks_needed[pf][reg->hooknum]);
+ #endif
+       BUG_ON(p == new_hooks);
+@@ -405,7 +405,7 @@ static void __nf_unregister_net_hook(str
+               if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+                       net_dec_ingress_queue();
+ #endif
+-#ifdef HAVE_JUMP_LABEL
++#ifdef CONFIG_JUMP_LABEL
+               static_key_slow_dec(&nf_hooks_needed[pf][reg->hooknum]);
+ #endif
+       } else {
+--- a/scripts/gcc-goto.sh
++++ b/scripts/gcc-goto.sh
+@@ -3,7 +3,7 @@
+ # Test for gcc 'asm goto' support
+ # Copyright (C) 2010, Jason Baron <jbaron@redhat.com>
+-cat << "END" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"
++cat << "END" | $@ -x c - -fno-PIE -c -o /dev/null
+ int main(void)
+ {
+ #if defined(__arm__) || defined(__aarch64__)
+--- a/tools/arch/x86/include/asm/rmwcc.h
++++ b/tools/arch/x86/include/asm/rmwcc.h
+@@ -2,7 +2,7 @@
+ #ifndef _TOOLS_LINUX_ASM_X86_RMWcc
+ #define _TOOLS_LINUX_ASM_X86_RMWcc
+-#ifdef CC_HAVE_ASM_GOTO
++#ifdef CONFIG_CC_HAS_ASM_GOTO
+ #define __GEN_RMWcc(fullop, var, cc, ...)                             \
+ do {                                                                  \
+@@ -20,7 +20,7 @@ cc_label:                                                            \
+ #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                        \
+       __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
+-#else /* !CC_HAVE_ASM_GOTO */
++#else /* !CONFIG_CC_HAS_ASM_GOTO */
+ #define __GEN_RMWcc(fullop, var, cc, ...)                             \
+ do {                                                                  \
+@@ -37,6 +37,6 @@ do {                                                                 \
+ #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                        \
+       __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
+-#endif /* CC_HAVE_ASM_GOTO */
++#endif /* CONFIG_CC_HAS_ASM_GOTO */
+ #endif /* _TOOLS_LINUX_ASM_X86_RMWcc */
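The practical effect for code using static keys: there is no separate
HAVE_JUMP_LABEL to test, because Kconfig only offers JUMP_LABEL when the
compiler passed the 'asm goto' probe. A minimal sketch of a consumer (the
key name is hypothetical; the API is from <linux/jump_label.h>):

    #include <linux/jump_label.h>

    static DEFINE_STATIC_KEY_FALSE(my_feature_key);  /* hypothetical key */

    void my_feature_enable(void)
    {
            static_branch_enable(&my_feature_key);   /* patches call sites */
    }

    bool my_feature_active(void)
    {
            /* nop -> jmp when CONFIG_JUMP_LABEL; plain load+test otherwise */
            return static_branch_unlikely(&my_feature_key);
    }

The same source compiles either way; only the generated branch differs,
which is why the arch Makefiles above can key jump_label.o on
CONFIG_JUMP_LABEL alone.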
index f954e12e35e7bfbd1adebe1b98fb8f1fbc73ebb9..9a6c6640e44a5e720242af26a2850166ac9f5fcc 100644 (file)
@@ -23,3 +23,8 @@ net-stmmac-dma-channel-control-register-need-to-be-init-first.patch
 bnxt_en-fix-aggregation-buffer-leak-under-oom-condition.patch
 net-tls-fix-state-removal-with-feature-flags-off.patch
 net-tls-don-t-ignore-netdev-notifications-if-no-tls-features.patch
+crypto-vmx-ghash-do-nosimd-fallback-manually.patch
+include-linux-compiler-.h-define-asm_volatile_goto.patch
+compiler.h-give-up-__compiletime_assert_fallback.patch
+jump_label-move-asm-goto-support-test-to-kconfig.patch
+xen-pciback-don-t-disable-pci_command-on-pci-device-reset.patch
diff --git a/queue-4.19/xen-pciback-don-t-disable-pci_command-on-pci-device-reset.patch b/queue-4.19/xen-pciback-don-t-disable-pci_command-on-pci-device-reset.patch
new file mode 100644 (file)
index 0000000..600a9fc
--- /dev/null
@@ -0,0 +1,56 @@
+From 7681f31ec9cdacab4fd10570be924f2cef6669ba Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 13 Feb 2019 18:21:31 -0500
+Subject: xen/pciback: Don't disable PCI_COMMAND on PCI device reset.
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 7681f31ec9cdacab4fd10570be924f2cef6669ba upstream.
+
+There is no need for this at all. At worst it means that if
+the guest tries to write to BARs it could lead (on certain
+platforms) to PCI SERR errors.
+
+Please note that with af6fc858a35b90e89ea7a7ee58e66628c55c776b
+("xen-pciback: limit guest control of command register")
+a guest is still allowed to enable those control bits (safely), but
+is not allowed to disable them; therefore a well-behaved
+frontend which enables things before using them will still
+function correctly.
+
+This is done via a write to configuration register 0x4, which
+triggers on the backend side:
+command_write
+  \- pci_enable_device
+     \- pci_enable_device_flags
+        \- do_pci_enable_device
+           \- pcibios_enable_device
+              \- pci_enable_resources
+                [which enables the PCI_COMMAND_MEMORY|PCI_COMMAND_IO]
+
+However guests (and drivers) which don't do this could cause
+problems, including the security issues which XSA-120 sought
+to address.
+
+Reported-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Reviewed-by: Prarit Bhargava <prarit@redhat.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/xen-pciback/pciback_ops.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -127,8 +127,6 @@ void xen_pcibk_reset_device(struct pci_d
+               if (pci_is_enabled(dev))
+                       pci_disable_device(dev);
+-              pci_write_config_word(dev, PCI_COMMAND, 0);
+-
+               dev->is_busmaster = 0;
+       } else {
+               pci_read_config_word(dev, PCI_COMMAND, &cmd);