5.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 1 Aug 2022 08:51:35 +0000 (10:51 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 1 Aug 2022 08:51:35 +0000 (10:51 +0200)
added patches:
bpf-add-prog_test_run-support-for-sk_lookup-programs.patch
bpf-consolidate-shared-test-timing-code.patch
selftests-bpf-don-t-run-sk_lookup-in-verifier-tests.patch
x86-bugs-do-not-enable-ibpb-at-firmware-entry-when-ibpb-is-not-available.patch

queue-5.10/bpf-add-prog_test_run-support-for-sk_lookup-programs.patch [new file with mode: 0644]
queue-5.10/bpf-consolidate-shared-test-timing-code.patch [new file with mode: 0644]
queue-5.10/selftests-bpf-don-t-run-sk_lookup-in-verifier-tests.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/x86-bugs-do-not-enable-ibpb-at-firmware-entry-when-ibpb-is-not-available.patch [new file with mode: 0644]

diff --git a/queue-5.10/bpf-add-prog_test_run-support-for-sk_lookup-programs.patch b/queue-5.10/bpf-add-prog_test_run-support-for-sk_lookup-programs.patch
new file mode 100644 (file)
index 0000000..b1206d8
--- /dev/null
+++ b/queue-5.10/bpf-add-prog_test_run-support-for-sk_lookup-programs.patch
@@ -0,0 +1,221 @@
+From foo@baz Mon Aug  1 10:51:00 AM CEST 2022
+From: Tianchen Ding <dtcccc@linux.alibaba.com>
+Date: Mon,  1 Aug 2022 15:29:15 +0800
+Subject: bpf: Add PROG_TEST_RUN support for sk_lookup programs
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Cc: Lorenz Bauer <lmb@cloudflare.com>, Alexei Starovoitov <ast@kernel.org>, linux-kernel@vger.kernel.org, stable@vger.kernel.org
+Message-ID: <20220801072916.29586-3-dtcccc@linux.alibaba.com>
+
+From: Tianchen Ding <dtcccc@linux.alibaba.com>
+
+From: Lorenz Bauer <lmb@cloudflare.com>
+
+commit 7c32e8f8bc33a5f4b113a630857e46634e3e143b upstream.
+
+Allow to pass sk_lookup programs to PROG_TEST_RUN. User space
+provides the full bpf_sk_lookup struct as context. Since the
+context includes a socket pointer that can't be exposed
+to user space we define that PROG_TEST_RUN returns the cookie
+of the selected socket or zero in place of the socket pointer.
+
+We don't support testing programs that select a reuseport socket,
+since this would mean running another (unrelated) BPF program
+from the sk_lookup test handler.
+
+Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20210303101816.36774-3-lmb@cloudflare.com
+Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/bpf.h            |   10 +++
+ include/uapi/linux/bpf.h       |    5 +
+ net/bpf/test_run.c             |  105 +++++++++++++++++++++++++++++++++++++++++
+ net/core/filter.c              |    1 
+ tools/include/uapi/linux/bpf.h |    5 +
+ 5 files changed, 124 insertions(+), 2 deletions(-)
+
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1457,6 +1457,9 @@ int bpf_prog_test_run_flow_dissector(str
+ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+                            const union bpf_attr *kattr,
+                            union bpf_attr __user *uattr);
++int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
++                              const union bpf_attr *kattr,
++                              union bpf_attr __user *uattr);
+ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
+                   const struct bpf_prog *prog,
+                   struct bpf_insn_access_aux *info);
+@@ -1670,6 +1673,13 @@ static inline int bpf_prog_test_run_flow
+ {
+       return -ENOTSUPP;
+ }
++
++static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
++                                            const union bpf_attr *kattr,
++                                            union bpf_attr __user *uattr)
++{
++      return -ENOTSUPP;
++}
+ static inline void bpf_map_put(struct bpf_map *map)
+ {
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -5007,7 +5007,10 @@ struct bpf_pidns_info {
+ /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
+ struct bpf_sk_lookup {
+-      __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
++      union {
++              __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
++              __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
++      };
+       __u32 family;           /* Protocol family (AF_INET, AF_INET6) */
+       __u32 protocol;         /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -10,8 +10,10 @@
+ #include <net/bpf_sk_storage.h>
+ #include <net/sock.h>
+ #include <net/tcp.h>
++#include <net/net_namespace.h>
+ #include <linux/error-injection.h>
+ #include <linux/smp.h>
++#include <linux/sock_diag.h>
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/bpf_test_run.h>
+@@ -796,3 +798,106 @@ out:
+       kfree(data);
+       return ret;
+ }
++
++int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
++                              union bpf_attr __user *uattr)
++{
++      struct bpf_test_timer t = { NO_PREEMPT };
++      struct bpf_prog_array *progs = NULL;
++      struct bpf_sk_lookup_kern ctx = {};
++      u32 repeat = kattr->test.repeat;
++      struct bpf_sk_lookup *user_ctx;
++      u32 retval, duration;
++      int ret = -EINVAL;
++
++      if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
++              return -EINVAL;
++
++      if (kattr->test.flags || kattr->test.cpu)
++              return -EINVAL;
++
++      if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
++          kattr->test.data_size_out)
++              return -EINVAL;
++
++      if (!repeat)
++              repeat = 1;
++
++      user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
++      if (IS_ERR(user_ctx))
++              return PTR_ERR(user_ctx);
++
++      if (!user_ctx)
++              return -EINVAL;
++
++      if (user_ctx->sk)
++              goto out;
++
++      if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
++              goto out;
++
++      if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
++              ret = -ERANGE;
++              goto out;
++      }
++
++      ctx.family = (u16)user_ctx->family;
++      ctx.protocol = (u16)user_ctx->protocol;
++      ctx.dport = (u16)user_ctx->local_port;
++      ctx.sport = (__force __be16)user_ctx->remote_port;
++
++      switch (ctx.family) {
++      case AF_INET:
++              ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
++              ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
++              break;
++
++#if IS_ENABLED(CONFIG_IPV6)
++      case AF_INET6:
++              ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
++              ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
++              break;
++#endif
++
++      default:
++              ret = -EAFNOSUPPORT;
++              goto out;
++      }
++
++      progs = bpf_prog_array_alloc(1, GFP_KERNEL);
++      if (!progs) {
++              ret = -ENOMEM;
++              goto out;
++      }
++
++      progs->items[0].prog = prog;
++
++      bpf_test_timer_enter(&t);
++      do {
++              ctx.selected_sk = NULL;
++              retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, BPF_PROG_RUN);
++      } while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
++      bpf_test_timer_leave(&t);
++
++      if (ret < 0)
++              goto out;
++
++      user_ctx->cookie = 0;
++      if (ctx.selected_sk) {
++              if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
++                      ret = -EOPNOTSUPP;
++                      goto out;
++              }
++
++              user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
++      }
++
++      ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
++      if (!ret)
++              ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
++
++out:
++      bpf_prog_array_free(progs);
++      kfree(user_ctx);
++      return ret;
++}
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -10334,6 +10334,7 @@ static u32 sk_lookup_convert_ctx_access(
+ }
+ const struct bpf_prog_ops sk_lookup_prog_ops = {
++      .test_run = bpf_prog_test_run_sk_lookup,
+ };
+ const struct bpf_verifier_ops sk_lookup_verifier_ops = {
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -5006,7 +5006,10 @@ struct bpf_pidns_info {
+ /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
+ struct bpf_sk_lookup {
+-      __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
++      union {
++              __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
++              __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
++      };
+       __u32 family;           /* Protocol family (AF_INET, AF_INET6) */
+       __u32 protocol;         /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
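
For context on how this new hook is driven: user space supplies the full
struct bpf_sk_lookup as ctx_in and reads the selected socket's cookie back
out of the union that replaces the sk pointer. A minimal sketch (not part
of the patch; it assumes UAPI headers carrying this change, a libbpf new
enough to provide LIBBPF_OPTS and bpf_prog_test_run_opts(), and prog_fd
holding a loaded BPF_PROG_TYPE_SK_LOOKUP program):

  #include <errno.h>
  #include <stdio.h>
  #include <arpa/inet.h>
  #include <linux/bpf.h>
  #include <bpf/bpf.h>

  static int test_run_sk_lookup(int prog_fd)
  {
        /* Bytes past local_port must stay zero or the kernel returns
         * -EINVAL (see the range_is_zero() check above). */
        struct bpf_sk_lookup ctx = {
                .family      = AF_INET,
                .protocol    = IPPROTO_TCP,
                .remote_ip4  = htonl(INADDR_LOOPBACK), /* network order */
                .remote_port = htons(40000),           /* network order */
                .local_ip4   = htonl(INADDR_LOOPBACK),
                .local_port  = 8080,                   /* host order */
        };
        LIBBPF_OPTS(bpf_test_run_opts, opts,
                .ctx_in       = &ctx,
                .ctx_size_in  = sizeof(ctx),
                .ctx_out      = &ctx,
                .ctx_size_out = sizeof(ctx),
        );

        if (bpf_prog_test_run_opts(prog_fd, &opts))
                return -errno;
        /* cookie shares the union with sk; zero means no socket chosen. */
        printf("retval=%u cookie=%llu\n", opts.retval,
               (unsigned long long)ctx.cookie);
        return 0;
  }

Programs that select a reuseport group still fail with -EOPNOTSUPP, as the
handler above enforces.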
diff --git a/queue-5.10/bpf-consolidate-shared-test-timing-code.patch b/queue-5.10/bpf-consolidate-shared-test-timing-code.patch
new file mode 100644 (file)
index 0000000..9f611f2
--- /dev/null
+++ b/queue-5.10/bpf-consolidate-shared-test-timing-code.patch
@@ -0,0 +1,227 @@
+From foo@baz Mon Aug  1 10:51:00 AM CEST 2022
+From: Tianchen Ding <dtcccc@linux.alibaba.com>
+Date: Mon,  1 Aug 2022 15:29:14 +0800
+Subject: bpf: Consolidate shared test timing code
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Cc: Lorenz Bauer <lmb@cloudflare.com>, Alexei Starovoitov <ast@kernel.org>, linux-kernel@vger.kernel.org, stable@vger.kernel.org
+Message-ID: <20220801072916.29586-2-dtcccc@linux.alibaba.com>
+
+From: Tianchen Ding <dtcccc@linux.alibaba.com>
+
+From: Lorenz Bauer <lmb@cloudflare.com>
+
+commit 607b9cc92bd7208338d714a22b8082fe83bcb177 upstream.
+
+Share the timing / signal interruption logic between different
+implementations of PROG_TEST_RUN. There is a change in behaviour
+as well. We check the loop exit condition before checking for
+pending signals. This resolves an edge case where a signal
+arrives during the last iteration. Instead of aborting with
+EINTR we return the successful result to user space.
+
+Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/bpf/20210303101816.36774-2-lmb@cloudflare.com
+[dtcccc: fix conflicts in bpf_test_run()]
+Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bpf/test_run.c |  140 +++++++++++++++++++++++++++++------------------------
+ 1 file changed, 78 insertions(+), 62 deletions(-)
+
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -16,14 +16,78 @@
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/bpf_test_run.h>
++struct bpf_test_timer {
++      enum { NO_PREEMPT, NO_MIGRATE } mode;
++      u32 i;
++      u64 time_start, time_spent;
++};
++
++static void bpf_test_timer_enter(struct bpf_test_timer *t)
++      __acquires(rcu)
++{
++      rcu_read_lock();
++      if (t->mode == NO_PREEMPT)
++              preempt_disable();
++      else
++              migrate_disable();
++
++      t->time_start = ktime_get_ns();
++}
++
++static void bpf_test_timer_leave(struct bpf_test_timer *t)
++      __releases(rcu)
++{
++      t->time_start = 0;
++
++      if (t->mode == NO_PREEMPT)
++              preempt_enable();
++      else
++              migrate_enable();
++      rcu_read_unlock();
++}
++
++static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
++      __must_hold(rcu)
++{
++      t->i++;
++      if (t->i >= repeat) {
++              /* We're done. */
++              t->time_spent += ktime_get_ns() - t->time_start;
++              do_div(t->time_spent, t->i);
++              *duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
++              *err = 0;
++              goto reset;
++      }
++
++      if (signal_pending(current)) {
++              /* During iteration: we've been cancelled, abort. */
++              *err = -EINTR;
++              goto reset;
++      }
++
++      if (need_resched()) {
++              /* During iteration: we need to reschedule between runs. */
++              t->time_spent += ktime_get_ns() - t->time_start;
++              bpf_test_timer_leave(t);
++              cond_resched();
++              bpf_test_timer_enter(t);
++      }
++
++      /* Do another round. */
++      return true;
++
++reset:
++      t->i = 0;
++      return false;
++}
++
+ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
+                       u32 *retval, u32 *time, bool xdp)
+ {
+       struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
++      struct bpf_test_timer t = { NO_MIGRATE };
+       enum bpf_cgroup_storage_type stype;
+-      u64 time_start, time_spent = 0;
+-      int ret = 0;
+-      u32 i;
++      int ret;
+       for_each_cgroup_storage_type(stype) {
+               storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
+@@ -38,10 +102,8 @@ static int bpf_test_run(struct bpf_prog
+       if (!repeat)
+               repeat = 1;
+-      rcu_read_lock();
+-      migrate_disable();
+-      time_start = ktime_get_ns();
+-      for (i = 0; i < repeat; i++) {
++      bpf_test_timer_enter(&t);
++      do {
+               ret = bpf_cgroup_storage_set(storage);
+               if (ret)
+                       break;
+@@ -53,29 +115,8 @@ static int bpf_test_run(struct bpf_prog
+               bpf_cgroup_storage_unset();
+-              if (signal_pending(current)) {
+-                      ret = -EINTR;
+-                      break;
+-              }
+-
+-              if (need_resched()) {
+-                      time_spent += ktime_get_ns() - time_start;
+-                      migrate_enable();
+-                      rcu_read_unlock();
+-
+-                      cond_resched();
+-
+-                      rcu_read_lock();
+-                      migrate_disable();
+-                      time_start = ktime_get_ns();
+-              }
+-      }
+-      time_spent += ktime_get_ns() - time_start;
+-      migrate_enable();
+-      rcu_read_unlock();
+-
+-      do_div(time_spent, repeat);
+-      *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
++      } while (bpf_test_timer_continue(&t, repeat, &ret, time));
++      bpf_test_timer_leave(&t);
+       for_each_cgroup_storage_type(stype)
+               bpf_cgroup_storage_free(storage[stype]);
+@@ -688,18 +729,17 @@ int bpf_prog_test_run_flow_dissector(str
+                                    const union bpf_attr *kattr,
+                                    union bpf_attr __user *uattr)
+ {
++      struct bpf_test_timer t = { NO_PREEMPT };
+       u32 size = kattr->test.data_size_in;
+       struct bpf_flow_dissector ctx = {};
+       u32 repeat = kattr->test.repeat;
+       struct bpf_flow_keys *user_ctx;
+       struct bpf_flow_keys flow_keys;
+-      u64 time_start, time_spent = 0;
+       const struct ethhdr *eth;
+       unsigned int flags = 0;
+       u32 retval, duration;
+       void *data;
+       int ret;
+-      u32 i;
+       if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
+               return -EINVAL;
+@@ -735,39 +775,15 @@ int bpf_prog_test_run_flow_dissector(str
+       ctx.data = data;
+       ctx.data_end = (__u8 *)data + size;
+-      rcu_read_lock();
+-      preempt_disable();
+-      time_start = ktime_get_ns();
+-      for (i = 0; i < repeat; i++) {
++      bpf_test_timer_enter(&t);
++      do {
+               retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
+                                         size, flags);
++      } while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
++      bpf_test_timer_leave(&t);
+-              if (signal_pending(current)) {
+-                      preempt_enable();
+-                      rcu_read_unlock();
+-
+-                      ret = -EINTR;
+-                      goto out;
+-              }
+-
+-              if (need_resched()) {
+-                      time_spent += ktime_get_ns() - time_start;
+-                      preempt_enable();
+-                      rcu_read_unlock();
+-
+-                      cond_resched();
+-
+-                      rcu_read_lock();
+-                      preempt_disable();
+-                      time_start = ktime_get_ns();
+-              }
+-      }
+-      time_spent += ktime_get_ns() - time_start;
+-      preempt_enable();
+-      rcu_read_unlock();
+-
+-      do_div(time_spent, repeat);
+-      duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
++      if (ret < 0)
++              goto out;
+       ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
+                             retval, duration);
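
The behavioural point sits in bpf_test_timer_continue(): the completion
test now runs before signal_pending(), so a signal landing during the
final iteration reports the finished result instead of -EINTR. A
self-contained user-space analogue of that ordering (illustrative only,
not kernel code):

  #include <signal.h>
  #include <stdbool.h>
  #include <stdio.h>

  static volatile sig_atomic_t got_signal;

  static void handler(int sig) { (void)sig; got_signal = 1; }

  /* Mirrors bpf_test_timer_continue(): "done" is checked before
   * "interrupted", so cancellation only aborts a run mid-flight. */
  static bool timer_continue(unsigned int *i, unsigned int repeat, int *err)
  {
        if (++*i >= repeat) {
                *err = 0;
                return false;
        }
        if (got_signal) {
                *err = -1;      /* stands in for -EINTR */
                return false;
        }
        return true;
  }

  int main(void)
  {
        unsigned int i = 0;
        int err;

        signal(SIGINT, handler);
        do {
                /* one test-run iteration would execute here */
        } while (timer_continue(&i, 1000000, &err));
        printf("err=%d after %u rounds\n", err, i);
        return 0;
  }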
diff --git a/queue-5.10/selftests-bpf-don-t-run-sk_lookup-in-verifier-tests.patch b/queue-5.10/selftests-bpf-don-t-run-sk_lookup-in-verifier-tests.patch
new file mode 100644 (file)
index 0000000..30b3a00
--- /dev/null
+++ b/queue-5.10/selftests-bpf-don-t-run-sk_lookup-in-verifier-tests.patch
@@ -0,0 +1,61 @@
+From foo@baz Mon Aug  1 10:51:00 AM CEST 2022
+From: Tianchen Ding <dtcccc@linux.alibaba.com>
+Date: Mon,  1 Aug 2022 15:29:16 +0800
+Subject: selftests: bpf: Don't run sk_lookup in verifier tests
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Cc: Lorenz Bauer <lmb@cloudflare.com>, Alexei Starovoitov <ast@kernel.org>, linux-kernel@vger.kernel.org, stable@vger.kernel.org
+Message-ID: <20220801072916.29586-4-dtcccc@linux.alibaba.com>
+
+From: Tianchen Ding <dtcccc@linux.alibaba.com>
+
+From: Lorenz Bauer <lmb@cloudflare.com>
+
+commit b4f894633fa14d7d46ba7676f950b90a401504bb upstream.
+
+sk_lookup doesn't allow setting data_in for bpf_prog_run. This doesn't
+play well with the verifier tests, since they always set a 64 byte
+input buffer. Allow not running verifier tests by setting
+bpf_test.runs to a negative value and don't run the ctx access case
+for sk_lookup. We have dedicated ctx access tests so skipping here
+doesn't reduce coverage.
+
+Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20210303101816.36774-6-lmb@cloudflare.com
+Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/test_verifier.c          |    4 ++--
+ tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c |    1 +
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -100,7 +100,7 @@ struct bpf_test {
+       enum bpf_prog_type prog_type;
+       uint8_t flags;
+       void (*fill_helper)(struct bpf_test *self);
+-      uint8_t runs;
++      int runs;
+ #define bpf_testdata_struct_t                                 \
+       struct {                                                \
+               uint32_t retval, retval_unpriv;                 \
+@@ -1054,7 +1054,7 @@ static void do_test_single(struct bpf_te
+       run_errs = 0;
+       run_successes = 0;
+-      if (!alignment_prevented_execution && fd_prog >= 0) {
++      if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
+               uint32_t expected_val;
+               int i;
+--- a/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c
++++ b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c
+@@ -239,6 +239,7 @@
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
++      .runs = -1,
+ },
+ /* invalid 8-byte reads from a 4-byte fields in bpf_sk_lookup */
+ {
diff --git a/queue-5.10/series b/queue-5.10/series
index 3c068f2020f1328d1b406f88603914dd07f79fc0..b9658dbe3c0e12dfb0935ff6ef135ee3a571afc6 100644 (file)
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -59,3 +59,7 @@ xfs-hold-buffer-across-unpin-and-potential-shutdown-processing.patch
 xfs-remove-dead-stale-buf-unpin-handling-code.patch
 xfs-logging-the-on-disk-inode-lsn-can-make-it-go-backwards.patch
 xfs-enforce-attr3-buffer-recovery-order.patch
+x86-bugs-do-not-enable-ibpb-at-firmware-entry-when-ibpb-is-not-available.patch
+bpf-consolidate-shared-test-timing-code.patch
+bpf-add-prog_test_run-support-for-sk_lookup-programs.patch
+selftests-bpf-don-t-run-sk_lookup-in-verifier-tests.patch
diff --git a/queue-5.10/x86-bugs-do-not-enable-ibpb-at-firmware-entry-when-ibpb-is-not-available.patch b/queue-5.10/x86-bugs-do-not-enable-ibpb-at-firmware-entry-when-ibpb-is-not-available.patch
new file mode 100644 (file)
index 0000000..d0ab2ba
--- /dev/null
+++ b/queue-5.10/x86-bugs-do-not-enable-ibpb-at-firmware-entry-when-ibpb-is-not-available.patch
@@ -0,0 +1,65 @@
+From 571c30b1a88465a1c85a6f7762609939b9085a15 Mon Sep 17 00:00:00 2001
+From: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Date: Thu, 28 Jul 2022 09:26:02 -0300
+Subject: x86/bugs: Do not enable IBPB at firmware entry when IBPB is not available
+
+From: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+
+commit 571c30b1a88465a1c85a6f7762609939b9085a15 upstream.
+
+Some cloud hypervisors do not provide IBPB on very recent CPU processors,
+including AMD processors affected by Retbleed.
+
+Using IBPB before firmware calls on such systems would cause a GPF at boot
+like the one below. Do not enable such calls when IBPB support is not
+present.
+
+  EFI Variables Facility v0.08 2004-May-17
+  general protection fault, maybe for address 0x1: 0000 [#1] PREEMPT SMP NOPTI
+  CPU: 0 PID: 24 Comm: kworker/u2:1 Not tainted 5.19.0-rc8+ #7
+  Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 0.0.0 02/06/2015
+  Workqueue: efi_rts_wq efi_call_rts
+  RIP: 0010:efi_call_rts
+  Code: e8 37 33 58 ff 41 bf 48 00 00 00 49 89 c0 44 89 f9 48 83 c8 01 4c 89 c2 48 c1 ea 20 66 90 b9 49 00 00 00 b8 01 00 00 00 31 d2 <0f> 30 e8 7b 9f 5d ff e8 f6 f8 ff ff 4c 89 f1 4c 89 ea 4c 89 e6 48
+  RSP: 0018:ffffb373800d7e38 EFLAGS: 00010246
+  RAX: 0000000000000001 RBX: 0000000000000006 RCX: 0000000000000049
+  RDX: 0000000000000000 RSI: ffff94fbc19d8fe0 RDI: ffff94fbc1b2b300
+  RBP: ffffb373800d7e70 R08: 0000000000000000 R09: 0000000000000000
+  R10: 000000000000000b R11: 000000000000000b R12: ffffb3738001fd78
+  R13: ffff94fbc2fcfc00 R14: ffffb3738001fd80 R15: 0000000000000048
+  FS:  0000000000000000(0000) GS:ffff94fc3da00000(0000) knlGS:0000000000000000
+  CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+  CR2: ffff94fc30201000 CR3: 000000006f610000 CR4: 00000000000406f0
+  Call Trace:
+   <TASK>
+   ? __wake_up
+   process_one_work
+   worker_thread
+   ? rescuer_thread
+   kthread
+   ? kthread_complete_and_exit
+   ret_from_fork
+   </TASK>
+  Modules linked in:
+
+Fixes: 28a99e95f55c ("x86/amd: Use IBPB for firmware calls")
+Reported-by: Dimitri John Ledkov <dimitri.ledkov@canonical.com>
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20220728122602.2500509-1-cascardo@canonical.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1476,6 +1476,7 @@ static void __init spectre_v2_select_mit
+        * enable IBRS around firmware calls.
+        */
+       if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
++          boot_cpu_has(X86_FEATURE_IBPB) &&
+           (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
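
The fault itself is the WRMSR to the IBPB predictor-command MSR, issued
around firmware calls on a vCPU whose hypervisor never advertised the
feature. A quick guest-side check of what is actually exposed
(illustrative sketch, not part of the fix):

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
        char line[8192];
        FILE *f = fopen("/proc/cpuinfo", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                if (strncmp(line, "flags", 5))
                        continue;
                /* crude substring match; "ibpb" is its own flag here */
                puts(strstr(line, " ibpb") ? "ibpb advertised"
                                           : "ibpb missing");
                break;
        }
        fclose(f);
        return 0;
  }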