Fixes for 5.11
Author:     Sasha Levin <sashal@kernel.org>
AuthorDate: Fri, 26 Mar 2021 04:36:34 +0000 (00:36 -0400)
Commit:     Sasha Levin <sashal@kernel.org>
CommitDate: Fri, 26 Mar 2021 04:36:34 +0000 (00:36 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.11/kvm-x86-protect-userspace-msr-filter-with-srcu-and-s.patch [new file with mode: 0644]
queue-5.11/series
queue-5.11/static_call-allow-module-use-without-exposing-static.patch [new file with mode: 0644]
queue-5.11/static_call-fix-static_call_set_init.patch [new file with mode: 0644]
queue-5.11/static_call-fix-the-module-key-fixup.patch [new file with mode: 0644]
queue-5.11/static_call-pull-some-static_call-declarations-to-th.patch [new file with mode: 0644]

diff --git a/queue-5.11/kvm-x86-protect-userspace-msr-filter-with-srcu-and-s.patch b/queue-5.11/kvm-x86-protect-userspace-msr-filter-with-srcu-and-s.patch
new file mode 100644
index 0000000..078c80c
--- /dev/null
+++ b/queue-5.11/kvm-x86-protect-userspace-msr-filter-with-srcu-and-s.patch
@@ -0,0 +1,314 @@
+From 2420002e7b9f4ef338aa1d8191e548a8b327ae9c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Mar 2021 11:44:33 -0700
+Subject: KVM: x86: Protect userspace MSR filter with SRCU, and set
+ atomically-ish
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit b318e8decf6b9ef1bcf4ca06fae6d6a2cb5d5c5c ]
+
+Fix a plethora of issues with MSR filtering by installing the resulting
+filter as an atomic bundle instead of updating the live filter one range
+at a time.  The KVM_X86_SET_MSR_FILTER ioctl() isn't truly atomic, as
+the hardware MSR bitmaps won't be updated until the next VM-Enter, but
+the relevant software struct is atomically updated, which is what KVM
+really needs.
+
+Similar to the approach used for modifying memslots, make arch.msr_filter
+an SRCU-protected pointer, do all the work configuring the new filter
+outside of kvm->lock, and then acquire kvm->lock only when the new filter
+has been vetted and created.  That way vCPU readers either see the old
+filter or the new filter in their entirety, not some half-baked state.
+
+Yuan Yao pointed out a use-after-free in kvm_msr_allowed() due to a
+TOCTOU bug [*], but that's just the tip of the iceberg...
+
+  - Nothing is __rcu annotated, making it nigh impossible to audit the
+    code for correctness.
+  - kvm_add_msr_filter() has an unpaired smp_wmb().  Violation of kernel
+    coding style aside, the lack of an smp_rmb() anywhere casts all code
+    into doubt.
+  - kvm_clear_msr_filter() has a double free TOCTOU bug, as it grabs
+    count before taking the lock.
+  - kvm_clear_msr_filter() also has a memory leak due to the same TOCTOU bug.
+
+The entire approach of updating the live filter is also flawed.  While
+installing a new filter is inherently racy if vCPUs are running, fixing
+the above issues also makes it trivial to ensure certain behavior is
+deterministic, e.g. KVM can provide deterministic behavior for MSRs with
+identical settings in the old and new filters.  An atomic update of the
+filter also prevents KVM from getting into a half-baked state, e.g. if
+installing a filter fails, the existing approach would leave the filter
+in a half-baked state, having already committed whatever bits of the
+filter were already processed.
+
+[*] https://lkml.kernel.org/r/20210312083157.25403-1-yaoyuan0329os@gmail.com
+
+Fixes: 1a155254ff93 ("KVM: x86: Introduce MSR filtering")
+Cc: stable@vger.kernel.org
+Cc: Alexander Graf <graf@amazon.com>
+Reported-by: Yuan Yao <yaoyuan0329os@gmail.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210316184436.2544875-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/virt/kvm/api.rst  |   6 +-
+ arch/x86/include/asm/kvm_host.h |  14 ++--
+ arch/x86/kvm/x86.c              | 109 +++++++++++++++++++-------------
+ 3 files changed, 78 insertions(+), 51 deletions(-)
+
+diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
+index 5570887a2dce..66d38520e65a 100644
+--- a/Documentation/virt/kvm/api.rst
++++ b/Documentation/virt/kvm/api.rst
+@@ -4831,8 +4831,10 @@ If an MSR access is not permitted through the filtering, it generates a
+ allows user space to deflect and potentially handle various MSR accesses
+ into user space.
+-If a vCPU is in running state while this ioctl is invoked, the vCPU may
+-experience inconsistent filtering behavior on MSR accesses.
++Note, invoking this ioctl while a vCPU is running is inherently racy.  However,
++KVM does guarantee that vCPUs will see either the previous filter or the new
++filter, e.g. MSRs with identical settings in both the old and new filter will
++have deterministic behavior.
+ 5. The kvm_run structure
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 3d6616f6f6ef..e0cfd620b293 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -894,6 +894,12 @@ enum kvm_irqchip_mode {
+       KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
+ };
++struct kvm_x86_msr_filter {
++      u8 count;
++      bool default_allow:1;
++      struct msr_bitmap_range ranges[16];
++};
++
+ #define APICV_INHIBIT_REASON_DISABLE    0
+ #define APICV_INHIBIT_REASON_HYPERV     1
+ #define APICV_INHIBIT_REASON_NESTED     2
+@@ -989,14 +995,12 @@ struct kvm_arch {
+       bool guest_can_read_msr_platform_info;
+       bool exception_payload_enabled;
++      bool bus_lock_detection_enabled;
++
+       /* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
+       u32 user_space_msr_mask;
+-      struct {
+-              u8 count;
+-              bool default_allow:1;
+-              struct msr_bitmap_range ranges[16];
+-      } msr_filter;
++      struct kvm_x86_msr_filter __rcu *msr_filter;
+       struct kvm_pmu_event_filter *pmu_event_filter;
+       struct task_struct *nx_lpage_recovery_thread;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b967c1c774a1..f37f5c1430cf 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1523,35 +1523,44 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
+ {
++      struct kvm_x86_msr_filter *msr_filter;
++      struct msr_bitmap_range *ranges;
+       struct kvm *kvm = vcpu->kvm;
+-      struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;
+-      u32 count = kvm->arch.msr_filter.count;
+-      u32 i;
+-      bool r = kvm->arch.msr_filter.default_allow;
++      bool allowed;
+       int idx;
++      u32 i;
+-      /* MSR filtering not set up or x2APIC enabled, allow everything */
+-      if (!count || (index >= 0x800 && index <= 0x8ff))
++      /* x2APIC MSRs do not support filtering. */
++      if (index >= 0x800 && index <= 0x8ff)
+               return true;
+-      /* Prevent collision with set_msr_filter */
+       idx = srcu_read_lock(&kvm->srcu);
+-      for (i = 0; i < count; i++) {
++      msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
++      if (!msr_filter) {
++              allowed = true;
++              goto out;
++      }
++
++      allowed = msr_filter->default_allow;
++      ranges = msr_filter->ranges;
++
++      for (i = 0; i < msr_filter->count; i++) {
+               u32 start = ranges[i].base;
+               u32 end = start + ranges[i].nmsrs;
+               u32 flags = ranges[i].flags;
+               unsigned long *bitmap = ranges[i].bitmap;
+               if ((index >= start) && (index < end) && (flags & type)) {
+-                      r = !!test_bit(index - start, bitmap);
++                      allowed = !!test_bit(index - start, bitmap);
+                       break;
+               }
+       }
++out:
+       srcu_read_unlock(&kvm->srcu, idx);
+-      return r;
++      return allowed;
+ }
+ EXPORT_SYMBOL_GPL(kvm_msr_allowed);
+@@ -5315,25 +5324,34 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+       return r;
+ }
+-static void kvm_clear_msr_filter(struct kvm *kvm)
++static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
++{
++      struct kvm_x86_msr_filter *msr_filter;
++
++      msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT);
++      if (!msr_filter)
++              return NULL;
++
++      msr_filter->default_allow = default_allow;
++      return msr_filter;
++}
++
++static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
+ {
+       u32 i;
+-      u32 count = kvm->arch.msr_filter.count;
+-      struct msr_bitmap_range ranges[16];
+-      mutex_lock(&kvm->lock);
+-      kvm->arch.msr_filter.count = 0;
+-      memcpy(ranges, kvm->arch.msr_filter.ranges, count * sizeof(ranges[0]));
+-      mutex_unlock(&kvm->lock);
+-      synchronize_srcu(&kvm->srcu);
++      if (!msr_filter)
++              return;
++
++      for (i = 0; i < msr_filter->count; i++)
++              kfree(msr_filter->ranges[i].bitmap);
+-      for (i = 0; i < count; i++)
+-              kfree(ranges[i].bitmap);
++      kfree(msr_filter);
+ }
+-static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user_range)
++static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
++                            struct kvm_msr_filter_range *user_range)
+ {
+-      struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;
+       struct msr_bitmap_range range;
+       unsigned long *bitmap = NULL;
+       size_t bitmap_size;
+@@ -5367,11 +5385,9 @@ static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user
+               goto err;
+       }
+-      /* Everything ok, add this range identifier to our global pool */
+-      ranges[kvm->arch.msr_filter.count] = range;
+-      /* Make sure we filled the array before we tell anyone to walk it */
+-      smp_wmb();
+-      kvm->arch.msr_filter.count++;
++      /* Everything ok, add this range identifier. */
++      msr_filter->ranges[msr_filter->count] = range;
++      msr_filter->count++;
+       return 0;
+ err:
+@@ -5382,10 +5398,11 @@ static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user
+ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
+ {
+       struct kvm_msr_filter __user *user_msr_filter = argp;
++      struct kvm_x86_msr_filter *new_filter, *old_filter;
+       struct kvm_msr_filter filter;
+       bool default_allow;
+-      int r = 0;
+       bool empty = true;
++      int r = 0;
+       u32 i;
+       if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
+@@ -5398,25 +5415,32 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
+       if (empty && !default_allow)
+               return -EINVAL;
+-      kvm_clear_msr_filter(kvm);
+-
+-      kvm->arch.msr_filter.default_allow = default_allow;
++      new_filter = kvm_alloc_msr_filter(default_allow);
++      if (!new_filter)
++              return -ENOMEM;
+-      /*
+-       * Protect from concurrent calls to this function that could trigger
+-       * a TOCTOU violation on kvm->arch.msr_filter.count.
+-       */
+-      mutex_lock(&kvm->lock);
+       for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
+-              r = kvm_add_msr_filter(kvm, &filter.ranges[i]);
+-              if (r)
+-                      break;
++              r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
++              if (r) {
++                      kvm_free_msr_filter(new_filter);
++                      return r;
++              }
+       }
++      mutex_lock(&kvm->lock);
++
++      /* The per-VM filter is protected by kvm->lock... */
++      old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1);
++
++      rcu_assign_pointer(kvm->arch.msr_filter, new_filter);
++      synchronize_srcu(&kvm->srcu);
++
++      kvm_free_msr_filter(old_filter);
++
+       kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
+       mutex_unlock(&kvm->lock);
+-      return r;
++      return 0;
+ }
+ long kvm_arch_vm_ioctl(struct file *filp,
+@@ -10536,8 +10560,6 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm)
+ void kvm_arch_destroy_vm(struct kvm *kvm)
+ {
+-      u32 i;
+-
+       if (current->mm == kvm->mm) {
+               /*
+                * Free memory regions allocated on behalf of userspace,
+@@ -10554,8 +10576,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
+       }
+       if (kvm_x86_ops.vm_destroy)
+               kvm_x86_ops.vm_destroy(kvm);
+-      for (i = 0; i < kvm->arch.msr_filter.count; i++)
+-              kfree(kvm->arch.msr_filter.ranges[i].bitmap);
++      kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
+       kvm_pic_destroy(kvm);
+       kvm_ioapic_destroy(kvm);
+       kvm_free_vcpus(kvm);
+-- 
+2.30.1
+
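
The fix is the classic SRCU publish-and-wait pattern, the same approach KVM
uses for memslots: build the new object in private, swap the pointer under the
lock, then synchronize before freeing the old copy. A minimal standalone
sketch of that pattern (names are illustrative, not KVM's; the patch itself
reuses kvm->srcu and kvm->lock):

  struct filter {
          u32 count;
          /* ... bitmap ranges ... */
  };

  static struct filter __rcu *active_filter;
  static DEFINE_MUTEX(update_lock);
  DEFINE_STATIC_SRCU(filter_srcu);

  /* Writer: publish the fully built filter, wait out readers, free old. */
  static void install_filter(struct filter *new)
  {
          struct filter *old;

          mutex_lock(&update_lock);
          old = rcu_dereference_protected(active_filter,
                                          lockdep_is_held(&update_lock));
          rcu_assign_pointer(active_filter, new);
          mutex_unlock(&update_lock);

          synchronize_srcu(&filter_srcu);  /* no reader still sees 'old' */
          kfree(old);
  }

  /* Reader: sees either the old filter or the new one, never a mix. */
  static bool filter_allows(u32 msr)
  {
          struct filter *f;
          bool allowed = true;
          int idx;

          idx = srcu_read_lock(&filter_srcu);
          f = srcu_dereference(active_filter, &filter_srcu);
          if (f)
                  allowed = check_ranges(f, msr);  /* hypothetical helper */
          srcu_read_unlock(&filter_srcu, idx);
          return allowed;
  }
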
diff --git a/queue-5.11/series b/queue-5.11/series
index db8d177b3f1e8a2de6c56280bf780897b634e1a7..e582a1662bbac72cd8717b8b1510c8665ccf8743 100644
@@ -59,3 +59,8 @@ io_uring-cancel-deferred-requests-in-try_cancel.patch
 mm-fork-clear-pasid-for-new-mm.patch
 ia64-fix-ia64_syscall_get_set_arguments-for-break-ba.patch
 ia64-fix-ptrace-ptrace_syscall_info_exit-sign.patch
+static_call-pull-some-static_call-declarations-to-th.patch
+static_call-allow-module-use-without-exposing-static.patch
+static_call-fix-the-module-key-fixup.patch
+static_call-fix-static_call_set_init.patch
+kvm-x86-protect-userspace-msr-filter-with-srcu-and-s.patch
diff --git a/queue-5.11/static_call-allow-module-use-without-exposing-static.patch b/queue-5.11/static_call-allow-module-use-without-exposing-static.patch
new file mode 100644
index 0000000..65ad5eb
--- /dev/null
+++ b/queue-5.11/static_call-allow-module-use-without-exposing-static.patch
@@ -0,0 +1,342 @@
+From d2a896a73298a38e0be4fe11d15a3fb0c2acc934 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Jan 2021 17:18:37 -0600
+Subject: static_call: Allow module use without exposing static_call_key
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+[ Upstream commit 73f44fe19d359635a607e8e8daa0da4001c1cfc2 ]
+
+When exporting static_call_key with EXPORT_STATIC_CALL*(), the module
+can use static_call_update() to change the function called.  This is
+not desirable in general.
+
+Not exporting static_call_key however also disallows usage of
+static_call(), since objtool needs the key to construct the
+static_call_site.
+
+Solve this by allowing objtool to create the static_call_site using
+the trampoline address when it builds a module and cannot find the
+static_call_key symbol. The module loader will then try to map the
+trampoline back to a key before it constructs the normal sites list.
+
+Doing this requires a trampoline -> key association, so add another
+magic section that keeps those.
+
+Originally-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lkml.kernel.org/r/20210127231837.ifddpn7rhwdaepiu@treble
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/static_call.h      |  7 ++++
+ include/asm-generic/vmlinux.lds.h       |  5 ++-
+ include/linux/static_call.h             | 22 +++++++++-
+ include/linux/static_call_types.h       | 27 +++++++++++-
+ kernel/static_call.c                    | 55 ++++++++++++++++++++++++-
+ tools/include/linux/static_call_types.h | 27 +++++++++++-
+ tools/objtool/check.c                   | 17 +++++++-
+ 7 files changed, 149 insertions(+), 11 deletions(-)
+
+diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
+index c37f11999d0c..cbb67b6030f9 100644
+--- a/arch/x86/include/asm/static_call.h
++++ b/arch/x86/include/asm/static_call.h
+@@ -37,4 +37,11 @@
+ #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)                      \
+       __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop")
++
++#define ARCH_ADD_TRAMP_KEY(name)                                      \
++      asm(".pushsection .static_call_tramp_key, \"a\"         \n"     \
++          ".long " STATIC_CALL_TRAMP_STR(name) " - .          \n"     \
++          ".long " STATIC_CALL_KEY_STR(name) " - .            \n"     \
++          ".popsection                                        \n")
++
+ #endif /* _ASM_STATIC_CALL_H */
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 34d8287cd774..d7efbc5490e8 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -393,7 +393,10 @@
+       . = ALIGN(8);                                                   \
+       __start_static_call_sites = .;                                  \
+       KEEP(*(.static_call_sites))                                     \
+-      __stop_static_call_sites = .;
++      __stop_static_call_sites = .;                                   \
++      __start_static_call_tramp_key = .;                              \
++      KEEP(*(.static_call_tramp_key))                                 \
++      __stop_static_call_tramp_key = .;
+ /*
+  * Allow architectures to handle ro_after_init data on their
+diff --git a/include/linux/static_call.h b/include/linux/static_call.h
+index a2c064585c03..04e6042d252d 100644
+--- a/include/linux/static_call.h
++++ b/include/linux/static_call.h
+@@ -138,6 +138,12 @@ struct static_call_key {
+       };
+ };
++/* For finding the key associated with a trampoline */
++struct static_call_tramp_key {
++      s32 tramp;
++      s32 key;
++};
++
+ extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
+ extern int static_call_mod_init(struct module *mod);
+ extern int static_call_text_reserved(void *start, void *end);
+@@ -163,11 +169,18 @@ extern int static_call_text_reserved(void *start, void *end);
+ #define EXPORT_STATIC_CALL(name)                                      \
+       EXPORT_SYMBOL(STATIC_CALL_KEY(name));                           \
+       EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
+-
+ #define EXPORT_STATIC_CALL_GPL(name)                                  \
+       EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));                       \
+       EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
++/* Leave the key unexported, so modules can't change static call targets: */
++#define EXPORT_STATIC_CALL_TRAMP(name)                                        \
++      EXPORT_SYMBOL(STATIC_CALL_TRAMP(name));                         \
++      ARCH_ADD_TRAMP_KEY(name)
++#define EXPORT_STATIC_CALL_TRAMP_GPL(name)                            \
++      EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name));                     \
++      ARCH_ADD_TRAMP_KEY(name)
++
+ #elif defined(CONFIG_HAVE_STATIC_CALL)
+ static inline int static_call_init(void) { return 0; }
+@@ -209,11 +222,16 @@ static inline int static_call_text_reserved(void *start, void *end)
+ #define EXPORT_STATIC_CALL(name)                                      \
+       EXPORT_SYMBOL(STATIC_CALL_KEY(name));                           \
+       EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
+-
+ #define EXPORT_STATIC_CALL_GPL(name)                                  \
+       EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));                       \
+       EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
++/* Leave the key unexported, so modules can't change static call targets: */
++#define EXPORT_STATIC_CALL_TRAMP(name)                                        \
++      EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
++#define EXPORT_STATIC_CALL_TRAMP_GPL(name)                            \
++      EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
++
+ #else /* Generic implementation */
+ static inline int static_call_init(void) { return 0; }
+diff --git a/include/linux/static_call_types.h b/include/linux/static_call_types.h
+index 08f78b1b88b4..ae5662d368b9 100644
+--- a/include/linux/static_call_types.h
++++ b/include/linux/static_call_types.h
+@@ -10,6 +10,7 @@
+ #define STATIC_CALL_KEY_PREFIX_STR    __stringify(STATIC_CALL_KEY_PREFIX)
+ #define STATIC_CALL_KEY_PREFIX_LEN    (sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
+ #define STATIC_CALL_KEY(name)         __PASTE(STATIC_CALL_KEY_PREFIX, name)
++#define STATIC_CALL_KEY_STR(name)     __stringify(STATIC_CALL_KEY(name))
+ #define STATIC_CALL_TRAMP_PREFIX      __SCT__
+ #define STATIC_CALL_TRAMP_PREFIX_STR  __stringify(STATIC_CALL_TRAMP_PREFIX)
+@@ -39,17 +40,39 @@ struct static_call_site {
+ #ifdef CONFIG_HAVE_STATIC_CALL
++#define __raw_static_call(name)       (&STATIC_CALL_TRAMP(name))
++
++#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
++
+ /*
+  * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
+  * the symbol table so that objtool can reference it when it generates the
+  * .static_call_sites section.
+  */
++#define __STATIC_CALL_ADDRESSABLE(name) \
++      __ADDRESSABLE(STATIC_CALL_KEY(name))
++
+ #define __static_call(name)                                           \
+ ({                                                                    \
+-      __ADDRESSABLE(STATIC_CALL_KEY(name));                           \
+-      &STATIC_CALL_TRAMP(name);                                       \
++      __STATIC_CALL_ADDRESSABLE(name);                                \
++      __raw_static_call(name);                                        \
+ })
++#else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
++
++#define __STATIC_CALL_ADDRESSABLE(name)
++#define __static_call(name)   __raw_static_call(name)
++
++#endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
++
++#ifdef MODULE
++#define __STATIC_CALL_MOD_ADDRESSABLE(name)
++#define static_call_mod(name) __raw_static_call(name)
++#else
++#define __STATIC_CALL_MOD_ADDRESSABLE(name) __STATIC_CALL_ADDRESSABLE(name)
++#define static_call_mod(name) __static_call(name)
++#endif
++
+ #define static_call(name)     __static_call(name)
+ #else
+diff --git a/kernel/static_call.c b/kernel/static_call.c
+index db914da6e785..db64c2331a32 100644
+--- a/kernel/static_call.c
++++ b/kernel/static_call.c
+@@ -12,6 +12,8 @@
+ extern struct static_call_site __start_static_call_sites[],
+                              __stop_static_call_sites[];
++extern struct static_call_tramp_key __start_static_call_tramp_key[],
++                                  __stop_static_call_tramp_key[];
+ static bool static_call_initialized;
+@@ -332,10 +334,59 @@ static int __static_call_mod_text_reserved(void *start, void *end)
+       return ret;
+ }
++static unsigned long tramp_key_lookup(unsigned long addr)
++{
++      struct static_call_tramp_key *start = __start_static_call_tramp_key;
++      struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
++      struct static_call_tramp_key *tramp_key;
++
++      for (tramp_key = start; tramp_key != stop; tramp_key++) {
++              unsigned long tramp;
++
++              tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
++              if (tramp == addr)
++                      return (long)tramp_key->key + (long)&tramp_key->key;
++      }
++
++      return 0;
++}
++
+ static int static_call_add_module(struct module *mod)
+ {
+-      return __static_call_init(mod, mod->static_call_sites,
+-                                mod->static_call_sites + mod->num_static_call_sites);
++      struct static_call_site *start = mod->static_call_sites;
++      struct static_call_site *stop = start + mod->num_static_call_sites;
++      struct static_call_site *site;
++
++      for (site = start; site != stop; site++) {
++              unsigned long addr = (unsigned long)static_call_key(site);
++              unsigned long key;
++
++              /*
+               * If the key is exported, 'addr' points to the key, which
++               * means modules are allowed to call static_call_update() on
++               * it.
++               *
++               * Otherwise, the key isn't exported, and 'addr' points to the
++               * trampoline so we need to lookup the key.
++               *
++               * We go through this dance to prevent crazy modules from
++               * abusing sensitive static calls.
++               */
++              if (!kernel_text_address(addr))
++                      continue;
++
++              key = tramp_key_lookup(addr);
++              if (!key) {
++                      pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
++                              static_call_addr(site));
++                      return -EINVAL;
++              }
++
++              site->key = (key - (long)&site->key) |
++                          (site->key & STATIC_CALL_SITE_FLAGS);
++      }
++
++      return __static_call_init(mod, start, stop);
+ }
+ static void static_call_del_module(struct module *mod)
+diff --git a/tools/include/linux/static_call_types.h b/tools/include/linux/static_call_types.h
+index 08f78b1b88b4..ae5662d368b9 100644
+--- a/tools/include/linux/static_call_types.h
++++ b/tools/include/linux/static_call_types.h
+@@ -10,6 +10,7 @@
+ #define STATIC_CALL_KEY_PREFIX_STR    __stringify(STATIC_CALL_KEY_PREFIX)
+ #define STATIC_CALL_KEY_PREFIX_LEN    (sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
+ #define STATIC_CALL_KEY(name)         __PASTE(STATIC_CALL_KEY_PREFIX, name)
++#define STATIC_CALL_KEY_STR(name)     __stringify(STATIC_CALL_KEY(name))
+ #define STATIC_CALL_TRAMP_PREFIX      __SCT__
+ #define STATIC_CALL_TRAMP_PREFIX_STR  __stringify(STATIC_CALL_TRAMP_PREFIX)
+@@ -39,17 +40,39 @@ struct static_call_site {
+ #ifdef CONFIG_HAVE_STATIC_CALL
++#define __raw_static_call(name)       (&STATIC_CALL_TRAMP(name))
++
++#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
++
+ /*
+  * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
+  * the symbol table so that objtool can reference it when it generates the
+  * .static_call_sites section.
+  */
++#define __STATIC_CALL_ADDRESSABLE(name) \
++      __ADDRESSABLE(STATIC_CALL_KEY(name))
++
+ #define __static_call(name)                                           \
+ ({                                                                    \
+-      __ADDRESSABLE(STATIC_CALL_KEY(name));                           \
+-      &STATIC_CALL_TRAMP(name);                                       \
++      __STATIC_CALL_ADDRESSABLE(name);                                \
++      __raw_static_call(name);                                        \
+ })
++#else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
++
++#define __STATIC_CALL_ADDRESSABLE(name)
++#define __static_call(name)   __raw_static_call(name)
++
++#endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
++
++#ifdef MODULE
++#define __STATIC_CALL_MOD_ADDRESSABLE(name)
++#define static_call_mod(name) __raw_static_call(name)
++#else
++#define __STATIC_CALL_MOD_ADDRESSABLE(name) __STATIC_CALL_ADDRESSABLE(name)
++#define static_call_mod(name) __static_call(name)
++#endif
++
+ #define static_call(name)     __static_call(name)
+ #else
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index dc24aac08edd..5c83f73ad668 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -502,8 +502,21 @@ static int create_static_call_sections(struct objtool_file *file)
+               key_sym = find_symbol_by_name(file->elf, tmp);
+               if (!key_sym) {
+-                      WARN("static_call: can't find static_call_key symbol: %s", tmp);
+-                      return -1;
++                      if (!module) {
++                              WARN("static_call: can't find static_call_key symbol: %s", tmp);
++                              return -1;
++                      }
++
++                      /*
+                       * For modules, the key might not be exported, which
++                       * means the module can make static calls but isn't
++                       * allowed to change them.
++                       *
++                       * In that case we temporarily set the key to be the
++                       * trampoline address.  This is fixed up in
++                       * static_call_add_module().
++                       */
++                      key_sym = insn->call_dest;
+               }
+               free(key_name);
+-- 
+2.30.1
+
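
Both fields of the new .static_call_tramp_key entries are emitted as
".long sym - .", i.e. 32-bit self-relative offsets, which keeps the section
small and position-independent. Decoding one is always "stored value plus the
address of the field itself", which tramp_key_lookup() above open-codes; a
sketch, with an illustrative helper name:

  /* A ".long sym - ." entry holds sym's address relative to the entry's
   * own location; recover the absolute address by adding them back. */
  static inline unsigned long deref_rel(const s32 *field)
  {
          return (unsigned long)((long)*field + (long)field);
  }

  /* e.g. in a lookup loop over the section:
   *   tramp = deref_rel(&tramp_key->tramp);
   *   key   = deref_rel(&tramp_key->key);
   */
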
diff --git a/queue-5.11/static_call-fix-static_call_set_init.patch b/queue-5.11/static_call-fix-static_call_set_init.patch
new file mode 100644
index 0000000..4b6446a
--- /dev/null
+++ b/queue-5.11/static_call-fix-static_call_set_init.patch
@@ -0,0 +1,84 @@
+From 37fc5d19d9d9ef0c451f4ca07da65cc850042bb5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Mar 2021 11:27:19 +0100
+Subject: static_call: Fix static_call_set_init()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 68b1eddd421d2b16c6655eceb48918a1e896bbbc ]
+
+It turns out that static_call_set_init() does not preserve the other
+flags; IOW, it clears TAIL if it was set.
+
+Fixes: 9183c3f9ed710 ("static_call: Add inline static call infrastructure")
+Reported-by: Sumit Garg <sumit.garg@linaro.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Jarkko Sakkinen <jarkko@kernel.org>
+Tested-by: Sumit Garg <sumit.garg@linaro.org>
+Link: https://lkml.kernel.org/r/20210318113610.519406371@infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/static_call.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/static_call.c b/kernel/static_call.c
+index 5d53c354fbe7..49efbdc5b480 100644
+--- a/kernel/static_call.c
++++ b/kernel/static_call.c
+@@ -35,27 +35,30 @@ static inline void *static_call_addr(struct static_call_site *site)
+       return (void *)((long)site->addr + (long)&site->addr);
+ }
++static inline unsigned long __static_call_key(const struct static_call_site *site)
++{
++      return (long)site->key + (long)&site->key;
++}
+ static inline struct static_call_key *static_call_key(const struct static_call_site *site)
+ {
+-      return (struct static_call_key *)
+-              (((long)site->key + (long)&site->key) & ~STATIC_CALL_SITE_FLAGS);
++      return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
+ }
+ /* These assume the key is word-aligned. */
+ static inline bool static_call_is_init(struct static_call_site *site)
+ {
+-      return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_INIT;
++      return __static_call_key(site) & STATIC_CALL_SITE_INIT;
+ }
+ static inline bool static_call_is_tail(struct static_call_site *site)
+ {
+-      return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_TAIL;
++      return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
+ }
+ static inline void static_call_set_init(struct static_call_site *site)
+ {
+-      site->key = ((long)static_call_key(site) | STATIC_CALL_SITE_INIT) -
++      site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
+                   (long)&site->key;
+ }
+@@ -199,7 +202,7 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+                       }
+                       arch_static_call_transform(site_addr, NULL, func,
+-                              static_call_is_tail(site));
++                                                 static_call_is_tail(site));
+               }
+       }
+@@ -358,7 +361,7 @@ static int static_call_add_module(struct module *mod)
+       struct static_call_site *site;
+       for (site = start; site != stop; site++) {
+-              unsigned long s_key = (long)site->key + (long)&site->key;
++              unsigned long s_key = __static_call_key(site);
+               unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
+               unsigned long key;
+-- 
+2.30.1
+
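
For background on the bug: site->key is a self-relative value whose low bits
double as flag bits (STATIC_CALL_SITE_INIT, STATIC_CALL_SITE_TAIL), and the
old static_call_set_init() rebuilt the value from static_call_key(), which
strips all flags, so setting INIT silently dropped TAIL. A toy illustration
(macro names are illustrative):

  #define SITE_INIT  1UL
  #define SITE_TAIL  2UL
  #define SITE_FLAGS (SITE_INIT | SITE_TAIL)

  /* Buggy: rebuilds from the flag-stripped value, so TAIL is lost. */
  tagged = (tagged & ~SITE_FLAGS) | SITE_INIT;

  /* Fixed: add INIT on top of whatever flags are already set. */
  tagged |= SITE_INIT;
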
diff --git a/queue-5.11/static_call-fix-the-module-key-fixup.patch b/queue-5.11/static_call-fix-the-module-key-fixup.patch
new file mode 100644
index 0000000..fdb43d6
--- /dev/null
+++ b/queue-5.11/static_call-fix-the-module-key-fixup.patch
@@ -0,0 +1,55 @@
+From 26ec14f63311a57ecb8b92d96015c9ee0ec7f82e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 Feb 2021 23:03:51 +0100
+Subject: static_call: Fix the module key fixup
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 50bf8080a94d171e843fc013abec19d8ab9f50ae ]
+
+Provided the target address of an R_X86_64_PC32 relocation is aligned,
+the low two bits should be invariant between the relative and absolute
+value.
+
+Turns out the address is not aligned and things go sideways; ensure we
+transfer the bits in the absolute form when fixing up the key address.
+
+Fixes: 73f44fe19d35 ("static_call: Allow module use without exposing static_call_key")
+Reported-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Link: https://lkml.kernel.org/r/20210225220351.GE4746@worktop.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/static_call.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/static_call.c b/kernel/static_call.c
+index db64c2331a32..5d53c354fbe7 100644
+--- a/kernel/static_call.c
++++ b/kernel/static_call.c
+@@ -358,7 +358,8 @@ static int static_call_add_module(struct module *mod)
+       struct static_call_site *site;
+       for (site = start; site != stop; site++) {
+-              unsigned long addr = (unsigned long)static_call_key(site);
++              unsigned long s_key = (long)site->key + (long)&site->key;
++              unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
+               unsigned long key;
+               /*
+@@ -382,8 +383,8 @@ static int static_call_add_module(struct module *mod)
+                       return -EINVAL;
+               }
+-              site->key = (key - (long)&site->key) |
+-                          (site->key & STATIC_CALL_SITE_FLAGS);
++              key |= s_key & STATIC_CALL_SITE_FLAGS;
++              site->key = key - (long)&site->key;
+       }
+       return __static_call_init(mod, start, stop);
+-- 
+2.30.1
+
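
In other words: the stored site->key is "absolute key address, plus flag bits,
minus &site->key". The buggy fixup OR'ed the flag bits into the already
relative difference, which is only safe when the low two bits of that
difference are guaranteed clear, i.e. when the target address is suitably
aligned; it is not. The fix applies the flags in absolute form and goes
relative last. An annotated restatement of the fixed sequence:

  s_key = (long)site->key + (long)&site->key;  /* absolute value + flags */
  addr  = s_key & ~STATIC_CALL_SITE_FLAGS;     /* plain target address   */
  key   = tramp_key_lookup(addr);              /* absolute key address   */
  key  |= s_key & STATIC_CALL_SITE_FLAGS;      /* re-apply flags (abs)   */
  site->key = key - (long)&site->key;          /* store self-relative    */
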
diff --git a/queue-5.11/static_call-pull-some-static_call-declarations-to-th.patch b/queue-5.11/static_call-pull-some-static_call-declarations-to-th.patch
new file mode 100644
index 0000000..80c1420
--- /dev/null
+++ b/queue-5.11/static_call-pull-some-static_call-declarations-to-th.patch
@@ -0,0 +1,171 @@
+From ff2ca082300c35b042b1954ee28fd357177ca591 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Jan 2021 15:12:18 +0100
+Subject: static_call: Pull some static_call declarations to the type headers
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 880cfed3a012d7863f42251791cea7fe78c39390 ]
+
+Some static call declarations are going to be needed on low level header
+files. Move the necessary material to the dedicated static call types
+header to avoid inclusion dependency hell.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lkml.kernel.org/r/20210118141223.123667-4-frederic@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/static_call.h             | 21 -------------------
+ include/linux/static_call_types.h       | 27 +++++++++++++++++++++++++
+ tools/include/linux/static_call_types.h | 27 +++++++++++++++++++++++++
+ 3 files changed, 54 insertions(+), 21 deletions(-)
+
+diff --git a/include/linux/static_call.h b/include/linux/static_call.h
+index 695da4c9b338..a2c064585c03 100644
+--- a/include/linux/static_call.h
++++ b/include/linux/static_call.h
+@@ -107,26 +107,10 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool
+ #define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)
+-/*
+- * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
+- * the symbol table so that objtool can reference it when it generates the
+- * .static_call_sites section.
+- */
+-#define __static_call(name)                                           \
+-({                                                                    \
+-      __ADDRESSABLE(STATIC_CALL_KEY(name));                           \
+-      &STATIC_CALL_TRAMP(name);                                       \
+-})
+-
+ #else
+ #define STATIC_CALL_TRAMP_ADDR(name) NULL
+ #endif
+-
+-#define DECLARE_STATIC_CALL(name, func)                                       \
+-      extern struct static_call_key STATIC_CALL_KEY(name);            \
+-      extern typeof(func) STATIC_CALL_TRAMP(name);
+-
+ #define static_call_update(name, func)                                        \
+ ({                                                                    \
+       BUILD_BUG_ON(!__same_type(*(func), STATIC_CALL_TRAMP(name)));   \
+@@ -174,7 +158,6 @@ extern int static_call_text_reserved(void *start, void *end);
+       };                                                              \
+       ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+-#define static_call(name)     __static_call(name)
+ #define static_call_cond(name)        (void)__static_call(name)
+ #define EXPORT_STATIC_CALL(name)                                      \
+@@ -207,7 +190,6 @@ struct static_call_key {
+       };                                                              \
+       ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+-#define static_call(name)     __static_call(name)
+ #define static_call_cond(name)        (void)__static_call(name)
+ static inline
+@@ -252,9 +234,6 @@ struct static_call_key {
+               .func = NULL,                                           \
+       }
+-#define static_call(name)                                             \
+-      ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
+-
+ static inline void __static_call_nop(void) { }
+ /*
+diff --git a/include/linux/static_call_types.h b/include/linux/static_call_types.h
+index 89135bb35bf7..08f78b1b88b4 100644
+--- a/include/linux/static_call_types.h
++++ b/include/linux/static_call_types.h
+@@ -4,6 +4,7 @@
+ #include <linux/types.h>
+ #include <linux/stringify.h>
++#include <linux/compiler.h>
+ #define STATIC_CALL_KEY_PREFIX                __SCK__
+ #define STATIC_CALL_KEY_PREFIX_STR    __stringify(STATIC_CALL_KEY_PREFIX)
+@@ -32,4 +33,30 @@ struct static_call_site {
+       s32 key;
+ };
++#define DECLARE_STATIC_CALL(name, func)                                       \
++      extern struct static_call_key STATIC_CALL_KEY(name);            \
++      extern typeof(func) STATIC_CALL_TRAMP(name);
++
++#ifdef CONFIG_HAVE_STATIC_CALL
++
++/*
++ * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
++ * the symbol table so that objtool can reference it when it generates the
++ * .static_call_sites section.
++ */
++#define __static_call(name)                                           \
++({                                                                    \
++      __ADDRESSABLE(STATIC_CALL_KEY(name));                           \
++      &STATIC_CALL_TRAMP(name);                                       \
++})
++
++#define static_call(name)     __static_call(name)
++
++#else
++
++#define static_call(name)                                             \
++      ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
++
++#endif /* CONFIG_HAVE_STATIC_CALL */
++
+ #endif /* _STATIC_CALL_TYPES_H */
+diff --git a/tools/include/linux/static_call_types.h b/tools/include/linux/static_call_types.h
+index 89135bb35bf7..08f78b1b88b4 100644
+--- a/tools/include/linux/static_call_types.h
++++ b/tools/include/linux/static_call_types.h
+@@ -4,6 +4,7 @@
+ #include <linux/types.h>
+ #include <linux/stringify.h>
++#include <linux/compiler.h>
+ #define STATIC_CALL_KEY_PREFIX                __SCK__
+ #define STATIC_CALL_KEY_PREFIX_STR    __stringify(STATIC_CALL_KEY_PREFIX)
+@@ -32,4 +33,30 @@ struct static_call_site {
+       s32 key;
+ };
++#define DECLARE_STATIC_CALL(name, func)                                       \
++      extern struct static_call_key STATIC_CALL_KEY(name);            \
++      extern typeof(func) STATIC_CALL_TRAMP(name);
++
++#ifdef CONFIG_HAVE_STATIC_CALL
++
++/*
++ * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
++ * the symbol table so that objtool can reference it when it generates the
++ * .static_call_sites section.
++ */
++#define __static_call(name)                                           \
++({                                                                    \
++      __ADDRESSABLE(STATIC_CALL_KEY(name));                           \
++      &STATIC_CALL_TRAMP(name);                                       \
++})
++
++#define static_call(name)     __static_call(name)
++
++#else
++
++#define static_call(name)                                             \
++      ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
++
++#endif /* CONFIG_HAVE_STATIC_CALL */
++
+ #endif /* _STATIC_CALL_TYPES_H */
+-- 
+2.30.1
+
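
For context, DECLARE_STATIC_CALL() plus the __static_call()/static_call()
plumbing now live in static_call_types.h, so a low-level header can declare
and invoke a static call without pulling in linux/static_call.h. A hedged
usage sketch (all names hypothetical):

  /* some_header.h -- needs only <linux/static_call_types.h> after this: */
  int default_hook(int x);
  DECLARE_STATIC_CALL(my_hook, default_hook);

  /* some_file.c -- defining and retargeting still need the full header: */
  int default_hook(int x) { return x; }
  int other_hook(int x)   { return x + 1; }
  DEFINE_STATIC_CALL(my_hook, default_hook);

  /* Call site -- compiles down to a direct call where supported: */
  int caller(void)
  {
          static_call_update(my_hook, other_hook);
          return static_call(my_hook)(42);
  }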