5.15-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 2 May 2022 23:05:33 +0000 (01:05 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 2 May 2022 23:05:33 +0000 (01:05 +0200)
added patches:
perf-symbol-pass-is_kallsyms-to-symbols__fixup_end.patch
perf-symbol-update-symbols__fixup_end.patch
x86-cpu-load-microcode-during-restore_processor_state.patch

queue-5.15/perf-symbol-pass-is_kallsyms-to-symbols__fixup_end.patch [new file with mode: 0644]
queue-5.15/perf-symbol-update-symbols__fixup_end.patch [new file with mode: 0644]
queue-5.15/series
queue-5.15/x86-cpu-load-microcode-during-restore_processor_state.patch [new file with mode: 0644]

diff --git a/queue-5.15/perf-symbol-pass-is_kallsyms-to-symbols__fixup_end.patch b/queue-5.15/perf-symbol-pass-is_kallsyms-to-symbols__fixup_end.patch
new file mode 100644
index 0000000..4a5dcb9
--- /dev/null
@@ -0,0 +1,99 @@
+From 838425f2defe5262906b698752d28fd2fca1aac2 Mon Sep 17 00:00:00 2001
+From: Namhyung Kim <namhyung@kernel.org>
+Date: Fri, 15 Apr 2022 17:40:46 -0700
+Subject: perf symbol: Pass is_kallsyms to symbols__fixup_end()
+
+From: Namhyung Kim <namhyung@kernel.org>
+
+commit 838425f2defe5262906b698752d28fd2fca1aac2 upstream.
+
+The symbol fixup is necessary for symbols in kallsyms since they don't
+carry size info, so we use the next symbol's address to calculate the
+size.  Now it's also used for user binaries because they sometimes
+lack sizes for hand-written asm functions.
+
+There's an arch-specific function to handle kallsyms differently, but
+currently it cannot distinguish kallsyms from other sources.  Pass this
+information explicitly so it can be handled properly.  Note that those
+arch functions will be moved into the generic function, so I didn't add
+the flag to the arch functions.
+
+Fixes: 3cf6a32f3f2a4594 ("perf symbols: Fix symbol size calculation condition")
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Acked-by: Ian Rogers <irogers@google.com>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: John Garry <john.garry@huawei.com>
+Cc: Leo Yan <leo.yan@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Michael Petlan <mpetlan@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Song Liu <songliubraving@fb.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: linux-s390@vger.kernel.org
+Cc: linuxppc-dev@lists.ozlabs.org
+Link: https://lore.kernel.org/r/20220416004048.1514900-2-namhyung@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/util/symbol-elf.c |    2 +-
+ tools/perf/util/symbol.c     |    7 ++++---
+ tools/perf/util/symbol.h     |    2 +-
+ 3 files changed, 6 insertions(+), 5 deletions(-)
+
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -1290,7 +1290,7 @@ dso__load_sym_internal(struct dso *dso,
+        * For misannotated, zeroed, ASM function sizes.
+        */
+       if (nr > 0) {
+-              symbols__fixup_end(&dso->symbols);
++              symbols__fixup_end(&dso->symbols, false);
+               symbols__fixup_duplicate(&dso->symbols);
+               if (kmap) {
+                       /*
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -217,7 +217,8 @@ again:
+       }
+ }
+-void symbols__fixup_end(struct rb_root_cached *symbols)
++void symbols__fixup_end(struct rb_root_cached *symbols,
++                      bool is_kallsyms __maybe_unused)
+ {
+       struct rb_node *nd, *prevnd = rb_first_cached(symbols);
+       struct symbol *curr, *prev;
+@@ -1456,7 +1457,7 @@ int __dso__load_kallsyms(struct dso *dso
+       if (kallsyms__delta(kmap, filename, &delta))
+               return -1;
+-      symbols__fixup_end(&dso->symbols);
++      symbols__fixup_end(&dso->symbols, true);
+       symbols__fixup_duplicate(&dso->symbols);
+       if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
+@@ -1648,7 +1649,7 @@ int dso__load_bfd_symbols(struct dso *ds
+ #undef bfd_asymbol_section
+ #endif
+-      symbols__fixup_end(&dso->symbols);
++      symbols__fixup_end(&dso->symbols, false);
+       symbols__fixup_duplicate(&dso->symbols);
+       dso->adjust_symbols = 1;
+--- a/tools/perf/util/symbol.h
++++ b/tools/perf/util/symbol.h
+@@ -192,7 +192,7 @@ void __symbols__insert(struct rb_root_ca
+                      bool kernel);
+ void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym);
+ void symbols__fixup_duplicate(struct rb_root_cached *symbols);
+-void symbols__fixup_end(struct rb_root_cached *symbols);
++void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms);
+ void maps__fixup_end(struct maps *maps);
+ typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
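
The idea behind the fixup this patch parameterizes can be summarized in a
small, self-contained sketch (hypothetical names, not the tools/perf code):
when a symbol table carries no sizes, a zero-sized symbol's end is taken from
the start of the next symbol, and the new is_kallsyms flag lets the caller
treat kallsyms input specially.

  /*
   * Minimal sketch only -- hypothetical names, not tools/perf code.
   * Symbols are assumed to be sorted by start address; a missing size
   * is filled in from the start of the following symbol.
   */
  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>

  struct sym {
          uint64_t start;
          uint64_t end;           /* equals start when no size is known */
          const char *name;
  };

  static void fixup_symbol_ends(struct sym *syms, size_t nr, bool is_kallsyms)
  {
          for (size_t i = 0; i + 1 < nr; i++) {
                  if (syms[i].end != syms[i].start)
                          continue;       /* size already known */
                  /*
                   * A kallsyms-aware caller could special-case the gap
                   * between kernel text and modules here (as the next
                   * patch does); the plain case takes the next start.
                   */
                  (void)is_kallsyms;
                  syms[i].end = syms[i + 1].start;
          }
  }
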
diff --git a/queue-5.15/perf-symbol-update-symbols__fixup_end.patch b/queue-5.15/perf-symbol-update-symbols__fixup_end.patch
new file mode 100644
index 0000000..06f50ef
--- /dev/null
@@ -0,0 +1,85 @@
+From 8799ebce84d672aae1dc3170510f6a3e66f96b11 Mon Sep 17 00:00:00 2001
+From: Namhyung Kim <namhyung@kernel.org>
+Date: Fri, 15 Apr 2022 17:40:47 -0700
+Subject: perf symbol: Update symbols__fixup_end()
+
+From: Namhyung Kim <namhyung@kernel.org>
+
+commit 8799ebce84d672aae1dc3170510f6a3e66f96b11 upstream.
+
+Now the arch-specific functions all do the same thing.  When fixing up a
+symbol address, the boundary between the kernel image and modules must be
+checked.  The last symbol in the previous region has no known size, as the
+next entry is already discarded, so its end is simply rounded up using a
+small page size (4096), as is done for the very last symbol.
+
+Fixes: 3cf6a32f3f2a4594 ("perf symbols: Fix symbol size calculation condition")
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Acked-by: Ian Rogers <irogers@google.com>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: John Garry <john.garry@huawei.com>
+Cc: Leo Yan <leo.yan@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Michael Petlan <mpetlan@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Song Liu <songliubraving@fb.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: linux-s390@vger.kernel.org
+Cc: linuxppc-dev@lists.ozlabs.org
+Link: https://lore.kernel.org/r/20220416004048.1514900-3-namhyung@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/util/symbol.c |   29 +++++++++++++++++++++++++----
+ 1 file changed, 25 insertions(+), 4 deletions(-)
+
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -217,8 +217,8 @@ again:
+       }
+ }
+-void symbols__fixup_end(struct rb_root_cached *symbols,
+-                      bool is_kallsyms __maybe_unused)
++/* Update zero-sized symbols using the address of the next symbol */
++void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
+ {
+       struct rb_node *nd, *prevnd = rb_first_cached(symbols);
+       struct symbol *curr, *prev;
+@@ -232,8 +232,29 @@ void symbols__fixup_end(struct rb_root_c
+               prev = curr;
+               curr = rb_entry(nd, struct symbol, rb_node);
+-              if (prev->end == prev->start || prev->end != curr->start)
+-                      arch__symbols__fixup_end(prev, curr);
++              /*
++               * On some architecture kernel text segment start is located at
++               * some low memory address, while modules are located at high
++               * memory addresses (or vice versa).  The gap between end of
++               * kernel text segment and beginning of first module's text
++               * segment is very big.  Therefore do not fill this gap and do
++               * not assign it to the kernel dso map (kallsyms).
++               *
++               * In kallsyms, it determines module symbols using '[' character
++               * like in:
++               *   ffffffffc1937000 T hdmi_driver_init  [snd_hda_codec_hdmi]
++               */
++              if (prev->end == prev->start) {
++                      /* Last kernel/module symbol mapped to end of page */
++                      if (is_kallsyms && (!strchr(prev->name, '[') !=
++                                          !strchr(curr->name, '[')))
++                              prev->end = roundup(prev->end + 4096, 4096);
++                      else
++                              prev->end = curr->start;
++
++                      pr_debug4("%s sym:%s end:%#" PRIx64 "\n",
++                                __func__, prev->name, prev->end);
++              }
+       }
+       /* Last entry */
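
The boundary handling added above can be illustrated with a condensed sketch
(hypothetical names, not the perf code): in kallsyms a module symbol carries
its module name in brackets, so when exactly one of two neighbouring symbols
has a '[' in its name the pair straddles a kernel/module boundary, and the
zero-sized symbol is mapped to the end of its page instead of being stretched
across the whole gap.

  /* Sketch only -- mirrors the logic of the hunk above, hypothetical names. */
  #include <stdbool.h>
  #include <stdint.h>
  #include <string.h>

  #define SKETCH_PAGE_SIZE 4096ULL

  static uint64_t roundup_u64(uint64_t x, uint64_t align)
  {
          return ((x + align - 1) / align) * align;
  }

  /* True when exactly one of the two names has a "[module]" suffix. */
  static bool crosses_module_boundary(const char *prev, const char *curr)
  {
          return (strchr(prev, '[') != NULL) != (strchr(curr, '[') != NULL);
  }

  static uint64_t fixup_prev_end(uint64_t prev_end, uint64_t curr_start,
                                 const char *prev, const char *curr,
                                 bool is_kallsyms)
  {
          if (is_kallsyms && crosses_module_boundary(prev, curr))
                  /* last symbol of the region: map it to the end of a page */
                  return roundup_u64(prev_end + SKETCH_PAGE_SIZE, SKETCH_PAGE_SIZE);
          return curr_start;      /* normal case: end at the next symbol */
  }
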
diff --git a/queue-5.15/series b/queue-5.15/series
index 1033f92ee1ba40c62b37226ed2719a606ccaee56..03dae6bc625ea0032986d798f522136a1fb24ad1 100644
--- a/queue-5.15/series
@@ -151,3 +151,6 @@ thermal-int340x-fix-attr.show-callback-prototype.patch
 btrfs-fix-leaked-plug-after-failure-syncing-log-on-zoned-filesystems.patch
 arm-dts-at91-sama7g5ek-enable-pull-up-on-flexcom3-console-lines.patch
 arm-dts-imx8mm-venice-gw-71xx-72xx-73xx-fix-otg-controller-oc-mode.patch
+x86-cpu-load-microcode-during-restore_processor_state.patch
+perf-symbol-pass-is_kallsyms-to-symbols__fixup_end.patch
+perf-symbol-update-symbols__fixup_end.patch
diff --git a/queue-5.15/x86-cpu-load-microcode-during-restore_processor_state.patch b/queue-5.15/x86-cpu-load-microcode-during-restore_processor_state.patch
new file mode 100644
index 0000000..11613b9
--- /dev/null
@@ -0,0 +1,124 @@
+From f9e14dbbd454581061c736bf70bf5cbb15ac927c Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 19 Apr 2022 09:52:41 -0700
+Subject: x86/cpu: Load microcode during restore_processor_state()
+
+From: Borislav Petkov <bp@suse.de>
+
+commit f9e14dbbd454581061c736bf70bf5cbb15ac927c upstream.
+
+When resuming from a system sleep state, restore_processor_state()
+restores the boot CPU's MSRs.  These MSRs could be emulated by microcode.
+If the microcode is not loaded yet, writing to emulated MSRs leads to an
+unchecked MSR access error:
+
+  ...
+  PM: Calling lapic_suspend+0x0/0x210
+  unchecked MSR access error: WRMSR to 0x10f (tried to write 0x0...0) at rIP: ... (native_write_msr)
+  Call Trace:
+    <TASK>
+    ? restore_processor_state
+    x86_acpi_suspend_lowlevel
+    acpi_suspend_enter
+    suspend_devices_and_enter
+    pm_suspend.cold
+    state_store
+    kobj_attr_store
+    sysfs_kf_write
+    kernfs_fop_write_iter
+    new_sync_write
+    vfs_write
+    ksys_write
+    __x64_sys_write
+    do_syscall_64
+    entry_SYSCALL_64_after_hwframe
+   RIP: 0033:0x7fda13c260a7
+
+To ensure that microcode-emulated MSRs are available for restoration,
+load the microcode on the boot CPU before restoring these MSRs.
+
+  [ Pawan: write commit message and productize it. ]
+
+Fixes: e2a1256b17b1 ("x86/speculation: Restore speculation related MSRs during S3 resume")
+Reported-by: Kyle D. Pelton <kyle.d.pelton@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Tested-by: Kyle D. Pelton <kyle.d.pelton@intel.com>
+Cc: stable@vger.kernel.org
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=215841
+Link: https://lore.kernel.org/r/4350dfbf785cd482d3fafa72b2b49c83102df3ce.1650386317.git.pawan.kumar.gupta@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/microcode.h     |    2 ++
+ arch/x86/kernel/cpu/microcode/core.c |    6 +++---
+ arch/x86/power/cpu.c                 |   10 +++++++++-
+ 3 files changed, 14 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -132,10 +132,12 @@ extern void load_ucode_ap(void);
+ void reload_early_microcode(void);
+ extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
+ extern bool initrd_gone;
++void microcode_bsp_resume(void);
+ #else
+ static inline void __init load_ucode_bsp(void)                        { }
+ static inline void load_ucode_ap(void)                                { }
+ static inline void reload_early_microcode(void)                       { }
++static inline void microcode_bsp_resume(void)                 { }
+ static inline bool
+ get_builtin_firmware(struct cpio_data *cd, const char *name)  { return false; }
+ #endif
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -775,9 +775,9 @@ static struct subsys_interface mc_cpu_in
+ };
+ /**
+- * mc_bp_resume - Update boot CPU microcode during resume.
++ * microcode_bsp_resume - Update boot CPU microcode during resume.
+  */
+-static void mc_bp_resume(void)
++void microcode_bsp_resume(void)
+ {
+       int cpu = smp_processor_id();
+       struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+@@ -789,7 +789,7 @@ static void mc_bp_resume(void)
+ }
+ static struct syscore_ops mc_syscore_ops = {
+-      .resume                 = mc_bp_resume,
++      .resume                 = microcode_bsp_resume,
+ };
+ static int mc_cpu_starting(unsigned int cpu)
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -25,6 +25,7 @@
+ #include <asm/cpu.h>
+ #include <asm/mmu_context.h>
+ #include <asm/cpu_device_id.h>
++#include <asm/microcode.h>
+ #ifdef CONFIG_X86_32
+ __visible unsigned long saved_context_ebx;
+@@ -262,11 +263,18 @@ static void notrace __restore_processor_
+       x86_platform.restore_sched_clock_state();
+       mtrr_bp_restore();
+       perf_restore_debug_store();
+-      msr_restore_context(ctxt);
+       c = &cpu_data(smp_processor_id());
+       if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
+               init_ia32_feat_ctl(c);
++
++      microcode_bsp_resume();
++
++      /*
++       * This needs to happen after the microcode has been updated upon resume
++       * because some of the MSRs are "emulated" in microcode.
++       */
++      msr_restore_context(ctxt);
+ }
+ /* Needed by apm.c */
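
Taken together, the x86 change enforces an ordering constraint on resume:
MSRs that are emulated by microcode only exist once the microcode has been
reloaded, so the reload must precede the MSR restore.  A stripped-down sketch
of that ordering (hypothetical helpers, not the kernel code):

  /* Sketch only -- hypothetical helpers standing in for the kernel ones. */
  struct saved_ctx_sketch {
          unsigned long long saved_msrs[8];       /* values captured at suspend */
  };

  static void reload_boot_cpu_microcode(void)
  {
          /* stands in for microcode_bsp_resume(): reapply the microcode image */
  }

  static void restore_saved_msrs(struct saved_ctx_sketch *ctxt)
  {
          (void)ctxt;     /* stands in for msr_restore_context(): WRMSR loop */
  }

  static void restore_processor_state_sketch(struct saved_ctx_sketch *ctxt)
  {
          /* ...other architectural state is restored first... */
          reload_boot_cpu_microcode();    /* emulated MSRs now exist          */
          restore_saved_msrs(ctxt);       /* safe only after the reload above */
  }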