5.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 16 Nov 2022 08:55:10 +0000 (09:55 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 16 Nov 2022 08:55:10 +0000 (09:55 +0100)
added patches:
io_uring-kill-goto-error-handling-in-io_sqpoll_wait_sq.patch
x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch

queue-5.10/io_uring-kill-goto-error-handling-in-io_sqpoll_wait_sq.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch [new file with mode: 0644]

diff --git a/queue-5.10/io_uring-kill-goto-error-handling-in-io_sqpoll_wait_sq.patch b/queue-5.10/io_uring-kill-goto-error-handling-in-io_sqpoll_wait_sq.patch
new file mode 100644 (file)
index 0000000..a57ab75
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Wed Nov 16 09:43:39 AM CET 2022
+Date: Wed, 16 Nov 2022 09:43:39 +0100
+To: Greg KH <gregkh@linuxfoundation.org>
+From: Jens Axboe <axboe@kernel.dk>
+Subject: io_uring: kill goto error handling in io_sqpoll_wait_sq()
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Hunk extracted from commit 70aacfe66136809d7f080f89c492c278298719f4
+upstream.
+
+If the sqpoll thread has died, the out condition doesn't remove the
+waiting task from the waitqueue. The goto and check are not needed, just
+make it a break condition after setting the error value. That ensures
+that we always remove ourselves from sqo_sq_wait waitqueue.
+
+Reported-by: Xingyuan Mo <hdthky0@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -9038,7 +9038,7 @@ static int io_sqpoll_wait_sq(struct io_r
+               if (unlikely(ctx->sqo_dead)) {
+                       ret = -EOWNERDEAD;
+-                      goto out;
++                      break;
+               }
+               if (!io_sqring_full(ctx))
+@@ -9048,7 +9048,6 @@ static int io_sqpoll_wait_sq(struct io_r
+       } while (!signal_pending(current));
+       finish_wait(&ctx->sqo_sq_wait, &wait);
+-out:
+       return ret;
+ }
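
The control-flow point of the patch above, as a minimal, self-contained userspace C sketch (illustrative only, not kernel code): every exit from the wait loop must fall through to the dequeue step after the loop. The old "goto out" jumped past it and left the task on the waitqueue; a "break" cannot. All identifiers below (queue_add, queue_remove, ring_dead, ring_has_space, retries) are made-up stand-ins for the kernel APIs named in the hunk.

/*
 * Userspace analog of io_sqpoll_wait_sq()'s wait loop: the cleanup after
 * the loop (finish_wait() in the kernel) must run on every exit path.
 * Breaking out of the loop falls through to it; the removed goto did not.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool ring_dead = true;   /* stands in for ctx->sqo_dead        */
static bool ring_has_space;     /* stands in for !io_sqring_full(ctx) */

static void queue_add(void)    { puts("prepare_to_wait: task queued");   }
static void queue_remove(void) { puts("finish_wait:     task dequeued"); }

static int wait_for_sq_space(void)
{
	int ret = 0;
	int retries = 3;        /* stands in for !signal_pending(current) */

	do {
		queue_add();
		if (ring_dead) {
			ret = -EOWNERDEAD;
			break;  /* was "goto out", which skipped the cleanup */
		}
		if (ring_has_space)
			break;
		puts("schedule: sleeping...");
	} while (--retries);

	queue_remove();         /* now reached on every exit path */
	return ret;
}

int main(void)
{
	printf("ret = %d\n", wait_for_sq_space());
	return 0;
}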
diff --git a/queue-5.10/series b/queue-5.10/series
index a67b043fa4a611d7c261f3ba363ce2ba6bf563aa..869f0596e54b861bf92e0fb4216e237896913cb9 100644 (file)
@@ -93,3 +93,5 @@ dmaengine-at_hdmac-fix-impossible-condition.patch
 dmaengine-at_hdmac-check-return-code-of-dma_async_device_register.patch
 net-tun-call-napi_schedule_prep-to-ensure-we-own-a-napi.patch
 mmc-sdhci-esdhc-imx-convert-the-driver-to-dt-only.patch
+x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch
+io_uring-kill-goto-error-handling-in-io_sqpoll_wait_sq.patch
diff --git a/queue-5.10/x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch b/queue-5.10/x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch
new file mode 100644 (file)
index 0000000..e1d5185
--- /dev/null
@@ -0,0 +1,168 @@
+From 2632daebafd04746b4b96c2f26a6021bc38f6209 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Mon, 14 Nov 2022 12:44:01 +0100
+Subject: x86/cpu: Restore AMD's DE_CFG MSR after resume
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 2632daebafd04746b4b96c2f26a6021bc38f6209 upstream.
+
+DE_CFG contains the LFENCE serializing bit, restore it on resume too.
+This is relevant to older families due to the way how they do S3.
+
+Unify and correct naming while at it.
+
+Fixes: e4d0e84e4907 ("x86/cpu/AMD: Make LFENCE a serializing instruction")
+Reported-by: Andrew Cooper <Andrew.Cooper3@citrix.com>
+Reported-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: <stable@kernel.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/msr-index.h       |    8 +++++---
+ arch/x86/kernel/cpu/amd.c              |    6 ++----
+ arch/x86/kernel/cpu/hygon.c            |    4 ++--
+ arch/x86/kvm/svm/svm.c                 |   10 +++++-----
+ arch/x86/kvm/x86.c                     |    2 +-
+ arch/x86/power/cpu.c                   |    1 +
+ tools/arch/x86/include/asm/msr-index.h |    8 +++++---
+ 7 files changed, 21 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -489,6 +489,11 @@
+ #define MSR_AMD64_CPUID_FN_1          0xc0011004
+ #define MSR_AMD64_LS_CFG              0xc0011020
+ #define MSR_AMD64_DC_CFG              0xc0011022
++
++#define MSR_AMD64_DE_CFG              0xc0011029
++#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT  1
++#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE     BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
++
+ #define MSR_AMD64_BU_CFG2             0xc001102a
+ #define MSR_AMD64_IBSFETCHCTL         0xc0011030
+ #define MSR_AMD64_IBSFETCHLINAD               0xc0011031
+@@ -565,9 +570,6 @@
+ #define FAM10H_MMIO_CONF_BASE_MASK    0xfffffffULL
+ #define FAM10H_MMIO_CONF_BASE_SHIFT   20
+ #define MSR_FAM10H_NODE_ID            0xc001100c
+-#define MSR_F10H_DECFG                        0xc0011029
+-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT   1
+-#define MSR_F10H_DECFG_LFENCE_SERIALIZE               BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
+ /* K8 MSRs */
+ #define MSR_K8_TOP_MEM1                       0xc001001a
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -822,8 +822,6 @@ static void init_amd_gh(struct cpuinfo_x
+               set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
+ }
+-#define MSR_AMD64_DE_CFG      0xC0011029
+-
+ static void init_amd_ln(struct cpuinfo_x86 *c)
+ {
+       /*
+@@ -1018,8 +1016,8 @@ static void init_amd(struct cpuinfo_x86
+                * msr_set_bit() uses the safe accessors, too, even if the MSR
+                * is not present.
+                */
+-              msr_set_bit(MSR_F10H_DECFG,
+-                          MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
++              msr_set_bit(MSR_AMD64_DE_CFG,
++                          MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
+               /* A serializing LFENCE stops RDTSC speculation */
+               set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+--- a/arch/x86/kernel/cpu/hygon.c
++++ b/arch/x86/kernel/cpu/hygon.c
+@@ -342,8 +342,8 @@ static void init_hygon(struct cpuinfo_x8
+                * msr_set_bit() uses the safe accessors, too, even if the MSR
+                * is not present.
+                */
+-              msr_set_bit(MSR_F10H_DECFG,
+-                          MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
++              msr_set_bit(MSR_AMD64_DE_CFG,
++                          MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
+               /* A serializing LFENCE stops RDTSC speculation */
+               set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2475,9 +2475,9 @@ static int svm_get_msr_feature(struct kv
+       msr->data = 0;
+       switch (msr->index) {
+-      case MSR_F10H_DECFG:
+-              if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
+-                      msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
++      case MSR_AMD64_DE_CFG:
++              if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
++                      msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
+               break;
+       case MSR_IA32_PERF_CAPABILITIES:
+               return 0;
+@@ -2584,7 +2584,7 @@ static int svm_get_msr(struct kvm_vcpu *
+                       msr_info->data = 0x1E;
+               }
+               break;
+-      case MSR_F10H_DECFG:
++      case MSR_AMD64_DE_CFG:
+               msr_info->data = svm->msr_decfg;
+               break;
+       default:
+@@ -2764,7 +2764,7 @@ static int svm_set_msr(struct kvm_vcpu *
+       case MSR_VM_IGNNE:
+               vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
+               break;
+-      case MSR_F10H_DECFG: {
++      case MSR_AMD64_DE_CFG: {
+               struct kvm_msr_entry msr_entry;
+               msr_entry.index = msr->index;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1362,7 +1362,7 @@ static const u32 msr_based_features_all[
+       MSR_IA32_VMX_EPT_VPID_CAP,
+       MSR_IA32_VMX_VMFUNC,
+-      MSR_F10H_DECFG,
++      MSR_AMD64_DE_CFG,
+       MSR_IA32_UCODE_REV,
+       MSR_IA32_ARCH_CAPABILITIES,
+       MSR_IA32_PERF_CAPABILITIES,
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -522,6 +522,7 @@ static void pm_save_spec_msr(void)
+               MSR_TSX_FORCE_ABORT,
+               MSR_IA32_MCU_OPT_CTRL,
+               MSR_AMD64_LS_CFG,
++              MSR_AMD64_DE_CFG,
+       };
+       msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
+--- a/tools/arch/x86/include/asm/msr-index.h
++++ b/tools/arch/x86/include/asm/msr-index.h
+@@ -489,6 +489,11 @@
+ #define MSR_AMD64_CPUID_FN_1          0xc0011004
+ #define MSR_AMD64_LS_CFG              0xc0011020
+ #define MSR_AMD64_DC_CFG              0xc0011022
++
++#define MSR_AMD64_DE_CFG              0xc0011029
++#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
++#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE     BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
++
+ #define MSR_AMD64_BU_CFG2             0xc001102a
+ #define MSR_AMD64_IBSFETCHCTL         0xc0011030
+ #define MSR_AMD64_IBSFETCHLINAD               0xc0011031
+@@ -565,9 +570,6 @@
+ #define FAM10H_MMIO_CONF_BASE_MASK    0xfffffffULL
+ #define FAM10H_MMIO_CONF_BASE_SHIFT   20
+ #define MSR_FAM10H_NODE_ID            0xc001100c
+-#define MSR_F10H_DECFG                        0xc0011029
+-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT   1
+-#define MSR_F10H_DECFG_LFENCE_SERIALIZE               BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
+ /* K8 MSRs */
+ #define MSR_K8_TOP_MEM1                       0xc001001a
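
The arch/x86/power/cpu.c hunk carries the actual fix: MSR_AMD64_DE_CFG joins the list of speculation-related MSRs that are snapshotted before suspend and written back on resume, so the LFENCE-serializing bit survives S3 on the affected families. A rough userspace C sketch of that save/restore-table idea follows; the MSR index and bit value come from the patch itself, while read_msr/write_msr and the rest are fabricated stand-ins, not the kernel's rdmsr/wrmsr paths.

/*
 * Sketch of the pm_save_spec_msr() concept: keep a table of MSR indices,
 * capture their values before suspend, and write them back on resume.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_AMD64_DE_CFG                    0xc0011029u
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE   (1ull << 1)

struct saved_msr {
	uint32_t index;
	uint64_t value;
};

/* MSRs whose contents must survive S3, mirroring spec_msr_id[] */
static struct saved_msr saved[] = {
	{ .index = MSR_AMD64_DE_CFG },
	/* ... other speculation-related MSRs ... */
};

/* Fake MSR backing store so the sketch runs in userspace. */
static uint64_t fake_msr_storage = MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;

static uint64_t read_msr(uint32_t index)          { (void)index; return fake_msr_storage; }
static void write_msr(uint32_t index, uint64_t v) { (void)index; fake_msr_storage = v; }

static void pm_save(void)
{
	for (size_t i = 0; i < sizeof(saved) / sizeof(saved[0]); i++)
		saved[i].value = read_msr(saved[i].index);
}

static void pm_restore(void)
{
	for (size_t i = 0; i < sizeof(saved) / sizeof(saved[0]); i++)
		write_msr(saved[i].index, saved[i].value);
}

int main(void)
{
	pm_save();
	fake_msr_storage = 0;   /* firmware cleared DE_CFG during S3 */
	pm_restore();
	printf("DE_CFG after resume: %#llx\n",
	       (unsigned long long)fake_msr_storage);
	return 0;
}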