4.9-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 30 Apr 2018 00:04:06 +0000 (17:04 -0700)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 30 Apr 2018 00:04:06 +0000 (17:04 -0700)
added patches:
x86-ipc-fix-x32-version-of-shmid64_ds-and-msqid64_ds.patch
x86-microcode-intel-save-microcode-patch-unconditionally.patch
x86-smpboot-don-t-use-mwait_play_dead-on-amd-systems.patch

queue-4.9/series
queue-4.9/x86-ipc-fix-x32-version-of-shmid64_ds-and-msqid64_ds.patch [new file with mode: 0644]
queue-4.9/x86-microcode-intel-save-microcode-patch-unconditionally.patch [new file with mode: 0644]
queue-4.9/x86-smpboot-don-t-use-mwait_play_dead-on-amd-systems.patch [new file with mode: 0644]

diff --git a/queue-4.9/series b/queue-4.9/series
index 7ba1190eccddbefce7a497fa6fd3c00dc2b7b92a..dcf47cdcbfaabbf5eabaf95bfd4f28839e64147d 100644
@@ -55,3 +55,6 @@ rtc-opal-fix-opal-rtc-driver-opal_busy-loops.patch
 drm-amdgpu-set-compute_pgm_rsrc1-for-sgpr-vgpr-clearing-shaders.patch
 objtool-perf-fix-gcc-8-wrestrict-error.patch
 tools-lib-subcmd-pager.c-do-not-alias-select-params.patch
+x86-ipc-fix-x32-version-of-shmid64_ds-and-msqid64_ds.patch
+x86-smpboot-don-t-use-mwait_play_dead-on-amd-systems.patch
+x86-microcode-intel-save-microcode-patch-unconditionally.patch
diff --git a/queue-4.9/x86-ipc-fix-x32-version-of-shmid64_ds-and-msqid64_ds.patch b/queue-4.9/x86-ipc-fix-x32-version-of-shmid64_ds-and-msqid64_ds.patch
new file mode 100644
index 0000000..63f079d
--- /dev/null
@@ -0,0 +1,128 @@
+From 1a512c0882bd311c5b5561840fcfbe4c25b8f319 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Tue, 24 Apr 2018 23:19:51 +0200
+Subject: x86/ipc: Fix x32 version of shmid64_ds and msqid64_ds
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 1a512c0882bd311c5b5561840fcfbe4c25b8f319 upstream.
+
+A bugfix broke the x32 shmid64_ds and msqid64_ds data structure layout
+(as seen from user space) a few years ago: Originally, __BITS_PER_LONG
+was defined as 64 on x32, so we did not have padding after the 64-bit
+__kernel_time_t fields. After __BITS_PER_LONG got changed to 32,
+applications would observe extra padding.
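
A rough sketch of the generic uapi definition of that era (abbreviated from
asm-generic/msgbuf.h; trailing fields elided) shows where the unexpected
padding comes from once __BITS_PER_LONG is 32 on x32:

    struct msqid64_ds {
            struct ipc64_perm msg_perm;
            __kernel_time_t msg_stime;      /* last msgsnd time */
    #if __BITS_PER_LONG != 64
            unsigned long   __unused1;      /* padding x32 binaries do not expect */
    #endif
            __kernel_time_t msg_rtime;      /* last msgrcv time */
    #if __BITS_PER_LONG != 64
            unsigned long   __unused2;
    #endif
            __kernel_time_t msg_ctime;      /* last change time */
    #if __BITS_PER_LONG != 64
            unsigned long   __unused3;
    #endif
            /* ... counters and pids as in the x32 definition added below ... */
    };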
+
+In other parts of the uapi headers we seem to have a mix of definitions
+expecting __BITS_PER_LONG to be either 32 or 64 for x32 applications, so
+we can't easily revert the patch that broke these two structures.
+
+Instead, this patch decouples x32 from the other architectures and moves
+the definitions back into arch-specific headers, partially reverting the
+even older commit 73a2d096fdf2 ("x86: remove all now-duplicate header files").
+
+It's not clear whether this ever made any difference, since at least
+glibc carries its own (correct) copy of both of these header files,
+so possibly no application has ever observed the definitions here.
+
+Based on a suggestion from H.J. Lu, I tried out the tool from
+https://github.com/hjl-tools/linux-header to find other such
+bugs; it pointed out the same bug in statfs(), which also has
+a separate (correct) copy in glibc.
+
+Fixes: f4b4aae18288 ("x86/headers/uapi: Fix __BITS_PER_LONG value for x32 builds")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "H . J . Lu" <hjl.tools@gmail.com>
+Cc: Jeffrey Walton <noloader@gmail.com>
+Cc: stable@vger.kernel.org
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Link: https://lkml.kernel.org/r/20180424212013.3967461-1-arnd@arndb.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/uapi/asm/msgbuf.h |   31 +++++++++++++++++++++++++++
+ arch/x86/include/uapi/asm/shmbuf.h |   42 +++++++++++++++++++++++++++++++++++++
+ 2 files changed, 73 insertions(+)
+
+--- a/arch/x86/include/uapi/asm/msgbuf.h
++++ b/arch/x86/include/uapi/asm/msgbuf.h
+@@ -1 +1,32 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++#ifndef __ASM_X64_MSGBUF_H
++#define __ASM_X64_MSGBUF_H
++
++#if !defined(__x86_64__) || !defined(__ILP32__)
+ #include <asm-generic/msgbuf.h>
++#else
++/*
++ * The msqid64_ds structure for x86 architecture with x32 ABI.
++ *
++ * On x86-32 and x86-64 we can just use the generic definition, but
++ * x32 uses the same binary layout as x86_64, which is different
++ * from other 32-bit architectures.
++ */
++
++struct msqid64_ds {
++      struct ipc64_perm msg_perm;
++      __kernel_time_t msg_stime;      /* last msgsnd time */
++      __kernel_time_t msg_rtime;      /* last msgrcv time */
++      __kernel_time_t msg_ctime;      /* last change time */
++      __kernel_ulong_t msg_cbytes;    /* current number of bytes on queue */
++      __kernel_ulong_t msg_qnum;      /* number of messages in queue */
++      __kernel_ulong_t msg_qbytes;    /* max number of bytes on queue */
++      __kernel_pid_t msg_lspid;       /* pid of last msgsnd */
++      __kernel_pid_t msg_lrpid;       /* last receive pid */
++      __kernel_ulong_t __unused4;
++      __kernel_ulong_t __unused5;
++};
++
++#endif
++
++#endif /* __ASM_X64_MSGBUF_H */
+--- a/arch/x86/include/uapi/asm/shmbuf.h
++++ b/arch/x86/include/uapi/asm/shmbuf.h
+@@ -1 +1,43 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++#ifndef __ASM_X86_SHMBUF_H
++#define __ASM_X86_SHMBUF_H
++
++#if !defined(__x86_64__) || !defined(__ILP32__)
+ #include <asm-generic/shmbuf.h>
++#else
++/*
++ * The shmid64_ds structure for x86 architecture with x32 ABI.
++ *
++ * On x86-32 and x86-64 we can just use the generic definition, but
++ * x32 uses the same binary layout as x86_64, which is different
++ * from other 32-bit architectures.
++ */
++
++struct shmid64_ds {
++      struct ipc64_perm       shm_perm;       /* operation perms */
++      size_t                  shm_segsz;      /* size of segment (bytes) */
++      __kernel_time_t         shm_atime;      /* last attach time */
++      __kernel_time_t         shm_dtime;      /* last detach time */
++      __kernel_time_t         shm_ctime;      /* last change time */
++      __kernel_pid_t          shm_cpid;       /* pid of creator */
++      __kernel_pid_t          shm_lpid;       /* pid of last operator */
++      __kernel_ulong_t        shm_nattch;     /* no. of current attaches */
++      __kernel_ulong_t        __unused4;
++      __kernel_ulong_t        __unused5;
++};
++
++struct shminfo64 {
++      __kernel_ulong_t        shmmax;
++      __kernel_ulong_t        shmmin;
++      __kernel_ulong_t        shmmni;
++      __kernel_ulong_t        shmseg;
++      __kernel_ulong_t        shmall;
++      __kernel_ulong_t        __unused1;
++      __kernel_ulong_t        __unused2;
++      __kernel_ulong_t        __unused3;
++      __kernel_ulong_t        __unused4;
++};
++
++#endif
++
++#endif /* __ASM_X86_SHMBUF_H */
diff --git a/queue-4.9/x86-microcode-intel-save-microcode-patch-unconditionally.patch b/queue-4.9/x86-microcode-intel-save-microcode-patch-unconditionally.patch
new file mode 100644
index 0000000..3a93e49
--- /dev/null
@@ -0,0 +1,49 @@
+From 84749d83758af6576552046b215b9b7f37f9556b Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Sat, 21 Apr 2018 10:19:29 +0200
+Subject: x86/microcode/intel: Save microcode patch unconditionally
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 84749d83758af6576552046b215b9b7f37f9556b upstream.
+
+save_mc_for_early() was a no-op on !CONFIG_HOTPLUG_CPU, but the
+generic_load_microcode() path saves the microcode patches it has found
+into the cache of patches which is used for late loading too, regardless
+of whether CPU hotplug is used or not.
+
+Make the saving unconditional so that late loading can find the proper
+patch.
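
A minimal sketch of the pre-fix shape of the function (using only what the
hunks below touch; the real body is elided) makes the problem visible: with
CONFIG_HOTPLUG_CPU=n the whole body compiled away, so the cache consulted by
late loading was never filled.

    static void save_mc_for_early(u8 *mc)
    {
    #ifdef CONFIG_HOTPLUG_CPU                       /* removed by this patch */
            /* Synchronization during CPU hotplug. */
            static DEFINE_MUTEX(x86_cpu_microcode_mutex);

            mutex_lock(&x86_cpu_microcode_mutex);
            /* ... save @mc into the patch cache used by late loading ... */
            mutex_unlock(&x86_cpu_microcode_mutex);
    #endif                                          /* HOTPLUG_CPU=n: no-op */
    }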
+
+Reported-by: Vitezslav Samel <vitezslav@samel.cz>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Vitezslav Samel <vitezslav@samel.cz>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Cc: stable@vger.kernel.org
+Link: http://lkml.kernel.org/r/20180418081140.GA2439@pc11.op.pod.cz
+Link: https://lkml.kernel.org/r/20180421081930.15741-1-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/microcode/intel.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -474,7 +474,6 @@ static void show_saved_mc(void)
+  */
+ static void save_mc_for_early(u8 *mc)
+ {
+-#ifdef CONFIG_HOTPLUG_CPU
+       /* Synchronization during CPU hotplug. */
+       static DEFINE_MUTEX(x86_cpu_microcode_mutex);
+@@ -521,7 +520,6 @@ static void save_mc_for_early(u8 *mc)
+ out:
+       mutex_unlock(&x86_cpu_microcode_mutex);
+-#endif
+ }
+ static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
diff --git a/queue-4.9/x86-smpboot-don-t-use-mwait_play_dead-on-amd-systems.patch b/queue-4.9/x86-smpboot-don-t-use-mwait_play_dead-on-amd-systems.patch
new file mode 100644
index 0000000..7afe21a
--- /dev/null
@@ -0,0 +1,44 @@
+From da6fa7ef67f07108a1b0cb9fd9e7fcaabd39c051 Mon Sep 17 00:00:00 2001
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+Date: Tue, 3 Apr 2018 09:02:28 -0500
+Subject: x86/smpboot: Don't use mwait_play_dead() on AMD systems
+
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+
+commit da6fa7ef67f07108a1b0cb9fd9e7fcaabd39c051 upstream.
+
+Recent AMD systems support using MWAIT for C1 state. However, MWAIT will
+not allow deeper C-states than C1 on current systems.
+
+play_dead() expects to use the deepest state available.  The deepest state
+available on AMD systems is reached through SystemIO or HALT. If MWAIT is
+available, it is preferred over the other methods, so the CPU never reaches
+the deepest possible state.
+
+Don't try to use MWAIT to play_dead() on AMD systems. Instead, use CPUIDLE
+to enter the deepest state advertised by firmware. If CPUIDLE is not
+available then fall back to HALT.
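
For orientation, the caller ordering that makes an early return sufficient
looks roughly like this in kernels of this vintage (a sketch based on
arch/x86/kernel/smpboot.c; not part of the change below):

    void native_play_dead(void)
    {
            play_dead_common();
            tboot_shutdown(TB_SHUTDOWN_WFS);

            mwait_play_dead();              /* returns immediately on AMD after this patch */
            if (cpuidle_play_dead())        /* deepest state advertised by firmware */
                    hlt_play_dead();        /* fallback when CPUIDLE is unavailable */
    }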
+
+Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: stable@vger.kernel.org
+Cc: Yazen Ghannam <Yazen.Ghannam@amd.com>
+Link: https://lkml.kernel.org/r/20180403140228.58540-1-Yazen.Ghannam@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/smpboot.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1591,6 +1591,8 @@ static inline void mwait_play_dead(void)
+       void *mwait_ptr;
+       int i;
++      if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
++              return;
+       if (!this_cpu_has(X86_FEATURE_MWAIT))
+               return;
+       if (!this_cpu_has(X86_FEATURE_CLFLUSH))