3.2-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 23 Feb 2012 19:57:19 +0000 (11:57 -0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 23 Feb 2012 19:57:19 +0000 (11:57 -0800)
added patches:
i387-move-amd-k7-k8-fpu-fxsave-fxrstor-workaround-from-save-to-restore.patch

queue-3.2/i387-move-amd-k7-k8-fpu-fxsave-fxrstor-workaround-from-save-to-restore.patch [new file with mode: 0644]
queue-3.2/series

diff --git a/queue-3.2/i387-move-amd-k7-k8-fpu-fxsave-fxrstor-workaround-from-save-to-restore.patch b/queue-3.2/i387-move-amd-k7-k8-fpu-fxsave-fxrstor-workaround-from-save-to-restore.patch
new file mode 100644
index 0000000..1e04fcb
--- /dev/null
+++ b/queue-3.2/i387-move-amd-k7-k8-fpu-fxsave-fxrstor-workaround-from-save-to-restore.patch
@@ -0,0 +1,126 @@
+From 4903062b5485f0e2c286a23b44c9b59d9b017d53 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 16 Feb 2012 19:11:15 -0800
+Subject: i387: move AMD K7/K8 fpu fxsave/fxrstor workaround from save to restore
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 4903062b5485f0e2c286a23b44c9b59d9b017d53 upstream.
+
+The AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
+pending.  In order to not leak FIP state from one process to another, we
+need to do a floating point load after the fxsave of the old process,
+and before the fxrstor of the new FPU state.  That resets the state to
+the (uninteresting) kernel load, rather than some potentially sensitive
+user information.
+
+We used to do this directly after the FPU state save, but that is
+actually very inconvenient, since it
+
+ (a) corrupts what is potentially perfectly good FPU state that we might
+     want to lazily avoid restoring later, and
+
+ (b) on x86-64 it resulted in a very annoying ordering constraint, where
+     "__unlazy_fpu()" in the task switch needed to be delayed until after
+     the DS segment had been reloaded just to get the new DS value.
+
+Coupling it to the fxrstor instead of the fxsave automatically avoids
+both of these issues, and also ensures that we only do it when actually
+necessary (the FP state after a save may never actually get used).  It's
+simply a much more natural place for the leaked state cleanup.
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/i387.h  |   19 -------------------
+ arch/x86/kernel/process_64.c |    5 ++---
+ arch/x86/kernel/traps.c      |   14 ++++++++++++++
+ 3 files changed, 16 insertions(+), 22 deletions(-)
+
+--- a/arch/x86/include/asm/i387.h
++++ b/arch/x86/include/asm/i387.h
+@@ -211,15 +211,6 @@ static inline void fpu_fxsave(struct fpu
+
+ #endif        /* CONFIG_X86_64 */
+
+-/* We need a safe address that is cheap to find and that is already
+-   in L1 during context switch. The best choices are unfortunately
+-   different for UP and SMP */
+-#ifdef CONFIG_SMP
+-#define safe_address (__per_cpu_offset[0])
+-#else
+-#define safe_address (kstat_cpu(0).cpustat.user)
+-#endif
+-
+ /*
+  * These must be called with preempt disabled
+  */
+@@ -243,16 +234,6 @@ static inline void fpu_save_init(struct
+
+       if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
+               asm volatile("fnclex");
+-
+-      /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+-         is pending.  Clear the x87 state here by setting it to fixed
+-         values. safe_address is a random variable that should be in L1 */
+-      alternative_input(
+-              ASM_NOP8 ASM_NOP2,
+-              "emms\n\t"              /* clear stack tags */
+-              "fildl %P[addr]",       /* set F?P to defined value */
+-              X86_FEATURE_FXSAVE_LEAK,
+-              [addr] "m" (safe_address));
+ }
+
+ static inline void __save_init_fpu(struct task_struct *tsk)
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -382,6 +382,8 @@ __switch_to(struct task_struct *prev_p,
+       struct tss_struct *tss = &per_cpu(init_tss, cpu);
+       unsigned fsindex, gsindex;
+
++      __unlazy_fpu(prev_p);
++
+       /*
+        * Reload esp0, LDT and the page table pointer:
+        */
+@@ -410,9 +412,6 @@ __switch_to(struct task_struct *prev_p,
+
+       load_TLS(next, cpu);
+
+-      /* Must be after DS reload */
+-      __unlazy_fpu(prev_p);
+-
+       /*
+        * Leave lazy mode, flushing any hypercalls made here.
+        * This must be done before restoring TLS segments so
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -576,6 +576,10 @@ void math_state_restore(void)
+       struct thread_info *thread = current_thread_info();
+       struct task_struct *tsk = thread->task;
+
++      /* We need a safe address that is cheap to find and that is already
++         in L1. We just brought in "thread->task", so use that */
++#define safe_address (thread->task)
++
+       if (!tsk_used_math(tsk)) {
+               local_irq_enable();
+               /*
+@@ -593,6 +597,16 @@ void math_state_restore(void)
+
+       __thread_fpu_begin(thread);
+
++      /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
++         is pending.  Clear the x87 state here by setting it to fixed
++         values. safe_address is a random variable that should be in L1 */
++      alternative_input(
++              ASM_NOP8 ASM_NOP2,
++              "emms\n\t"              /* clear stack tags */
++              "fildl %P[addr]",       /* set F?P to defined value */
++              X86_FEATURE_FXSAVE_LEAK,
++              [addr] "m" (safe_address));
++
+       /*
+        * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+        */
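
For context on the leak itself: fxsave writes FDP/FIP/FOP into the 512-byte
save image, and on the affected AMD parts those fields are only updated while
an x87 exception is pending, so a stale instruction pointer from another
context can show through. A minimal userspace sketch (illustrative only, not
part of the patch; assumes an x86 machine with SSE and the documented fxsave
image layout) that dumps the FIP bytes of the image:

	/* fip_dump.c - illustrative only; build: gcc -O2 fip_dump.c -o fip_dump */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* fxsave requires a 16-byte-aligned, 512-byte save area */
		static uint8_t area[512] __attribute__((aligned(16)));
		uint64_t fip = 0;

		/* capture the current FPU state into the image */
		asm volatile("fxsave (%0)" : : "r" (area) : "memory");

		/* bytes 8..15 of the image hold FIP (packed with FCS/FOP on 32-bit) */
		memcpy(&fip, area + 8, sizeof(fip));
		printf("FIP field of the fxsave image: %#jx\n", (uintmax_t)fip);
		return 0;
	}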
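To make the relocation easier to follow, here is a compressed sketch of the
before/after control flow; the stub names are hypothetical stand-ins for the
real kernel primitives (fxsave/fxrstor, the DS segment reload, and the
emms+fildl cleanup patched in via X86_FEATURE_FXSAVE_LEAK):

	#include <stdio.h>

	/* hypothetical stubs standing in for the real kernel operations */
	static void fxsave_old_task(void)  { puts("fxsave (outgoing task)"); }
	static void fxrstor_new_task(void) { puts("fxrstor (incoming task)"); }
	static void clear_fip_leak(void)   { puts("emms; fildl safe_address"); }
	static void reload_ds(void)        { puts("reload DS segment"); }

	/* before: cleanup tied to the save, so it ran on every context switch,
	 * clobbered state that lazy restore might have reused, and forced the
	 * save to wait for the DS reload on x86-64 */
	static void switch_to_before(void)
	{
		reload_ds();
		fxsave_old_task();
		clear_fip_leak();
	}

	/* after: the save no longer orders against the DS reload ... */
	static void switch_to_after(void)
	{
		fxsave_old_task();
		reload_ds();
	}

	/* ... and the cleanup runs only on the lazy-restore path, right
	 * before the fxrstor that must not see the leaked FIP */
	static void math_state_restore_after(void)
	{
		clear_fip_leak();
		fxrstor_new_task();
	}

	int main(void)
	{
		switch_to_before();
		switch_to_after();
		math_state_restore_after();
		return 0;
	}
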
diff --git a/queue-3.2/series b/queue-3.2/series
index 93ac89d94807edcfc80465ab3e6d577282c988a7..ab658827ec7629a0eecceb10834dafd1582b83b6 100644
--- a/queue-3.2/series
+++ b/queue-3.2/series
@@ -47,3 +47,4 @@ i387-fix-x86-64-preemption-unsafe-user-stack-save-restore.patch
 i387-move-ts_usedfpu-clearing-out-of-__save_init_fpu-and-into-callers.patch
 i387-don-t-ever-touch-ts_usedfpu-directly-use-helper-functions.patch
 i387-do-not-preload-fpu-state-at-task-switch-time.patch
+i387-move-amd-k7-k8-fpu-fxsave-fxrstor-workaround-from-save-to-restore.patch