From b69a8069249f99c224e756446ed99b661963f30b Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Sun, 10 Feb 2019 12:39:09 +0100
Subject: [PATCH] add another proposed pending patch so I don't lose it

---
 ...don-t-export-__kernel_fpu_-begin-end.patch | 130 ++++++++++++++++++
 1 file changed, 130 insertions(+)
 create mode 100644 pending/x86-fpu-don-t-export-__kernel_fpu_-begin-end.patch

diff --git a/pending/x86-fpu-don-t-export-__kernel_fpu_-begin-end.patch b/pending/x86-fpu-don-t-export-__kernel_fpu_-begin-end.patch
new file mode 100644
index 00000000000..f83eb18bb2a
--- /dev/null
+++ b/pending/x86-fpu-don-t-export-__kernel_fpu_-begin-end.patch
@@ -0,0 +1,130 @@
+From 12209993e98c5fa1855c467f22a24e3d5b8be205 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Thu, 29 Nov 2018 16:02:10 +0100
+Subject: x86/fpu: Don't export __kernel_fpu_{begin,end}()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sebastian Andrzej Siewior
+
+commit 12209993e98c5fa1855c467f22a24e3d5b8be205 upstream.
+
+There is one user of __kernel_fpu_begin() and before invoking it,
+it invokes preempt_disable(). So it could invoke kernel_fpu_begin()
+right away. The 32bit version of arch_efi_call_virt_setup() and
+arch_efi_call_virt_teardown() does this already.
+
+The comment above *kernel_fpu*() claims that before invoking
+__kernel_fpu_begin() preemption should be disabled and that KVM is a
+good example of doing it. Well, KVM doesn't do that since commit
+
+  f775b13eedee2 ("x86,kvm: move qemu/guest FPU switching out to vcpu_run")
+
+so it is not an example anymore.
+
+With EFI gone as the last user of __kernel_fpu_{begin|end}(), both can
+be made static and not exported anymore.
+
+Signed-off-by: Sebastian Andrzej Siewior
+Signed-off-by: Borislav Petkov
+Reviewed-by: Rik van Riel
+Cc: "H. Peter Anvin"
+Cc: "Jason A. Donenfeld"
+Cc: Andy Lutomirski
+Cc: Ard Biesheuvel
+Cc: Dave Hansen
+Cc: Ingo Molnar
+Cc: Nicolai Stange
+Cc: Paolo Bonzini
+Cc: Radim Krčmář
+Cc: Thomas Gleixner
+Cc: kvm ML
+Cc: linux-efi
+Cc: x86-ml
+Link: https://lkml.kernel.org/r/20181129150210.2k4mawt37ow6c2vq@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+
+diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
+index eea40d52ca78..45864898f7e5 100644
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -82,8 +82,7 @@ struct efi_scratch {
+ #define arch_efi_call_virt_setup()					\
+ ({									\
+ 	efi_sync_low_kernel_mappings();					\
+-	preempt_disable();						\
+-	__kernel_fpu_begin();						\
++	kernel_fpu_begin();						\
+ 	firmware_restrict_branch_speculation_start();			\
+ 									\
+ 	if (!efi_enabled(EFI_OLD_MEMMAP))				\
+@@ -99,8 +98,7 @@ struct efi_scratch {
+ 	efi_switch_mm(efi_scratch.prev_mm);				\
+ 									\
+ 	firmware_restrict_branch_speculation_end();			\
+-	__kernel_fpu_end();						\
+-	preempt_enable();						\
++	kernel_fpu_end();						\
+ })
+ 
+ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
+diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
+index a9caac9d4a72..b56d504af654 100644
+--- a/arch/x86/include/asm/fpu/api.h
++++ b/arch/x86/include/asm/fpu/api.h
+@@ -12,17 +12,12 @@
+ #define _ASM_X86_FPU_API_H
+ 
+ /*
+- * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
+- * and they don't touch the preempt state on their own.
+- * If you enable preemption after __kernel_fpu_begin(), preempt notifier
+- * should call the __kernel_fpu_end() to prevent the kernel/user FPU
+- * state from getting corrupted. KVM for example uses this model.
+- *
+- * All other cases use kernel_fpu_begin/end() which disable preemption
+- * during kernel FPU usage.
++ * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
++ * disables preemption so be careful if you intend to use it for long periods
++ * of time.
++ * If you intend to use the FPU in softirq you need to check first with
++ * irq_fpu_usable() if it is possible.
+  */
+-extern void __kernel_fpu_begin(void);
+-extern void __kernel_fpu_end(void);
+ extern void kernel_fpu_begin(void);
+ extern void kernel_fpu_end(void);
+ extern bool irq_fpu_usable(void);
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index 2ea85b32421a..2e5003fef51a 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -93,7 +93,7 @@ bool irq_fpu_usable(void)
+ }
+ EXPORT_SYMBOL(irq_fpu_usable);
+ 
+-void __kernel_fpu_begin(void)
++static void __kernel_fpu_begin(void)
+ {
+ 	struct fpu *fpu = &current->thread.fpu;
+ 
+@@ -111,9 +111,8 @@ void __kernel_fpu_begin(void)
+ 		__cpu_invalidate_fpregs_state();
+ 	}
+ }
+-EXPORT_SYMBOL(__kernel_fpu_begin);
+ 
+-void __kernel_fpu_end(void)
++static void __kernel_fpu_end(void)
+ {
+ 	struct fpu *fpu = &current->thread.fpu;
+ 
+@@ -122,7 +121,6 @@ void __kernel_fpu_end(void)
+ 
+ 	kernel_fpu_enable();
+ }
+-EXPORT_SYMBOL(__kernel_fpu_end);
+ 
+ void kernel_fpu_begin(void)
+ {
-- 
2.39.5
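
For reference, a minimal sketch of the calling convention that remains after this
change: kernel_fpu_begin() disables preemption itself, kernel_fpu_end() re-enables
it, and code that may run in interrupt or softirq context should check
irq_fpu_usable() before touching the FPU. The helper fpu_xor_bytes() and its
buffers are hypothetical, shown only to illustrate the call pattern; they are not
part of the patch above.

	/* Hypothetical in-kernel user of the remaining FPU API. */
	#include <linux/types.h>
	#include <asm/fpu/api.h>

	static void fpu_xor_bytes(u8 *dst, const u8 *src, size_t len)
	{
		size_t i;

		/* In IRQ/softirq context the FPU may not be usable; check first. */
		if (!irq_fpu_usable())
			return;

		/*
		 * No preempt_disable()/__kernel_fpu_begin() pair any more:
		 * kernel_fpu_begin() disables preemption on its own.
		 */
		kernel_fpu_begin();

		for (i = 0; i < len; i++)	/* stand-in for real SIMD work */
			dst[i] ^= src[i];

		kernel_fpu_end();		/* re-enables preemption */
	}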