From: Greg Kroah-Hartman Date: Mon, 29 Apr 2019 13:36:17 +0000 (+0200) Subject: 4.19-stable patches X-Git-Tag: v4.9.172~20 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=7e1d103f4f79cc6f7bda1a494a52549d33bb0d0c;p=thirdparty%2Fkernel%2Fstable-queue.git 4.19-stable patches added patches: x86-fpu-don-t-export-__kernel_fpu_-begin-end.patch --- diff --git a/queue-4.19/series b/queue-4.19/series index b33883c7ca7..9e6c750bf8a 100644 --- a/queue-4.19/series +++ b/queue-4.19/series @@ -80,3 +80,4 @@ fix-aio_poll-races.patch x86-retpolines-raise-limit-for-generating-indirect-calls-from-switch-case.patch x86-retpolines-disable-switch-jump-tables-when-retpolines-are-enabled.patch mm-fix-warning-in-insert_pfn.patch +x86-fpu-don-t-export-__kernel_fpu_-begin-end.patch diff --git a/queue-4.19/x86-fpu-don-t-export-__kernel_fpu_-begin-end.patch b/queue-4.19/x86-fpu-don-t-export-__kernel_fpu_-begin-end.patch new file mode 100644 index 00000000000..b04027ac07a --- /dev/null +++ b/queue-4.19/x86-fpu-don-t-export-__kernel_fpu_-begin-end.patch @@ -0,0 +1,130 @@ +From 12209993e98c5fa1855c467f22a24e3d5b8be205 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 29 Nov 2018 16:02:10 +0100 +Subject: x86/fpu: Don't export __kernel_fpu_{begin,end}() +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Sebastian Andrzej Siewior + +commit 12209993e98c5fa1855c467f22a24e3d5b8be205 upstream. + +There is one user of __kernel_fpu_begin() and before invoking it, +it invokes preempt_disable(). So it could invoke kernel_fpu_begin() +right away. The 32bit version of arch_efi_call_virt_setup() and +arch_efi_call_virt_teardown() does this already. + +The comment above *kernel_fpu*() claims that before invoking +__kernel_fpu_begin() preemption should be disabled and that KVM is a +good example of doing it. 
Well, KVM doesn't do that since commit + + f775b13eedee2 ("x86,kvm: move qemu/guest FPU switching out to vcpu_run") + +so it is not an example anymore. + +With EFI gone as the last user of __kernel_fpu_{begin|end}(), both can +be made static and not exported anymore. + +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Borislav Petkov +Reviewed-by: Rik van Riel +Cc: "H. Peter Anvin" +Cc: "Jason A. Donenfeld" +Cc: Andy Lutomirski +Cc: Ard Biesheuvel +Cc: Dave Hansen +Cc: Ingo Molnar +Cc: Nicolai Stange +Cc: Paolo Bonzini +Cc: Radim Krčmář +Cc: Thomas Gleixner +Cc: kvm ML +Cc: linux-efi +Cc: x86-ml +Link: https://lkml.kernel.org/r/20181129150210.2k4mawt37ow6c2vq@linutronix.de +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/include/asm/efi.h | 6 ++---- + arch/x86/include/asm/fpu/api.h | 15 +++++---------- + arch/x86/kernel/fpu/core.c | 6 ++---- + 3 files changed, 9 insertions(+), 18 deletions(-) + +--- a/arch/x86/include/asm/efi.h ++++ b/arch/x86/include/asm/efi.h +@@ -82,8 +82,7 @@ struct efi_scratch { + #define arch_efi_call_virt_setup() \ + ({ \ + efi_sync_low_kernel_mappings(); \ +- preempt_disable(); \ +- __kernel_fpu_begin(); \ ++ kernel_fpu_begin(); \ + firmware_restrict_branch_speculation_start(); \ + \ + if (!efi_enabled(EFI_OLD_MEMMAP)) \ +@@ -99,8 +98,7 @@ struct efi_scratch { + efi_switch_mm(efi_scratch.prev_mm); \ + \ + firmware_restrict_branch_speculation_end(); \ +- __kernel_fpu_end(); \ +- preempt_enable(); \ ++ kernel_fpu_end(); \ + }) + + extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, +--- a/arch/x86/include/asm/fpu/api.h ++++ b/arch/x86/include/asm/fpu/api.h +@@ -12,17 +12,12 @@ + #define _ASM_X86_FPU_API_H + + /* +- * Careful: __kernel_fpu_begin/end() must be called with preempt disabled +- * and they don't touch the preempt state on their own. 
+- * If you enable preemption after __kernel_fpu_begin(), preempt notifier +- * should call the __kernel_fpu_end() to prevent the kernel/user FPU +- * state from getting corrupted. KVM for example uses this model. +- * +- * All other cases use kernel_fpu_begin/end() which disable preemption +- * during kernel FPU usage. ++ * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It ++ * disables preemption so be careful if you intend to use it for long periods ++ * of time. ++ * If you intend to use the FPU in softirq you need to check first with ++ * irq_fpu_usable() if it is possible. + */ +-extern void __kernel_fpu_begin(void); +-extern void __kernel_fpu_end(void); + extern void kernel_fpu_begin(void); + extern void kernel_fpu_end(void); + extern bool irq_fpu_usable(void); +--- a/arch/x86/kernel/fpu/core.c ++++ b/arch/x86/kernel/fpu/core.c +@@ -93,7 +93,7 @@ bool irq_fpu_usable(void) + } + EXPORT_SYMBOL(irq_fpu_usable); + +-void __kernel_fpu_begin(void) ++static void __kernel_fpu_begin(void) + { + struct fpu *fpu = &current->thread.fpu; + +@@ -111,9 +111,8 @@ void __kernel_fpu_begin(void) + __cpu_invalidate_fpregs_state(); + } + } +-EXPORT_SYMBOL(__kernel_fpu_begin); + +-void __kernel_fpu_end(void) ++static void __kernel_fpu_end(void) + { + struct fpu *fpu = &current->thread.fpu; + +@@ -122,7 +121,6 @@ void __kernel_fpu_end(void) + kernel_fpu_enable(); + } +-EXPORT_SYMBOL(__kernel_fpu_end); + + void kernel_fpu_begin(void) + {