1 From 34273f41d57ee8d854dcd2a1d754cbb546cb548f Mon Sep 17 00:00:00 2001
2 From: "H. Peter Anvin" <hpa@zytor.com>
3 Date: Sun, 4 May 2014 10:36:22 -0700
4 Subject: x86, espfix: Make it possible to disable 16-bit support
6 From: "H. Peter Anvin" <hpa@zytor.com>
8 commit 34273f41d57ee8d854dcd2a1d754cbb546cb548f upstream.
10 Embedded systems, which may be very memory-size-sensitive, are
11 extremely unlikely to ever encounter any 16-bit software, so make it
12 a CONFIG_EXPERT option to turn off support for any 16-bit software whatsoever.
15 Signed-off-by: H. Peter Anvin <hpa@zytor.com>
16 Link: http://lkml.kernel.org/r/1398816946-3351-1-git-send-email-hpa@linux.intel.com
17 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
20 arch/x86/Kconfig | 23 ++++++++++++++++++-----
21 arch/x86/kernel/entry_32.S | 12 ++++++++++++
22 arch/x86/kernel/entry_64.S | 8 ++++++++
23 arch/x86/kernel/ldt.c | 5 +++++
24 4 files changed, 43 insertions(+), 5 deletions(-)
26 --- a/arch/x86/Kconfig
27 +++ b/arch/x86/Kconfig
28 @@ -966,14 +966,27 @@ config VM86
32 - This option is required by programs like DOSEMU to run 16-bit legacy
33 - code on X86 processors. It also may be needed by software like
34 - XFree86 to initialize some video cards via BIOS. Disabling this
35 - option saves about 6k.
36 + This option is required by programs like DOSEMU to run
37 + 16-bit real mode legacy code on x86 processors. It also may
38 + be needed by software like XFree86 to initialize some video
39 + cards via BIOS. Disabling this option saves about 6K.
42 + bool "Enable support for 16-bit segments" if EXPERT
45 + This option is required by programs like Wine to run 16-bit
46 + protected mode legacy code on x86 processors. Disabling
47 + this option saves about 300 bytes on i386, or around 6K text
48 + plus 16K runtime memory on x86-64,
52 + depends on X86_16BIT && X86_32
57 + depends on X86_16BIT && X86_64
60 tristate "Toshiba Laptop support"
61 --- a/arch/x86/kernel/entry_32.S
62 +++ b/arch/x86/kernel/entry_32.S
63 @@ -529,6 +529,7 @@ syscall_exit:
67 +#ifdef CONFIG_X86_ESPFIX32
68 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
69 # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
70 # are returning to the kernel.
71 @@ -539,6 +540,7 @@ restore_all_notrace:
72 cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
74 je ldt_ss # returning to user-space with LDT SS
77 RESTORE_REGS 4 # skip orig_eax/error_code
79 @@ -551,6 +553,7 @@ ENTRY(iret_exc)
81 _ASM_EXTABLE(irq_return,iret_exc)
83 +#ifdef CONFIG_X86_ESPFIX32
86 #ifdef CONFIG_PARAVIRT
87 @@ -594,6 +597,7 @@ ldt_ss:
88 lss (%esp), %esp /* switch to espfix segment */
89 CFI_ADJUST_CFA_OFFSET -8
95 @@ -706,6 +710,7 @@ END(syscall_badsys)
96 * the high word of the segment base from the GDT and swiches to the
97 * normal stack and adjusts ESP with the matching offset.
99 +#ifdef CONFIG_X86_ESPFIX32
100 /* fixup the stack */
101 mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
102 mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
103 @@ -715,8 +720,10 @@ END(syscall_badsys)
105 lss (%esp), %esp /* switch to the normal stack segment */
106 CFI_ADJUST_CFA_OFFSET -8
109 .macro UNWIND_ESPFIX_STACK
110 +#ifdef CONFIG_X86_ESPFIX32
112 /* see if on espfix stack */
113 cmpw $__ESPFIX_SS, %ax
114 @@ -727,6 +734,7 @@ END(syscall_badsys)
115 /* switch to normal stack */
122 @@ -1357,11 +1365,13 @@ END(debug)
126 +#ifdef CONFIG_X86_ESPFIX32
129 cmpw $__ESPFIX_SS, %ax
133 cmpl $ia32_sysenter_target,(%esp)
136 @@ -1401,6 +1411,7 @@ nmi_debug_stack_check:
137 FIX_STACK 24, nmi_stack_correct, 1
138 jmp nmi_stack_correct
140 +#ifdef CONFIG_X86_ESPFIX32
142 /* We have a RING0_INT_FRAME here.
144 @@ -1422,6 +1433,7 @@ nmi_espfix_stack:
145 lss 12+4(%esp), %esp # back to espfix stack
146 CFI_ADJUST_CFA_OFFSET -24
152 --- a/arch/x86/kernel/entry_64.S
153 +++ b/arch/x86/kernel/entry_64.S
154 @@ -1045,8 +1045,10 @@ irq_return:
155 * Are we returning to a stack segment from the LDT? Note: in
156 * 64-bit mode SS:RSP on the exception stack is always valid.
158 +#ifdef CONFIG_X86_ESPFIX64
159 testb $4,(SS-RIP)(%rsp)
165 @@ -1058,6 +1060,7 @@ ENTRY(native_iret)
166 _ASM_EXTABLE(native_iret, bad_iret)
169 +#ifdef CONFIG_X86_ESPFIX64
173 @@ -1081,6 +1084,7 @@ irq_return_ldt:
181 @@ -1152,6 +1156,7 @@ END(common_interrupt)
182 * modify the stack to make it look like we just entered
183 * the #GP handler from user space, similar to bad_iret.
185 +#ifdef CONFIG_X86_ESPFIX64
189 @@ -1177,6 +1182,9 @@ __do_double_fault:
192 END(__do_double_fault)
194 +# define __do_double_fault do_double_fault
198 * End of kprobes section
199 --- a/arch/x86/kernel/ldt.c
200 +++ b/arch/x86/kernel/ldt.c
201 @@ -229,6 +229,11 @@ static int write_ldt(void __user *ptr, u
205 + if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
210 fill_ldt(&ldt, &ldt_info);