From dde3036d62ba3375840b10ab9ec0d568fd773b07 Mon Sep 17 00:00:00 2001
From: Dominik Brodowski <linux@dominikbrodowski.net>
Date: Sun, 11 Feb 2018 11:49:47 +0100
Subject: x86/entry/64: Get rid of the ALLOC_PT_GPREGS_ON_STACK and SAVE_AND_CLEAR_REGS macros

From: Dominik Brodowski <linux@dominikbrodowski.net>

commit dde3036d62ba3375840b10ab9ec0d568fd773b07 upstream.

Previously, error_entry() and paranoid_entry() saved the GP registers
onto stack space previously allocated by their callers. Combine these two
steps in the callers, and use the generic PUSH_AND_CLEAR_REGS macro
for that.

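Schematically, the change in an idtentry-style caller looks roughly like
this (abridged illustration only, not taken verbatim from the diff below):

	/* old: reserve pt_regs space, let the helper store into it */
	ALLOC_PT_GPREGS_ON_STACK	/* addq $-(15*8), %rsp */
	...
	call	error_entry		/* did SAVE_AND_CLEAR_REGS 8 internally */

	/* new: the caller itself pushes and clears the registers */
	PUSH_AND_CLEAR_REGS		/* 15 pushq interleaved with xor clears */
	ENCODE_FRAME_POINTER
	...
	call	error_entry		/* now only handles gs (and CR3) */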

This adds a significant amount of text size. However, Ingo Molnar points
out that:

18 "these numbers also _very_ significantly over-represent the
19 extra footprint. The assumptions that resulted in
20 us compressing the IRQ entry code have changed very
21 significantly with the new x86 IRQ allocation code we
22 introduced in the last year:
23
24 - IRQ vectors are usually populated in tightly clustered
25 groups.
26
27 With our new vector allocator code the typical per CPU
28 allocation percentage on x86 systems is ~3 device vectors
29 and ~10 fixed vectors out of ~220 vectors - i.e. a very
30 low ~6% utilization (!). [...]
31
32 The days where we allocated a lot of vectors on every
33 CPU and the compression of the IRQ entry code text
34 mattered are over.
35
36 - Another issue is that only a small minority of vectors
37 is frequent enough to actually matter to cache utilization
38 in practice: 3-4 key IPIs and 1-2 device IRQs at most - and
39 those vectors tend to be tightly clustered as well into about
40 two groups, and are probably already on 2-3 cache lines in
41 practice.
42
43 For the common case of 'cache cold' IRQs it's the depth of
44 the call chain and the fragmentation of the resulting I$
45 that should be the main performance limit - not the overall
46 size of it.
47
48 - The CPU side cost of IRQ delivery is still very expensive
49 even in the best, most cached case, as in 'over a thousand
50 cycles'. So much stuff is done that maybe contemporary x86
51 IRQ entry microcode already prefetches the IDT entry and its
52 expected call target address."[*]
53
54 [*] http://lkml.kernel.org/r/20180208094710.qnjixhm6hybebdv7@gmail.com

The "testb $3, CS(%rsp)" instruction in the idtentry macro does not need
modification. Previously, %rsp was manually decreased by 15*8; with
this patch, %rsp is decreased by 15 pushq instructions.
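
As a quick sanity check of why the offset is unchanged (illustrative
sketch, not part of the patch): %rsp moves by the same 120 bytes either
way, so CS(%rsp) still refers to the same saved slot.

	addq	$-(15*8), %rsp	/* old: %rsp dropped by 120 bytes in one step */

	pushq	%rdi		/* new: 15 pushq of 8 bytes each, so %rsp */
	...			/* also drops by 15*8 = 120 bytes in total */
	pushq	%r15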

[jpoimboe@redhat.com: unwind hint improvements]

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dan.j.williams@intel.com
Link: http://lkml.kernel.org/r/20180211104949.12992-7-linux@dominikbrodowski.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/entry/calling.h  |   42 +-----------------------------------------
 arch/x86/entry/entry_64.S |   20 +++++++++-----------
 2 files changed, 10 insertions(+), 52 deletions(-)

--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -97,46 +97,6 @@ For 32-bit we have the following convent
 
 #define SIZEOF_PTREGS	21*8
 
-.macro ALLOC_PT_GPREGS_ON_STACK
-	addq	$-(15*8), %rsp
-.endm
-
-.macro SAVE_AND_CLEAR_REGS offset=0
-	/*
-	 * Save registers and sanitize registers of values that a
-	 * speculation attack might otherwise want to exploit. The
-	 * lower registers are likely clobbered well before they
-	 * could be put to use in a speculative execution gadget.
-	 * Interleave XOR with PUSH for better uop scheduling:
-	 */
-	movq %rdi, 14*8+\offset(%rsp)
-	movq %rsi, 13*8+\offset(%rsp)
-	movq %rdx, 12*8+\offset(%rsp)
-	movq %rcx, 11*8+\offset(%rsp)
-	movq %rax, 10*8+\offset(%rsp)
-	movq %r8, 9*8+\offset(%rsp)
-	xorq %r8, %r8			/* nospec r8 */
-	movq %r9, 8*8+\offset(%rsp)
-	xorq %r9, %r9			/* nospec r9 */
-	movq %r10, 7*8+\offset(%rsp)
-	xorq %r10, %r10			/* nospec r10 */
-	movq %r11, 6*8+\offset(%rsp)
-	xorq %r11, %r11			/* nospec r11 */
-	movq %rbx, 5*8+\offset(%rsp)
-	xorl %ebx, %ebx			/* nospec rbx */
-	movq %rbp, 4*8+\offset(%rsp)
-	xorl %ebp, %ebp			/* nospec rbp */
-	movq %r12, 3*8+\offset(%rsp)
-	xorq %r12, %r12			/* nospec r12 */
-	movq %r13, 2*8+\offset(%rsp)
-	xorq %r13, %r13			/* nospec r13 */
-	movq %r14, 1*8+\offset(%rsp)
-	xorq %r14, %r14			/* nospec r14 */
-	movq %r15, 0*8+\offset(%rsp)
-	xorq %r15, %r15			/* nospec r15 */
-	UNWIND_HINT_REGS offset=\offset
-.endm
-
 .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
 	/*
 	 * Push registers and sanitize registers of values that a
@@ -211,7 +171,7 @@ For 32-bit we have the following convent
  * is just setting the LSB, which makes it an invalid stack address and is also
  * a signal to the unwinder that it's a pt_regs pointer in disguise.
  *
- * NOTE: This macro must be used *after* SAVE_AND_CLEAR_REGS because it corrupts
+ * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts
  * the original rbp.
  */
 .macro ENCODE_FRAME_POINTER ptregs_offset=0
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -867,7 +867,9 @@ ENTRY(\sym)
 	pushq	$-1			/* ORIG_RAX: no syscall to restart */
 	.endif
 
-	ALLOC_PT_GPREGS_ON_STACK
+	/* Save all registers in pt_regs */
+	PUSH_AND_CLEAR_REGS
+	ENCODE_FRAME_POINTER
 
 	.if \paranoid < 2
 	testb	$3, CS(%rsp)		/* If coming from userspace, switch stacks */
@@ -1115,15 +1117,12 @@ idtentry machine_check do_mce has_err
 #endif
 
 /*
- * Save all registers in pt_regs, and switch gs if needed.
+ * Switch gs if needed.
  * Use slow, but surefire "are we in kernel?" check.
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
 ENTRY(paranoid_entry)
-	UNWIND_HINT_FUNC
 	cld
-	SAVE_AND_CLEAR_REGS 8
-	ENCODE_FRAME_POINTER 8
 	movl	$1, %ebx
 	movl	$MSR_GS_BASE, %ecx
 	rdmsr
@@ -1136,7 +1135,7 @@ ENTRY(paranoid_entry)
 	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 
 	ret
-END(paranoid_entry)
+ENDPROC(paranoid_entry)
 
 /*
  * "Paranoid" exit path from exception stack. This is invoked
@@ -1167,14 +1166,12 @@ ENTRY(paranoid_exit)
 END(paranoid_exit)
 
 /*
- * Save all registers in pt_regs, and switch gs if needed.
+ * Switch gs if needed.
  * Return: EBX=0: came from user mode; EBX=1: otherwise
  */
 ENTRY(error_entry)
-	UNWIND_HINT_FUNC
+	UNWIND_HINT_REGS offset=8
 	cld
-	SAVE_AND_CLEAR_REGS 8
-	ENCODE_FRAME_POINTER 8
 	testb	$3, CS+8(%rsp)
 	jz	.Lerror_kernelspace
 
@@ -1565,7 +1562,8 @@ end_repeat_nmi:
 	 * frame to point back to repeat_nmi.
 	 */
 	pushq	$-1			/* ORIG_RAX: no syscall to restart */
-	ALLOC_PT_GPREGS_ON_STACK
+	PUSH_AND_CLEAR_REGS
+	ENCODE_FRAME_POINTER
 
 	/*
 	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit