/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/traps.h>

#include <linux/hardirq.h>
/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
        WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
        this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
        WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
        this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
        return this_cpu_read(in_kernel_fpu);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case when we return true; in the likely case
 * the thread has FPU but we are not going to set/clear TS.
 */
static bool interrupted_kernel_fpu_idle(void)
{
        if (kernel_fpu_disabled())
                return false;

        if (use_eager_fpu())
                return true;

        return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
        struct pt_regs *regs = get_irq_regs();
        return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
        return !in_interrupt() ||
                interrupted_user_mode() ||
                interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

void __kernel_fpu_begin(void)
{
        struct fpu *fpu = &current->thread.fpu;

        WARN_ON_FPU(!irq_fpu_usable());

        kernel_fpu_disable();

        if (fpu->fpregs_active) {
                copy_fpregs_to_fpstate(fpu);
        } else {
                this_cpu_write(fpu_fpregs_owner_ctx, NULL);
                __fpregs_activate_hw();
        }
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
        struct fpu *fpu = &current->thread.fpu;

        if (fpu->fpregs_active) {
                if (WARN_ON_FPU(copy_fpstate_to_fpregs(fpu)))
                        fpu__clear(fpu);
        } else {
                __fpregs_deactivate_hw();
        }

        kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

void kernel_fpu_begin(void)
{
        preempt_disable();
        __kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
        __kernel_fpu_end();
        preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
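
/*
 * Illustrative usage sketch (not code from this file; the fallback
 * function name is hypothetical): kernel code that wants to execute
 * SIMD instructions typically brackets them like this:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... SSE/AVX instructions can be used here ...
 *		kernel_fpu_end();
 *	} else {
 *		do_it_without_simd();
 *	}
 *
 * No sleeping or preemption is allowed between kernel_fpu_begin() and
 * kernel_fpu_end(): the FPU belongs to the kernel in that window.
 */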

/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
        /*
         * If in process context and not atomic, we can take a spurious DNA fault.
         * Otherwise, doing clts() in process context requires disabling preemption
         * or some heavy lifting like kernel_fpu_begin()
         */
        if (!in_atomic())
                return 0;

        if (read_cr0() & X86_CR0_TS) {
                clts();
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
        if (TS_state)
                stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
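
/*
 * Illustrative usage sketch (hypothetical caller): a driver that must
 * execute a single FPU-touching instruction from atomic context can
 * bracket it with the TS helpers above:
 *
 *	int ts_state = irq_ts_save();
 *	... issue the FPU-touching instruction ...
 *	irq_ts_restore(ts_state);
 */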

/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
        WARN_ON_FPU(fpu != &current->thread.fpu);

        preempt_disable();
        if (fpu->fpregs_active) {
                if (!copy_fpregs_to_fpstate(fpu))
                        fpregs_deactivate(fpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
        fp->cwd = 0xffff037fu;
        fp->swd = 0xffff0000u;
        fp->twd = 0xffffffffu;
        fp->fos = 0xffff0000u;
}

void fpstate_init(union fpregs_state *state)
{
        if (!cpu_has_fpu) {
                fpstate_init_soft(&state->soft);
                return;
        }

        memset(state, 0, xstate_size);

        if (cpu_has_fxsr)
                fpstate_init_fxstate(&state->fxsave);
        else
                fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);

/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In both the 'eager' and the 'lazy' case we save hardware registers
 * directly to the destination buffer.
 */
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
        WARN_ON_FPU(src_fpu != &current->thread.fpu);

        /*
         * Don't let 'init optimized' areas of the XSAVE area
         * leak into the child task:
         */
        if (use_eager_fpu())
                memset(&dst_fpu->state.xsave, 0, xstate_size);

        /*
         * Save current FPU registers directly into the child
         * FPU context, without any memory-to-memory copying.
         *
         * If the FPU context got destroyed in the process (FNSAVE
         * done on old CPUs) then copy it back into the source
         * context and mark the current task for lazy restore.
         *
         * We have to do all this with preemption disabled,
         * mostly because of the FNSAVE case, because in that
         * case we must not allow preemption in the window
         * between the FNSAVE and us marking the context lazy.
         *
         * It shouldn't be an issue as even FNSAVE is plenty
         * fast in terms of critical section length.
         */
        preempt_disable();
        if (!copy_fpregs_to_fpstate(dst_fpu)) {
                memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
                fpregs_deactivate(src_fpu);
        }
        preempt_enable();
}

int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
        dst_fpu->counter = 0;
        dst_fpu->fpregs_active = 0;
        dst_fpu->last_cpu = -1;

        if (src_fpu->fpstate_active)
                fpu_copy(dst_fpu, src_fpu);

        return 0;
}
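
/*
 * Illustrative call site (a sketch, assuming the fork path of this
 * kernel generation): arch_dup_task_struct() hands the new child's
 * FPU context to fpu__copy() roughly like this:
 *
 *	int arch_dup_task_struct(struct task_struct *dst,
 *				 struct task_struct *src)
 *	{
 *		*dst = *src;
 *		return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
 *	}
 */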

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
        WARN_ON_FPU(fpu != &current->thread.fpu);

        if (!fpu->fpstate_active) {
                fpstate_init(&fpu->state);

                /* Safe to do for the current task: */
                fpu->fpstate_active = 1;
        }
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);

/*
 * This function must be called before we read a task's fpstate.
 *
 * If the task has not used the FPU before then initialize its
 * fpstate.
 *
 * If the task has used the FPU before then save it.
 */
void fpu__activate_fpstate_read(struct fpu *fpu)
{
        /*
         * If fpregs are active (in the current CPU), then
         * copy them to the fpstate:
         */
        if (fpu->fpregs_active) {
                fpu__save(fpu);
        } else {
                if (!fpu->fpstate_active) {
                        fpstate_init(&fpu->state);

                        /* Safe to do for current and for stopped child tasks: */
                        fpu->fpstate_active = 1;
                }
        }
}

/*
 * This function must be called before we read or write a task's fpstate.
 *
 * If the task has not used the FPU before then initialize its
 * fpstate.
 *
 * If the task has used the FPU before then save and unlazy it.
 *
 * [ If this function is used for non-current child tasks, then
 *   after this function call, after registers in the fpstate are
 *   modified and the child task has woken up, the child task will
 *   restore the modified FPU state from the modified context. If we
 *   didn't clear its lazy status here then the lazy in-registers
 *   state pending on its former CPU could be restored, corrupting
 *   the modifications.
 *
 *   This function can be used for the current task as well, but
 *   only for reading the fpstate. Modifications to the fpstate
 *   will be lost on eagerfpu systems. ]
 *
 * TODO: A future optimization would be to skip the unlazying in
 *       the read-only case, it's not strictly necessary for
 *       read-only access to the context.
 */
void fpu__activate_fpstate(struct fpu *fpu)
{
        /*
         * If fpregs are active (in the current CPU), then
         * copy them to the fpstate:
         */
        if (fpu->fpregs_active) {
                fpu__save(fpu);
        } else {
                if (fpu->fpstate_active) {
                        /* Invalidate any lazy state: */
                        fpu->last_cpu = -1;
                } else {
                        fpstate_init(&fpu->state);

                        /* Safe to do for current and for stopped child tasks: */
                        fpu->fpstate_active = 1;
                }
        }
}
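
/*
 * Illustrative caller sketch (hypothetical, modeled on the
 * ptrace/regset path): modifying a stopped child's FPU context would
 * look roughly like this:
 *
 *	struct fpu *fpu = &target->thread.fpu;	(target is a stopped task)
 *
 *	fpu__activate_fpstate(fpu);
 *	... write new register contents into fpu->state ...
 *	(the child restores the modified state when it next runs)
 */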

/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(struct fpu *fpu)
{
        fpu__activate_curr(fpu);

        /* Avoid __kernel_fpu_begin() right after fpregs_activate() */
        kernel_fpu_disable();
        fpregs_activate(fpu);
        if (unlikely(copy_fpstate_to_fpregs(fpu))) {
                fpu__clear(fpu);
                force_sig_info(SIGSEGV, SEND_SIG_PRIV, current);
        } else {
                fpu->counter++;
        }
        kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
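
/*
 * Illustrative call site (a sketch, assuming the #NM trap path of this
 * kernel generation): do_device_not_available() in traps.c runs with
 * interrupts disabled and hands the fault off roughly as:
 *
 *	fpu__restore(&current->thread.fpu);
 */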

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
        preempt_disable();
        fpu->counter = 0;

        if (fpu->fpregs_active) {
                /* Ignore delayed exceptions from user space */
                asm volatile("1: fwait\n"
                             "2:\n"
                             _ASM_EXTABLE(1b, 2b));
                fpregs_deactivate(fpu);
        }

        fpu->fpstate_active = 0;

        preempt_enable();
}

/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
        if (use_xsave())
                copy_kernel_to_xregs(&init_fpstate.xsave, -1);
        else
                copy_kernel_to_fxregs(&init_fpstate.fxsave);
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
        WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

        if (!use_eager_fpu()) {
                /* FPU state will be reallocated lazily at the first use. */
                fpu__drop(fpu);
        } else {
                if (!fpu->fpstate_active) {
                        fpu__activate_curr(fpu);
                        user_fpu_begin();
                }
                copy_init_fpstate_to_fpregs();
        }
}
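
/*
 * Illustrative call site (a sketch, assuming the execve path of this
 * kernel generation): flush_thread() resets the FPU of the exec'ing
 * task roughly like this:
 *
 *	void flush_thread(void)
 *	{
 *		...
 *		fpu__clear(&current->thread.fpu);
 *	}
 */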

/*
 * x87 math exception handling:
 */

static inline unsigned short get_fpu_cwd(struct fpu *fpu)
{
        if (cpu_has_fxsr) {
                return fpu->state.fxsave.cwd;
        } else {
                return (unsigned short)fpu->state.fsave.cwd;
        }
}

static inline unsigned short get_fpu_swd(struct fpu *fpu)
{
        if (cpu_has_fxsr) {
                return fpu->state.fxsave.swd;
        } else {
                return (unsigned short)fpu->state.fsave.swd;
        }
}

static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
{
        if (cpu_has_xmm) {
                return fpu->state.fxsave.mxcsr;
        } else {
                return MXCSR_DEFAULT;
        }
}

int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
        int err;

        if (trap_nr == X86_TRAP_MF) {
                unsigned short cwd, swd;
                /*
                 * (~cwd & swd) will mask out exceptions that are not set to unmasked
                 * status. 0x3f is the exception bits in these regs, 0x200 is the
                 * C1 reg you need in case of a stack fault, 0x040 is the stack
                 * fault bit. We should only be taking one exception at a time,
                 * so if this combination doesn't produce any single exception,
                 * then we have a bad program that isn't synchronizing its FPU usage,
                 * and it will suffer the consequences since we won't be able to
                 * fully reproduce the context of the exception.
                 */
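                /*
                 * Worked example (illustrative arithmetic, not code from
                 * this file): after FNINIT the cwd is 0x037f, i.e. all
                 * exception bits masked, so err = swd & ~cwd keeps nothing.
                 * If a program unmasks divide-by-zero (cwd = 0x037b) and
                 * then divides by zero, the ZE bit (0x004) gets set in swd,
                 * survives the ~cwd mask and is translated to FPE_FLTDIV
                 * below.
                 */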
                cwd = get_fpu_cwd(fpu);
                swd = get_fpu_swd(fpu);

                err = swd & ~cwd;
        } else {
                /*
                 * The SIMD FPU exceptions are handled a little differently, as there
                 * is only a single status/control register. Thus, to determine which
                 * unmasked exception was caught we must mask the exception mask bits
                 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
                 */
                unsigned short mxcsr = get_fpu_mxcsr(fpu);
                err = ~(mxcsr >> 7) & mxcsr;
        }

        if (err & 0x001) {      /* Invalid op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                return FPE_FLTINV;
        } else if (err & 0x004) { /* Divide by Zero */
                return FPE_FLTDIV;
        } else if (err & 0x008) { /* Overflow */
                return FPE_FLTOVF;
        } else if (err & 0x012) { /* Denormal, Underflow */
                return FPE_FLTUND;
        } else if (err & 0x020) { /* Precision */
                return FPE_FLTRES;
        }

        /*
         * If we're using IRQ 13, or supposedly even some trap
         * X86_TRAP_MF implementations, it's possible
         * we get a spurious trap, which is not an error.
         */
        return 0;
}