/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <linux/hardirq.h>

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case when we return true; in the likely case
 * the thread has FPU but we are not going to set/clear TS.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		copy_fpregs_to_fpstate(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		__fpregs_activate_hw();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active) {
		if (WARN_ON(restore_fpu_checking(fpu)))
			fpu_reset_state(fpu);
	} else {
		__fpregs_deactivate_hw();
	}

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

void kernel_fpu_begin(void)
{
	preempt_disable();
	WARN_ON_ONCE(!irq_fpu_usable());
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
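
/*
 * Example (editorial sketch, not part of the original file): typical use
 * of the kernel FPU API from process context, e.g. in a hypothetical
 * SIMD-accelerated helper. Only irq_fpu_usable(), kernel_fpu_begin() and
 * kernel_fpu_end() come from this file; the fallback is assumed:
 *
 *	if (!irq_fpu_usable())
 *		return do_it_without_simd();	// hypothetical integer fallback
 *
 *	kernel_fpu_begin();
 *	// ... use SSE/AVX registers; preemption is disabled, so keep
 *	// the region short and do not sleep ...
 *	kernel_fpu_end();
 */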

/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious DNA fault.
	 * Otherwise, doing clts() in process context requires disabling preemption
	 * or some heavy lifting like kernel_fpu_begin()
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
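
/*
 * Example (editorial sketch, not part of the original file): callers are
 * expected to pair irq_ts_save()/irq_ts_restore() around a region that
 * must not take a device-not-available (#NM) fault while atomic; the
 * surrounding code is assumed:
 *
 *	int ts = irq_ts_save();
 *	// ... code that must run with CR0.TS clear ...
 *	irq_ts_restore(ts);
 *
 * irq_ts_save() only clears TS when called in atomic context; its return
 * value tells irq_ts_restore() whether TS needs to be set again.
 */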

/*
 * Save the FPU state (initialize it if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON(fpu != &current->thread.fpu);

	preempt_disable();
	if (fpu->fpregs_active) {
		copy_fpregs_to_fpstate(fpu);
		if (!use_eager_fpu())
			fpregs_deactivate(fpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

void fpstate_init(struct fpu *fpu)
{
	if (!cpu_has_fpu) {
		finit_soft_fpu(&fpu->state.soft);
		return;
	}

	memset(&fpu->state, 0, xstate_size);

	if (cpu_has_fxsr) {
		fx_finit(&fpu->state.fxsave);
	} else {
		struct i387_fsave_struct *fp = &fpu->state.fsave;
		fp->cwd = 0xffff037fu;
		fp->swd = 0xffff0000u;
		fp->twd = 0xffffffffu;
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpstate_init);
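
/*
 * Editorial note (not from the original source): the fsave defaults set
 * above correspond to what FNINIT establishes: control word 0x037f (all
 * exceptions masked, extended precision, round to nearest), status word 0
 * and tag word 0xffff (all stack slots empty); the 0xffff upper halves
 * simply fill the high 16 bits of the 32-bit fields in the legacy
 * FNSAVE image.
 */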

/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In the 'eager' case we just save to the destination context.
 *
 * In the 'lazy' case we save to the source context, mark the FPU lazy
 * via stts() and copy the source context into the destination context.
 */
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	WARN_ON(src_fpu != &current->thread.fpu);

	if (use_eager_fpu()) {
		memset(&dst_fpu->state.xsave, 0, xstate_size);
		copy_fpregs_to_fpstate(dst_fpu);
	} else {
		fpu__save(src_fpu);
		memcpy(&dst_fpu->state, &src_fpu->state, xstate_size);
	}
}

int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (src_fpu->fpstate_active)
		fpu_copy(dst_fpu, src_fpu);

	return 0;
}

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(fpu);

		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);

/*
 * This function must be called before we modify a stopped child's
 * fpstate.
 *
 * If the child has not used the FPU before then initialize its
 * fpstate.
 *
 * If the child has used the FPU before then unlazy it.
 *
 * [ After this function call, after registers in the fpstate are
 *   modified and the child task has woken up, the child task will
 *   restore the modified FPU state from the modified context. If we
 *   didn't clear its lazy status here then the lazy in-registers
 *   state pending on its former CPU could be restored, corrupting
 *   the modifications. ]
 *
 * This function is also called before we read a stopped child's
 * FPU state - to make sure it's initialized if the child has
 * no active FPU state.
 *
 * TODO: A future optimization would be to skip the unlazying in
 *       the read-only case, it's not strictly necessary for
 *       read-only access to the context.
 */
static void fpu__activate_stopped(struct fpu *child_fpu)
{
	WARN_ON_ONCE(child_fpu == &current->thread.fpu);

	if (child_fpu->fpstate_active) {
		child_fpu->last_cpu = -1;
	} else {
		fpstate_init(child_fpu);

		/* Safe to do for stopped child tasks: */
		child_fpu->fpstate_active = 1;
	}
}

/*
 * 'fpu__restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as is the case in do_device_not_available()).
 */
void fpu__restore(void)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;

	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
	if (unlikely(restore_fpu_checking(fpu))) {
		fpu_reset_state(fpu);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu.counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);

void fpu__clear(struct task_struct *tsk)
{
	struct fpu *fpu = &tsk->thread.fpu;

	WARN_ON_ONCE(tsk != current); /* Almost certainly an anomaly */

	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		drop_fpu(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpu__activate_curr(fpu);
			user_fpu_begin();
		}
		restore_init_xstate();
	}
}

/*
 * The xstateregs_active() routine is the same as the regset_fpregs_active() routine,
 * as the "regset->n" for the xstate regset will be updated based on the feature
 * capabilities supported by the xsave.
 */
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return target_fpu->fpstate_active ? regset->n : 0;
}

int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}

int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_stopped(fpu);
	sanitize_i387_state(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fpu->state.fxsave, 0, -1);
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_stopped(fpu);
	sanitize_i387_state(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &fpu->state.fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;

	return ret;
}

int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_stopped(fpu);

	xsave = &fpu->state.xsave;

	/*
	 * Copy the 48 bytes defined by the software first into the xstate
	 * memory layout in the thread struct, so that we can copy the entire
	 * xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
		xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	return ret;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_stopped(fpu);

	xsave = &fpu->state.xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->header.xfeatures &= xfeatures_mask;
	/*
	 * These bits must be zero.
	 */
	memset(&xsave->header.reserved, 0, 48);

	return ret;
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}
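
/*
 * Worked example (editorial sketch, not from the original source): the
 * i387 tag word uses two bits per register (00 valid, 01 zero, 10 special,
 * 11 empty), while the FXSR tag word keeps one bit per register
 * (1 = not empty). Taking twd = 0x3ffc, i.e. only FPU registers 0 and 7
 * tagged non-empty, and looking at the low 16 bits after each step:
 *
 *	~twd                        = 0xc003
 *	(tmp | (tmp >> 1)) & 0x5555 = 0x4001   V bits at positions 14 and 0
 *	(tmp | (tmp >> 1)) & 0x3333 = 0x2001
 *	(tmp | (tmp >> 2)) & 0x0f0f = 0x0801
 *	(tmp | (tmp >> 4)) & 0x00ff = 0x0081   bits 7 and 0 set, as expected
 */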

#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}

/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * should be actually ds/cs at fpu exception time, but
	 * that information is not available in 64bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}

void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}
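
/*
 * Example (editorial sketch, not from the original source): the two
 * helpers above are used as a pair by the regset code below to round-trip
 * the legacy i387 environment of a stopped task, roughly:
 *
 *	struct user_i387_ia32_struct env;
 *
 *	convert_from_fxsr(&env, task);	// FXSR frame -> legacy i387 layout
 *	// ... read or modify env (cwd/swd/twd, st_space, ...) ...
 *	convert_to_fxsr(task, &env);	// legacy i387 layout -> FXSR frame
 *
 * Note the asymmetric fop/fcs handling and the 32-bit vs 64-bit
 * differences implemented above.
 */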

int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;

	fpu__activate_stopped(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &fpu->state.fsave, 0,
					   -1);

	sanitize_i387_state(target);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	fpu__activate_stopped(fpu);

	sanitize_i387_state(target);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &fpu->state.fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FP;
	return ret;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int fpvalid;

	fpvalid = fpu->fpstate_active;
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      ufpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */