/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu-internal.h>

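/*
 * Per-CPU flag (explanatory note, not in the original source): set while
 * this CPU is inside a __kernel_fpu_begin()/__kernel_fpu_end() section or
 * while kernel_fpu_disable() is in effect, so that
 * interrupted_kernel_fpu_idle() can refuse nested kernel FPU use from
 * interrupt context.
 */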
static DEFINE_PER_CPU(bool, in_kernel_fpu);

void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

void kernel_fpu_enable(void)
{
	this_cpu_write(in_kernel_fpu, false);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not own the FPU (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case when we return true; in the likely case
 * the thread has FPU state but we are not going to set/clear TS.
 */
static inline bool interrupted_kernel_fpu_idle(void)
{
	if (this_cpu_read(in_kernel_fpu))
		return false;

	if (use_eager_fpu())
		return true;

	return !__thread_has_fpu(current) &&
		(read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static inline bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

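/*
 * Example (illustrative sketch, not part of this file): code that wants to
 * use FPU/SIMD instructions in a context that might be an interrupt is
 * expected to check irq_fpu_usable() first and fall back to an
 * integer-only path otherwise.  The function names below are hypothetical.
 *
 *	static void copy_page_simd(void *dst, const void *src)
 *	{
 *		if (!irq_fpu_usable()) {
 *			memcpy(dst, src, PAGE_SIZE);	// plain fallback path
 *			return;
 *		}
 *		kernel_fpu_begin();
 *		// ... SSE/AVX copy loop would go here ...
 *		kernel_fpu_end();
 *	}
 */
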
void __kernel_fpu_begin(void)
{
	struct task_struct *me = current;

	this_cpu_write(in_kernel_fpu, true);

	if (__thread_has_fpu(me)) {
		__save_init_fpu(me);
	} else {
		this_cpu_write(fpu_owner_task, NULL);
		if (!use_eager_fpu())
			clts();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct task_struct *me = current;

	if (__thread_has_fpu(me)) {
		if (WARN_ON(restore_fpu_checking(me)))
			fpu_reset_state(me);
	} else if (!use_eager_fpu()) {
		stts();
	}

	this_cpu_write(in_kernel_fpu, false);
}
EXPORT_SYMBOL(__kernel_fpu_end);

/*
 * Save the FPU state (initialize it if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct task_struct *tsk)
{
	WARN_ON(tsk != current);

	preempt_disable();
	if (__thread_has_fpu(tsk)) {
		if (use_eager_fpu()) {
			__save_fpu(tsk);
		} else {
			__save_init_fpu(tsk);
			__thread_fpu_end(tsk);
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

void fpstate_init(struct fpu *fpu)
{
	if (!cpu_has_fpu) {
		finit_soft_fpu(&fpu->state->soft);
		return;
	}

	memset(fpu->state, 0, xstate_size);

	if (cpu_has_fxsr) {
		fx_finit(&fpu->state->fxsave);
	} else {
		struct i387_fsave_struct *fp = &fpu->state->fsave;
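		/*
		 * Classic FNINIT-style defaults (explanatory note, not in the
		 * original source): cwd 0x037f masks all exceptions and
		 * selects extended precision with round-to-nearest, swd 0
		 * clears the status word, and twd 0xffff tags all eight
		 * registers as empty; the 0xffff0000 upper bits fill the
		 * reserved high halves of the 32-bit FSAVE image slots.
		 */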
		fp->cwd = 0xffff037fu;
		fp->swd = 0xffff0000u;
		fp->twd = 0xffffffffu;
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpstate_init);

int fpstate_alloc(struct fpu *fpu)
{
	if (fpu->state)
		return 0;

	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;

	/* The CPU requires the FPU state to be aligned to 16 byte boundaries: */
	WARN_ON((unsigned long)fpu->state & 15);

	return 0;
}
EXPORT_SYMBOL_GPL(fpstate_alloc);

/*
 * Allocate the backing store for the current task's FPU registers
 * and initialize the registers themselves as well.
 *
 * Can fail.
 */
int fpstate_alloc_init(struct task_struct *curr)
{
	int ret;

	if (WARN_ON_ONCE(curr != current))
		return -EINVAL;
	if (WARN_ON_ONCE(curr->flags & PF_USED_MATH))
		return -EINVAL;

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpstate_alloc(&curr->thread.fpu);
	if (ret)
		return ret;

	fpstate_init(&curr->thread.fpu);

	/* Safe to do for the current task: */
	curr->flags |= PF_USED_MATH;

	return 0;
}
EXPORT_SYMBOL_GPL(fpstate_alloc_init);

/*
 * The target task (a stopped/traced child, never the current task) is
 * about to have its FPU state accessed, e.g. by the ptrace regset
 * handlers below.  Make sure it has a valid FPU state area: allocate
 * and initialize one, and mark the task as having used math, if it has
 * not done so yet; otherwise just disable any stale lazy FPU restore
 * shortcut.
 */
static int fpu__unlazy_stopped(struct task_struct *child)
{
	int ret;

	if (WARN_ON_ONCE(child == current))
		return -EINVAL;

	if (child->flags & PF_USED_MATH) {
		task_disable_lazy_fpu_restore(child);
		return 0;
	}

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpstate_alloc(&child->thread.fpu);
	if (ret)
		return ret;

	fpstate_init(&child->thread.fpu);

	/* Safe to do for stopped child tasks: */
	child->flags |= PF_USED_MATH;

	return 0;
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task.
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (eg with local
 * interrupts disabled, as it is in the case of do_device_not_available).
 */
void math_state_restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (fpstate_alloc_init(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
	kernel_fpu_disable();
	__thread_fpu_begin(tsk);
	if (unlikely(restore_fpu_checking(tsk))) {
		fpu_reset_state(tsk);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu.counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(math_state_restore);

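/*
 * Example (illustrative sketch, not part of this file): math_state_restore()
 * is the slow path behind lazy FPU switching.  The device-not-available
 * (#NM) trap handler calls it when a task touches the FPU while CR0.TS is
 * set, roughly like this (simplified; the real handler lives in traps.c):
 *
 *	dotraplinkage void
 *	do_device_not_available(struct pt_regs *regs, long error_code)
 *	{
 *		// lazily restore (or first allocate) this task's FPU state
 *		math_state_restore();
 *	}
 */
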
void fpu__flush_thread(struct task_struct *tsk)
{
	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		drop_fpu(tsk);
		fpstate_free(&tsk->thread.fpu);
	} else {
		if (!tsk_used_math(tsk)) {
			/* kthread execs. TODO: cleanup this horror. */
			if (WARN_ON(fpstate_alloc_init(tsk)))
				force_sig(SIGKILL, tsk);
			user_fpu_begin();
		}
		restore_init_xstate();
	}
}

/*
 * The xstateregs_active() routine is the same as the fpregs_active() routine,
 * as the "regset->n" for the xstate regset will be updated based on the feature
 * capabilities supported by the xsave.
 */
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}

int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
}

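/*
 * Example (illustrative sketch, not part of this file): these ->active/
 * ->get/->set routines are referenced from the user_regset tables used by
 * ptrace and core dumps, along the lines of the (simplified, values
 * illustrative) entry below:
 *
 *	{
 *		.core_note_type	= NT_PRFPREG,
 *		.n		= sizeof(struct user_i387_struct) / sizeof(long),
 *		.size		= sizeof(long),
 *		.align		= sizeof(long),
 *		.active		= xfpregs_active,
 *		.get		= xfpregs_get,
 *		.set		= xfpregs_set,
 *	},
 */
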
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpu.state->fxsave, 0, -1);
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.fpu.state->fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

	return ret;
}

int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	xsave = &target->thread.fpu.state->xsave;

	/*
	 * Copy the 48 bytes defined by the software first into the xstate
	 * memory layout in the thread struct, so that we can copy the entire
	 * xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
	       xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	return ret;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	xsave = &target->thread.fpu.state->xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->xsave_hdr.xstate_bv &= pcntxt_mask;
	/*
	 * These bits must be zero.
	 */
	memset(&xsave->xsave_hdr.reserved, 0, 48);
	return ret;
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}

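/*
 * Worked example (added for clarity, not in the original source): with an
 * i387 tag word of 0xfffc (ST(0) tagged 00 = valid, all other registers
 * tagged 11 = empty), ~twd = 0x0003; folding each 2-bit pair down to its
 * low bit gives 0x0001 after the 0x5555 mask, and the remaining shifts
 * pack those eight per-register bits into the low byte, yielding the
 * FXSR-style tag byte 0x01 (only ST(0) marked non-empty).
 */
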
#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}

/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * should be actually ds/cs at fpu exception time, but
	 * that information is not available in 64bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}

void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}

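/*
 * Note (added for clarity, not in the original source): in the 32-bit
 * user_i387_ia32_struct layout the last FPU opcode shares a 32-bit slot
 * with the code segment selector, which is why convert_from_fxsr() packs
 * fop into the high 16 bits of env->fcs and convert_to_fxsr() extracts it
 * from there.
 */
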
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.fpu.state->fsave, 0,
					   -1);

	sanitize_i387_state(target);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.fpu.state->fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
	return ret;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
{
	struct task_struct *tsk = current;
	int fpvalid;

	fpvalid = !!used_math();
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      fpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */