git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
x86/fpu: Convert task_struct::thread.fpu accesses to use x86_task_fpu()
author: Ingo Molnar <mingo@kernel.org>
Wed, 9 Apr 2025 21:11:21 +0000 (23:11 +0200)
committer: Ingo Molnar <mingo@kernel.org>
Mon, 14 Apr 2025 06:18:29 +0000 (08:18 +0200)
This will make the removal of the task_struct::thread.fpu array
easier.

No change in functionality - code generated before and after this
commit is identical on x86-defconfig:

  kepler:~/tip> diff -up vmlinux.before.asm vmlinux.after.asm
  kepler:~/tip>

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Chang S. Bae <chang.seok.bae@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Link: https://lore.kernel.org/r/20250409211127.3544993-3-mingo@kernel.org
15 files changed:
arch/x86/include/asm/fpu/sched.h
arch/x86/kernel/fpu/context.h
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/init.c
arch/x86/kernel/fpu/regset.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/fpu/xstate.h
arch/x86/kernel/process.c
arch/x86/kernel/signal.c
arch/x86/kernel/traps.c
arch/x86/math-emu/fpu_aux.c
arch/x86/math-emu/fpu_entry.c
arch/x86/math-emu/fpu_system.h
arch/x86/mm/extable.c

index c485f1944c5f86a0ab0ecad38ce0e5bb65569bb8..1feaa68b75671a7dd52c0019f92d0c275fc182cf 100644 (file)
@@ -41,7 +41,7 @@ static inline void switch_fpu_prepare(struct task_struct *old, int cpu)
 {
        if (cpu_feature_enabled(X86_FEATURE_FPU) &&
            !(old->flags & (PF_KTHREAD | PF_USER_WORKER))) {
-               struct fpu *old_fpu = &old->thread.fpu;
+               struct fpu *old_fpu = x86_task_fpu(old);
 
                save_fpregs_to_fpstate(old_fpu);
                /*
index f6d856bd50bc5507eddfe2da4dd77be6a25359ca..10d0a720659cc8c1e3beb77f11334664570f254d 100644 (file)
@@ -53,7 +53,7 @@ static inline void fpregs_activate(struct fpu *fpu)
 /* Internal helper for switch_fpu_return() and signal frame setup */
 static inline void fpregs_restore_userregs(void)
 {
-       struct fpu *fpu = &current->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(current);
        int cpu = smp_processor_id();
 
        if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_USER_WORKER)))
@@ -67,7 +67,7 @@ static inline void fpregs_restore_userregs(void)
                 * If PKRU is enabled, then the PKRU value is already
                 * correct because it was either set in switch_to() or in
                 * flush_thread(). So it is excluded because it might be
-                * not up to date in current->thread.fpu.xsave state.
+                * not up to date in current->thread.fpu->xsave state.
                 *
                 * XFD state is handled in restore_fpregs_from_fpstate().
                 */
index 91d6341f281f80e5dd4653116ce1b3833e2eabfe..dc6d7f93c4465737390c63e744a44238f20a06d6 100644 (file)
@@ -211,7 +211,7 @@ static void fpu_init_guest_permissions(struct fpu_guest *gfpu)
                return;
 
        spin_lock_irq(&current->sighand->siglock);
-       fpuperm = &current->group_leader->thread.fpu.guest_perm;
+       fpuperm = &x86_task_fpu(current->group_leader)->guest_perm;
        perm = fpuperm->__state_perm;
 
        /* First fpstate allocation locks down permissions. */
@@ -323,7 +323,7 @@ EXPORT_SYMBOL_GPL(fpu_update_guest_xfd);
  */
 void fpu_sync_guest_vmexit_xfd_state(void)
 {
-       struct fpstate *fps = current->thread.fpu.fpstate;
+       struct fpstate *fps = x86_task_fpu(current)->fpstate;
 
        lockdep_assert_irqs_disabled();
        if (fpu_state_size_dynamic()) {
@@ -337,7 +337,7 @@ EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state);
 int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
 {
        struct fpstate *guest_fps = guest_fpu->fpstate;
-       struct fpu *fpu = &current->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(current);
        struct fpstate *cur_fps = fpu->fpstate;
 
        fpregs_lock();
@@ -438,7 +438,7 @@ void kernel_fpu_begin_mask(unsigned int kfpu_mask)
        if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) &&
            !test_thread_flag(TIF_NEED_FPU_LOAD)) {
                set_thread_flag(TIF_NEED_FPU_LOAD);
-               save_fpregs_to_fpstate(&current->thread.fpu);
+               save_fpregs_to_fpstate(x86_task_fpu(current));
        }
        __cpu_invalidate_fpregs_state();
 
@@ -467,7 +467,7 @@ EXPORT_SYMBOL_GPL(kernel_fpu_end);
  */
 void fpu_sync_fpstate(struct fpu *fpu)
 {
-       WARN_ON_FPU(fpu != &current->thread.fpu);
+       WARN_ON_FPU(fpu != x86_task_fpu(current));
 
        fpregs_lock();
        trace_x86_fpu_before_save(fpu);
@@ -552,7 +552,7 @@ void fpstate_reset(struct fpu *fpu)
 static inline void fpu_inherit_perms(struct fpu *dst_fpu)
 {
        if (fpu_state_size_dynamic()) {
-               struct fpu *src_fpu = &current->group_leader->thread.fpu;
+               struct fpu *src_fpu = x86_task_fpu(current->group_leader);
 
                spin_lock_irq(&current->sighand->siglock);
                /* Fork also inherits the permissions of the parent */
@@ -572,7 +572,7 @@ static int update_fpu_shstk(struct task_struct *dst, unsigned long ssp)
        if (!ssp)
                return 0;
 
-       xstate = get_xsave_addr(&dst->thread.fpu.fpstate->regs.xsave,
+       xstate = get_xsave_addr(&x86_task_fpu(dst)->fpstate->regs.xsave,
                                XFEATURE_CET_USER);
 
        /*
@@ -593,8 +593,8 @@ static int update_fpu_shstk(struct task_struct *dst, unsigned long ssp)
 int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal,
              unsigned long ssp)
 {
-       struct fpu *src_fpu = &current->thread.fpu;
-       struct fpu *dst_fpu = &dst->thread.fpu;
+       struct fpu *src_fpu = x86_task_fpu(current);
+       struct fpu *dst_fpu = x86_task_fpu(dst);
 
        /* The new task's FPU state cannot be valid in the hardware. */
        dst_fpu->last_cpu = -1;
@@ -686,7 +686,7 @@ void fpu__drop(struct fpu *fpu)
 {
        preempt_disable();
 
-       if (fpu == &current->thread.fpu) {
+       if (fpu == x86_task_fpu(current)) {
                /* Ignore delayed exceptions from user space */
                asm volatile("1: fwait\n"
                             "2:\n"
@@ -720,7 +720,7 @@ static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
  */
 static void fpu_reset_fpregs(void)
 {
-       struct fpu *fpu = &current->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(current);
 
        fpregs_lock();
        __fpu_invalidate_fpregs_state(fpu);
@@ -749,7 +749,7 @@ static void fpu_reset_fpregs(void)
  */
 void fpu__clear_user_states(struct fpu *fpu)
 {
-       WARN_ON_FPU(fpu != &current->thread.fpu);
+       WARN_ON_FPU(fpu != x86_task_fpu(current));
 
        fpregs_lock();
        if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
@@ -782,7 +782,7 @@ void fpu__clear_user_states(struct fpu *fpu)
 
 void fpu_flush_thread(void)
 {
-       fpstate_reset(&current->thread.fpu);
+       fpstate_reset(x86_task_fpu(current));
        fpu_reset_fpregs();
 }
 /*
@@ -823,7 +823,7 @@ void fpregs_lock_and_load(void)
  */
 void fpregs_assert_state_consistent(void)
 {
-       struct fpu *fpu = &current->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(current);
 
        if (test_thread_flag(TIF_NEED_FPU_LOAD))
                return;
@@ -835,7 +835,7 @@ EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
 
 void fpregs_mark_activate(void)
 {
-       struct fpu *fpu = &current->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(current);
 
        fpregs_activate(fpu);
        fpu->last_cpu = smp_processor_id();
index 998a08f17e3317abde0d01961ebfe5d571681e62..ad5cb2943d37a57a5780d9156d115894accead0b 100644 (file)
@@ -38,7 +38,7 @@ static void fpu__init_cpu_generic(void)
        /* Flush out any pending x87 state: */
 #ifdef CONFIG_MATH_EMULATION
        if (!boot_cpu_has(X86_FEATURE_FPU))
-               fpstate_init_soft(&current->thread.fpu.fpstate->regs.soft);
+               fpstate_init_soft(&x86_task_fpu(current)->fpstate->regs.soft);
        else
 #endif
                asm volatile ("fninit");
@@ -154,7 +154,7 @@ static void __init fpu__init_task_struct_size(void)
         * Subtract off the static size of the register state.
         * It potentially has a bunch of padding.
         */
-       task_size -= sizeof(current->thread.fpu.__fpstate.regs);
+       task_size -= sizeof(union fpregs_state);
 
        /*
         * Add back the dynamically-calculated register state
@@ -204,7 +204,7 @@ static void __init fpu__init_system_xstate_size_legacy(void)
        fpu_kernel_cfg.default_size = size;
        fpu_user_cfg.max_size = size;
        fpu_user_cfg.default_size = size;
-       fpstate_reset(&current->thread.fpu);
+       fpstate_reset(x86_task_fpu(current));
 }
 
 /*
@@ -213,7 +213,7 @@ static void __init fpu__init_system_xstate_size_legacy(void)
  */
 void __init fpu__init_system(void)
 {
-       fpstate_reset(&current->thread.fpu);
+       fpstate_reset(x86_task_fpu(current));
        fpu__init_system_early_generic();
 
        /*
index 887b0b8e21e364f1edef09e1b3f678fb27de5815..0986c2200adc565b987bc2b828f2f4ad1477b2f5 100644 (file)
@@ -45,7 +45,7 @@ int regset_xregset_fpregs_active(struct task_struct *target, const struct user_r
  */
 static void sync_fpstate(struct fpu *fpu)
 {
-       if (fpu == &current->thread.fpu)
+       if (fpu == x86_task_fpu(current))
                fpu_sync_fpstate(fpu);
 }
 
@@ -63,7 +63,7 @@ static void fpu_force_restore(struct fpu *fpu)
         * Only stopped child tasks can be used to modify the FPU
         * state in the fpstate buffer:
         */
-       WARN_ON_FPU(fpu == &current->thread.fpu);
+       WARN_ON_FPU(fpu == x86_task_fpu(current));
 
        __fpu_invalidate_fpregs_state(fpu);
 }
@@ -71,7 +71,7 @@ static void fpu_force_restore(struct fpu *fpu)
 int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
                struct membuf to)
 {
-       struct fpu *fpu = &target->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(target);
 
        if (!cpu_feature_enabled(X86_FEATURE_FXSR))
                return -ENODEV;
@@ -91,7 +91,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
 {
-       struct fpu *fpu = &target->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(target);
        struct fxregs_state newstate;
        int ret;
 
@@ -133,7 +133,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
        if (!cpu_feature_enabled(X86_FEATURE_XSAVE))
                return -ENODEV;
 
-       sync_fpstate(&target->thread.fpu);
+       sync_fpstate(x86_task_fpu(target));
 
        copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_XSAVE);
        return 0;
@@ -143,7 +143,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
                  unsigned int pos, unsigned int count,
                  const void *kbuf, const void __user *ubuf)
 {
-       struct fpu *fpu = &target->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(target);
        struct xregs_state *tmpbuf = NULL;
        int ret;
 
@@ -187,7 +187,7 @@ int ssp_active(struct task_struct *target, const struct user_regset *regset)
 int ssp_get(struct task_struct *target, const struct user_regset *regset,
            struct membuf to)
 {
-       struct fpu *fpu = &target->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(target);
        struct cet_user_state *cetregs;
 
        if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) ||
@@ -214,7 +214,7 @@ int ssp_set(struct task_struct *target, const struct user_regset *regset,
            unsigned int pos, unsigned int count,
            const void *kbuf, const void __user *ubuf)
 {
-       struct fpu *fpu = &target->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(target);
        struct xregs_state *xsave = &fpu->fpstate->regs.xsave;
        struct cet_user_state *cetregs;
        unsigned long user_ssp;
@@ -368,7 +368,7 @@ static void __convert_from_fxsr(struct user_i387_ia32_struct *env,
 void
 convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
 {
-       __convert_from_fxsr(env, tsk, &tsk->thread.fpu.fpstate->regs.fxsave);
+       __convert_from_fxsr(env, tsk, &x86_task_fpu(tsk)->fpstate->regs.fxsave);
 }
 
 void convert_to_fxsr(struct fxregs_state *fxsave,
@@ -401,7 +401,7 @@ void convert_to_fxsr(struct fxregs_state *fxsave,
 int fpregs_get(struct task_struct *target, const struct user_regset *regset,
               struct membuf to)
 {
-       struct fpu *fpu = &target->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(target);
        struct user_i387_ia32_struct env;
        struct fxregs_state fxsave, *fx;
 
@@ -433,7 +433,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
               unsigned int pos, unsigned int count,
               const void *kbuf, const void __user *ubuf)
 {
-       struct fpu *fpu = &target->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(target);
        struct user_i387_ia32_struct env;
        int ret;
 
index 6c69cb28b2983bc02faf7f647c010277f5fec4ba..b8b4fa9c2d04ef650aca8134e81559b91a6336aa 100644 (file)
@@ -43,13 +43,13 @@ static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf,
         * fpstate layout with out copying the extended state information
         * in the memory layout.
         */
-       if (__get_user(magic2, (__u32 __user *)(fpstate + current->thread.fpu.fpstate->user_size)))
+       if (__get_user(magic2, (__u32 __user *)(fpstate + x86_task_fpu(current)->fpstate->user_size)))
                return false;
 
        if (likely(magic2 == FP_XSTATE_MAGIC2))
                return true;
 setfx:
-       trace_x86_fpu_xstate_check_failed(&current->thread.fpu);
+       trace_x86_fpu_xstate_check_failed(x86_task_fpu(current));
 
        /* Set the parameters for fx only state */
        fx_sw->magic1 = 0;
@@ -64,13 +64,13 @@ setfx:
 static inline bool save_fsave_header(struct task_struct *tsk, void __user *buf)
 {
        if (use_fxsr()) {
-               struct xregs_state *xsave = &tsk->thread.fpu.fpstate->regs.xsave;
+               struct xregs_state *xsave = &x86_task_fpu(tsk)->fpstate->regs.xsave;
                struct user_i387_ia32_struct env;
                struct _fpstate_32 __user *fp = buf;
 
                fpregs_lock();
                if (!test_thread_flag(TIF_NEED_FPU_LOAD))
-                       fxsave(&tsk->thread.fpu.fpstate->regs.fxsave);
+                       fxsave(&x86_task_fpu(tsk)->fpstate->regs.fxsave);
                fpregs_unlock();
 
                convert_from_fxsr(&env, tsk);
@@ -184,7 +184,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pk
 bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size, u32 pkru)
 {
        struct task_struct *tsk = current;
-       struct fpstate *fpstate = tsk->thread.fpu.fpstate;
+       struct fpstate *fpstate = x86_task_fpu(tsk)->fpstate;
        bool ia32_fxstate = (buf != buf_fx);
        int ret;
 
@@ -272,7 +272,7 @@ static int __restore_fpregs_from_user(void __user *buf, u64 ufeatures,
  */
 static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only)
 {
-       struct fpu *fpu = &current->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(current);
        int ret;
 
        /* Restore enabled features only. */
@@ -332,7 +332,7 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
                              bool ia32_fxstate)
 {
        struct task_struct *tsk = current;
-       struct fpu *fpu = &tsk->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(tsk);
        struct user_i387_ia32_struct env;
        bool success, fx_only = false;
        union fpregs_state *fpregs;
@@ -452,7 +452,7 @@ static inline unsigned int xstate_sigframe_size(struct fpstate *fpstate)
  */
 bool fpu__restore_sig(void __user *buf, int ia32_frame)
 {
-       struct fpu *fpu = &current->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(current);
        void __user *buf_fx = buf;
        bool ia32_fxstate = false;
        bool success = false;
@@ -499,7 +499,7 @@ unsigned long
 fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
                     unsigned long *buf_fx, unsigned long *size)
 {
-       unsigned long frame_size = xstate_sigframe_size(current->thread.fpu.fpstate);
+       unsigned long frame_size = xstate_sigframe_size(x86_task_fpu(current)->fpstate);
 
        *buf_fx = sp = round_down(sp - frame_size, 64);
        if (ia32_frame && use_fxsr()) {
index 46c45e2f2a5aaac15edc6868d2824ced212f0d62..253da5aec9150f7e45e39982d8f7f98f3dd34de3 100644 (file)
@@ -763,7 +763,7 @@ static void __init fpu__init_disable_system_xstate(unsigned int legacy_size)
         */
        init_fpstate.xfd = 0;
 
-       fpstate_reset(&current->thread.fpu);
+       fpstate_reset(x86_task_fpu(current));
 }
 
 /*
@@ -871,7 +871,7 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
                goto out_disable;
 
        /* Reset the state for the current task */
-       fpstate_reset(&current->thread.fpu);
+       fpstate_reset(x86_task_fpu(current));
 
        /*
         * Update info used for ptrace frames; use standard-format size and no
@@ -945,7 +945,7 @@ void fpu__resume_cpu(void)
        }
 
        if (fpu_state_size_dynamic())
-               wrmsrl(MSR_IA32_XFD, current->thread.fpu.fpstate->xfd);
+               wrmsrl(MSR_IA32_XFD, x86_task_fpu(current)->fpstate->xfd);
 }
 
 /*
@@ -1227,8 +1227,8 @@ out:
 void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
                             enum xstate_copy_mode copy_mode)
 {
-       __copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate,
-                                 tsk->thread.fpu.fpstate->user_xfeatures,
+       __copy_xstate_to_uabi_buf(to, x86_task_fpu(tsk)->fpstate,
+                                 x86_task_fpu(tsk)->fpstate->user_xfeatures,
                                  tsk->thread.pkru, copy_mode);
 }
 
@@ -1368,7 +1368,7 @@ int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u
 int copy_sigframe_from_user_to_xstate(struct task_struct *tsk,
                                      const void __user *ubuf)
 {
-       return copy_uabi_to_xstate(tsk->thread.fpu.fpstate, NULL, ubuf, &tsk->thread.pkru);
+       return copy_uabi_to_xstate(x86_task_fpu(tsk)->fpstate, NULL, ubuf, &tsk->thread.pkru);
 }
 
 static bool validate_independent_components(u64 mask)
@@ -1462,7 +1462,7 @@ static bool xstate_op_valid(struct fpstate *fpstate, u64 mask, bool rstor)
          * The XFD MSR does not match fpstate->xfd. That's invalid when
          * the passed in fpstate is current's fpstate.
          */
-       if (fpstate->xfd == current->thread.fpu.fpstate->xfd)
+       if (fpstate->xfd == x86_task_fpu(current)->fpstate->xfd)
                return false;
 
        /*
@@ -1539,7 +1539,7 @@ void fpstate_free(struct fpu *fpu)
 static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
                           unsigned int usize, struct fpu_guest *guest_fpu)
 {
-       struct fpu *fpu = &current->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(current);
        struct fpstate *curfps, *newfps = NULL;
        unsigned int fpsize;
        bool in_use;
@@ -1632,7 +1632,7 @@ static int __xstate_request_perm(u64 permitted, u64 requested, bool guest)
         * AVX512.
         */
        bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED);
-       struct fpu *fpu = &current->group_leader->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(current->group_leader);
        struct fpu_state_perm *perm;
        unsigned int ksize, usize;
        u64 mask;
@@ -1735,7 +1735,7 @@ int __xfd_enable_feature(u64 xfd_err, struct fpu_guest *guest_fpu)
                return -EPERM;
        }
 
-       fpu = &current->group_leader->thread.fpu;
+       fpu = x86_task_fpu(current->group_leader);
        perm = guest_fpu ? &fpu->guest_perm : &fpu->perm;
        ksize = perm->__state_size;
        usize = perm->__user_state_size;
@@ -1840,7 +1840,7 @@ long fpu_xstate_prctl(int option, unsigned long arg2)
  */
 static void avx512_status(struct seq_file *m, struct task_struct *task)
 {
-       unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp);
+       unsigned long timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);
        long delta;
 
        if (!timestamp) {
index 0fd34f53f0258a3cec0d5aeba38dc617438e91a5..9a3a8ccf13bfc3caa8b6342ac908d780f7fef841 100644 (file)
@@ -22,7 +22,7 @@ static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)
 
 static inline u64 xstate_get_group_perm(bool guest)
 {
-       struct fpu *fpu = &current->group_leader->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(current->group_leader);
        struct fpu_state_perm *perm;
 
        /* Pairs with WRITE_ONCE() in xstate_request_perm() */
@@ -288,7 +288,7 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkr
         * internally, e.g. PKRU. That's user space ABI and also required
         * to allow the signal handler to modify PKRU.
         */
-       struct fpstate *fpstate = current->thread.fpu.fpstate;
+       struct fpstate *fpstate = x86_task_fpu(current)->fpstate;
        u64 mask = fpstate->user_xfeatures;
        u32 lmask;
        u32 hmask;
@@ -322,7 +322,7 @@ static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64
        u32 hmask = mask >> 32;
        int err;
 
-       xfd_validate_state(current->thread.fpu.fpstate, mask, true);
+       xfd_validate_state(x86_task_fpu(current)->fpstate, mask, true);
 
        stac();
        XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
index 962c3ce39323e7bdb75ee1920e4bea936c83d028..47694e391506630775e0d966a6214ad2ffad19e1 100644 (file)
@@ -103,7 +103,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
        dst->thread.vm86 = NULL;
 #endif
        /* Drop the copied pointer to current's fpstate */
-       dst->thread.fpu.fpstate = NULL;
+       x86_task_fpu(dst)->fpstate = NULL;
 
        return 0;
 }
@@ -112,7 +112,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 void arch_release_task_struct(struct task_struct *tsk)
 {
        if (fpu_state_size_dynamic())
-               fpstate_free(&tsk->thread.fpu);
+               fpstate_free(x86_task_fpu(tsk));
 }
 #endif
 
@@ -122,7 +122,7 @@ void arch_release_task_struct(struct task_struct *tsk)
 void exit_thread(struct task_struct *tsk)
 {
        struct thread_struct *t = &tsk->thread;
-       struct fpu *fpu = &t->fpu;
+       struct fpu *fpu = x86_task_fpu(tsk);
 
        if (test_thread_flag(TIF_IO_BITMAP))
                io_bitmap_exit(tsk);
index 5f441039b5725fd1a23416a686f68a667e0a8774..2404233336ab613293c0db0eac76cb237ab132b3 100644 (file)
@@ -255,7 +255,7 @@ static void
 handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
        bool stepping, failed;
-       struct fpu *fpu = &current->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(current);
 
        if (v8086_mode(regs))
                save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);
@@ -423,14 +423,14 @@ bool sigaltstack_size_valid(size_t ss_size)
        if (!fpu_state_size_dynamic() && !strict_sigaltstack_size)
                return true;
 
-       fsize += current->group_leader->thread.fpu.perm.__user_state_size;
+       fsize += x86_task_fpu(current->group_leader)->perm.__user_state_size;
        if (likely(ss_size > fsize))
                return true;
 
        if (strict_sigaltstack_size)
                return ss_size > fsize;
 
-       mask = current->group_leader->thread.fpu.perm.__state_perm;
+       mask = x86_task_fpu(current->group_leader)->perm.__state_perm;
        if (mask & XFEATURE_MASK_USER_DYNAMIC)
                return ss_size > fsize;
 
index 9f88b8a78e50912790077cff0f940c7f601aff33..f48325dfaa016cad839912209e741d5bf4772105 100644 (file)
@@ -1295,7 +1295,7 @@ DEFINE_IDTENTRY_RAW(exc_debug)
 static void math_error(struct pt_regs *regs, int trapnr)
 {
        struct task_struct *task = current;
-       struct fpu *fpu = &task->thread.fpu;
+       struct fpu *fpu = x86_task_fpu(task);
        int si_code;
        char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
                                                "simd exception";
index d62662bdd4604398656559cb23955a7d6b217a42..5f253ae406b67ad30ff6fc06cc31e000446fd3bb 100644 (file)
@@ -53,7 +53,7 @@ void fpstate_init_soft(struct swregs_state *soft)
 
 void finit(void)
 {
-       fpstate_init_soft(&current->thread.fpu.fpstate->regs.soft);
+       fpstate_init_soft(&x86_task_fpu(current)->fpstate->regs.soft);
 }
 
 /*
index 91c52ead1226576f973459ffe9ec6a0d484ce5f1..5034df6177404a4c4cd2a4eede8bb7ff2a6f88fb 100644 (file)
@@ -641,7 +641,7 @@ int fpregs_soft_set(struct task_struct *target,
                    unsigned int pos, unsigned int count,
                    const void *kbuf, const void __user *ubuf)
 {
-       struct swregs_state *s387 = &target->thread.fpu.fpstate->regs.soft;
+       struct swregs_state *s387 = &x86_task_fpu(target)->fpstate->regs.soft;
        void *space = s387->st_space;
        int ret;
        int offset, other, i, tags, regnr, tag, newtop;
@@ -692,7 +692,7 @@ int fpregs_soft_get(struct task_struct *target,
                    const struct user_regset *regset,
                    struct membuf to)
 {
-       struct swregs_state *s387 = &target->thread.fpu.fpstate->regs.soft;
+       struct swregs_state *s387 = &x86_task_fpu(target)->fpstate->regs.soft;
        const void *space = s387->st_space;
        int offset = (S387->ftop & 7) * 10, other = 80 - offset;
 
index eec3e4805c75e595653dbba3b98e6fbc2a51f49f..5e238e930fe30590807687f9f12cd3844311eb65 100644 (file)
@@ -73,7 +73,7 @@ static inline bool seg_writable(struct desc_struct *d)
        return (d->type & SEG_TYPE_EXECUTE_MASK) == SEG_TYPE_WRITABLE;
 }
 
-#define I387                   (&current->thread.fpu.fpstate->regs)
+#define I387                   (&x86_task_fpu(current)->fpstate->regs)
 #define FPU_info               (I387->soft.info)
 
 #define FPU_CS                 (*(unsigned short *) &(FPU_info->regs->cs))
index 51986e8a9d353528cbee8186019a3ed8bc7823f2..bf8dab18be9747e352bda6898cb3e19c0f3eb384 100644 (file)
@@ -111,7 +111,7 @@ static bool ex_handler_sgx(const struct exception_table_entry *fixup,
 
 /*
  * Handler for when we fail to restore a task's FPU state.  We should never get
- * here because the FPU state of a task using the FPU (task->thread.fpu.state)
+ * here because the FPU state of a task using the FPU (struct fpu::fpstate)
  * should always be valid.  However, past bugs have allowed userspace to set
  * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
  * These caused XRSTOR to fail when switching to the task, leaking the FPU