diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index b56d504af6545b95726f623886de5fb8ba2b006b..73e684160f354ead1e894134dcbcc9ae1d96f21e 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -10,6 +10,7 @@
 
 #ifndef _ASM_X86_FPU_API_H
 #define _ASM_X86_FPU_API_H
+#include <linux/preempt.h>
 
 /*
  * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
@@ -22,6 +23,16 @@ extern void kernel_fpu_begin(void);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
 
+static inline void fpregs_lock(void)
+{
+	preempt_disable();
+}
+
+static inline void fpregs_unlock(void)
+{
+	preempt_enable();
+}
+
 /*
  * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
  *
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 54f70cae2f15f0bfae1a9f3e5a766861adf0d60d..3e0c2c496f2d605d6a6e32eb8bf9beafa05a2138 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -484,6 +484,18 @@ static inline void fpregs_activate(struct fpu *fpu)
 	trace_x86_fpu_regs_activated(fpu);
 }
 
+/*
+ * Internal helper, do not use directly. Use switch_fpu_return() instead.
+ */
+static inline void __fpregs_load_activate(struct fpu *fpu, int cpu)
+{
+	if (!fpregs_state_valid(fpu, cpu)) {
+		if (current->mm)
+			copy_kernel_to_fpregs(&fpu->state);
+		fpregs_activate(fpu);
+	}
+}
+
 /*
  * FPU state switching for scheduling.
  *
@@ -522,14 +534,8 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
  */
 static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
 {
-	if (static_cpu_has(X86_FEATURE_FPU)) {
-		if (!fpregs_state_valid(new_fpu, cpu)) {
-			if (current->mm)
-				copy_kernel_to_fpregs(&new_fpu->state);
-		}
-
-		fpregs_activate(new_fpu);
-	}
+	if (static_cpu_has(X86_FEATURE_FPU))
+		__fpregs_load_activate(new_fpu, cpu);
 }
 
 /*
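
Not part of the patch, but for context: a minimal sketch of how a caller might use the new fpregs_lock()/fpregs_unlock() helpers to edit a task's in-memory FPU state without a context switch saving the live registers over it mid-update. The function name example_update_fpstate() is hypothetical; since the helpers map to preempt_disable()/preempt_enable() in this version, the critical section must not sleep.

#include <asm/fpu/api.h>
#include <asm/fpu/types.h>

/* Hypothetical caller, for illustration only (not from this patch). */
static void example_update_fpstate(struct fpu *fpu)
{
	fpregs_lock();		/* no preemption while fpu->state is edited */
	/* ... read or modify fpu->state here ... */
	fpregs_unlock();
}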