diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 04042eacc8525bbc0be3251a6b2a1b99e29201a5..54f70cae2f15f0bfae1a9f3e5a766861adf0d60d 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -532,23 +532,6 @@ static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
 	}
 }
 
-/*
- * Needs to be preemption-safe.
- *
- * NOTE! user_fpu_begin() must be used only immediately before restoring
- * the save state. It does not do any saving/restoring on its own. In
- * lazy FPU mode, it is just an optimization to avoid a #NM exception,
- * the task can lose the FPU right after preempt_enable().
- */
-static inline void user_fpu_begin(void)
-{
-	struct fpu *fpu = &current->thread.fpu;
-
-	preempt_disable();
-	fpregs_activate(fpu);
-	preempt_enable();
-}
-
 /*
  * MXCSR and XCR definitions:
  */
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 97e27de2b7c05ec50b3e7bac10c6e4b4a41cdd2f..739ca3ae2bdcdbe2f05bf5aa0940e0bf1ba84bda 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -335,10 +335,8 @@ void fpu__clear(struct fpu *fpu)
 	 * Make sure fpstate is cleared and initialized.
 	 */
 	fpu__initialize(fpu);
-	if (static_cpu_has(X86_FEATURE_FPU)) {
-		user_fpu_begin();
+	if (static_cpu_has(X86_FEATURE_FPU))
 		copy_init_fpstate_to_fpregs();
-	}
 }
 
 /*
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 34989d2a8893c31837a534691d168c77d5ec6138..155f4552413e7451f528e4c2684ad38514f9655b 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -322,7 +322,6 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 	 * For 64-bit frames and 32-bit fsave frames, restore the user
 	 * state to the registers directly (with exceptions handled).
 	 */
-	user_fpu_begin();
 	if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
 		fpu__clear(fpu);
 		return -1;
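
As I read this step of the series, the only thing user_fpu_begin() still did was assert FPU "ownership" via fpregs_activate() right before a direct register restore; with lazy FPU switching gone, that per-CPU owner bookkeeping is refreshed on every context switch anyway, so re-asserting it from user context changes nothing. The toy userspace C program below is a sketch of that argument only, not kernel code: cpu_owner, task_switch(), restore_sigframe_old() and restore_sigframe_new() are invented stand-ins for fpu_fpregs_owner_ctx and the real switch/sigreturn paths.

/*
 * Toy userspace model of the reasoning above -- NOT kernel code.
 * cpu_owner stands in for the per-CPU owner pointer, task_switch()
 * for the context-switch path that always refreshes it.  The signal
 * restore path never needs to touch the owner itself: by the time it
 * runs, task_switch() has already made the current task the owner,
 * which is why the explicit pre-activation could be dropped.
 */
#include <assert.h>
#include <stdio.h>

struct task {
	const char *name;
	int regs;		/* stand-in for the task's FPU register image */
};

static struct task *cpu_owner;	/* stand-in for fpu_fpregs_owner_ctx */

/* Context switch: unconditionally records the new owner (no lazy mode). */
static void task_switch(struct task *next)
{
	cpu_owner = next;
}

/* Old-style restore path: redundantly re-asserted ownership first. */
static void restore_sigframe_old(struct task *t, int frame)
{
	cpu_owner = t;		/* the work user_fpu_begin() used to do */
	t->regs = frame;	/* direct register restore */
}

/* New-style restore path: just restores; ownership is already correct. */
static void restore_sigframe_new(struct task *t, int frame)
{
	assert(cpu_owner == t);	/* holds because task_switch() is the only writer */
	t->regs = frame;
}

int main(void)
{
	struct task a = { "A", 0 };

	task_switch(&a);		/* signal code only runs on the current task */
	restore_sigframe_old(&a, 1);
	restore_sigframe_new(&a, 2);
	printf("owner=%s regs=%d\n", cpu_owner->name, a.regs);
	return 0;
}

In the model, the assert in the new path can never fire because task_switch() is the only place the owner changes, mirroring why deleting the user_fpu_begin() calls in the hunks above is safe.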