/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_API_H
#define _ASM_X86_FPU_API_H
#include <linux/bottom_half.h>

#include <asm/fpu/types.h>

/*
 * Use kernel_fpu_begin/end() if you intend to use the FPU in kernel context.
 * It disables preemption, so be careful if you intend to use it for long
 * periods of time.
 * If you intend to use the FPU in irq/softirq context, first check with
 * irq_fpu_usable() whether that is possible.
 */

/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
#define KFPU_387    _BITUL(0)   /* 387 state will be initialized */
#define KFPU_MXCSR  _BITUL(1)   /* MXCSR will be initialized */

extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
extern void fpregs_mark_activate(void);

/* Code that is unaware of kernel_fpu_begin_mask() can use this */
static inline void kernel_fpu_begin(void)
{
#ifdef CONFIG_X86_64
    /*
     * Any 64-bit code that uses 387 instructions must explicitly request
     * KFPU_387.
     */
    kernel_fpu_begin_mask(KFPU_MXCSR);
#else
    /*
     * 32-bit kernel code may use 387 operations as well as SSE2, etc.,
     * as long as it checks that the CPU has the required capability.
     */
    kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
#endif
}
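
/*
 * Illustrative sketch (not part of the upstream header): the usual calling
 * pattern for the API above.  example_xor_block() and its body are
 * hypothetical; the point is to check irq_fpu_usable() when the caller may
 * run in irq/softirq context, and to keep the kernel_fpu_begin()/end()
 * section short, since preemption is disabled in between.
 */
#if 0   /* example only */
static void example_xor_block(u8 *dst, const u8 *src, size_t len)
{
    size_t i;

    if (irq_fpu_usable()) {
        kernel_fpu_begin();
        /* SSE/AVX-accelerated "dst ^= src" would go here */
        kernel_fpu_end();
    } else {
        /* Scalar fallback; no FPU required */
        for (i = 0; i < len; i++)
            dst[i] ^= src[i];
    }
}
#endif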

/*
 * Use fpregs_lock() while editing CPU's FPU registers or fpu->fpstate.
 * A context switch will (and softirq might) save CPU's FPU registers to
 * fpu->fpstate.regs and set TIF_NEED_FPU_LOAD, leaving CPU's FPU registers in
 * a random state.
 *
 * local_bh_disable() protects against both preemption and soft interrupts
 * on !RT kernels.
 *
 * On RT kernels local_bh_disable() is not sufficient because it only
 * serializes soft interrupt related sections via a local lock, but stays
 * preemptible. Disabling preemption is the right choice here: bottom half
 * processing always runs in thread context on RT kernels, so disabling
 * preemption implicitly prevents bottom half processing as well.
 *
 * Disabling preemption also serializes against kernel_fpu_begin().
 */
static inline void fpregs_lock(void)
{
    if (!IS_ENABLED(CONFIG_PREEMPT_RT))
        local_bh_disable();
    else
        preempt_disable();
}

static inline void fpregs_unlock(void)
{
    if (!IS_ENABLED(CONFIG_PREEMPT_RT))
        local_bh_enable();
    else
        preempt_enable();
}
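
/*
 * Illustrative sketch (not part of the upstream header): a critical section
 * protected by fpregs_lock().  example_edit_fpstate() is a hypothetical
 * helper; the pattern simply brackets any inspection or modification of
 * fpu->fpstate (or of the live FPU registers) so that neither a context
 * switch nor softirq processing can save or clobber the state halfway
 * through.
 */
#if 0   /* example only */
static void example_edit_fpstate(struct fpu *fpu)
{
    fpregs_lock();
    /*
     * Safe region: fpu->fpstate and, if still loaded, the CPU's FPU
     * registers cannot change underneath us here.
     */
    fpregs_unlock();
}
#endif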

#ifdef CONFIG_X86_DEBUG_FPU
extern void fpregs_assert_state_consistent(void);
#else
static inline void fpregs_assert_state_consistent(void) { }
#endif

/*
 * Load the task FPU state before returning to userspace.
 */
extern void switch_fpu_return(void);

/*
 * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
 *
 * If 'feature_name' is set, a human-readable description of the feature is
 * stored there as well - this can be used to print error (or success)
 * messages.
 */
extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
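
/*
 * Illustrative sketch (not part of the upstream header): probing for
 * SSE/AVX state support before registering an accelerated implementation.
 * XFEATURE_MASK_SSE and XFEATURE_MASK_YMM come from the FPU xstate
 * definitions; example_init() and the message text are hypothetical.
 */
#if 0   /* example only */
static int __init example_init(void)
{
    const char *feature_name;

    if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
                           &feature_name)) {
        pr_info("CPU feature '%s' is not supported\n", feature_name);
        return -ENODEV;
    }
    return 0;
}
#endif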

/* Trap handling */
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
extern void fpu_sync_fpstate(struct fpu *fpu);
extern void fpu_reset_from_exception_fixup(void);

/* Boot, hotplug and resume */
extern void fpu__init_cpu(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);

#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

/* State tracking */
DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/* Process cleanup */
#ifdef CONFIG_X86_64
extern void fpstate_free(struct fpu *fpu);
#else
static inline void fpstate_free(struct fpu *fpu) { }
#endif

/* fpstate-related functions which are exported to KVM */
extern void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature);

extern u64 xstate_get_guest_group_perm(void);

/* KVM specific functions */
extern bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu);
extern void fpu_free_guest_fpstate(struct fpu_guest *gfpu);
extern int fpu_swap_kvm_fpstate(struct fpu_guest *gfpu, bool enter_guest);
extern int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures);

#ifdef CONFIG_X86_64
extern void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd);
extern void fpu_sync_guest_vmexit_xfd_state(void);
#else
static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) { }
static inline void fpu_sync_guest_vmexit_xfd_state(void) { }
#endif

extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru);
extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru);

static inline void fpstate_set_confidential(struct fpu_guest *gfpu)
{
    gfpu->fpstate->is_confidential = true;
}

static inline bool fpstate_is_confidential(struct fpu_guest *gfpu)
{
    return gfpu->fpstate->is_confidential;
}
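
/*
 * Illustrative sketch (not part of the upstream header): the rough lifecycle
 * a hypervisor follows with the guest-FPU helpers above.  struct example_vcpu
 * and the example_vcpu_*() functions are hypothetical; only the
 * fpu_*_guest_fpstate() and fpu_swap_kvm_fpstate() calls come from this
 * header.
 */
#if 0   /* example only */
struct example_vcpu {
    struct fpu_guest guest_fpu;
};

static int example_vcpu_create(struct example_vcpu *vcpu)
{
    if (!fpu_alloc_guest_fpstate(&vcpu->guest_fpu))
        return -ENOMEM;
    return 0;
}

static void example_vcpu_run(struct example_vcpu *vcpu)
{
    /* Load guest FPU state, stashing the current user/host state */
    fpu_swap_kvm_fpstate(&vcpu->guest_fpu, true);
    /* ... enter and run the guest ... */
    /* Restore the previously stashed user/host FPU state */
    fpu_swap_kvm_fpstate(&vcpu->guest_fpu, false);
}

static void example_vcpu_destroy(struct example_vcpu *vcpu)
{
    fpu_free_guest_fpstate(&vcpu->guest_fpu);
}
#endif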

/* prctl */
extern long fpu_xstate_prctl(int option, unsigned long arg2);

extern void fpu_idle_fpregs(void);

#endif /* _ASM_X86_FPU_API_H */