#ifndef __X86_KERNEL_FPU_XSTATE_H
#define __X86_KERNEL_FPU_XSTATE_H

#include <asm/cpufeature.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u64, xfd_state);
#endif

static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	if (cpu_feature_enabled(X86_FEATURE_XCOMPACTED))
		xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT;
}

static inline u64 xstate_get_group_perm(bool guest)
{
	struct fpu *fpu = &current->group_leader->thread.fpu;
	struct fpu_state_perm *perm;

	/* Pairs with WRITE_ONCE() in xstate_request_perm() */
	perm = guest ? &fpu->guest_perm : &fpu->perm;
	return READ_ONCE(perm->__state_perm);
}

static inline u64 xstate_get_host_group_perm(void)
{
	return xstate_get_group_perm(false);
}
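
/*
 * Usage sketch (illustrative only, not a real caller in this header):
 * a requester checking whether the thread group already has permission
 * for a dynamically enabled feature could do:
 *
 *	if (!(xstate_get_host_group_perm() & BIT_ULL(XFEATURE_XTILE_DATA)))
 *		return -EPERM;
 *
 * XFEATURE_XTILE_DATA is used purely as an example of a dynamic feature.
 */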

enum xstate_copy_mode {
	XSTATE_COPY_FP,
	XSTATE_COPY_FX,
	XSTATE_COPY_XSAVE,
};

struct membuf;
extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
				      u32 pkru_val, enum xstate_copy_mode copy_mode);
extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
				    enum xstate_copy_mode mode);
extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf);
extern int copy_sigframe_from_user_to_xstate(struct fpstate *fpstate, const void __user *ubuf);

extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system_xstate(unsigned int legacy_size);

extern void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);

static inline u64 xfeatures_mask_supervisor(void)
{
	return fpu_kernel_cfg.max_features & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}

static inline u64 xfeatures_mask_independent(void)
{
	/* LBR state is only valid when the CPU supports arch LBR */
	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
		return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR;

	return XFEATURE_MASK_INDEPENDENT;
}

/* XSAVE/XRSTOR wrapper functions */

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVEC		".byte " REX_PREFIX "0x0f,0xc7,0x27"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"

/*
 * After this @err contains 0 on success or the trap number when the
 * operation raises an exception.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE)	\
		     : [err] "=a" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
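
/*
 * Example (a sketch mirroring os_xrstor_safe() below): restore from a
 * kernel buffer and get a trap number back instead of an unhandled
 * fault:
 *
 *	u32 lmask = mask, hmask = mask >> 32;
 *	int err;
 *
 *	XSTATE_OP(XRSTORS, &fpstate->regs.xsave, lmask, hmask, err);
 *
 * On failure @err holds the trap number (e.g. X86_TRAP_GP). The "a"/"d"
 * constraints load EAX:EDX with the requested feature mask, which is
 * the instruction-defined mask register pair for the XSAVE/XRSTOR
 * instruction family.
 */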

/*
 * If XSAVES is enabled, it replaces XSAVEC because it supports
 * supervisor states in addition to the compacted storage format.
 *
 * Otherwise if XSAVEC is enabled, it replaces XSAVEOPT because it
 * supports the compacted storage format in addition to the modified
 * optimization.
 *
 * Otherwise, if XSAVEOPT is enabled, it replaces XSAVE because XSAVEOPT
 * supports the modified optimization which is not supported by XSAVE.
 *
 * XSAVE is the fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of
 * the original instruction which gets replaced. It is needed here as
 * the address of the instruction where the exception might occur.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_3(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVEC, X86_FEATURE_XSAVEC,		\
				   XSAVES, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE_REG(661b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports
 * the compacted XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE(661b, 3b, EX_TYPE_FPU_RESTORE)	\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

#if defined(CONFIG_X86_64) && defined(CONFIG_X86_DEBUG_FPU)
extern void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor);
#else
static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor) { }
#endif

#ifdef CONFIG_X86_64
static inline void xfd_update_state(struct fpstate *fpstate)
{
	if (fpu_state_size_dynamic()) {
		u64 xfd = fpstate->xfd;

		/* Avoid the WRMSR when the cached per-CPU value matches */
		if (__this_cpu_read(xfd_state) != xfd) {
			wrmsrl(MSR_IA32_XFD, xfd);
			__this_cpu_write(xfd_state, xfd);
		}
	}
}

extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu);
#else
static inline void xfd_update_state(struct fpstate *fpstate) { }

static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu)
{
	return -EPERM;
}
#endif
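
/*
 * Worked example for the per-CPU XFD cache above: when two tasks with
 * the same fpstate->xfd value (e.g. neither has AMX permission) switch
 * on one CPU, xfd_update_state() leaves MSR_IA32_XFD untouched; only a
 * task with a differing value pays for the WRMSR.
 */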

/*
 * Save processor xstate to xsave area.
 *
 * Uses either XSAVE or XSAVEOPT or XSAVEC or XSAVES depending on the
 * CPU features and command line options. The choice is permanent until
 * the next reboot.
 */
static inline void os_xsave(struct fpstate *fpstate)
{
	u64 mask = fpstate->xfeatures;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);
	xfd_validate_state(fpstate, mask, false);

	XSTATE_XSAVE(&fpstate->regs.xsave, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 *
 * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
 */
static inline void os_xrstor(struct fpstate *fpstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	xfd_validate_state(fpstate, mask, true);
	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/* Restore of supervisor state. Does not require XFD */
static inline void os_xrstor_supervisor(struct fpstate *fpstate)
{
	u64 mask = xfeatures_mask_supervisor();
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/*
 * XSAVE itself always writes all requested xfeatures. Removing features
 * from the request bitmap reduces the features which are written.
 * Generate a mask of features which must be written to a sigframe. The
 * unset features can be optimized away and not written.
 *
 * This optimization is user-visible. Only use for states where
 * uninitialized sigframe contents are tolerable, like dynamic features.
 *
 * Users of buffers produced with this optimization must check XSTATE_BV
 * to determine which features have been optimized out.
 */
static inline u64 xfeatures_need_sigframe_write(void)
{
	u64 xfeatures_to_write;

	/* Features which are currently in use (XINUSE) must be written */
	xfeatures_to_write = xfeatures_in_use();

	/* Features which are always written to a sigframe */
	xfeatures_to_write |= XFEATURE_MASK_USER_SUPPORTED &
			      ~XFEATURE_MASK_SIGFRAME_INITOPT;

	return xfeatures_to_write;
}
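
/*
 * Worked example (assuming XFEATURE_MASK_SIGFRAME_INITOPT covers the
 * AMX TILE states): for a task which never touched AMX, the TILE bits
 * are neither in xfeatures_in_use() nor in the always-written set, so
 * xsave_to_user_sigframe() below skips writing several kilobytes of
 * tile data to the signal frame.
 */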

/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted format xsave area for backward
 * compatibility with old applications which don't understand the
 * compacted format of the xsave area.
 *
 * The caller has to zero buf::header before calling this because XSAVE*
 * does not touch the reserved fields in the header.
 */
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
{
	/*
	 * Include the features which are not xsaved/rstored by the kernel
	 * internally, e.g. PKRU. That's user space ABI and also required
	 * to allow the signal handler to modify PKRU.
	 */
	struct fpstate *fpstate = current->thread.fpu.fpstate;
	u64 mask = fpstate->user_xfeatures;
	u32 lmask;
	u32 hmask;
	int err;

	/* Optimize away writing states which are not in use */
	if (fpu_state_size_dynamic())
		mask &= xfeatures_need_sigframe_write();

	lmask = mask;
	hmask = mask >> 32;
	xfd_validate_state(fpstate, mask, false);

	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	return err;
}
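
/*
 * Usage note (sketch): XSAVE to the user sigframe can fault, in which
 * case the extable fixup makes this return non-zero; callers are then
 * expected to fault in the sigframe pages and retry, as the signal
 * delivery path in signal.c does.
 */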

/*
 * Restore xstate from user space xsave area.
 */
static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	xfd_validate_state(current->thread.fpu.fpstate, mask, true);

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from kernel space xsave area, return an error code
 * instead of raising an exception.
 */
static inline int os_xrstor_safe(struct fpstate *fpstate, u64 mask)
{
	struct xregs_state *xstate = &fpstate->regs.xsave;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	/* Ensure that XFD is up to date */
	xfd_update_state(fpstate);

	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}
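
/*
 * Usage sketch: callers which must not take an exception on a possibly
 * bogus image, e.g. when restoring guest state, check the return value:
 *
 *	if (os_xrstor_safe(fpstate, XFEATURE_MASK_FPSTATE))
 *		...fall back to reinitializing the register state...
 *
 * XFEATURE_MASK_FPSTATE is used here only as an illustrative mask.
 */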

#endif /* __X86_KERNEL_FPU_XSTATE_H */