// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/api.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/sched.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>
#include <asm/irq_regs.h>

#include <uapi/asm/kvm.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>
#include <linux/vmalloc.h>

#include "context.h"
#include "internal.h"
#include "legacy.h"
#include "xstate.h"

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>

#ifdef CONFIG_X86_64
DEFINE_STATIC_KEY_FALSE(__fpu_state_size_dynamic);
DEFINE_PER_CPU(u64, xfd_state);
#endif

/* The FPU state configuration data for kernel and user space */
struct fpu_state_config fpu_kernel_cfg __ro_after_init;
struct fpu_state_config fpu_user_cfg __ro_after_init;

/*
 * Represents the initial FPU state. It's mostly (but not completely)
 * zeroes, depending on the FPU hardware format:
 */
struct fpstate init_fpstate __ro_after_init;

/* Track in-kernel FPU usage */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 */
bool irq_fpu_usable(void)
{
	if (WARN_ON_ONCE(in_nmi()))
		return false;

	/* In-kernel FPU usage already active? */
	if (this_cpu_read(in_kernel_fpu))
		return false;

	/*
	 * When not in NMI or hard interrupt context, FPU can be used in:
	 *
	 * - Task context except from within fpregs_lock()'ed critical
	 *   regions.
	 *
	 * - Soft interrupt processing context which cannot happen
	 *   within fpregs_lock()'ed critical regions.
	 */
	if (!in_hardirq())
		return true;

	/*
	 * In hard interrupt context it's safe when soft interrupts
	 * are enabled, which means the interrupt did not hit in
	 * a fpregs_lock()'ed critical region.
	 */
	return !softirq_count();
}
EXPORT_SYMBOL(irq_fpu_usable);

/*
 * Track AVX512 state use because it is known to slow the max clock
 * speed of the core.
 */
static void update_avx_timestamp(struct fpu *fpu)
{

#define AVX512_TRACKING_MASK	(XFEATURE_MASK_ZMM_Hi256 | XFEATURE_MASK_Hi16_ZMM)

	if (fpu->fpstate->regs.xsave.header.xfeatures & AVX512_TRACKING_MASK)
		fpu->avx512_timestamp = jiffies;
}

/*
 * Save the FPU register state in fpu->fpstate->regs. The register state is
 * preserved.
 *
 * Must be called with fpregs_lock() held.
 *
 * The legacy FNSAVE instruction clears all FPU state unconditionally, so
 * register state has to be reloaded after FNSAVE.
 */
void save_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		os_xsave(fpu->fpstate);
		update_avx_timestamp(fpu);
		return;
	}

	if (likely(use_fxsr())) {
		fxsave(&fpu->fpstate->regs.fxsave);
		return;
	}

	/*
	 * Legacy FPU register saving. FNSAVE always clears the FPU
	 * registers, so they have to be reloaded from the memory state.
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->fpstate->regs.fsave));
	frstor(&fpu->fpstate->regs.fsave);
}

void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask)
{
	/*
	 * AMD K7/K8 and later CPUs up to Zen don't save/restore
	 * FDP/FIP/FOP unless an exception is pending. Clear the x87 state
	 * here by setting it to fixed values. "m" is a random variable
	 * that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	if (use_xsave()) {
		/*
		 * Dynamically enabled features may have been toggled since
		 * the state was saved. Make sure the hardware XFD
		 * configuration matches @fpstate before restoring.
		 */
		xfd_update_state(fpstate);

		/*
		 * The caller may pass a mask wider than what the kernel
		 * actually enabled; restrict the restore to the features
		 * which are available.
		 */
		mask = fpu_kernel_cfg.max_features & mask;

		os_xrstor(fpstate, mask);
	} else {
		if (use_fxsr())
			fxrstor(&fpstate->regs.fxsave);
		else
			frstor(&fpstate->regs.fsave);
	}
}

void fpu_reset_from_exception_fixup(void)
{
	restore_fpregs_from_fpstate(&init_fpstate, XFEATURE_MASK_FPSTATE);
}

#if IS_ENABLED(CONFIG_KVM)
static void __fpstate_reset(struct fpstate *fpstate, u64 xfd);

static void fpu_init_guest_permissions(struct fpu_guest *gfpu)
{
	struct fpu_state_perm *fpuperm;
	u64 perm;

	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	spin_lock_irq(&current->sighand->siglock);
	fpuperm = &current->group_leader->thread.fpu.guest_perm;
	perm = fpuperm->__state_perm;

	/* First fpstate allocation locks down the permissions. */
	WRITE_ONCE(fpuperm->__state_perm, perm | FPU_GUEST_PERM_LOCKED);

	spin_unlock_irq(&current->sighand->siglock);

	gfpu->perm = perm & ~FPU_GUEST_PERM_LOCKED;
}

bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
{
	struct fpstate *fpstate;
	unsigned int size;

	size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
	fpstate = vzalloc(size);
	if (!fpstate)
		return false;

	/* Leave xfd at 0; the guest's xfd is set via fpu_update_guest_xfd() */
	__fpstate_reset(fpstate, 0);
	fpstate_init_user(fpstate);
	fpstate->is_valloc = true;
	fpstate->is_guest = true;

	gfpu->fpstate = fpstate;
	gfpu->xfeatures = fpu_user_cfg.default_features;
	gfpu->perm = fpu_user_cfg.default_features;

	/*
	 * The KVM_{G,S}ET_XSAVE uABI buffer is fixed at
	 * sizeof(struct kvm_xsave); the default user state size must
	 * never exceed it.
	 */
	gfpu->uabi_size = sizeof(struct kvm_xsave);
	if (WARN_ON_ONCE(fpu_user_cfg.default_size > gfpu->uabi_size))
		gfpu->uabi_size = fpu_user_cfg.default_size;

	fpu_init_guest_permissions(gfpu);

	return true;
}
EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate);

void fpu_free_guest_fpstate(struct fpu_guest *gfpu)
{
	struct fpstate *fps = gfpu->fpstate;

	if (!fps)
		return;

	if (WARN_ON_ONCE(!fps->is_valloc || !fps->is_guest || fps->in_use))
		return;

	gfpu->fpstate = NULL;
	vfree(fps);
}
EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate);

/**
 * fpu_enable_guest_xfd_features - Enable xfeatures according to guest perm
 * @guest_fpu:	Pointer to the guest FPU container
 * @xfeatures:	Features requested by the guest
 *
 * Enable all requested dynamic xfeatures which are permitted for the guest.
 *
 * Return: 0 on success, error code otherwise
 */
int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures)
{
	lockdep_assert_preemption_enabled();

	/* Nothing to do if all requested features are already enabled. */
	xfeatures &= ~guest_fpu->xfeatures;
	if (!xfeatures)
		return 0;

	return __xfd_enable_feature(xfeatures, guest_fpu);
}
EXPORT_SYMBOL_GPL(fpu_enable_guest_xfd_features);

#ifdef CONFIG_X86_64
void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd)
{
	fpregs_lock();
	guest_fpu->fpstate->xfd = xfd;
	if (guest_fpu->fpstate->in_use)
		xfd_update_state(guest_fpu->fpstate);
	fpregs_unlock();
}
EXPORT_SYMBOL_GPL(fpu_update_guest_xfd);

/**
 * fpu_sync_guest_vmexit_xfd_state - Synchronize XFD MSR and software state
 *
 * Must be invoked from KVM after a VMEXIT, before enabling interrupts, when
 * XFD write emulation is disabled. This is required because the guest can
 * freely modify XFD and the state at VMEXIT is not guaranteed to be the
 * same as the state on VMENTER. So software state has to be updated before
 * any operation which depends on it can take place.
 *
 * Note: It can be invoked unconditionally even when write emulation is
 * enabled for the price of a then pointless MSR read.
 */
void fpu_sync_guest_vmexit_xfd_state(void)
{
	struct fpstate *fps = current->thread.fpu.fpstate;

	lockdep_assert_irqs_disabled();
	if (fpu_state_size_dynamic()) {
		rdmsrl(MSR_IA32_XFD, fps->xfd);
		__this_cpu_write(xfd_state, fps->xfd);
	}
}
EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state);
#endif /* CONFIG_X86_64 */

int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
{
	struct fpstate *guest_fps = guest_fpu->fpstate;
	struct fpu *fpu = &current->thread.fpu;
	struct fpstate *cur_fps = fpu->fpstate;

	fpregs_lock();
	if (!cur_fps->is_confidential && !test_thread_flag(TIF_NEED_FPU_LOAD))
		save_fpregs_to_fpstate(fpu);

	/* Swap fpstate */
	if (enter_guest) {
		fpu->__task_fpstate = cur_fps;
		fpu->fpstate = guest_fps;
		guest_fps->in_use = true;
	} else {
		guest_fps->in_use = false;
		fpu->fpstate = fpu->__task_fpstate;
		fpu->__task_fpstate = NULL;
	}

	cur_fps = fpu->fpstate;

	if (!cur_fps->is_confidential) {
		/* Includes an XFD update */
		restore_fpregs_from_fpstate(cur_fps, XFEATURE_MASK_FPSTATE);
	} else {
		/*
		 * XSTATE is restored by firmware from encrypted memory.
		 * Make sure XFD state is correct while running with
		 * guest fpstate.
		 */
		xfd_update_state(cur_fps);
	}

	fpregs_mark_activate();
	fpregs_unlock();
	return 0;
}
EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);
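
/*
 * Illustrative sketch only (not part of this file): a hypervisor caller
 * would typically pair the guest fpstate helpers above roughly like this.
 * The error handling and vCPU run loop are elided:
 *
 *	struct fpu_guest gfpu = { };
 *
 *	if (!fpu_alloc_guest_fpstate(&gfpu))
 *		return -ENOMEM;
 *	...
 *	fpu_swap_kvm_fpstate(&gfpu, true);	// load guest FPU state
 *	// run the vCPU
 *	fpu_swap_kvm_fpstate(&gfpu, false);	// back to the task's state
 *	...
 *	fpu_free_guest_fpstate(&gfpu);
 */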

void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
				    unsigned int size, u32 pkru)
{
	struct fpstate *kstate = gfpu->fpstate;
	union fpregs_state *ustate = buf;
	struct membuf mb = { .p = buf, .left = size };

	if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
		__copy_xstate_to_uabi_buf(mb, kstate, pkru, XSTATE_COPY_XSAVE);
	} else {
		memcpy(&ustate->fxsave, &kstate->regs.fxsave,
		       sizeof(ustate->fxsave));

		/* Make it restorable on a XSAVE enabled host */
		ustate->xsave.header.xfeatures = XFEATURE_MASK_FPSSE;
	}
}
EXPORT_SYMBOL_GPL(fpu_copy_guest_fpstate_to_uabi);

int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
				   u64 xcr0, u32 *vpkru)
{
	struct fpstate *kstate = gfpu->fpstate;
	const union fpregs_state *ustate = buf;
	struct pkru_state *xpkru;
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) {
		if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE)
			return -EINVAL;
		if (ustate->fxsave.mxcsr & ~mxcsr_feature_mask)
			return -EINVAL;
		memcpy(&kstate->regs.fxsave, &ustate->fxsave, sizeof(ustate->fxsave));
		return 0;
	}

	if (ustate->xsave.header.xfeatures & ~xcr0)
		return -EINVAL;

	ret = copy_uabi_from_kernel_to_xstate(kstate, ustate);
	if (ret)
		return ret;

	/* Retrieve PKRU if not in init state */
	if (kstate->regs.xsave.header.xfeatures & XFEATURE_MASK_PKRU) {
		xpkru = get_xsave_addr(&kstate->regs.xsave, XFEATURE_PKRU);
		*vpkru = xpkru->pkru;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate);
#endif /* CONFIG_KVM */

void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
	preempt_disable();

	WARN_ON_FPU(!irq_fpu_usable());
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, true);

	if (!(current->flags & PF_KTHREAD) &&
	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
		set_thread_flag(TIF_NEED_FPU_LOAD);
		save_fpregs_to_fpstate(&current->thread.fpu);
	}
	__cpu_invalidate_fpregs_state();

	/* Put sane initial values into the control registers. */
	if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
		ldmxcsr(MXCSR_DEFAULT);

	if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
		asm volatile ("fninit");
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);

void kernel_fpu_end(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, false);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
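
/*
 * Usage sketch (illustrative, not an API addition): code which wants to
 * use SIMD/FPU instructions in kernel mode brackets them with
 * kernel_fpu_begin()/kernel_fpu_end(), and callers which may run in
 * interrupt context check irq_fpu_usable() first:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		// SSE/AVX code: preemption is disabled, FPU insns are safe
 *		kernel_fpu_end();
 *	} else {
 *		// fall back to a scalar implementation
 *	}
 */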

/*
 * Sync the FPU register state to current's memory register state when the
 * current task owns the FPU. The hardware register state is preserved.
 */
void fpu_sync_fpstate(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	fpregs_lock();
	trace_x86_fpu_before_save(fpu);

	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		save_fpregs_to_fpstate(fpu);

	trace_x86_fpu_after_save(fpu);
	fpregs_unlock();
}

static inline unsigned int init_fpstate_copy_size(void)
{
	if (!use_xsave())
		return fpu_kernel_cfg.default_size;

	/* XSAVE(S) just needs the legacy and the xstate header part */
	return sizeof(init_fpstate.regs.xsave);
}

static inline void fpstate_init_fxstate(struct fpstate *fpstate)
{
	fpstate->regs.fxsave.cwd = 0x37f;
	fpstate->regs.fxsave.mxcsr = MXCSR_DEFAULT;
}

/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fpstate *fpstate)
{
	fpstate->regs.fsave.cwd = 0xffff037fu;
	fpstate->regs.fsave.swd = 0xffff0000u;
	fpstate->regs.fsave.twd = 0xffffffffu;
	fpstate->regs.fsave.fos = 0xffff0000u;
}

/*
 * Used in two places:
 * 1) Early boot to set up init_fpstate for non-XSAVE systems
 * 2) fpu_alloc_guest_fpstate(), which is invoked from KVM
 */
void fpstate_init_user(struct fpstate *fpstate)
{
	if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
		fpstate_init_soft(&fpstate->regs.soft);
		return;
	}

	xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures);

	if (cpu_feature_enabled(X86_FEATURE_FXSR))
		fpstate_init_fxstate(fpstate);
	else
		fpstate_init_fstate(fpstate);
}

static void __fpstate_reset(struct fpstate *fpstate, u64 xfd)
{
	/* Initialize sizes and feature masks */
	fpstate->size = fpu_kernel_cfg.default_size;
	fpstate->user_size = fpu_user_cfg.default_size;
	fpstate->xfeatures = fpu_kernel_cfg.default_features;
	fpstate->user_xfeatures = fpu_user_cfg.default_features;
	fpstate->xfd = xfd;
}

void fpstate_reset(struct fpu *fpu)
{
	/* Set the fpstate pointer to the default fpstate */
	fpu->fpstate = &fpu->__fpstate;
	__fpstate_reset(fpu->fpstate, init_fpstate.xfd);

	/* Initialize the permission related info in fpu */
	fpu->perm.__state_perm = fpu_kernel_cfg.default_features;
	fpu->perm.__state_size = fpu_kernel_cfg.default_size;
	fpu->perm.__user_state_size = fpu_user_cfg.default_size;
	/* Same defaults for guests */
	fpu->guest_perm = fpu->perm;
}

static inline void fpu_inherit_perms(struct fpu *dst_fpu)
{
	if (fpu_state_size_dynamic()) {
		struct fpu *src_fpu = &current->group_leader->thread.fpu;

		spin_lock_irq(&current->sighand->siglock);
		/* Fork also inherits the permissions of the parent */
		dst_fpu->perm = src_fpu->perm;
		dst_fpu->guest_perm = src_fpu->guest_perm;
		spin_unlock_irq(&current->sighand->siglock);
	}
}

/* Clone current's FPU state on fork */
int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal)
{
	struct fpu *src_fpu = &current->thread.fpu;
	struct fpu *dst_fpu = &dst->thread.fpu;

	/* The new task's FPU state cannot be valid in the hardware. */
	dst_fpu->last_cpu = -1;

	fpstate_reset(dst_fpu);

	if (!cpu_feature_enabled(X86_FEATURE_FPU))
		return 0;

	/*
	 * Enforce reload for user space tasks and prevent kernel threads
	 * from trying to save the FPU registers again.
	 */
	set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);

	/*
	 * No FPU state inheritance for kernel threads and IO
	 * worker threads.
	 */
	if (minimal) {
		/* Clear out the minimal state */
		memcpy(&dst_fpu->fpstate->regs, &init_fpstate.regs,
		       init_fpstate_copy_size());
		return 0;
	}

	/*
	 * If a new feature is added, ensure all dynamic features are
	 * caller-saved from here!
	 */
	BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);

	/*
	 * Save the default portion of the current FPU state into the
	 * clone. Assume all dynamic features to be defined as caller-
	 * saved, which enables skipping both the expansion of fpstate
	 * and the copying of any dynamic state.
	 *
	 * Do not use memcpy() when TIF_NEED_FPU_LOAD is set because
	 * copying is not valid when current uses non-default states.
	 */
	fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		fpregs_restore_userregs();
	save_fpregs_to_fpstate(dst_fpu);
	if (!(clone_flags & CLONE_THREAD))
		fpu_inherit_perms(dst_fpu);
	fpregs_unlock();

	/*
	 * Children never inherit PASID state. Clear it from the clone's
	 * xfeatures so that the feature starts out in its init state.
	 */
	if (use_xsave())
		dst_fpu->fpstate->regs.xsave.header.xfeatures &= ~XFEATURE_MASK_PASID;

	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}

/*
 * Whitelist the FPU register state embedded into task_struct for hardened
 * usercopy.
 */
void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size)
{
	*offset = offsetof(struct thread_struct, fpu.__fpstate.regs);
	*size = fpu_kernel_cfg.default_size;
}

/*
 * Drops current FPU state: deactivates the fpregs and the fpstate,
 * but leaves the previous register contents in the hardware.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();

	if (fpu == &current->thread.fpu) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from the init fpstate.
 * Caller must do fpregs_[un]lock() around it.
 */
static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
{
	if (use_xsave())
		os_xrstor(&init_fpstate, features_mask);
	else if (use_fxsr())
		fxrstor(&init_fpstate.regs.fxsave);
	else
		frstor(&init_fpstate.regs.fsave);

	pkru_write_default();
}

/*
 * Reset current->fpu memory state to the init values.
 */
static void fpu_reset_fpregs(void)
{
	struct fpu *fpu = &current->thread.fpu;

	fpregs_lock();
	fpu__drop(fpu);
	/*
	 * This does not change the actual hardware registers. It just
	 * resets the memory image and sets TIF_NEED_FPU_LOAD so a
	 * subsequent return to usermode will reload the registers from
	 * the task's memory image.
	 *
	 * Do not use fpstate_init() here. Just copy init_fpstate, which
	 * has the correct content already except for PKRU.
	 */
	memcpy(&fpu->fpstate->regs, &init_fpstate.regs, init_fpstate_copy_size());
	set_thread_flag(TIF_NEED_FPU_LOAD);
	fpregs_unlock();
}

/*
 * Reset current's user FPU states to the init states. current's
 * supervisor states, if any, are not modified by this function.
 * The caller guarantees that the XSTATE header in memory is intact.
 */
void fpu__clear_user_states(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	fpregs_lock();
	if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
		fpu_reset_fpregs();
		fpregs_unlock();
		return;
	}

	/*
	 * Ensure that current's supervisor states are loaded into their
	 * corresponding registers.
	 */
	if (xfeatures_mask_supervisor() &&
	    !fpregs_state_valid(fpu, smp_processor_id()))
		os_xrstor_supervisor(fpu->fpstate);

	/* Reset user states in registers. */
	restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE);

	/*
	 * Now all FPU registers have their desired values. Inform the FPU
	 * state machine that current's FPU registers are in the hardware
	 * registers. The memory image does not need to be updated because
	 * any operation relying on it has to save the registers first when
	 * current's FPU is marked active.
	 */
	fpregs_mark_activate();
	fpregs_unlock();
}

void fpu_flush_thread(void)
{
	fpstate_reset(&current->thread.fpu);
	fpu_reset_fpregs();
}

/*
 * Load FPU context before returning to userspace.
 */
void switch_fpu_return(void)
{
	if (!static_cpu_has(X86_FEATURE_FPU))
		return;

	fpregs_restore_userregs();
}
EXPORT_SYMBOL_GPL(switch_fpu_return);

#ifdef CONFIG_X86_DEBUG_FPU
/*
 * If current FPU state according to its tracking (loaded FPU context on this
 * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
 * loaded on return to userland.
 */
void fpregs_assert_state_consistent(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		return;

	WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
}
EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
#endif

void fpregs_mark_activate(void)
{
	struct fpu *fpu = &current->thread.fpu;

	fpregs_activate(fpu);
	fpu->last_cpu = smp_processor_id();
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}

/*
 * x87 math exception handling:
 */
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to
		 * unmasked status. 0x3f is the exception bits in these regs,
		 * 0x200 is the C1 reg you need in case of a stack fault,
		 * 0x040 is the stack fault bit. We should only be taking one
		 * exception at a time, so if this combination doesn't
		 * produce any single exception, then we have a bad program
		 * that isn't synchronizing its FPU usage and it will suffer
		 * the consequences since we won't be able to fully reproduce
		 * the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->fpstate->regs.fxsave.cwd;
			swd = fpu->fpstate->regs.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->fpstate->regs.fsave.cwd;
			swd = (unsigned short)fpu->fpstate->regs.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently,
		 * as there is only a single status/control register. Thus,
		 * to determine which unmasked exception was caught we must
		 * mask the exception mask bits at 0x1f80, and then use
		 * these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->fpstate->regs.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}
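
/*
 * Worked example for the masking above (illustrative): with the default
 * x87 control word 0x37f all exceptions are masked, so err = swd & ~cwd
 * is 0 and a spurious trap returns 0. If a program unmasks divide-by-zero
 * (clears cwd bit 2, giving cwd = 0x37b) and the status word then reports
 * it (swd bit 2 set), err = swd & ~cwd has bit 0x004 set and the function
 * returns FPE_FLTDIV.
 */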

/*
 * Initialize register state that may prevent from entering low-power idle.
 * This function will be invoked from the cpuidle driver only when needed.
 */
void fpu_idle_fpregs(void)
{
	/* Note: AMX_TILE being enabled implies XGETBV1 support */
	if (cpu_feature_enabled(X86_FEATURE_AMX_TILE) &&
	    (xfeatures_in_use() & XFEATURE_MASK_XTILE)) {
		tile_release();
		fpregs_deactivate(&current->thread.fpu);
	}
}