0001
0002
0003
0004
0005 #ifndef __ASM_FP_H
0006 #define __ASM_FP_H
0007
0008 #include <asm/errno.h>
0009 #include <asm/ptrace.h>
0010 #include <asm/processor.h>
0011 #include <asm/sigcontext.h>
0012 #include <asm/sysreg.h>
0013
0014 #ifndef __ASSEMBLY__
0015
0016 #include <linux/bitmap.h>
0017 #include <linux/build_bug.h>
0018 #include <linux/bug.h>
0019 #include <linux/cache.h>
0020 #include <linux/init.h>
0021 #include <linux/stddef.h>
0022 #include <linux/types.h>
0023
0024 #ifdef CONFIG_COMPAT
0025
/*
 * Masks splitting the AArch32 FPSCR into the status flags and the
 * control bits handled separately by the compat (32-bit) code.
 * NOTE(review): values taken as-is — confirm against the AArch32 FPSCR
 * bit layout in the Arm ARM before changing.
 */
#define VFP_FPSCR_STAT_MASK	0xf800009f
#define VFP_FPSCR_CTRL_MASK	0x07f79f00

/*
 * Size of the compat VFP register dump: 32 doubleword (8-byte)
 * registers plus the 4-byte FPSCR.
 */
#define VFP_STATE_SIZE		((32 * 8) + 4)
0033 #endif
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
/*
 * Maximum vector-quadword (128-bit granule) count for SME state.
 * NOTE(review): presumably the architectural SME maximum of 2048 bits;
 * confirm against the SMCR_ELx.LEN encoding.
 */
#define SME_VQ_MAX	16

struct task_struct;

/* Low-level save/restore of the base FP/SIMD register file */
extern void fpsimd_save_state(struct user_fpsimd_state *state);
extern void fpsimd_load_state(struct user_fpsimd_state *state);

/* Context-switch and thread-teardown entry points */
extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void);

/*
 * Synchronisation between the task's live registers and its saved
 * in-memory state (signal delivery, ptrace, etc.) — see fpsimd.c for
 * the exact contracts.
 */
extern void fpsimd_signal_preserve_current_state(void);
extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);

/*
 * Associate the given register state (FPSIMD plus optional SVE/ZA
 * buffers and vector lengths) with the current CPU.
 * NOTE(review): binding semantics inferred from the name — confirm
 * against the definition in fpsimd.c.
 */
extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
				     void *sve_state, unsigned int sve_vl,
				     void *za_state, unsigned int sme_vl,
				     u64 *svcr);

/* Invalidate any cached per-CPU view of @target's / the CPU's state */
extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_save_and_flush_cpu_state(void);
0067
0068 static inline bool thread_sm_enabled(struct thread_struct *thread)
0069 {
0070 return system_supports_sme() && (thread->svcr & SVCR_SM_MASK);
0071 }
0072
0073 static inline bool thread_za_enabled(struct thread_struct *thread)
0074 {
0075 return system_supports_sme() && (thread->svcr & SVCR_ZA_MASK);
0076 }
0077
0078
/*
 * Largest vector length (in bytes) the arch code supports for any
 * vector type: 0x100 bytes == 2048 bits.
 */
#define VL_ARCH_MAX 0x100
0080
0081
/*
 * Byte offset of the FFR within a saved SVE register block for vector
 * length @vl, i.e. the signal-frame FFR offset rebased to the start of
 * the regs block (suitable for adding to thread->sve_state).
 */
static inline size_t sve_ffr_offset(int vl)
{
	return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
}
0086
0087 static inline void *sve_pffr(struct thread_struct *thread)
0088 {
0089 unsigned int vl;
0090
0091 if (system_supports_sme() && thread_sm_enabled(thread))
0092 vl = thread_get_sme_vl(thread);
0093 else
0094 vl = thread_get_sve_vl(thread);
0095
0096 return (char *)thread->sve_state + sve_ffr_offset(vl);
0097 }
0098
/*
 * Low-level SVE/SME register state accessors.  The save_ffr /
 * restore_ffr / flush_ffr arguments select whether the FFR is
 * included in the operation.
 */
extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
			   int restore_ffr);
extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
extern void sve_set_vq(unsigned long vq_minus_1);
extern void sme_set_vq(unsigned long vq_minus_1);
extern void za_save_state(void *state);
extern void za_load_state(void const *state);

struct arm64_cpu_capabilities;
/* cpufeature enable callbacks for the SVE / SME / SME-FA64 features */
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);

/* Probe the ZCR / SMCR registers for supported vector length features */
extern u64 read_zcr_features(void);
extern u64 read_smcr_features(void);
0116
0117
0118
0119
0120
0121
/*
 * Translate a vector-quadword count @vq to its bit index within a
 * vq_map bitmap.  The mapping is reversed (VQ == SVE_VQ_MAX maps to
 * bit 0), so a lowest-set-bit search over the bitmap yields the
 * largest supported VQ first.
 * NOTE(review): search-order rationale inferred from the encoding —
 * confirm against the users in fpsimd.c.
 */
static inline unsigned int __vq_to_bit(unsigned int vq)
{
	return SVE_VQ_MAX - vq;
}
0126
/* Inverse of __vq_to_bit(): recover the VQ value from a bitmap index */
static inline unsigned int __bit_to_vq(unsigned int bit)
{
	return SVE_VQ_MAX - bit;
}
0131
0132
/*
 * Per-vector-type (SVE / SME) description of the vector lengths
 * available on this system, populated at boot.
 */
struct vl_info {
	enum vec_type type;
	const char *name;		/* For display purposes */

	/* Minimum supported vector length across all CPUs */
	int min_vl;

	/* Maximum supported vector length across all CPUs */
	int max_vl;
	/* Maximum VL usable under virtualisation — confirm in fpsimd.c */
	int max_virtualisable_vl;

	/*
	 * Set of available vector lengths, with length for quadword
	 * count vq encoded as bit __vq_to_bit(vq).
	 */
	DECLARE_BITMAP(vq_map, SVE_VQ_MAX);

	/*
	 * Lengths available on some but not necessarily all CPUs.
	 * NOTE(review): "partial" semantics inferred from the name —
	 * verify against vec_update_vq_map().
	 */
	DECLARE_BITMAP(vq_partial_map, SVE_VQ_MAX);
};
0153
0154 #ifdef CONFIG_ARM64_SVE
0155
/* Allocate SVE state storage for @task; @flush requests zeroing */
extern void sve_alloc(struct task_struct *task, bool flush);
extern void fpsimd_release_task(struct task_struct *task);

/* Conversions between the shared FPSIMD view and the full SVE view */
extern void fpsimd_sync_to_sve(struct task_struct *task);
extern void fpsimd_force_sync_to_sve(struct task_struct *task);
extern void sve_sync_to_fpsimd(struct task_struct *task);
extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);

/* Set @task's vector length for @type; flags as per the VL prctl()s */
extern int vec_set_vector_length(struct task_struct *task, enum vec_type type,
				 unsigned long vl, unsigned long flags);

/* Backends for the SVE vector-length prctl() calls */
extern int sve_set_current_vl(unsigned long arg);
extern int sve_get_current_vl(void);
0168
/* Disable EL0 access to SVE by clearing CPACR_EL1.ZEN_EL0EN */
static inline void sve_user_disable(void)
{
	sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
}

/* Re-enable EL0 access to SVE by setting CPACR_EL1.ZEN_EL0EN */
static inline void sve_user_enable(void)
{
	sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
}
0178
/*
 * Update the LEN field of the ZCR register @reg to @val, preserving the
 * other bits and skipping the sysreg write entirely when the value
 * would not change.  @val is masked to the LEN field here.
 */
#define sve_cond_update_zcr_vq(val, reg)		\
	do {						\
		u64 __zcr = read_sysreg_s((reg));	\
		u64 __new = __zcr & ~ZCR_ELx_LEN_MASK;	\
		__new |= (val) & ZCR_ELx_LEN_MASK;	\
		if (__zcr != __new)			\
			write_sysreg_s(__new, (reg));	\
	} while (0)
0187
0188
0189
0190
0191
/*
 * Redundant forward declaration: struct vl_info above already uses
 * enum vec_type by value, so the full definition must be visible via
 * the included headers.
 */
enum vec_type;

/* Boot-time probing/verification of the supported VQ set for @type */
extern void __init vec_init_vq_map(enum vec_type type);
extern void vec_update_vq_map(enum vec_type type);
extern int vec_verify_vq_map(enum vec_type type);
extern void __init sve_setup(void);

/* System-wide vector-length info, fixed after boot */
extern __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX];
0200
/*
 * Write @val into the vector-length (LEN) field of the EL1 control
 * register for @type (ZCR_EL1 for SVE, SMCR_EL1 for SME), preserving
 * the other bits.  Note that @val is NOT masked here: callers must
 * pass a value confined to the LEN field.
 */
static inline void write_vl(enum vec_type type, u64 val)
{
	u64 tmp;

	switch (type) {
#ifdef CONFIG_ARM64_SVE
	case ARM64_VEC_SVE:
		tmp = read_sysreg_s(SYS_ZCR_EL1) & ~ZCR_ELx_LEN_MASK;
		write_sysreg_s(tmp | val, SYS_ZCR_EL1);
		break;
#endif
#ifdef CONFIG_ARM64_SME
	case ARM64_VEC_SME:
		tmp = read_sysreg_s(SYS_SMCR_EL1) & ~SMCR_ELx_LEN_MASK;
		write_sysreg_s(tmp | val, SYS_SMCR_EL1);
		break;
#endif
	default:
		/* Unknown vector type: warn once, write nothing */
		WARN_ON_ONCE(1);
		break;
	}
}
0223
0224 static inline int vec_max_vl(enum vec_type type)
0225 {
0226 return vl_info[type].max_vl;
0227 }
0228
0229 static inline int vec_max_virtualisable_vl(enum vec_type type)
0230 {
0231 return vl_info[type].max_virtualisable_vl;
0232 }
0233
0234 static inline int sve_max_vl(void)
0235 {
0236 return vec_max_vl(ARM64_VEC_SVE);
0237 }
0238
0239 static inline int sve_max_virtualisable_vl(void)
0240 {
0241 return vec_max_virtualisable_vl(ARM64_VEC_SVE);
0242 }
0243
0244
/*
 * True if vector-quadword count @vq is available for vector type @type
 * (tests the __vq_to_bit()-encoded vq_map bitmap).
 * NOTE(review): the vq_map vs vq_partial_map split suggests this means
 * "supported on all CPUs" — confirm in fpsimd.c.
 */
static inline bool vq_available(enum vec_type type, unsigned int vq)
{
	return test_bit(__vq_to_bit(vq), vl_info[type].vq_map);
}
0249
0250 static inline bool sve_vq_available(unsigned int vq)
0251 {
0252 return vq_available(ARM64_VEC_SVE, vq);
0253 }
0254
/* Size in bytes of the SVE state buffer required for @task */
size_t sve_state_size(struct task_struct const *task);
0256
0257 #else
0258
/* Stub implementations used when CONFIG_ARM64_SVE is not enabled. */

static inline void sve_alloc(struct task_struct *task, bool flush) { }
static inline void fpsimd_release_task(struct task_struct *task) { }
static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }

/*
 * NOTE(review): returns 0 here while sve_max_vl() below returns
 * -EINVAL — verify callers tolerate the asymmetry.
 */
static inline int sve_max_virtualisable_vl(void)
{
	return 0;
}

/* The SVE vector-length prctl()s fail cleanly with SVE compiled out */
static inline int sve_set_current_vl(unsigned long arg)
{
	return -EINVAL;
}

static inline int sve_get_current_vl(void)
{
	return -EINVAL;
}

static inline int sve_max_vl(void)
{
	return -EINVAL;
}

static inline bool sve_vq_available(unsigned int vq) { return false; }

/*
 * These must be unreachable with SVE compiled out: BUILD_BUG() turns
 * any surviving call site into a compile-time error.
 */
static inline void sve_user_disable(void) { BUILD_BUG(); }
static inline void sve_user_enable(void) { BUILD_BUG(); }

#define sve_cond_update_zcr_vq(val, reg) do { } while (0)

static inline void vec_init_vq_map(enum vec_type t) { }
static inline void vec_update_vq_map(enum vec_type t) { }
static inline int vec_verify_vq_map(enum vec_type t) { return 0; }
static inline void sve_setup(void) { }

static inline size_t sve_state_size(struct task_struct const *task)
{
	return 0;
}
0300
0301 #endif
0302
0303 #ifdef CONFIG_ARM64_SME
0304
/* Disable EL0 access to SME by clearing CPACR_EL1.SMEN_EL0EN */
static inline void sme_user_disable(void)
{
	sysreg_clear_set(cpacr_el1, CPACR_EL1_SMEN_EL0EN, 0);
}

/* Re-enable EL0 access to SME by setting CPACR_EL1.SMEN_EL0EN */
static inline void sme_user_enable(void)
{
	sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_SMEN_EL0EN);
}
0314
/*
 * Enter streaming mode (sets SVCR.SM) via the SVCR MSR alias; the
 * written register value (xzr) is immaterial to the alias.
 */
static inline void sme_smstart_sm(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTART_SM_EL0, "xzr"));
}

/* Exit streaming mode (clears SVCR.SM) */
static inline void sme_smstop_sm(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTOP_SM_EL0, "xzr"));
}

/* Exit streaming mode and disable ZA storage (clears SVCR.{SM,ZA}) */
static inline void sme_smstop(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTOP_SMZA_EL0, "xzr"));
}

/* Boot-time discovery of SME properties (vector lengths etc.) */
extern void __init sme_setup(void);
0331
0332 static inline int sme_max_vl(void)
0333 {
0334 return vec_max_vl(ARM64_VEC_SME);
0335 }
0336
0337 static inline int sme_max_virtualisable_vl(void)
0338 {
0339 return vec_max_virtualisable_vl(ARM64_VEC_SME);
0340 }
0341
/* Allocate ZA state storage for @task — see fpsimd.c for semantics */
extern void sme_alloc(struct task_struct *task);
/* Read the current SME vector length from the hardware */
extern unsigned int sme_get_vl(void);
/* Backends for the SME vector-length prctl() calls */
extern int sme_set_current_vl(unsigned long arg);
extern int sme_get_current_vl(void);
0346
0347
0348
0349
0350
0351
/*
 * Size in bytes of the ZA register storage required for @task, based
 * on the task's current SME vector length (layout as encoded by the
 * ZA_SIG_REGS_SIZE() signal-frame macro).
 */
static inline size_t za_state_size(struct task_struct const *task)
{
	unsigned int vl = task_get_sme_vl(task);

	return ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));
}
0358
0359 #else
0360
/* Stub implementations used when CONFIG_ARM64_SME is not enabled. */

/* Must be unreachable with SME compiled out: compile-time error if called */
static inline void sme_user_disable(void) { BUILD_BUG(); }
static inline void sme_user_enable(void) { BUILD_BUG(); }

/* Streaming-mode/ZA switches become no-ops without SME */
static inline void sme_smstart_sm(void) { }
static inline void sme_smstop_sm(void) { }
static inline void sme_smstop(void) { }

static inline void sme_alloc(struct task_struct *task) { }
static inline void sme_setup(void) { }
static inline unsigned int sme_get_vl(void) { return 0; }
static inline int sme_max_vl(void) { return 0; }
static inline int sme_max_virtualisable_vl(void) { return 0; }
/* The SME vector-length prctl()s fail cleanly with SME compiled out */
static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
static inline int sme_get_current_vl(void) { return -EINVAL; }

static inline size_t za_state_size(struct task_struct const *task)
{
	return 0;
}
0380
0381 #endif
0382
0383
/*
 * Bracket code that may clobber the FP/SIMD state outside normal task
 * context — presumably the EFI runtime services path; see the
 * definitions in fpsimd.c.
 */
extern void __efi_fpsimd_begin(void);
extern void __efi_fpsimd_end(void);
0386
0387 #endif
0388
0389 #endif