Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Based on arch/arm/include/asm/processor.h
0004  *
0005  * Copyright (C) 1995-1999 Russell King
0006  * Copyright (C) 2012 ARM Ltd.
0007  */
0008 #ifndef __ASM_PROCESSOR_H
0009 #define __ASM_PROCESSOR_H
0010 
0011 /*
0012  * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
0013  * no point in shifting all network buffers by 2 bytes just to make some IP
0014  * header fields appear aligned in memory, potentially sacrificing some DMA
0015  * performance on some platforms.
0016  */
#define NET_IP_ALIGN	0

/*
 * Layout of the per-task mte_ctrl field (see struct thread_struct):
 * bits [15:0] hold the user tag-exclusion mask (presumably mirrored
 * into GCR_EL1.Excl — confirm against the MTE code), and bits 16-18
 * select the Tag Check Fault reporting mode(s).
 */
#define MTE_CTRL_GCR_USER_EXCL_SHIFT	0
#define MTE_CTRL_GCR_USER_EXCL_MASK	0xffff

/* Tag Check Fault modes: synchronous, asynchronous, asymmetric. */
#define MTE_CTRL_TCF_SYNC		(1UL << 16)
#define MTE_CTRL_TCF_ASYNC		(1UL << 17)
#define MTE_CTRL_TCF_ASYMM		(1UL << 18)
0025 
0026 #ifndef __ASSEMBLY__
0027 
0028 #include <linux/build_bug.h>
0029 #include <linux/cache.h>
0030 #include <linux/init.h>
0031 #include <linux/stddef.h>
0032 #include <linux/string.h>
0033 #include <linux/thread_info.h>
0034 
0035 #include <vdso/processor.h>
0036 
0037 #include <asm/alternative.h>
0038 #include <asm/cpufeature.h>
0039 #include <asm/hw_breakpoint.h>
0040 #include <asm/kasan.h>
0041 #include <asm/lse.h>
0042 #include <asm/pgtable-hwdef.h>
0043 #include <asm/pointer_auth.h>
0044 #include <asm/ptrace.h>
0045 #include <asm/spectre.h>
0046 #include <asm/types.h>
0047 
0048 /*
0049  * TASK_SIZE - the maximum size of a user space task.
0050  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
0051  */
0052 
/* Default user VA span, from the compile-time minimum VA bits. */
#define DEFAULT_MAP_WINDOW_64	(UL(1) << VA_BITS_MIN)
/* Actual user VA span, from the runtime-detected vabits_actual. */
#define TASK_SIZE_64		(UL(1) << vabits_actual)
#define TASK_SIZE_MAX		(UL(1) << VA_BITS)
0056 
#ifdef CONFIG_COMPAT
#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
/*
 * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
 * by the compat vectors page.
 */
#define TASK_SIZE_32		UL(0x100000000)
#else
#define TASK_SIZE_32		(UL(0x100000000) - PAGE_SIZE)
#endif /* CONFIG_ARM64_64K_PAGES && CONFIG_KUSER_HELPERS */
/* Select the 32-bit or 64-bit limit based on the task's TIF_32BIT flag. */
#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#define DEFAULT_MAP_WINDOW	(test_thread_flag(TIF_32BIT) ? \
				TASK_SIZE_32 : DEFAULT_MAP_WINDOW_64)
#else
#define TASK_SIZE		TASK_SIZE_64
#define DEFAULT_MAP_WINDOW	DEFAULT_MAP_WINDOW_64
#endif /* CONFIG_COMPAT */
0077 
#ifdef CONFIG_ARM64_FORCE_52BIT
#define STACK_TOP_MAX		TASK_SIZE_64
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))
#else
/* By default keep stacks and the mmap base inside the default window. */
#define STACK_TOP_MAX		DEFAULT_MAP_WINDOW_64
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(DEFAULT_MAP_WINDOW / 4))
#endif /* CONFIG_ARM64_FORCE_52BIT */

#ifdef CONFIG_COMPAT
/* Base of the AArch32 vectors page; compat stack tops end here. */
#define AARCH32_VECTORS_BASE	0xffff0000
#define STACK_TOP		(test_thread_flag(TIF_32BIT) ? \
				AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP		STACK_TOP_MAX
#endif /* CONFIG_COMPAT */
0093 
#ifndef CONFIG_ARM64_FORCE_52BIT
/*
 * mmap() placement: stay below DEFAULT_MAP_WINDOW unless the caller
 * passed an address hint above it, in which case the full TASK_SIZE
 * span becomes available.  All macro parameters are parenthesized in
 * the expansion so that expression arguments (e.g. "a + b") cannot be
 * re-associated by operator precedence at the use site.
 */
#define arch_get_mmap_end(addr, len, flags) \
		(((addr) > DEFAULT_MAP_WINDOW) ? TASK_SIZE : DEFAULT_MAP_WINDOW)

#define arch_get_mmap_base(addr, base) (((addr) > DEFAULT_MAP_WINDOW) ? \
					(base) + TASK_SIZE - DEFAULT_MAP_WINDOW : \
					(base))
#endif /* CONFIG_ARM64_FORCE_52BIT */
0102 
/* Inclusive upper bound for low physical allocations, derived from
 * arm64_dma_phys_limit (defined in the arm64 mm init code). */
extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)
0105 
/* Per-thread hardware debug state; empty unless HW breakpoints exist. */
struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Have we suspended stepping by a debugger? */
	int			suspended_step;
	/* Allow breakpoints and watchpoints to be disabled for this thread. */
	int			bps_disabled;
	int			wps_disabled;
	/* Hardware breakpoints pinned to this task. */
	struct perf_event	*hbp_break[ARM_MAX_BRP];
	struct perf_event	*hbp_watch[ARM_MAX_WRP];
#endif
};
0118 
/* Vector extensions with an independently configurable vector length. */
enum vec_type {
	ARM64_VEC_SVE = 0,
	ARM64_VEC_SME,
	ARM64_VEC_MAX,	/* entry count: sizes the per-task vl[] arrays */
};
0124 
/*
 * Kernel register state saved and restored across a context switch by
 * cpu_switch_to(): x19-x28 (the AArch64 callee-saved registers), the
 * frame pointer, stack pointer and resume pc.
 */
struct cpu_context {
	unsigned long x19;
	unsigned long x20;
	unsigned long x21;
	unsigned long x22;
	unsigned long x23;
	unsigned long x24;
	unsigned long x25;
	unsigned long x26;
	unsigned long x27;
	unsigned long x28;
	unsigned long fp;
	unsigned long sp;
	unsigned long pc;
};
0140 
/* Architecture-specific per-thread state embedded in task_struct. */
struct thread_struct {
	struct cpu_context	cpu_context;	/* cpu context */

	/*
	 * Whitelisted fields for hardened usercopy:
	 * Maintainers must ensure manually that this contains no
	 * implicit padding.
	 */
	struct {
		unsigned long	tp_value;	/* TLS register */
		unsigned long	tp2_value;	/* TLS slot used by compat threads (see task_user_tls()) */
		struct user_fpsimd_state fpsimd_state;
	} uw;

	unsigned int		fpsimd_cpu;	/* starts as the out-of-range value NR_CPUS (see INIT_THREAD) */
	void			*sve_state;	/* SVE registers, if any */
	void			*za_state;	/* ZA register, if any */
	unsigned int		vl[ARM64_VEC_MAX];	/* vector length */
	unsigned int		vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
	unsigned long		fault_address;	/* fault info */
	unsigned long		fault_code;	/* ESR_EL1 value */
	struct debug_info	debug;		/* debugging */
#ifdef CONFIG_ARM64_PTR_AUTH
	struct ptrauth_keys_user	keys_user;	/* pointer auth keys, user half */
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
	struct ptrauth_keys_kernel	keys_kernel;	/* pointer auth keys, kernel half */
#endif
#endif
#ifdef CONFIG_ARM64_MTE
	u64			mte_ctrl;	/* MTE_CTRL_* bits defined above */
#endif
	u64			sctlr_user;	/* per-task SCTLR_EL1 bits (see SCTLR_USER_MASK) */
	u64			svcr;		/* SVCR value; SM bit consulted by thread_get_cur_vl() */
	u64			tpidr2_el0;	/* TPIDR2_EL0 register value */
};
0176 
0177 static inline unsigned int thread_get_vl(struct thread_struct *thread,
0178                      enum vec_type type)
0179 {
0180     return thread->vl[type];
0181 }
0182 
0183 static inline unsigned int thread_get_sve_vl(struct thread_struct *thread)
0184 {
0185     return thread_get_vl(thread, ARM64_VEC_SVE);
0186 }
0187 
0188 static inline unsigned int thread_get_sme_vl(struct thread_struct *thread)
0189 {
0190     return thread_get_vl(thread, ARM64_VEC_SME);
0191 }
0192 
0193 static inline unsigned int thread_get_cur_vl(struct thread_struct *thread)
0194 {
0195     if (system_supports_sme() && (thread->svcr & SVCR_SM_MASK))
0196         return thread_get_sme_vl(thread);
0197     else
0198         return thread_get_sve_vl(thread);
0199 }
0200 
/*
 * Per-task vector length accessors (implemented out of line).  The
 * *_onexec variants operate on the vector length that will take effect
 * at the task's next exec() rather than the current one.
 */
unsigned int task_get_vl(const struct task_struct *task, enum vec_type type);
void task_set_vl(struct task_struct *task, enum vec_type type,
		 unsigned long vl);
void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
			unsigned long vl);
unsigned int task_get_vl_onexec(const struct task_struct *task,
				enum vec_type type);
0208 
0209 static inline unsigned int task_get_sve_vl(const struct task_struct *task)
0210 {
0211     return task_get_vl(task, ARM64_VEC_SVE);
0212 }
0213 
0214 static inline unsigned int task_get_sme_vl(const struct task_struct *task)
0215 {
0216     return task_get_vl(task, ARM64_VEC_SME);
0217 }
0218 
0219 static inline void task_set_sve_vl(struct task_struct *task, unsigned long vl)
0220 {
0221     task_set_vl(task, ARM64_VEC_SVE, vl);
0222 }
0223 
0224 static inline unsigned int task_get_sve_vl_onexec(const struct task_struct *task)
0225 {
0226     return task_get_vl_onexec(task, ARM64_VEC_SVE);
0227 }
0228 
0229 static inline void task_set_sve_vl_onexec(struct task_struct *task,
0230                       unsigned long vl)
0231 {
0232     task_set_vl_onexec(task, ARM64_VEC_SVE, vl);
0233 }
0234 
/*
 * SCTLR_EL1 bits tracked per task in thread_struct::sctlr_user: the
 * pointer-authentication enable bits and the MTE TCF0 field.
 */
#define SCTLR_USER_MASK                                                        \
	(SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB |   \
	 SCTLR_EL1_TCF0_MASK)
0238 
/*
 * Report the region of thread_struct that hardened usercopy is allowed
 * to copy to/from userspace: exactly the uw sub-struct.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	/* Verify that there is no padding among the whitelisted fields: */
	BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
		     sizeof_field(struct thread_struct, uw.tp_value) +
		     sizeof_field(struct thread_struct, uw.tp2_value) +
		     sizeof_field(struct thread_struct, uw.fpsimd_state));

	*offset = offsetof(struct thread_struct, uw);
	*size = sizeof_field(struct thread_struct, uw);
}
0251 
#ifdef CONFIG_COMPAT
/*
 * Address of the TLS slot userspace sees for task @t: compat (AArch32)
 * threads use uw.tp2_value, native threads uw.tp_value.
 */
#define task_user_tls(t)						\
({									\
	unsigned long *__tls;						\
	if (is_compat_thread(task_thread_info(t)))			\
		__tls = &(t)->thread.uw.tp2_value;			\
	else								\
		__tls = &(t)->thread.uw.tp_value;			\
	__tls;								\
 })
#else
#define task_user_tls(t)	(&(t)->thread.uw.tp_value)
#endif
0265 
/* Sync TPIDR_EL0 back to thread_struct for current */
void tls_preserve_current_state(void);

/* Static initializer: fpsimd_cpu starts as the out-of-range NR_CPUS. */
#define INIT_THREAD {				\
	.fpsimd_cpu = NR_CPUS,			\
}
0272 
0273 static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
0274 {
0275     s32 previous_syscall = regs->syscallno;
0276     memset(regs, 0, sizeof(*regs));
0277     regs->syscallno = previous_syscall;
0278     regs->pc = pc;
0279 
0280     if (system_uses_irq_prio_masking())
0281         regs->pmr_save = GIC_PRIO_IRQON;
0282 }
0283 
/*
 * Set up @regs for a new AArch64 user thread: entry point @pc, stack
 * pointer @sp, PSTATE set to EL0t, everything else zeroed by
 * start_thread_common().
 */
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
				unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_MODE_EL0t;
	/* Re-apply the Spectre-v4 mitigation state for this task. */
	spectre_v4_enable_task_mitigation(current);
	regs->sp = sp;
}
0292 
#ifdef CONFIG_COMPAT
/*
 * Set up @regs for a new AArch32 (compat) user thread: entry point @pc,
 * stack pointer @sp, user mode PSTATE.  Bit 0 of @pc selects the Thumb
 * instruction set.
 */
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
				       unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_AA32_MODE_USR;
	if (pc & 1)
		regs->pstate |= PSR_AA32_T_BIT;	/* Thumb entry point */

#ifdef __AARCH64EB__
	/* Big-endian kernel: start compat tasks with big-endian data. */
	regs->pstate |= PSR_AA32_E_BIT;
#endif

	spectre_v4_enable_task_mitigation(current);
	regs->compat_sp = sp;
}
#endif
0310 
0311 static inline bool is_ttbr0_addr(unsigned long addr)
0312 {
0313     /* entry assembly clears tags for TTBR0 addrs */
0314     return addr < TASK_SIZE;
0315 }
0316 
/* True if @addr falls in the kernel (TTBR1) half of the address space. */
static inline bool is_ttbr1_addr(unsigned long addr)
{
	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
}
0322 
/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Return where blocked task @p is waiting (for /proc wchan reporting). */
unsigned long __get_wchan(struct task_struct *p);

void update_sctlr_el1(u64 sctlr);

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
					 struct task_struct *next);

/* User pt_regs live at the very top of the task's kernel stack. */
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	((unsigned long)task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)	user_stack_pointer(task_pt_regs(tsk))
0342 
0343 /*
0344  * Prefetching support
0345  */
#define ARCH_HAS_PREFETCH
/* Hint the CPU to pull *ptr toward L1 for a load (PRFM PLDL1KEEP). */
static inline void prefetch(const void *ptr)
{
	asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}
0351 
#define ARCH_HAS_PREFETCHW
/* Hint the CPU to pull *ptr toward L1 for a store (PRFM PSTL1KEEP). */
static inline void prefetchw(const void *ptr)
{
	asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}
0357 
#define ARCH_HAS_SPINLOCK_PREFETCH
/*
 * Streaming prefetch-for-store of a lock word.  One of the two
 * encodings (PRFM PSTL1STRM or NOP) is patched in at boot by the
 * LSE-atomics alternatives machinery.
 */
static inline void spin_lock_prefetch(const void *ptr)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
		     "prfm pstl1strm, %a0",
		     "nop") : : "p" (ptr));
}
0365 
/* Minimum alternate signal stack size; set up by minsigstksz_setup(). */
extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);

/*
 * Not at the top of the file due to a direct #include cycle between
 * <asm/fpsimd.h> and <asm/processor.h>.  Deferring this #include
 * ensures that contents of processor.h are visible to fpsimd.h even if
 * processor.h is included first.
 *
 * These prctl helpers are the only things in this file that require
 * fpsimd.h.  The core code expects them to be in this header.
 */
#include <asm/fpsimd.h>

/* Userspace interface for PR_S[MV]E_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg)	sve_set_current_vl(arg)
#define SVE_GET_VL()	sve_get_current_vl()
#define SME_SET_VL(arg)	sme_set_current_vl(arg)
#define SME_GET_VL()	sme_get_current_vl()
0385 
/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg)	ptrauth_prctl_reset_keys(tsk, arg)

/* PR_PAC_{SET,GET}_ENABLED_KEYS prctl */
#define PAC_SET_ENABLED_KEYS(tsk, keys, enabled)				\
	ptrauth_set_enabled_keys(tsk, keys, enabled)
#define PAC_GET_ENABLED_KEYS(tsk) ptrauth_get_enabled_keys(tsk)

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl: tagged user pointer ABI control. */
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
long get_tagged_addr_ctrl(struct task_struct *task);
#define SET_TAGGED_ADDR_CTRL(arg)	set_tagged_addr_ctrl(current, arg)
#define GET_TAGGED_ADDR_CTRL()		get_tagged_addr_ctrl(current)
#endif
0401 
/*
 * For CONFIG_GCC_PLUGIN_STACKLEAK
 *
 * These need to be macros because otherwise we get stuck in a nightmare
 * of header definitions for the use of task_stack_page.
 */

/*
 * The top of the current task's task stack
 */
#define current_top_of_stack()	((unsigned long)current->stack + THREAD_SIZE)
/* Whether the current stack pointer lies within the task's own stack. */
#define on_thread_stack()	(on_task_stack(current, current_stack_pointer, 1, NULL))
0414 
0415 #endif /* __ASSEMBLY__ */
0416 #endif /* __ASM_PROCESSOR_H */