/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/processor.h
 *
 *  Copyright (C) 1995-1999 Russell King
 */

#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H

#ifdef __KERNEL__

#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>
#include <asm/vdso/processor.h>

#ifdef __KERNEL__
#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
			 TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX	TASK_SIZE
#endif

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
#endif
};

struct thread_struct {
						/* fault info	  */
	unsigned long		address;
	unsigned long		trap_no;
	unsigned long		error_code;
						/* debugging	  */
	struct debug_info	debug;
};

/*
 * Everything usercopied to/from thread_struct is statically-sized, so
 * no hardened usercopy whitelist is needed.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = *size = 0;
}

#define INIT_THREAD  {	}

#define start_thread(regs,pc,sp)					\
({									\
	unsigned long r7, r8, r9;					\
									\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) {			\
		r7 = regs->ARM_r7;					\
		r8 = regs->ARM_r8;					\
		r9 = regs->ARM_r9;					\
	}								\
	memset(regs->uregs, 0, sizeof(regs->uregs));			\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&			\
	    current->personality & FDPIC_FUNCPTRS) {			\
		regs->ARM_r7 = r7;					\
		regs->ARM_r8 = r8;					\
		regs->ARM_r9 = r9;					\
		regs->ARM_r10 = current->mm->start_data;		\
	} else if (!IS_ENABLED(CONFIG_MMU))				\
		regs->ARM_r10 = current->mm->start_data;		\
	if (current->personality & ADDR_LIMIT_32BIT)			\
		regs->ARM_cpsr = USR_MODE;				\
	else								\
		regs->ARM_cpsr = USR26_MODE;				\
	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
		regs->ARM_cpsr |= PSR_T_BIT;				\
	regs->ARM_cpsr |= PSR_ENDSTATE;					\
	regs->ARM_pc = pc & ~1;						\
	regs->ARM_sp = sp;						\
})
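
/*
 * Usage sketch, illustrative only: a binfmt loader calls start_thread()
 * once the new user image has been mapped, roughly as below (elf_entry
 * and bprm are the caller's own names, assumed here for the example):
 *
 *	struct pt_regs *regs = current_pt_regs();
 *
 *	start_thread(regs, elf_entry, bprm->p);
 */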

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long __get_wchan(struct task_struct *p);

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp
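
/*
 * Sketch of typical use, assuming tsk is a task that is not currently
 * running: its saved user-mode register frame sits at the top of the
 * kernel stack, so
 *
 *	struct pt_regs *regs = task_pt_regs(tsk);
 *	unsigned long pc = KSTK_EIP(tsk);	i.e. regs->ARM_pc
 *	unsigned long sp = KSTK_ESP(tsk);	i.e. regs->ARM_sp
 */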

#ifdef CONFIG_SMP
#define __ALT_SMP_ASM(smp, up)						\
	"9998:	" smp "\n"						\
	"	.pushsection \".alt.smp.init\", \"a\"\n"		\
	"	.align	2\n"						\
	"	.long	9998b - .\n"					\
	"	" up "\n"						\
	"	.popsection\n"
#else
#define __ALT_SMP_ASM(smp, up)	up
#endif
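
/*
 * Minimal sketch of how __ALT_SMP_ASM() is used (prefetchw() below is the
 * in-file example): the SMP instruction is emitted inline and the UP
 * replacement is recorded in .alt.smp.init, so an SMP kernel booting on a
 * uniprocessor can patch it in at init time, e.g.
 *
 *	asm volatile(__ALT_SMP_ASM("sev", "nop"));
 */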

/*
 * Prefetching support - ARMv5 and later only.
 */
#if __LINUX_ARM_ARCH__ >= 5

#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:: "p" (ptr));
}

#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	__asm__ __volatile__(
		".arch_extension	mp\n"
		__ALT_SMP_ASM(
			"pldw\t%a0",
			"pld\t%a0"
		)
		:: "p" (ptr));
}
#endif
#endif
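
/*
 * Illustrative only: callers include <linux/prefetch.h>, which uses these
 * definitions when ARCH_HAS_PREFETCH/ARCH_HAS_PREFETCHW are set and falls
 * back to the compiler builtins otherwise, e.g. to pull data in ahead of
 * use (next_node and obj are the caller's own names):
 *
 *	prefetch(next_node);
 *	prefetchw(&obj->refcount);
 */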

#endif /* __KERNEL__ */

#endif /* __ASM_ARM_PROCESSOR_H */