/*
 * processor.h: sparc64 processor and thread definitions.
 */

#ifndef __ASM_SPARC64_PROCESSOR_H
#define __ASM_SPARC64_PROCESSOR_H

#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/page.h>

/*
 * TASK_SIZE is something of a misnomer: it is really the maximum user
 * virtual address the kernel will allocate out.  The 64-bit limit is
 * derived from the size of the (historical) virtual page table region
 * that used to sit at the top of the address space.
 */
#define VA_BITS		44
#ifndef __ASSEMBLY__
#define VPTE_SIZE	(1UL << (VA_BITS - PAGE_SHIFT + 3))
#else
#define VPTE_SIZE	(1 << (VA_BITS - PAGE_SHIFT + 3))
#endif

#define TASK_SIZE_OF(tsk) \
	(test_tsk_thread_flag(tsk,TIF_32BIT) ? \
	 (1UL << 32UL) : ((unsigned long)-VPTE_SIZE))
#define TASK_SIZE \
	(test_thread_flag(TIF_32BIT) ? \
	 (1UL << 32UL) : ((unsigned long)-VPTE_SIZE))
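
/*
 * Worked example (illustrative only; assumes the usual sparc64 8KB
 * pages, i.e. PAGE_SHIFT == 13):
 *
 *	VPTE_SIZE               = 1UL << (44 - 13 + 3) = 1UL << 34
 *	TASK_SIZE (64-bit task) = (unsigned long)-VPTE_SIZE
 *	                        = 0xfffffffc00000000UL
 *	TASK_SIZE (32-bit task) = 1UL << 32 = 0x100000000UL
 */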
#ifdef __KERNEL__

#define STACK_TOP32	((1UL << 32UL) - PAGE_SIZE)
#define STACK_TOP64	(0x0000080000000000UL - (1UL << 32UL))

#define STACK_TOP	(test_thread_flag(TIF_32BIT) ? \
			 STACK_TOP32 : STACK_TOP64)

#define STACK_TOP_MAX	STACK_TOP64
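
/* For reference (illustrative only, again assuming 8KB pages):
 *	STACK_TOP32 = 0x00000000ffffe000UL  (one page below 4GB)
 *	STACK_TOP64 = 0x000007ff00000000UL  (4GB below the 43-bit boundary)
 */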

#endif

#ifndef __ASSEMBLY__

/* The sparc64 processor specific thread struct. */
struct thread_struct {
#ifdef CONFIG_DEBUG_SPINLOCK
	/* Spinlock debugging state: how many locks this thread holds
	 * and the program counter of the lock acquisition.  Used to
	 * catch tasks sleeping illegally with locks held.
	 */
	int smp_lock_count;
	unsigned int smp_lock_pc;
#else
	int dummy;	/* keep the struct non-empty */
#endif
};

#endif /* !(__ASSEMBLY__) */

#ifndef CONFIG_DEBUG_SPINLOCK
#define INIT_THREAD  {			\
	0,				\
}
#else /* CONFIG_DEBUG_SPINLOCK */
#define INIT_THREAD  {				\
/* smp_lock_count, smp_lock_pc, */		\
   0,		   0,				\
}
#endif /* !(CONFIG_DEBUG_SPINLOCK) */

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/fpumacro.h>

struct task_struct;

/* On a uniprocessor, processes see TSO semantics even when running RMO. */
#ifdef CONFIG_SMP
#define TSTATE_INITIAL_MM	TSTATE_TSO
#else
#define TSTATE_INITIAL_MM	TSTATE_RMO
#endif

/* Do the necessary setup to start up a newly executed thread. */
#define start_thread(regs, pc, sp) \
do { \
	unsigned long __asi = ASI_PNF; \
	regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (__asi << 24UL); \
	regs->tpc = ((pc & (~3)) - 4); \
	regs->tnpc = regs->tpc + 4; \
	regs->y = 0; \
	set_thread_wstate(1 << 3); \
	if (current_thread_info()->utraps) { \
		if (*(current_thread_info()->utraps) < 2) \
			kfree(current_thread_info()->utraps); \
		else \
			(*(current_thread_info()->utraps))--; \
		current_thread_info()->utraps = NULL; \
	} \
	/* Zero the saved user registers; %o6 (offset 0x70) gets the new stack pointer. */ \
	__asm__ __volatile__( \
	"stx %%g0, [%0 + %2 + 0x00]\n\t" \
	"stx %%g0, [%0 + %2 + 0x08]\n\t" \
	"stx %%g0, [%0 + %2 + 0x10]\n\t" \
	"stx %%g0, [%0 + %2 + 0x18]\n\t" \
	"stx %%g0, [%0 + %2 + 0x20]\n\t" \
	"stx %%g0, [%0 + %2 + 0x28]\n\t" \
	"stx %%g0, [%0 + %2 + 0x30]\n\t" \
	"stx %%g0, [%0 + %2 + 0x38]\n\t" \
	"stx %%g0, [%0 + %2 + 0x40]\n\t" \
	"stx %%g0, [%0 + %2 + 0x48]\n\t" \
	"stx %%g0, [%0 + %2 + 0x50]\n\t" \
	"stx %%g0, [%0 + %2 + 0x58]\n\t" \
	"stx %%g0, [%0 + %2 + 0x60]\n\t" \
	"stx %%g0, [%0 + %2 + 0x68]\n\t" \
	"stx %1,   [%0 + %2 + 0x70]\n\t" \
	"stx %%g0, [%0 + %2 + 0x78]\n\t" \
	"wrpr %%g0, (1 << 3), %%wstate\n\t" \
	: \
	: "r" (regs), "r" (sp - sizeof(struct reg_window) - STACK_BIAS), \
	  "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
	fprs_write(0); \
	current_thread_info()->xfsr[0] = 0; \
	current_thread_info()->fpsaved[0] = 0; \
	regs->tstate &= ~TSTATE_PEF; \
} while (0)

/* Same as above, but for a 32-bit (compat) task: pc and sp are
 * truncated to 32 bits and TSTATE_AM masks user addresses.
 */
#define start_thread32(regs, pc, sp) \
do { \
	unsigned long __asi = ASI_PNF; \
	pc &= 0x00000000ffffffffUL; \
	sp &= 0x00000000ffffffffUL; \
	regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM) | (__asi << 24UL); \
	regs->tpc = ((pc & (~3)) - 4); \
	regs->tnpc = regs->tpc + 4; \
	regs->y = 0; \
	set_thread_wstate(2 << 3); \
	if (current_thread_info()->utraps) { \
		if (*(current_thread_info()->utraps) < 2) \
			kfree(current_thread_info()->utraps); \
		else \
			(*(current_thread_info()->utraps))--; \
		current_thread_info()->utraps = NULL; \
	} \
	__asm__ __volatile__( \
	"stx %%g0, [%0 + %2 + 0x00]\n\t" \
	"stx %%g0, [%0 + %2 + 0x08]\n\t" \
	"stx %%g0, [%0 + %2 + 0x10]\n\t" \
	"stx %%g0, [%0 + %2 + 0x18]\n\t" \
	"stx %%g0, [%0 + %2 + 0x20]\n\t" \
	"stx %%g0, [%0 + %2 + 0x28]\n\t" \
	"stx %%g0, [%0 + %2 + 0x30]\n\t" \
	"stx %%g0, [%0 + %2 + 0x38]\n\t" \
	"stx %%g0, [%0 + %2 + 0x40]\n\t" \
	"stx %%g0, [%0 + %2 + 0x48]\n\t" \
	"stx %%g0, [%0 + %2 + 0x50]\n\t" \
	"stx %%g0, [%0 + %2 + 0x58]\n\t" \
	"stx %%g0, [%0 + %2 + 0x60]\n\t" \
	"stx %%g0, [%0 + %2 + 0x68]\n\t" \
	"stx %1,   [%0 + %2 + 0x70]\n\t" \
	"stx %%g0, [%0 + %2 + 0x78]\n\t" \
	"wrpr %%g0, (2 << 3), %%wstate\n\t" \
	: \
	: "r" (regs), "r" (sp - sizeof(struct reg_window32)), \
	  "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
	fprs_write(0); \
	current_thread_info()->xfsr[0] = 0; \
	current_thread_info()->fpsaved[0] = 0; \
	regs->tstate &= ~TSTATE_PEF; \
} while (0)
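
/*
 * Illustrative sketch (not part of the original header): the exec path
 * calls one of the two macros above once the new image is mapped.
 * 'regs', 'elf_entry' and 'user_sp' are hypothetical names for the
 * values a binfmt loader would have computed:
 *
 *	if (test_thread_flag(TIF_32BIT))
 *		start_thread32(regs, elf_entry, user_sp);
 *	else
 *		start_thread(regs, elf_entry, user_sp);
 */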

/* Free all resources held by a thread. */
#define release_thread(tsk)		do { } while (0)

unsigned long __get_wchan(struct task_struct *task);

#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])

/* See the commentary in asm/backoff.h for how the instruction
 * sequences below were chosen; the short story is that cpu_relax()
 * tries to yield the current cpu strand during busy loops.
 */
#ifdef BUILD_VDSO
#define cpu_relax()	asm volatile("\n99:\n\t"		\
				     "rd %%ccr, %%g0\n\t"	\
				     "rd %%ccr, %%g0\n\t"	\
				     "rd %%ccr, %%g0\n\t"	\
				     ::: "memory")
#else /* ! BUILD_VDSO */
#define cpu_relax()	asm volatile("\n99:\n\t"		\
				     "rd %%ccr, %%g0\n\t"	\
				     "rd %%ccr, %%g0\n\t"	\
				     "rd %%ccr, %%g0\n\t"	\
				     ".section .pause_3insn_patch,\"ax\"\n\t"\
				     ".word 99b\n\t"		\
				     "wr %%g0, 128, %%asr27\n\t"	\
				     "nop\n\t"			\
				     "nop\n\t"			\
				     ".previous"		\
				     ::: "memory")
#endif
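
/* Illustrative sketch (not part of the original header): cpu_relax()
 * is what a busy-wait loop should call between polls.  The helper and
 * its 'flagp' argument are hypothetical, shown only as an example.
 */
static inline void example_poll_flag(volatile unsigned long *flagp)
{
	while (!*flagp)
		cpu_relax();	/* yield the cpu strand while spinning */
}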

/* Prefetch support, tuned for UltraSPARC-III and later chips. */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

static inline void prefetch(const void *x)
{
	/* We do not use the ordinary read prefetch mnemonic because it
	 * prefetches into the prefetch-cache, which on UltraSPARC-III
	 * and later is only reachable by floating point operations.
	 * "#one_write" instead pulls the line into the L2 cache in
	 * shared state.
	 */
	__asm__ __volatile__("prefetch [%0], #one_write"
			     : /* no outputs */
			     : "r" (x));
}

static inline void prefetchw(const void *x)
{
	/* "#n_writes" is the best prefetch variant for writes: it
	 * brings the cache line into the L2 cache in "owned" state.
	 */
	__asm__ __volatile__("prefetch [%0], #n_writes"
			     : /* no outputs */
			     : "r" (x));
}

#define spin_lock_prefetch(x)	prefetchw(x)
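
/* Illustrative sketch (not part of the original header): a typical use
 * of prefetch() is warming the next element while the current one is
 * processed.  'struct example_node' and 'example_sum' are hypothetical
 * names used only for this example.
 */
struct example_node {
	struct example_node *next;
	long value;
};

static inline long example_sum(struct example_node *n)
{
	long sum = 0;

	while (n) {
		prefetch(n->next);	/* start pulling in the next node */
		sum += n->value;
		n = n->next;
	}
	return sum;
}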

#define HAVE_ARCH_PICK_MMAP_LAYOUT

int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap);

#endif /* !(__ASSEMBLY__) */

#endif /* !(__ASM_SPARC64_PROCESSOR_H) */