Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  * include/asm/processor.h
0004  *
0005  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
0006  */
0007 
0008 #ifndef __ASM_SPARC64_PROCESSOR_H
0009 #define __ASM_SPARC64_PROCESSOR_H
0010 
0011 #include <asm/asi.h>
0012 #include <asm/pstate.h>
0013 #include <asm/ptrace.h>
0014 #include <asm/page.h>
0015 
0016 /*
0017  * User lives in his very own context, and cannot reference us. Note
0018  * that TASK_SIZE is a misnomer, it really gives maximum user virtual
0019  * address that the kernel will allocate out.
0020  *
0021  * XXX No longer using virtual page tables, kill this upper limit...
0022  */
0023 #define VA_BITS     44
0024 #ifndef __ASSEMBLY__
0025 #define VPTE_SIZE   (1UL << (VA_BITS - PAGE_SHIFT + 3))
0026 #else
0027 #define VPTE_SIZE   (1 << (VA_BITS - PAGE_SHIFT + 3))
0028 #endif
0029 
0030 #define TASK_SIZE_OF(tsk) \
0031     (test_tsk_thread_flag(tsk,TIF_32BIT) ? \
0032      (1UL << 32UL) : ((unsigned long)-VPTE_SIZE))
0033 #define TASK_SIZE \
0034     (test_thread_flag(TIF_32BIT) ? \
0035      (1UL << 32UL) : ((unsigned long)-VPTE_SIZE))
0036 #ifdef __KERNEL__
0037 
0038 #define STACK_TOP32 ((1UL << 32UL) - PAGE_SIZE)
0039 #define STACK_TOP64 (0x0000080000000000UL - (1UL << 32UL))
0040 
0041 #define STACK_TOP   (test_thread_flag(TIF_32BIT) ? \
0042              STACK_TOP32 : STACK_TOP64)
0043 
0044 #define STACK_TOP_MAX   STACK_TOP64
0045 
0046 #endif
0047 
#ifndef __ASSEMBLY__

/* The Sparc processor specific thread struct. */
/* XXX This should die, everything can go into thread_info now. */
struct thread_struct {
#ifdef CONFIG_DEBUG_SPINLOCK
    /* How many spinlocks held by this thread.
     * Used with spin lock debugging to catch tasks
     * sleeping illegally with locks held.
     */
    int smp_lock_count;
    /* NOTE(review): presumably the PC of the most recent lock
     * acquisition site, for debug reports -- confirm at the users. */
    unsigned int smp_lock_pc;
#else
    /* Placeholder so the struct is never empty (zero-sized structs
     * are not valid standard C). */
    int dummy; /* f'in gcc bug... */
#endif
};

#endif /* !(__ASSEMBLY__) */
0066 
/* Static initializer for struct thread_struct.  These are positional
 * initializers, so each variant must list exactly the fields of the
 * matching CONFIG_DEBUG_SPINLOCK layout of the struct. */
#ifndef CONFIG_DEBUG_SPINLOCK
#define INIT_THREAD  {          \
    0,              \
}
#else /* CONFIG_DEBUG_SPINLOCK */
#define INIT_THREAD  {                  \
/* smp_lock_count, smp_lock_pc, */          \
   0,          0,                   \
}
#endif /* !(CONFIG_DEBUG_SPINLOCK) */
0077 
#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/fpumacro.h>

struct task_struct;

/* On Uniprocessor, even in RMO processes see TSO semantics */
/* Initial memory-model bits OR'ed into %tstate for a freshly exec'd
 * task (see start_thread()/start_thread32() below). */
#ifdef CONFIG_SMP
#define TSTATE_INITIAL_MM   TSTATE_TSO
#else
#define TSTATE_INITIAL_MM   TSTATE_RMO
#endif
0091 
/* Do necessary setup to start up a newly executed thread. */
/*
 * start_thread(regs, pc, sp) - arrange for a just-exec'd 64-bit task
 * to enter userspace at @pc with its stack at @sp when the current
 * trap returns.  @regs is the task's saved user register frame.
 */
#define start_thread(regs, pc, sp) \
do { \
    unsigned long __asi = ASI_PNF; /* primary non-faulting ASI for user */ \
    /* Keep only the current-window-pointer bits of the old tstate; set \
     * the initial memory model, enable interrupts, install the ASI. */ \
    regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (__asi << 24UL); \
    /* tnpc becomes the 4-byte-aligned entry point; tpc sits one \
     * instruction behind it.  NOTE(review): the trap-return path \
     * presumably advances tpc to tnpc so execution begins exactly at \
     * pc -- confirm against the rtrap code. */ \
    regs->tpc = ((pc & (~3)) - 4); \
    regs->tnpc = regs->tpc + 4; \
    regs->y = 0; \
    set_thread_wstate(1 << 3); \
    /* Drop our reference on any user trap table inherited over exec(); \
     * element 0 acts as the reference count, free on last reference. */ \
    if (current_thread_info()->utraps) { \
        if (*(current_thread_info()->utraps) < 2) \
            kfree(current_thread_info()->utraps); \
        else \
            (*(current_thread_info()->utraps))--; \
        current_thread_info()->utraps = NULL; \
    } \
    /* Zero u_regs[0..15] except slot 14 (0x70/8, %o6 = stack pointer), \
     * which receives the biased address of the initial register window. \
     * %2 is offsetof(struct pt_regs, u_regs) via the null-pointer idiom. */ \
    __asm__ __volatile__( \
    "stx        %%g0, [%0 + %2 + 0x00]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x08]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x10]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x18]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x20]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x28]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x30]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x38]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x40]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x48]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x50]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x58]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x60]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x68]\n\t" \
    "stx        %1,   [%0 + %2 + 0x70]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x78]\n\t" \
    "wrpr       %%g0, (1 << 3), %%wstate\n\t" \
    : \
    : "r" (regs), "r" (sp - sizeof(struct reg_window) - STACK_BIAS), \
      "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
    /* Reset FPU state: clear %fprs and the saved-fpu bookkeeping, and \
     * clear PEF in tstate.  NOTE(review): presumably the first FPU use \
     * then traps so the kernel can lazily re-enable it -- confirm. */ \
    fprs_write(0);  \
    current_thread_info()->xfsr[0] = 0; \
    current_thread_info()->fpsaved[0] = 0;  \
    regs->tstate &= ~TSTATE_PEF;    \
} while (0)
0134 
/*
 * start_thread32(regs, pc, sp) - 32-bit (compat) counterpart of
 * start_thread(): enter userspace at @pc with stack @sp, with user
 * addresses masked to the low 32 bits.
 */
#define start_thread32(regs, pc, sp) \
do { \
    unsigned long __asi = ASI_PNF; /* primary non-faulting ASI for user */ \
    /* A 32-bit task may only see the low 4GB. */ \
    pc &= 0x00000000ffffffffUL; \
    sp &= 0x00000000ffffffffUL; \
    /* As in start_thread(), but additionally set TSTATE_AM so the cpu \
     * masks all user virtual addresses to 32 bits. */ \
    regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM) | (__asi << 24UL); \
    /* tnpc is the aligned entry point, tpc one instruction behind; see \
     * the note in start_thread(). */ \
    regs->tpc = ((pc & (~3)) - 4); \
    regs->tnpc = regs->tpc + 4; \
    regs->y = 0; \
    set_thread_wstate(2 << 3); \
    /* Drop our reference on any user trap table inherited over exec(); \
     * element 0 acts as the reference count, free on last reference. */ \
    if (current_thread_info()->utraps) { \
        if (*(current_thread_info()->utraps) < 2) \
            kfree(current_thread_info()->utraps); \
        else \
            (*(current_thread_info()->utraps))--; \
        current_thread_info()->utraps = NULL; \
    } \
    /* Zero u_regs[0..15] except slot 14 (%o6 = stack pointer), which \
     * gets the 32-bit register window address (no STACK_BIAS here, \
     * and reg_window32 instead of the 64-bit reg_window). */ \
    __asm__ __volatile__( \
    "stx        %%g0, [%0 + %2 + 0x00]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x08]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x10]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x18]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x20]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x28]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x30]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x38]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x40]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x48]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x50]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x58]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x60]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x68]\n\t" \
    "stx        %1,   [%0 + %2 + 0x70]\n\t" \
    "stx        %%g0, [%0 + %2 + 0x78]\n\t" \
    "wrpr       %%g0, (2 << 3), %%wstate\n\t" \
    : \
    : "r" (regs), "r" (sp - sizeof(struct reg_window32)), \
      "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
    /* Reset FPU state and clear PEF; see the note in start_thread(). */ \
    fprs_write(0);  \
    current_thread_info()->xfsr[0] = 0; \
    current_thread_info()->fpsaved[0] = 0;  \
    regs->tstate &= ~TSTATE_PEF;    \
} while (0)
0178 
/* Free all resources held by a thread. */
/* Nothing arch-specific to release here, hence the empty statement. */
#define release_thread(tsk)     do { } while (0)

/* Resolve the "wait channel" of a blocked task (declared here, defined
 * in arch code). */
unsigned long __get_wchan(struct task_struct *task);

/* The saved user register frame lives in thread_info->kregs. */
#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)              /* user PC */
#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])  /* user SP/FP */
0187 
/* Please see the commentary in asm/backoff.h for a description of
 * what these instructions are doing and how they have been chosen.
 * To make a long story short, we are trying to yield the current cpu
 * strand during busy loops.
 */
#ifdef  BUILD_VDSO
/* vDSO variant: just the three %ccr reads.  NOTE(review): no
 * .pause_3insn_patch entry here, presumably because vDSO text cannot
 * be boot-patched -- confirm against asm/backoff.h. */
#define cpu_relax() asm volatile("\n99:\n\t"            \
                     "rd    %%ccr, %%g0\n\t"    \
                     "rd    %%ccr, %%g0\n\t"    \
                     "rd    %%ccr, %%g0\n\t"    \
                     ::: "memory")
#else /* ! BUILD_VDSO */
/* Kernel variant: the 99: label is recorded in .pause_3insn_patch so
 * the three-instruction sequence can be patched at boot with the
 * "wr %g0, 128, %asr27" pause form on cpus that support it (see
 * asm/backoff.h for the patching scheme). */
#define cpu_relax() asm volatile("\n99:\n\t"            \
                     "rd    %%ccr, %%g0\n\t"    \
                     "rd    %%ccr, %%g0\n\t"    \
                     "rd    %%ccr, %%g0\n\t"    \
                     ".section  .pause_3insn_patch,\"ax\"\n\t"\
                     ".word 99b\n\t"        \
                     "wr    %%g0, 128, %%asr27\n\t" \
                     "nop\n\t"              \
                     "nop\n\t"              \
                     ".previous"            \
                     ::: "memory")
#endif
0212 
/* Prefetch support.  This is tuned for UltraSPARC-III and later.
 * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
 * a shallower prefetch queue than later chips.
 */
#define ARCH_HAS_PREFETCH          /* generic code uses our prefetch() */
#define ARCH_HAS_PREFETCHW         /* generic code uses our prefetchw() */
#define ARCH_HAS_SPINLOCK_PREFETCH /* and our spin_lock_prefetch() */

/* Hint that the cache line containing @x will be read soon. */
static inline void prefetch(const void *x)
{
    /* We do not use the read prefetch mnemonic because that
     * prefetches into the prefetch-cache which only is accessible
     * by floating point operations in UltraSPARC-III and later.
     * By contrast, "#one_write" prefetches into the L2 cache
     * in shared state.
     */
    __asm__ __volatile__("prefetch [%0], #one_write"
                 : /* no outputs */
                 : "r" (x));
}
0233 
/*
 * Hint that the cache line containing @ptr is about to be written.
 * The "#n_writes" prefetch variant pulls the line into the L2 cache
 * in "owned" state, so the upcoming store does not have to upgrade
 * the line from shared to owned first.
 */
static inline void prefetchw(const void *ptr)
{
    __asm__ __volatile__("prefetch [%0], #n_writes"
                 : /* no outputs */
                 : "r" (ptr));
}
0244 
/* Warm up a spinlock's cache line for write before taking it. */
#define spin_lock_prefetch(x)   prefetchw(x)

/* This arch supplies its own mmap layout policy. */
#define HAVE_ARCH_PICK_MMAP_LAYOUT

/* Software FPU emulation entry point.  NOTE(review): presumably the
 * return value indicates whether the instruction was handled --
 * confirm at the trap-handler call sites. */
int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap);
0250 
0251 #endif /* !(__ASSEMBLY__) */
0252 
0253 #endif /* !(__ASM_SPARC64_PROCESSOR_H) */