Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
0003  */
0004 #ifndef _ASM_POWERPC_PPC_ASM_H
0005 #define _ASM_POWERPC_PPC_ASM_H
0006 
0007 #include <linux/stringify.h>
0008 #include <asm/asm-compat.h>
0009 #include <asm/processor.h>
0010 #include <asm/ppc-opcode.h>
0011 #include <asm/firmware.h>
0012 #include <asm/feature-fixups.h>
0013 #include <asm/extable.h>
0014 
0015 #ifdef __ASSEMBLY__
0016 
/* Size of a C 'long' in bytes: BITS_PER_LONG/8 (4 on 32-bit, 8 on 64-bit). */
0017 #define SZL         (BITS_PER_LONG/8)
0018 
0019 /*
0020  * This expands to a sequence of operations with reg incrementing from
0021  * start to end inclusive, of this form:
0022  *
0023  *   op  reg, (offset + (width * reg))(base)
0024  *
0025  * Note that offset is not the offset of the first operation unless start
0026  * is zero (or width is zero).
0027  */
/* .Lreg is an assembler-local symbol stepped once per .rept iteration. */
0028 .macro OP_REGS op, width, start, end, base, offset
0029     .Lreg=\start
0030     .rept (\end - \start + 1)
0031     \op .Lreg, \offset + \width * .Lreg(\base)
0032     .Lreg=.Lreg+1
0033     .endr
0034 .endm
0035 
0036 /*
0037  * Macros for storing registers into and loading registers from
0038  * exception frames.
0039  */
/*
 * Non-volatile ("NV") range is r14-r31 on 64-bit but r13-r31 on 32-bit.
 * NOTE(review): presumably r13 is excluded on 64-bit because it is
 * reserved (per-CPU/thread pointer) — confirm against the 64-bit ABI.
 * Slot n lives at GPR0 + n*SZL from the frame base.
 */
0040 #ifdef __powerpc64__
0041 #define SAVE_GPRS(start, end, base) OP_REGS std, 8, start, end, base, GPR0
0042 #define REST_GPRS(start, end, base) OP_REGS ld, 8, start, end, base, GPR0
0043 #define SAVE_NVGPRS(base)       SAVE_GPRS(14, 31, base)
0044 #define REST_NVGPRS(base)       REST_GPRS(14, 31, base)
0045 #else
0046 #define SAVE_GPRS(start, end, base) OP_REGS stw, 4, start, end, base, GPR0
0047 #define REST_GPRS(start, end, base) OP_REGS lwz, 4, start, end, base, GPR0
0048 #define SAVE_NVGPRS(base)       SAVE_GPRS(13, 31, base)
0049 #define REST_NVGPRS(base)       REST_GPRS(13, 31, base)
0050 #endif
0051 
/* Single-register convenience forms: a one-register range of the above. */
0052 #define SAVE_GPR(n, base)       SAVE_GPRS(n, n, base)
0053 #define REST_GPR(n, base)       REST_GPRS(n, n, base)
0054 
/*
 * FPR save/restore.  Slot stride is 8*TS_FPRWIDTH bytes, not 8, so the
 * thread struct can interleave wider per-register state.
 * NOTE(review): TS_FPRWIDTH is defined elsewhere (asm-offsets) — confirm
 * its value before relying on the exact layout.
 */
0055 #define SAVE_FPR(n, base)   stfd    n,8*TS_FPRWIDTH*(n)(base)
0056 #define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
0057 #define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
0058 #define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
0059 #define SAVE_16FPRS(n, base)    SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
0060 #define SAVE_32FPRS(n, base)    SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
0061 #define REST_FPR(n, base)   lfd n,8*TS_FPRWIDTH*(n)(base)
0062 #define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base)
0063 #define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base)
0064 #define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base)
0065 #define REST_16FPRS(n, base)    REST_8FPRS(n, base); REST_8FPRS(n+8, base)
0066 #define REST_32FPRS(n, base)    REST_16FPRS(n, base); REST_16FPRS(n+16, base)
0067 
/*
 * AltiVec VR save/restore.  stvx/lvx only take register-indexed addresses,
 * so scratch GPR 'b' is loaded with the byte offset (16 bytes per VR)
 * before each access.  'b' is clobbered.
 */
0068 #define SAVE_VR(n,b,base)   li b,16*(n);  stvx n,base,b
0069 #define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
0070 #define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
0071 #define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
0072 #define SAVE_16VRS(n,b,base)    SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
0073 #define SAVE_32VRS(n,b,base)    SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
0074 #define REST_VR(n,b,base)   li b,16*(n); lvx n,base,b
0075 #define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
0076 #define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
0077 #define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
0078 #define REST_16VRS(n,b,base)    REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
0079 #define REST_32VRS(n,b,base)    REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
0080 
/*
 * "_ROT" (rotated) VSX accessors: on little-endian, stxvd2x/lxvd2x access
 * the two doublewords in swapped order, so XXSWAPD is applied around the
 * store (and after the load) to keep the in-memory image in big-endian
 * doubleword order.  On big-endian they are plain stxvd2x/lxvd2x.
 */
0081 #ifdef __BIG_ENDIAN__
0082 #define STXVD2X_ROT(n,b,base)       STXVD2X(n,b,base)
0083 #define LXVD2X_ROT(n,b,base)        LXVD2X(n,b,base)
0084 #else
0085 #define STXVD2X_ROT(n,b,base)       XXSWAPD(n,n);       \
0086                     STXVD2X(n,b,base);  \
0087                     XXSWAPD(n,n)
0088 
0089 #define LXVD2X_ROT(n,b,base)        LXVD2X(n,b,base);   \
0090                     XXSWAPD(n,n)
0091 #endif
0092 /* Save the lower 32 VSRs in the thread VSR region */
/* 16 bytes per VSR; scratch GPR 'b' holds the offset and is clobbered. */
0093 #define SAVE_VSR(n,b,base)  li b,16*(n);  STXVD2X_ROT(n,R##base,R##b)
0094 #define SAVE_2VSRS(n,b,base)    SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
0095 #define SAVE_4VSRS(n,b,base)    SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
0096 #define SAVE_8VSRS(n,b,base)    SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
0097 #define SAVE_16VSRS(n,b,base)   SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
0098 #define SAVE_32VSRS(n,b,base)   SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
0099 #define REST_VSR(n,b,base)  li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
0100 #define REST_2VSRS(n,b,base)    REST_VSR(n,b,base); REST_VSR(n+1,b,base)
0101 #define REST_4VSRS(n,b,base)    REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
0102 #define REST_8VSRS(n,b,base)    REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
0103 #define REST_16VSRS(n,b,base)   REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
0104 #define REST_32VSRS(n,b,base)   REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
0105 
0106 /*
0107  * b = base register for addressing, o = base offset from register of 1st EVR
0108  * n = first EVR, s = scratch
0109  */
/*
 * Only the upper 32-bit halves are moved: SAVE pulls the high word out with
 * evmergehi and stores it; REST loads the word and merges it back with
 * evmergelo.  NOTE(review): presumably the low halves are the ordinary GPRs,
 * saved separately — confirm against the SPE register model.  's' is
 * clobbered by SAVE; REST leaves the loaded word in 's'.
 */
0110 #define SAVE_EVR(n,s,b,o)   evmergehi s,s,n; stw s,o+4*(n)(b)
0111 #define SAVE_2EVRS(n,s,b,o) SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
0112 #define SAVE_4EVRS(n,s,b,o) SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
0113 #define SAVE_8EVRS(n,s,b,o) SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
0114 #define SAVE_16EVRS(n,s,b,o)    SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
0115 #define SAVE_32EVRS(n,s,b,o)    SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
0116 #define REST_EVR(n,s,b,o)   lwz s,o+4*(n)(b); evmergelo n,s,n
0117 #define REST_2EVRS(n,s,b,o) REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
0118 #define REST_4EVRS(n,s,b,o) REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
0119 #define REST_8EVRS(n,s,b,o) REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
0120 #define REST_16EVRS(n,s,b,o)    REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
0121 #define REST_32EVRS(n,s,b,o)    REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)
0122 
0123 /* Macros to adjust thread priority for hardware multithreading */
/*
 * Each of these is an architectural no-op ("or rN,rN,rN") that SMT-capable
 * hardware decodes as a thread-priority hint; the register number selects
 * the priority level.
 */
0124 #define HMT_VERY_LOW    or  31,31,31    # very low priority
0125 #define HMT_LOW     or  1,1,1
0126 #define HMT_MEDIUM_LOW  or  6,6,6       # medium low priority
0127 #define HMT_MEDIUM  or  2,2,2
0128 #define HMT_MEDIUM_HIGH or  5,5,5       # medium high priority
0129 #define HMT_HIGH    or  3,3,3
0130 #define HMT_EXTRA_HIGH  or  7,7,7       # power7 only
0131 
/*
 * Offset of guest GPR n within the vcpu structure: VCPU_GPRS plus n
 * machine words.  VCPU_GPR takes a symbolic register name (e.g. R3) and
 * maps it to its number via the __REG_* aliases.
 * NOTE(review): VCPU_GPRS and __REG_* are defined elsewhere (asm-offsets /
 * ppc-opcode) — confirm before changing the arithmetic.
 */
0132 #ifdef CONFIG_PPC64
0133 #define ULONG_SIZE  8
0134 #else
0135 #define ULONG_SIZE  4
0136 #endif
0137 #define __VCPU_GPR(n)   (VCPU_GPRS + (n * ULONG_SIZE))
0138 #define VCPU_GPR(n) __VCPU_GPR(__REG_##n)
0139 
0140 #ifdef __KERNEL__
0141 
0142 /*
0143  * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit
0144  * version below in the else case of the ifdef.
0145  */
0146 #ifdef __powerpc64__
0147 
/*
 * Local stack-frame layout helpers for hand-written 64-bit asm:
 *  - STK_REG(Rn):   save slot for callee-saved GPR n (r14 is the first slot,
 *                   at offset 112), inside a STACKFRAMESIZE-byte frame.
 *  - STK_PARAM(Rn): caller's parameter-save slot for argument register n
 *                   (r3 is the first argument register).
 *  - STK_GOT:       TOC (r2) save slot offset.
 * The ELFv2 ABI has a smaller frame header (parameter area at 32, TOC save
 * at 24) than ELFv1 (48 and 40 respectively), hence the two variants.
 */
0148 #define STACKFRAMESIZE 256
0149 #define __STK_REG(i)   (112 + ((i)-14)*8)
0150 #define STK_REG(i)     __STK_REG(__REG_##i)
0151 
0152 #ifdef CONFIG_PPC64_ELF_ABI_V2
0153 #define STK_GOT     24
0154 #define __STK_PARAM(i)  (32 + ((i)-3)*8)
0155 #else
0156 #define STK_GOT     40
0157 #define __STK_PARAM(i)  (48 + ((i)-3)*8)
0158 #endif
0159 #define STK_PARAM(i)    __STK_PARAM(__REG_##i)
0160 
0161 #ifdef CONFIG_PPC64_ELF_ABI_V2
0162 
/* ELFv2: plain global function entry — no descriptor, no TOC setup. */
0163 #define _GLOBAL(name) \
0164     .align 2 ; \
0165     .type name,@function; \
0166     .globl name; \
0167 name:
0168 
/*
 * ELFv2 dual-entry function: the global entry point computes r2 (TOC)
 * from r12, which the ABI guarantees holds the function's own entry
 * address on indirect/cross-module calls; .localentry marks the point
 * after the TOC setup as the local entry, used when r2 is already valid.
 */
0169 #define _GLOBAL_TOC(name) \
0170     .align 2 ; \
0171     .type name,@function; \
0172     .globl name; \
0173 name: \
0174 0:  addis r2,r12,(.TOC.-0b)@ha; \
0175     addi r2,r2,(.TOC.-0b)@l; \
0176     .localentry name,.-name
0177 
/* ELFv2 has no dot-symbols: DOTSYM is the identity. */
0178 #define DOTSYM(a)   a
0179 
0180 #else
0181 
0182 #define XGLUE(a,b) a##b
0183 #define GLUE(a,b) XGLUE(a,b)
0184 
/*
 * ELFv1: 'name' is a function descriptor emitted into .opd (entry address,
 * TOC base, environment), while the actual code lives at the dot-symbol
 * '.name'.  Direct calls from asm must target DOTSYM(name).
 */
0185 #define _GLOBAL(name) \
0186     .align 2 ; \
0187     .globl name; \
0188     .globl GLUE(.,name); \
0189     .pushsection ".opd","aw"; \
0190 name: \
0191     .quad GLUE(.,name); \
0192     .quad .TOC.@tocbase; \
0193     .quad 0; \
0194     .popsection; \
0195     .type GLUE(.,name),@function; \
0196 GLUE(.,name):
0197 
/* ELFv1 descriptors already carry the TOC, so no separate TOC variant. */
0198 #define _GLOBAL_TOC(name) _GLOBAL(name)
0199 
0200 #define DOTSYM(a)   GLUE(.,a)
0201 
0202 #endif
0203 
0204 #else /* 32-bit */
0205 
/* 32-bit: no descriptors, no TOC — just a global label. */
0206 #define _GLOBAL(n)  \
0207     .globl n;   \
0208 n:
0209 
0210 #define _GLOBAL_TOC(name) _GLOBAL(name)
0211 
0212 #define DOTSYM(a)   a
0213 
0214 #endif
0215 
0216 /*
0217  * __kprobes (the C annotation) puts the symbol into the .kprobes.text
0218  * section, which gets emitted at the end of regular text.
0219  *
0220  * _ASM_NOKPROBE_SYMBOL and NOKPROBE_SYMBOL just adds the symbol to
0221  * a blacklist. The former is for core kprobe functions/data, the
0222  * latter is for those that incidentally must be excluded from probing
0223  * and allows them to be linked at more optimal location within text.
0224  */
0225 #ifdef CONFIG_KPROBES
/* Record the symbol's address in the _kprobe_blacklist section. */
0226 #define _ASM_NOKPROBE_SYMBOL(entry)         \
0227     .pushsection "_kprobe_blacklist","aw";      \
0228     PPC_LONG (entry) ;              \
0229     .popsection
0230 #else
0231 #define _ASM_NOKPROBE_SYMBOL(entry)
0232 #endif
0233 
/* Generic function bracketing aliases; FUNC_END is intentionally empty. */
0234 #define FUNC_START(name)    _GLOBAL(name)
0235 #define FUNC_END(name)
0236 
0237 /* 
0238  * LOAD_REG_IMMEDIATE(rn, expr)
0239  *   Loads the value of the constant expression 'expr' into register 'rn'
0240  *   using immediate instructions only.  Use this when it's important not
0241  *   to reference other data (i.e. on ppc64 when the TOC pointer is not
0242  *   valid) and when 'expr' is a constant or absolute address.
0243  *
0244  * LOAD_REG_ADDR(rn, name)
0245  *   Loads the address of label 'name' into register 'rn'.  Use this when
0246  *   you don't particularly need immediate instructions only, but you need
0247  *   the whole address in one register (e.g. it's a structure address and
0248  *   you want to access various offsets within it).  On ppc32 this is
0249  *   identical to LOAD_REG_IMMEDIATE.
0250  *
0251  * LOAD_REG_ADDR_PIC(rn, name)
0252  *   Loads the address of label 'name' into register 'rn'. Use this when
0253  *   the kernel doesn't run at the linked or relocated address. Please
0254  *   note that this macro will clobber the lr register.
0255  *
0256  * LOAD_REG_ADDRBASE(rn, name)
0257  * ADDROFF(name)
0258  *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into
0259  *   register 'rn'.  ADDROFF(name) returns the remainder of the address as
0260  *   a constant expression.  ADDROFF(name) is a signed expression < 16 bits
0261  *   in size, so is suitable for use directly as an offset in load and store
0262  *   instructions.  Use this when loading/storing a single word or less as:
0263  *      LOAD_REG_ADDRBASE(rX, name)
0264  *      ld  rY,ADDROFF(name)(rX)
0265  */
0266 
0267 /* Be careful, this will clobber the lr register. */
/*
 * bcl 20,31,$+4 is an always-taken branch-and-link to the next instruction:
 * it puts the current PC in lr without a real call, so mflr + the @ha/@l
 * offset arithmetic yields 'name''s runtime address regardless of where
 * the image was loaded.
 */
0268 #define LOAD_REG_ADDR_PIC(reg, name)        \
0269     bcl 20,31,$+4;          \
0270 0:  mflr    reg;                \
0271     addis   reg,reg,(name - 0b)@ha;     \
0272     addi    reg,reg,(name - 0b)@l;
0273 
/*
 * Pick the assembler operator for "upper 16 bits": @high where the
 * assembler supports it, else @h.
 * NOTE(review): presumably @high is needed for correct unsigned extraction
 * from 64-bit constants — confirm against the GNU as PowerPC relocations.
 */
0274 #if defined(__powerpc64__) && defined(HAVE_AS_ATHIGH)
0275 #define __AS_ATHIGH high
0276 #else
0277 #define __AS_ATHIGH h
0278 #endif
0279 
/*
 * Load a 32-bit constant x into r using at most lis+ori.  Values that fit
 * a signed 16-bit immediate use a single li; the ori is skipped when the
 * low halfword is zero.
 */
0280 .macro __LOAD_REG_IMMEDIATE_32 r, x
0281     .if (\x) >= 0x8000 || (\x) < -0x8000
0282         lis \r, (\x)@__AS_ATHIGH
0283         .if (\x) & 0xffff != 0
0284             ori \r, \r, (\x)@l
0285         .endif
0286     .else
0287         li \r, (\x)@l
0288     .endif
0289 .endm
0290 
/*
 * Load a 64-bit constant x into r using only immediates: build the upper
 * 32 bits with __LOAD_REG_IMMEDIATE_32, shift them up, then OR in the
 * lower halfwords (each skipped when zero).  Falls back to the 32-bit
 * loader when x fits in 32 bits.
 */
0291 .macro __LOAD_REG_IMMEDIATE r, x
0292     .if (\x) >= 0x80000000 || (\x) < -0x80000000
0293         __LOAD_REG_IMMEDIATE_32 \r, (\x) >> 32
0294         sldi    \r, \r, 32
0295         .if (\x) & 0xffff0000 != 0
0296             oris \r, \r, (\x)@__AS_ATHIGH
0297         .endif
0298         .if (\x) & 0xffff != 0
0299             ori \r, \r, (\x)@l
0300         .endif
0301     .else
0302         __LOAD_REG_IMMEDIATE_32 \r, \x
0303     .endif
0304 .endm
0305 
0306 #ifdef __powerpc64__
0307 
0308 #define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE reg, expr
0309 
/*
 * Fixed-length (5 instruction) 64-bit symbol-address load: builds the high
 * 32 bits in tmp and the low 32 bits in reg, then merges with rldimi.
 * 'tmp' is clobbered.
 */
0310 #define LOAD_REG_IMMEDIATE_SYM(reg, tmp, expr)  \
0311     lis tmp, (expr)@highest;        \
0312     lis reg, (expr)@__AS_ATHIGH;    \
0313     ori tmp, tmp, (expr)@higher;    \
0314     ori reg, reg, (expr)@l;     \
0315     rldimi  reg, tmp, 32, 0
0316 
/* 64-bit: load the address from the GOT via the TOC pointer (r2 must be valid). */
0317 #define LOAD_REG_ADDR(reg,name)         \
0318     ld  reg,name@got(r2)
0319 
0320 #define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name)
0321 #define ADDROFF(name)           0
0322 
0323 /* offsets for stack frame layout */
/* LR save slot in the caller's frame: 16 bytes on 64-bit, 4 on 32-bit. */
0324 #define LRSAVE  16
0325 
0326 #else /* 32-bit */
0327 
0328 #define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE_32 reg, expr
0329 
/* 32-bit: lis/addi pair; @ha compensates for addi's sign extension of @l. */
0330 #define LOAD_REG_IMMEDIATE_SYM(reg,expr)        \
0331     lis reg,(expr)@ha;      \
0332     addi    reg,reg,(expr)@l;
0333 
0334 #define LOAD_REG_ADDR(reg,name)     LOAD_REG_IMMEDIATE_SYM(reg, name)
0335 
0336 #define LOAD_REG_ADDRBASE(reg, name)    lis reg,name@ha
0337 #define ADDROFF(name)           name@l
0338 
0339 /* offsets for stack frame layout */
0340 #define LRSAVE  4
0341 
0342 #endif
0343 
0344 /* various errata or part fixups */
/*
 * MFTB: read the timebase.  The Cell timebase bug can return 0 from the
 * read, so under that CPU feature the read is retried until non-zero.
 * 'dest' is clobbered (also used for the zero test).
 */
0345 #if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
0346 #define MFTB(dest)          \
0347 90: mfspr dest, SPRN_TBRL;      \
0348 BEGIN_FTR_SECTION_NESTED(96);       \
0349     cmpwi dest,0;           \
0350     beq-  90b;          \
0351 END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
0352 #else
0353 #define MFTB(dest)          MFTBL(dest)
0354 #endif
0355 
/* 8xx uses the dedicated mftb/mftbu forms; others read the TBR SPRs. */
0356 #ifdef CONFIG_PPC_8xx
0357 #define MFTBL(dest)         mftb dest
0358 #define MFTBU(dest)         mftbu dest
0359 #else
0360 #define MFTBL(dest)         mfspr dest, SPRN_TBRL
0361 #define MFTBU(dest)         mfspr dest, SPRN_TBRU
0362 #endif
0363 
/* tlbsync (plus a sync) is only meaningful/needed on SMP. */
0364 #ifndef CONFIG_SMP
0365 #define TLBSYNC
0366 #else
0367 #define TLBSYNC     tlbsync; sync
0368 #endif
0369 
0370 #ifdef CONFIG_PPC64
/*
 * Move to a single CR field: uses the whole-CR mtcrf form or the
 * single-field mtocrf form depending on the CPU feature fixup.
 * NOTE(review): CPU_FTR_NOEXECUTE as the gating feature bit looks
 * surprising — confirm it is the intended always-set proxy bit here.
 */
0371 #define MTOCRF(FXM, RS)         \
0372     BEGIN_FTR_SECTION_NESTED(848);  \
0373     mtcrf   (FXM), RS;      \
0374     FTR_SECTION_ELSE_NESTED(848);   \
0375     mtocrf (FXM), RS;       \
0376     ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
0377 #endif
0378 
0379 /*
0380  * This instruction is not implemented on the PPC 603 or 601; however, on
0381  * the 403GCX and 405GP tlbia IS defined and tlbie is not.
0382  * All of these instructions exist in the 8xx, they have magical powers,
0383  * and they must be used.
0384  */
0385 
/*
 * Software tlbia: issue 1024 tlbie operations over consecutive 4 KB pages
 * starting at KERNELBASE.  Clobbers r4 and ctr.  The .machine push/pop to
 * "power4" is needed so the assembler accepts this tlbie form.
 */
0386 #if !defined(CONFIG_4xx) && !defined(CONFIG_PPC_8xx)
0387 #define tlbia                   \
0388     li  r4,1024;            \
0389     mtctr   r4;             \
0390     lis r4,KERNELBASE@h;        \
0391     .machine push;              \
0392     .machine "power4";          \
0393 0:  tlbie   r4;             \
0394     .machine pop;               \
0395     addi    r4,r4,0x1000;           \
0396     bdnz    0b
0397 #endif
0398 
0399 
/* Erratum workaround: extra isync for the 440EP (erratum 42) — no-op elsewhere. */
0400 #ifdef CONFIG_IBM440EP_ERR42
0401 #define PPC440EP_ERR42 isync
0402 #else
0403 #define PPC440EP_ERR42
0404 #endif
0405 
0406 /* The following stops all load and store data streams associated with stream
0407  * ID (ie. streams created explicitly).  The embedded and server mnemonics for
0408  * dcbt are different so this must only be used for server.
0409  */
/* 0b01010 is the dcbt TH field encoding for "stop all streams"; 'scratch' is clobbered. */
0410 #define DCBT_BOOK3S_STOP_ALL_STREAM_IDS(scratch)    \
0411        lis     scratch,0x60000000@h;            \
0412        dcbt    0,scratch,0b01010
0413 
0414 /*
0415  * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
0416  * keep the address intact to be compatible with code shared with
0417  * 32-bit classic.
0418  *
0419  * On the other hand, I find it useful to have them behave as expected
0420  * by their name (ie always do the addition) on 64-bit BookE
0421  */
0422 #if defined(CONFIG_BOOKE) && !defined(CONFIG_PPC64)
0423 #define toreal(rd)
0424 #define fromreal(rd)
0425 
0426 /*
0427  * We use addis to ensure compatibility with the "classic" ppc versions of
0428  * these macros, which use rs = 0 to get the tophys offset in rd, rather than
0429  * converting the address in r0, and so this version has to do that too
0430  * (i.e. set register rd to 0 when rs == 0).
0431  */
/* 32-bit BookE: virtual == real for kernel addresses, so these just copy. */
0432 #define tophys(rd,rs)               \
0433     addis   rd,rs,0
0434 
0435 #define tovirt(rd,rs)               \
0436     addis   rd,rs,0
0437 
0438 #elif defined(CONFIG_PPC64)
0439 #define toreal(rd)      /* we can access c000... in real mode */
0440 #define fromreal(rd)
0441 
/* 64-bit: physical = virtual with the top two address bits cleared. */
0442 #define tophys(rd,rs)                           \
0443     clrldi  rd,rs,2
0444 
/*
 * Reverse mapping: rotate the top 16 bits down, OR in the top 16 bits of
 * KERNELBASE, rotate back — i.e. set the high halfword without needing a
 * scratch register.
 */
0445 #define tovirt(rd,rs)                           \
0446     rotldi  rd,rs,16;           \
0447     ori rd,rd,((KERNELBASE>>48)&0xFFFF);\
0448     rotldi  rd,rd,48
0449 #else
0450 #define toreal(rd)  tophys(rd,rd)
0451 #define fromreal(rd)    tovirt(rd,rd)
0452 
/* Classic 32-bit: linear map at PAGE_OFFSET, so add/subtract its high half. */
0453 #define tophys(rd, rs)  addis   rd, rs, -PAGE_OFFSET@h
0454 #define tovirt(rd, rs)  addis   rd, rs, PAGE_OFFSET@h
0455 #endif
0456 
/*
 * MSR update wrappers: Book3S-64 uses the 64-bit mtmsrd; the L=1 form
 * (MTMSR_EERI) updates only the EE and RI bits.  Other platforms use
 * the 32-bit mtmsr, which has no L field.
 */
0457 #ifdef CONFIG_PPC_BOOK3S_64
0458 #define MTMSRD(r)   mtmsrd  r
0459 #define MTMSR_EERI(reg) mtmsrd  reg,1
0460 #else
0461 #define MTMSRD(r)   mtmsr   r
0462 #define MTMSR_EERI(reg) mtmsr   reg
0463 #endif
0464 
0465 #endif /* __KERNEL__ */
0466 
0467 /* The boring bits... */
0468 
0469 /* Condition Register Bit Fields */
0470 
/* Field numbers for use as CR-field operands in compare/branch mnemonics. */
0471 #define cr0 0
0472 #define cr1 1
0473 #define cr2 2
0474 #define cr3 3
0475 #define cr4 4
0476 #define cr5 5
0477 #define cr6 6
0478 #define cr7 7
0479 
0480 
0481 /*
0482  * General Purpose Registers (GPRs)
0483  *
0484  * The lower case r0-r31 should be used in preference to the upper
0485  * case R0-R31 as they provide more error checking in the assembler.
0486  * Use R0-31 only when really necessary.
0487  */
0488 
0489 #define r0  %r0
0490 #define r1  %r1
0491 #define r2  %r2
0492 #define r3  %r3
0493 #define r4  %r4
0494 #define r5  %r5
0495 #define r6  %r6
0496 #define r7  %r7
0497 #define r8  %r8
0498 #define r9  %r9
0499 #define r10 %r10
0500 #define r11 %r11
0501 #define r12 %r12
0502 #define r13 %r13
0503 #define r14 %r14
0504 #define r15 %r15
0505 #define r16 %r16
0506 #define r17 %r17
0507 #define r18 %r18
0508 #define r19 %r19
0509 #define r20 %r20
0510 #define r21 %r21
0511 #define r22 %r22
0512 #define r23 %r23
0513 #define r24 %r24
0514 #define r25 %r25
0515 #define r26 %r26
0516 #define r27 %r27
0517 #define r28 %r28
0518 #define r29 %r29
0519 #define r30 %r30
0520 #define r31 %r31
0521 
0522 
0523 /* Floating Point Registers (FPRs) */
0524 
0525 #define fr0 0
0526 #define fr1 1
0527 #define fr2 2
0528 #define fr3 3
0529 #define fr4 4
0530 #define fr5 5
0531 #define fr6 6
0532 #define fr7 7
0533 #define fr8 8
0534 #define fr9 9
0535 #define fr10    10
0536 #define fr11    11
0537 #define fr12    12
0538 #define fr13    13
0539 #define fr14    14
0540 #define fr15    15
0541 #define fr16    16
0542 #define fr17    17
0543 #define fr18    18
0544 #define fr19    19
0545 #define fr20    20
0546 #define fr21    21
0547 #define fr22    22
0548 #define fr23    23
0549 #define fr24    24
0550 #define fr25    25
0551 #define fr26    26
0552 #define fr27    27
0553 #define fr28    28
0554 #define fr29    29
0555 #define fr30    30
0556 #define fr31    31
0557 
0558 /* AltiVec Registers (VPRs) */
0559 
0560 #define v0  0
0561 #define v1  1
0562 #define v2  2
0563 #define v3  3
0564 #define v4  4
0565 #define v5  5
0566 #define v6  6
0567 #define v7  7
0568 #define v8  8
0569 #define v9  9
0570 #define v10 10
0571 #define v11 11
0572 #define v12 12
0573 #define v13 13
0574 #define v14 14
0575 #define v15 15
0576 #define v16 16
0577 #define v17 17
0578 #define v18 18
0579 #define v19 19
0580 #define v20 20
0581 #define v21 21
0582 #define v22 22
0583 #define v23 23
0584 #define v24 24
0585 #define v25 25
0586 #define v26 26
0587 #define v27 27
0588 #define v28 28
0589 #define v29 29
0590 #define v30 30
0591 #define v31 31
0592 
0593 /* VSX Registers (VSRs) */
0594 
/* NOTE(review): vs32-vs63 overlay the AltiVec registers v0-v31 in the VSX
 * register file — confirm with the Power ISA before mixing the two sets. */
0595 #define vs0 0
0596 #define vs1 1
0597 #define vs2 2
0598 #define vs3 3
0599 #define vs4 4
0600 #define vs5 5
0601 #define vs6 6
0602 #define vs7 7
0603 #define vs8 8
0604 #define vs9 9
0605 #define vs10    10
0606 #define vs11    11
0607 #define vs12    12
0608 #define vs13    13
0609 #define vs14    14
0610 #define vs15    15
0611 #define vs16    16
0612 #define vs17    17
0613 #define vs18    18
0614 #define vs19    19
0615 #define vs20    20
0616 #define vs21    21
0617 #define vs22    22
0618 #define vs23    23
0619 #define vs24    24
0620 #define vs25    25
0621 #define vs26    26
0622 #define vs27    27
0623 #define vs28    28
0624 #define vs29    29
0625 #define vs30    30
0626 #define vs31    31
0627 #define vs32    32
0628 #define vs33    33
0629 #define vs34    34
0630 #define vs35    35
0631 #define vs36    36
0632 #define vs37    37
0633 #define vs38    38
0634 #define vs39    39
0635 #define vs40    40
0636 #define vs41    41
0637 #define vs42    42
0638 #define vs43    43
0639 #define vs44    44
0640 #define vs45    45
0641 #define vs46    46
0642 #define vs47    47
0643 #define vs48    48
0644 #define vs49    49
0645 #define vs50    50
0646 #define vs51    51
0647 #define vs52    52
0648 #define vs53    53
0649 #define vs54    54
0650 #define vs55    55
0651 #define vs56    56
0652 #define vs57    57
0653 #define vs58    58
0654 #define vs59    59
0655 #define vs60    60
0656 #define vs61    61
0657 #define vs62    62
0658 #define vs63    63
0659 
0660 /* SPE Registers (EVPRs) */
0661 
0662 #define evr0    0
0663 #define evr1    1
0664 #define evr2    2
0665 #define evr3    3
0666 #define evr4    4
0667 #define evr5    5
0668 #define evr6    6
0669 #define evr7    7
0670 #define evr8    8
0671 #define evr9    9
0672 #define evr10   10
0673 #define evr11   11
0674 #define evr12   12
0675 #define evr13   13
0676 #define evr14   14
0677 #define evr15   15
0678 #define evr16   16
0679 #define evr17   17
0680 #define evr18   18
0681 #define evr19   19
0682 #define evr20   20
0683 #define evr21   21
0684 #define evr22   22
0685 #define evr23   23
0686 #define evr24   24
0687 #define evr25   25
0688 #define evr26   26
0689 #define evr27   27
0690 #define evr28   28
0691 #define evr29   29
0692 #define evr30   30
0693 #define evr31   31
0694 
/* Raw encoding of the 'rfscv' instruction (return from system call
 * vectored), for toolchains whose assembler lacks the mnemonic. */
0695 #define RFSCV   .long 0x4c0000a4
0696 
0697 /*
0698  * Create an endian fixup trampoline
0699  *
0700  * This starts with a "tdi 0,0,0x48" instruction which is
0701  * essentially a "trap never", and thus akin to a nop.
0702  *
0703  * The opcode for this instruction read with the wrong endian
0704  * however results in a b . + 8
0705  *
0706  * So essentially we use that trick to execute the following
0707  * trampoline in "reverse endian" if we are running with the
0708  * MSR_LE bit set the "wrong" way for whatever endianness the
0709  * kernel is built for.
0710  */
0711 
/* NOTE(review): BookE gets a no-op here — presumably wrong-endian entry
 * cannot happen on BookE; confirm before relying on it. */
0712 #ifdef CONFIG_PPC_BOOK3E
0713 #define FIXUP_ENDIAN
0714 #else
0715 /*
0716  * This version may be used in HV or non-HV context.
0717  * MSR[EE] must be disabled.
0718  */
/* The .long words below are byte-reversed instructions: they execute only
 * when the CPU is running in the opposite endianness, and rfid-switch the
 * MSR_LE bit before resuming at 191:.  Clobbers r10, r11, srr0, srr1. */
0719 #define FIXUP_ENDIAN                           \
0720     tdi   0,0,0x48;   /* Reverse endian of b . + 8      */ \
0721     b     191f;   /* Skip trampoline if endian is good  */ \
0722     .long 0xa600607d; /* mfmsr r11              */ \
0723     .long 0x01006b69; /* xori r11,r11,1         */ \
0724     .long 0x00004039; /* li r10,0               */ \
0725     .long 0x6401417d; /* mtmsrd r10,1           */ \
0726     .long 0x05009f42; /* bcl 20,31,$+4          */ \
0727     .long 0xa602487d; /* mflr r10               */ \
0728     .long 0x14004a39; /* addi r10,r10,20            */ \
0729     .long 0xa6035a7d; /* mtsrr0 r10             */ \
0730     .long 0xa6037b7d; /* mtsrr1 r11             */ \
0731     .long 0x2400004c; /* rfid               */ \
0732 191:
0733 
0734 /*
0735  * This version that may only be used with MSR[HV]=1
0736  * - Does not clear MSR[RI], so more robust.
0737  * - Slightly smaller and faster.
0738  */
/* Hypervisor variant: uses hsrr0/hsrr1 + hrfid, so MSR[RI] is untouched.
 * Clobbers r10, r11, hsrr0, hsrr1. */
0739 #define FIXUP_ENDIAN_HV                        \
0740     tdi   0,0,0x48;   /* Reverse endian of b . + 8      */ \
0741     b     191f;   /* Skip trampoline if endian is good  */ \
0742     .long 0xa600607d; /* mfmsr r11              */ \
0743     .long 0x01006b69; /* xori r11,r11,1         */ \
0744     .long 0x05009f42; /* bcl 20,31,$+4          */ \
0745     .long 0xa602487d; /* mflr r10               */ \
0746     .long 0x14004a39; /* addi r10,r10,20            */ \
0747     .long 0xa64b5a7d; /* mthsrr0 r10            */ \
0748     .long 0xa64b7b7d; /* mthsrr1 r11            */ \
0749     .long 0x2402004c; /* hrfid              */ \
0750 191:
0751 
0752 #endif /* !CONFIG_PPC_BOOK3E */
0753 
0754 #endif /*  __ASSEMBLY__ */
0755 
/*
 * Emit an 8-byte-aligned [start, end] address-range entry into the
 * __soft_mask_table section; stringify_in_c makes the same macro usable
 * from C inline asm and from .S files.
 * NOTE(review): presumably consumed by the 64-bit interrupt-entry code to
 * treat the range as soft-masked — confirm against the table's reader.
 */
0756 #define SOFT_MASK_TABLE(_start, _end)       \
0757     stringify_in_c(.section __soft_mask_table,"a";)\
0758     stringify_in_c(.balign 8;)      \
0759     stringify_in_c(.llong (_start);)    \
0760     stringify_in_c(.llong (_end);)      \
0761     stringify_in_c(.previous)
0762 
/* Same shape with a third word: a restart target address for the range. */
0763 #define RESTART_TABLE(_start, _end, _target)    \
0764     stringify_in_c(.section __restart_table,"a";)\
0765     stringify_in_c(.balign 8;)      \
0766     stringify_in_c(.llong (_start);)    \
0767     stringify_in_c(.llong (_end);)      \
0768     stringify_in_c(.llong (_target);)   \
0769     stringify_in_c(.previous)
0770 
/*
 * Branch predictor flush for Freescale Book3E: write BUCSR_INIT to the
 * branch unit control SPR, then isync to make it take effect.
 * 'reg' is clobbered.  No-op on other platforms.
 * NOTE(review): BUCSR_INIT presumably includes the flush bit — confirm
 * against the SPRN_BUCSR definition.
 */
0771 #ifdef CONFIG_PPC_FSL_BOOK3E
0772 #define BTB_FLUSH(reg)          \
0773     lis reg,BUCSR_INIT@h;       \
0774     ori reg,reg,BUCSR_INIT@l;   \
0775     mtspr SPRN_BUCSR,reg;       \
0776     isync;
0777 #else
0778 #define BTB_FLUSH(reg)
0779 #endif /* CONFIG_PPC_FSL_BOOK3E */
0780 
0781 #endif /* _ASM_POWERPC_PPC_ASM_H */