0001 /* SPDX-License-Identifier: GPL-2.0-or-later */
0002 /*
0003  *  Boot code and exception vectors for Book3E processors
0004  *
0005  *  Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
0006  */
0007 
0008 #include <linux/threads.h>
0009 #include <asm/reg.h>
0010 #include <asm/page.h>
0011 #include <asm/ppc_asm.h>
0012 #include <asm/asm-offsets.h>
0013 #include <asm/cputable.h>
0014 #include <asm/setup.h>
0015 #include <asm/thread_info.h>
0016 #include <asm/reg_a2.h>
0017 #include <asm/exception-64e.h>
0018 #include <asm/bug.h>
0019 #include <asm/irqflags.h>
0020 #include <asm/ptrace.h>
0021 #include <asm/ppc-opcode.h>
0022 #include <asm/mmu.h>
0023 #include <asm/hw_irq.h>
0024 #include <asm/kvm_asm.h>
0025 #include <asm/kvm_booke_hv_asm.h>
0026 #include <asm/feature-fixups.h>
0027 #include <asm/context_tracking.h>
0028 
0029 /* 64e interrupt returns always use SRR registers */
0030 #define fast_interrupt_return fast_interrupt_return_srr
0031 #define interrupt_return interrupt_return_srr
0032 
0033 /* XXX This will ultimately add space for a special exception save
0034  *     structure used to save things like SRR0/SRR1, SPRGs, MAS, etc.
0035  *     when taking special interrupts. For now we don't support that;
0036  *     special interrupts taken from within a non-standard level will
0037  *     probably blow you up.
0038  */
0039 #define SPECIAL_EXC_SRR0    0
0040 #define SPECIAL_EXC_SRR1    1
0041 #define SPECIAL_EXC_SPRG_GEN    2
0042 #define SPECIAL_EXC_SPRG_TLB    3
0043 #define SPECIAL_EXC_MAS0    4
0044 #define SPECIAL_EXC_MAS1    5
0045 #define SPECIAL_EXC_MAS2    6
0046 #define SPECIAL_EXC_MAS3    7
0047 #define SPECIAL_EXC_MAS6    8
0048 #define SPECIAL_EXC_MAS7    9
0049 #define SPECIAL_EXC_MAS5    10  /* E.HV only */
0050 #define SPECIAL_EXC_MAS8    11  /* E.HV only */
0051 #define SPECIAL_EXC_IRQHAPPENED 12
0052 #define SPECIAL_EXC_DEAR    13
0053 #define SPECIAL_EXC_ESR     14
0054 #define SPECIAL_EXC_SOFTE   15
0055 #define SPECIAL_EXC_CSRR0   16
0056 #define SPECIAL_EXC_CSRR1   17
0057 /* must be even to keep 16-byte stack alignment */
0058 #define SPECIAL_EXC_END     18
0059 
0060 #define SPECIAL_EXC_FRAME_SIZE  (INT_FRAME_SIZE + SPECIAL_EXC_END * 8)
0061 #define SPECIAL_EXC_FRAME_OFFS  (INT_FRAME_SIZE - 288)
0062 
0063 #define SPECIAL_EXC_STORE(reg, name) \
0064     std reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
0065 
0066 #define SPECIAL_EXC_LOAD(reg, name) \
0067     ld  reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
0068 
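/*
 * For illustration, the store/load helpers above expand to fixed offsets
 * from r1 within the oversized frame that the CRIT/DBG/MC stack setup
 * allocates (SPECIAL_EXC_FRAME_SIZE). For example:
 *
 *	SPECIAL_EXC_STORE(r10,SRR0)
 *		=> std r10, (SPECIAL_EXC_SRR0 * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
 *		=> std r10, (0 * 8 + INT_FRAME_SIZE - 288)(r1)
 *
 * and the matching SPECIAL_EXC_LOAD(r10,SRR0) reads the value back from
 * the same 8-byte slot.
 */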
0069 special_reg_save:
0070     /*
0071      * We only need (or have stack space) to save this stuff if
0072      * we interrupted the kernel.
0073      */
0074     ld  r3,_MSR(r1)
0075     andi.   r3,r3,MSR_PR
0076     bnelr
0077 
0078     /*
0079      * Advance to the next TLB exception frame for handler
0080      * types that don't do it automatically.
0081      */
0082     LOAD_REG_ADDR(r11,extlb_level_exc)
0083     lwz r12,0(r11)
0084     mfspr   r10,SPRN_SPRG_TLB_EXFRAME
0085     add r10,r10,r12
0086     mtspr   SPRN_SPRG_TLB_EXFRAME,r10
0087 
0088     /*
0089      * Save registers needed to allow nesting of certain exceptions
0090      * (such as TLB misses) inside special exception levels
0091      */
0092     mfspr   r10,SPRN_SRR0
0093     SPECIAL_EXC_STORE(r10,SRR0)
0094     mfspr   r10,SPRN_SRR1
0095     SPECIAL_EXC_STORE(r10,SRR1)
0096     mfspr   r10,SPRN_SPRG_GEN_SCRATCH
0097     SPECIAL_EXC_STORE(r10,SPRG_GEN)
0098     mfspr   r10,SPRN_SPRG_TLB_SCRATCH
0099     SPECIAL_EXC_STORE(r10,SPRG_TLB)
0100     mfspr   r10,SPRN_MAS0
0101     SPECIAL_EXC_STORE(r10,MAS0)
0102     mfspr   r10,SPRN_MAS1
0103     SPECIAL_EXC_STORE(r10,MAS1)
0104     mfspr   r10,SPRN_MAS2
0105     SPECIAL_EXC_STORE(r10,MAS2)
0106     mfspr   r10,SPRN_MAS3
0107     SPECIAL_EXC_STORE(r10,MAS3)
0108     mfspr   r10,SPRN_MAS6
0109     SPECIAL_EXC_STORE(r10,MAS6)
0110     mfspr   r10,SPRN_MAS7
0111     SPECIAL_EXC_STORE(r10,MAS7)
0112 BEGIN_FTR_SECTION
0113     mfspr   r10,SPRN_MAS5
0114     SPECIAL_EXC_STORE(r10,MAS5)
0115     mfspr   r10,SPRN_MAS8
0116     SPECIAL_EXC_STORE(r10,MAS8)
0117 
0118     /* MAS5/8 could have inappropriate values if we interrupted KVM code */
0119     li  r10,0
0120     mtspr   SPRN_MAS5,r10
0121     mtspr   SPRN_MAS8,r10
0122 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
0123     mfspr   r10,SPRN_DEAR
0124     SPECIAL_EXC_STORE(r10,DEAR)
0125     mfspr   r10,SPRN_ESR
0126     SPECIAL_EXC_STORE(r10,ESR)
0127 
0128     ld  r10,_NIP(r1)
0129     SPECIAL_EXC_STORE(r10,CSRR0)
0130     ld  r10,_MSR(r1)
0131     SPECIAL_EXC_STORE(r10,CSRR1)
0132 
0133     blr
0134 
0135 ret_from_level_except:
0136     ld  r3,_MSR(r1)
0137     andi.   r3,r3,MSR_PR
0138     beq 1f
0139     REST_NVGPRS(r1)
0140     b   interrupt_return
0141 1:
0142 
0143     LOAD_REG_ADDR(r11,extlb_level_exc)
0144     lwz r12,0(r11)
0145     mfspr   r10,SPRN_SPRG_TLB_EXFRAME
0146     sub r10,r10,r12
0147     mtspr   SPRN_SPRG_TLB_EXFRAME,r10
0148 
0149     /*
0150      * It's possible that the special level exception interrupted a
0151      * TLB miss handler, and inserted the same entry that the
0152      * interrupted handler was about to insert.  On CPUs without TLB
0153      * write conditional, this can result in a duplicate TLB entry.
0154      * Wipe all non-bolted entries to be safe.
0155      *
0156      * Note that this doesn't protect against any TLB misses
0157      * we may take accessing the stack from here to the end of
0158      * the special level exception.  It's not clear how we can
0159      * reasonably protect against that, but only CPUs with
0160      * neither TLB write conditional nor bolted kernel memory
0161      * are affected.  Do any such CPUs even exist?
0162      */
0163     PPC_TLBILX_ALL(0,R0)
0164 
0165     REST_NVGPRS(r1)
0166 
0167     SPECIAL_EXC_LOAD(r10,SRR0)
0168     mtspr   SPRN_SRR0,r10
0169     SPECIAL_EXC_LOAD(r10,SRR1)
0170     mtspr   SPRN_SRR1,r10
0171     SPECIAL_EXC_LOAD(r10,SPRG_GEN)
0172     mtspr   SPRN_SPRG_GEN_SCRATCH,r10
0173     SPECIAL_EXC_LOAD(r10,SPRG_TLB)
0174     mtspr   SPRN_SPRG_TLB_SCRATCH,r10
0175     SPECIAL_EXC_LOAD(r10,MAS0)
0176     mtspr   SPRN_MAS0,r10
0177     SPECIAL_EXC_LOAD(r10,MAS1)
0178     mtspr   SPRN_MAS1,r10
0179     SPECIAL_EXC_LOAD(r10,MAS2)
0180     mtspr   SPRN_MAS2,r10
0181     SPECIAL_EXC_LOAD(r10,MAS3)
0182     mtspr   SPRN_MAS3,r10
0183     SPECIAL_EXC_LOAD(r10,MAS6)
0184     mtspr   SPRN_MAS6,r10
0185     SPECIAL_EXC_LOAD(r10,MAS7)
0186     mtspr   SPRN_MAS7,r10
0187 BEGIN_FTR_SECTION
0188     SPECIAL_EXC_LOAD(r10,MAS5)
0189     mtspr   SPRN_MAS5,r10
0190     SPECIAL_EXC_LOAD(r10,MAS8)
0191     mtspr   SPRN_MAS8,r10
0192 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
0193 
0194     SPECIAL_EXC_LOAD(r10,DEAR)
0195     mtspr   SPRN_DEAR,r10
0196     SPECIAL_EXC_LOAD(r10,ESR)
0197     mtspr   SPRN_ESR,r10
0198 
0199     stdcx.  r0,0,r1     /* to clear the reservation */
0200 
0201     REST_GPRS(2, 9, r1)
0202 
0203     ld  r10,_CTR(r1)
0204     ld  r11,_XER(r1)
0205     mtctr   r10
0206     mtxer   r11
0207 
0208     blr
0209 
0210 .macro ret_from_level srr0 srr1 paca_ex scratch
0211     bl  ret_from_level_except
0212 
0213     ld  r10,_LINK(r1)
0214     ld  r11,_CCR(r1)
0215     ld  r0,GPR13(r1)
0216     mtlr    r10
0217     mtcr    r11
0218 
0219     ld  r10,GPR10(r1)
0220     ld  r11,GPR11(r1)
0221     ld  r12,GPR12(r1)
0222     mtspr   \scratch,r0
0223 
0224     std r10,\paca_ex+EX_R10(r13);
0225     std r11,\paca_ex+EX_R11(r13);
0226     ld  r10,_NIP(r1)
0227     ld  r11,_MSR(r1)
0228     ld  r0,GPR0(r1)
0229     ld  r1,GPR1(r1)
0230     mtspr   \srr0,r10
0231     mtspr   \srr1,r11
0232     ld  r10,\paca_ex+EX_R10(r13)
0233     ld  r11,\paca_ex+EX_R11(r13)
0234     mfspr   r13,\scratch
0235 .endm
0236 
0237 ret_from_crit_except:
0238     ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
0239     rfci
0240 
0241 ret_from_mc_except:
0242     ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
0243     rfmci
0244 
0245 /* Exception prolog code for all exceptions */
0246 #define EXCEPTION_PROLOG(n, intnum, type, addition)                 \
0247     mtspr   SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */   \
0248     mfspr   r13,SPRN_SPRG_PACA; /* get PACA */              \
0249     std r10,PACA_EX##type+EX_R10(r13);                  \
0250     std r11,PACA_EX##type+EX_R11(r13);                  \
0251     mfcr    r10;            /* save CR */               \
0252     mfspr   r11,SPRN_##type##_SRR1;/* what are we coming from */        \
0253     DO_KVM  intnum,SPRN_##type##_SRR1;    /* KVM hook */            \
0254     stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \
0255     addition;           /* additional code for that exc. */ \
0256     std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */  \
0257     type##_SET_KSTACK;      /* get special stack if necessary */\
0258     andi.   r10,r11,MSR_PR;     /* save stack pointer */        \
0259     beq 1f;         /* branch around if supervisor */   \
0260     ld  r1,PACAKSAVE(r13);  /* get kernel stack coming from usr */\
0261 1:  type##_BTB_FLUSH        \
0262     cmpdi   cr1,r1,0;       /* check if SP makes sense */       \
0263     bge-    cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
0264     mfspr   r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */
0265 
0266 /* Exception type-specific macros */
0267 #define GEN_SET_KSTACK                              \
0268     subi    r1,r1,INT_FRAME_SIZE;   /* alloc frame on kernel stack */
0269 #define SPRN_GEN_SRR0   SPRN_SRR0
0270 #define SPRN_GEN_SRR1   SPRN_SRR1
0271 
0272 #define GDBELL_SET_KSTACK   GEN_SET_KSTACK
0273 #define SPRN_GDBELL_SRR0    SPRN_GSRR0
0274 #define SPRN_GDBELL_SRR1    SPRN_GSRR1
0275 
0276 #define CRIT_SET_KSTACK                                 \
0277     ld  r1,PACA_CRIT_STACK(r13);                    \
0278     subi    r1,r1,SPECIAL_EXC_FRAME_SIZE
0279 #define SPRN_CRIT_SRR0  SPRN_CSRR0
0280 #define SPRN_CRIT_SRR1  SPRN_CSRR1
0281 
0282 #define DBG_SET_KSTACK                                  \
0283     ld  r1,PACA_DBG_STACK(r13);                     \
0284     subi    r1,r1,SPECIAL_EXC_FRAME_SIZE
0285 #define SPRN_DBG_SRR0   SPRN_DSRR0
0286 #define SPRN_DBG_SRR1   SPRN_DSRR1
0287 
0288 #define MC_SET_KSTACK                                   \
0289     ld  r1,PACA_MC_STACK(r13);                      \
0290     subi    r1,r1,SPECIAL_EXC_FRAME_SIZE
0291 #define SPRN_MC_SRR0    SPRN_MCSRR0
0292 #define SPRN_MC_SRR1    SPRN_MCSRR1
0293 
0294 #ifdef CONFIG_PPC_FSL_BOOK3E
0295 #define GEN_BTB_FLUSH           \
0296     START_BTB_FLUSH_SECTION     \
0297         beq 1f;         \
0298         BTB_FLUSH(r10)          \
0299         1:      \
0300     END_BTB_FLUSH_SECTION
0301 
0302 #define CRIT_BTB_FLUSH          \
0303     START_BTB_FLUSH_SECTION     \
0304         BTB_FLUSH(r10)      \
0305     END_BTB_FLUSH_SECTION
0306 
0307 #define DBG_BTB_FLUSH CRIT_BTB_FLUSH
0308 #define MC_BTB_FLUSH CRIT_BTB_FLUSH
0309 #define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
0310 #else
0311 #define GEN_BTB_FLUSH
0312 #define CRIT_BTB_FLUSH
0313 #define DBG_BTB_FLUSH
0314 #define MC_BTB_FLUSH
0315 #define GDBELL_BTB_FLUSH
0316 #endif
0317 
0318 #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition)                \
0319     EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
0320 
0321 #define CRIT_EXCEPTION_PROLOG(n, intnum, addition)              \
0322     EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n))
0323 
0324 #define DBG_EXCEPTION_PROLOG(n, intnum, addition)               \
0325     EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n))
0326 
0327 #define MC_EXCEPTION_PROLOG(n, intnum, addition)                \
0328     EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n))
0329 
0330 #define GDBELL_EXCEPTION_PROLOG(n, intnum, addition)                \
0331     EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))
0332 
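/*
 * Expansion sketch: the wrappers above just token-paste the level name in,
 * e.g. NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
 * PROLOG_ADDITION_2REGS) becomes EXCEPTION_PROLOG(0x300,
 * BOOKE_INTERRUPT_DATA_STORAGE, GEN, PROLOG_ADDITION_2REGS_GEN(0x300)),
 * so the ##type## pastes select SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN,
 * SPRN_GEN_SRR0/1 (i.e. SRR0/SRR1), GEN_SET_KSTACK and GEN_BTB_FLUSH
 * for that interrupt.
 */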
0333 /* Variants of the "addition" argument for the prolog
0334  */
0335 #define PROLOG_ADDITION_NONE_GEN(n)
0336 #define PROLOG_ADDITION_NONE_GDBELL(n)
0337 #define PROLOG_ADDITION_NONE_CRIT(n)
0338 #define PROLOG_ADDITION_NONE_DBG(n)
0339 #define PROLOG_ADDITION_NONE_MC(n)
0340 
0341 #define PROLOG_ADDITION_MASKABLE_GEN(n)                     \
0342     lbz r10,PACAIRQSOFTMASK(r13);   /* are irqs soft-masked? */ \
0343     andi.   r10,r10,IRQS_DISABLED;  /* yes -> go out of line */ \
0344     bne masked_interrupt_book3e_##n
0345 
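/*
 * For example, with n = 0x900 (the decrementer) the three instructions
 * above read paca->irq_soft_mask and, if IRQS_DISABLED is set, branch to
 * masked_interrupt_book3e_0x900 defined near the end of this file, which
 * records the event in paca->irq_happened instead of handling it now.
 */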
0346 /*
0347  * Additional regs must be re-loaded from paca before EXCEPTION_COMMON* is
0348  * called, because that does SAVE_NVGPRS which must see the original register
0349  * values; otherwise the scratch values might be restored when exiting the
0350  * interrupt.
0351  */
0352 #define PROLOG_ADDITION_2REGS_GEN(n)                        \
0353     std r14,PACA_EXGEN+EX_R14(r13);                 \
0354     std r15,PACA_EXGEN+EX_R15(r13)
0355 
0356 #define PROLOG_ADDITION_1REG_GEN(n)                     \
0357     std r14,PACA_EXGEN+EX_R14(r13);
0358 
0359 #define PROLOG_ADDITION_2REGS_CRIT(n)                       \
0360     std r14,PACA_EXCRIT+EX_R14(r13);                    \
0361     std r15,PACA_EXCRIT+EX_R15(r13)
0362 
0363 #define PROLOG_ADDITION_2REGS_DBG(n)                        \
0364     std r14,PACA_EXDBG+EX_R14(r13);                 \
0365     std r15,PACA_EXDBG+EX_R15(r13)
0366 
0367 #define PROLOG_ADDITION_2REGS_MC(n)                     \
0368     std r14,PACA_EXMC+EX_R14(r13);                  \
0369     std r15,PACA_EXMC+EX_R15(r13)
0370 
0371 
0372 /* Core exception code for all exceptions except TLB misses. */
0373 #define EXCEPTION_COMMON_LVL(n, scratch, excf)                  \
0374 exc_##n##_common:                               \
0375     std r0,GPR0(r1);        /* save r0 in stackframe */     \
0376     std r2,GPR2(r1);        /* save r2 in stackframe */     \
0377     SAVE_GPRS(3, 9, r1);        /* save r3 - r9 in stackframe */    \
0378     std r10,_NIP(r1);       /* save SRR0 to stackframe */       \
0379     std r11,_MSR(r1);       /* save SRR1 to stackframe */       \
0380     beq 2f;         /* if from kernel mode */       \
0381 2:  ld  r3,excf+EX_R10(r13);    /* get back r10 */          \
0382     ld  r4,excf+EX_R11(r13);    /* get back r11 */          \
0383     mfspr   r5,scratch;     /* get back r13 */          \
0384     std r12,GPR12(r1);      /* save r12 in stackframe */        \
0385     ld  r2,PACATOC(r13);    /* get kernel TOC into r2 */        \
0386     mflr    r6;         /* save LR in stackframe */     \
0387     mfctr   r7;         /* save CTR in stackframe */        \
0388     mfspr   r8,SPRN_XER;        /* save XER in stackframe */        \
0389     ld  r9,excf+EX_R1(r13); /* load orig r1 back from PACA */   \
0390     lwz r10,excf+EX_CR(r13);    /* load orig CR back from PACA  */  \
0391     lbz r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */       \
0392     ld  r12,exception_marker@toc(r2);                   \
0393     li  r0,0;                               \
0394     std r3,GPR10(r1);       /* save r10 to stackframe */        \
0395     std r4,GPR11(r1);       /* save r11 to stackframe */        \
0396     std r5,GPR13(r1);       /* save it to stackframe */     \
0397     std r6,_LINK(r1);                           \
0398     std r7,_CTR(r1);                            \
0399     std r8,_XER(r1);                            \
0400     li  r3,(n);         /* regs.trap vector */          \
0401     std r9,0(r1);       /* store stack frame back link */   \
0402     std r10,_CCR(r1);       /* store orig CR in stackframe */   \
0403     std r9,GPR1(r1);        /* store stack frame back link */   \
0404     std r11,SOFTE(r1);      /* and save it to stackframe */     \
0405     std r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */       \
0406     std r3,_TRAP(r1);       /* set trap number      */  \
0407     std r0,RESULT(r1);      /* clear regs->result */        \
0408     SAVE_NVGPRS(r1);
0409 
0410 #define EXCEPTION_COMMON(n) \
0411     EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN)
0412 #define EXCEPTION_COMMON_CRIT(n) \
0413     EXCEPTION_COMMON_LVL(n, SPRN_SPRG_CRIT_SCRATCH, PACA_EXCRIT)
0414 #define EXCEPTION_COMMON_MC(n) \
0415     EXCEPTION_COMMON_LVL(n, SPRN_SPRG_MC_SCRATCH, PACA_EXMC)
0416 #define EXCEPTION_COMMON_DBG(n) \
0417     EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG)
0418 
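/*
 * Expansion sketch: EXCEPTION_COMMON(0x300) is
 * EXCEPTION_COMMON_LVL(0x300, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN), i.e. it
 * emits the exc_0x300_common label and builds the full pt_regs frame,
 * fetching the r10/r11/r1/CR values stashed by the prolog from PACA_EXGEN
 * and the original r13 from the GEN scratch SPRG.
 */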
0419 /* XXX FIXME: Restore r14/r15 when necessary */
0420 #define BAD_STACK_TRAMPOLINE(n)                         \
0421 exc_##n##_bad_stack:                                \
0422     li  r1,(n);         /* get exception number */      \
0423     sth r1,PACA_TRAP_SAVE(r13); /* store trap */            \
0424     b   bad_stack_book3e;   /* bad stack error */
0425 
0426 /* WARNING: If you change the layout of this stub, make sure you check
0427     *   the debug exception handler which handles single stepping
0428     *   into exceptions from userspace, and the MM code in
0429     *   arch/powerpc/mm/tlb_nohash.c which patches the branch here
0430     *   and would need to be updated if that branch is moved
0431     */
0432 #define EXCEPTION_STUB(loc, label)                  \
0433     . = interrupt_base_book3e + loc;                \
0434     nop;    /* To make debug interrupts happy */            \
0435     b   exc_##label##_book3e;
0436 
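/*
 * For example, EXCEPTION_STUB(0x060, data_storage) assembles a nop plus
 * "b exc_data_storage_book3e" at interrupt_base_book3e + 0x60, and
 * __setup_base_ivors at the bottom of this file points IVOR2 (Data
 * Storage) at that same 0x060 offset, so this stub is what the hardware
 * actually vectors to.
 */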
0437 #define ACK_NONE(r)
0438 #define ACK_DEC(r)                          \
0439     lis r,TSR_DIS@h;                        \
0440     mtspr   SPRN_TSR,r
0441 #define ACK_FIT(r)                          \
0442     lis r,TSR_FIS@h;                        \
0443     mtspr   SPRN_TSR,r
0444 
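/*
 * A small usage note: the Book E Timer Status Register is
 * write-one-to-clear, so e.g. ACK_DEC(r8) expands to
 *
 *	lis	r8,TSR_DIS@h
 *	mtspr	SPRN_TSR,r8
 *
 * which acknowledges a pending decrementer interrupt by clearing its
 * status bit (ACK_FIT does the same for the fixed interval timer).
 * MASKABLE_EXCEPTION() below invokes the selected ack macro with r8.
 */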
0445 /* Used by asynchronous interrupts that may happen in the idle loop.
0446  *
0447  * This checks whether the thread was in the idle loop and, if so, returns
0448  * to the caller rather than to the interrupted PC. This avoids a race if
0449  * an interrupt happens just before the wait instruction.
0450  */
0451 #define CHECK_NAPPING()                         \
0452     ld  r11, PACA_THREAD_INFO(r13);             \
0453     ld  r10,TI_LOCAL_FLAGS(r11);                \
0454     andi.   r9,r10,_TLF_NAPPING;                    \
0455     beq+    1f;                         \
0456     ld  r8,_LINK(r1);                       \
0457     rlwinm  r7,r10,0,~_TLF_NAPPING;                 \
0458     std r8,_NIP(r1);                        \
0459     std r7,TI_LOCAL_FLAGS(r11);                 \
0460 1:
0461 
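/*
 * Rough sketch of the effect: the idle code sets _TLF_NAPPING in
 * thread_info->local_flags before napping, so if one of these interrupts
 * arrives CHECK_NAPPING() replaces the saved NIP with the saved LR and
 * clears the flag, making the eventual interrupt_return resume at the
 * idle loop's caller instead of re-entering the wait.
 */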
0462 
0463 #define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack)       \
0464     START_EXCEPTION(label);                     \
0465     NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
0466     EXCEPTION_COMMON(trapnum)                   \
0467     ack(r8);                            \
0468     CHECK_NAPPING();                        \
0469     addi    r3,r1,STACK_FRAME_OVERHEAD;             \
0470     bl  hdlr;                           \
0471     b   interrupt_return
0472 
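/*
 * For illustration, the decrementer entry later in this file,
 *
 *	MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
 *			   decrementer, timer_interrupt, ACK_DEC)
 *
 * expands to START_EXCEPTION(decrementer), the maskable GEN prolog (which
 * may divert to masked_interrupt_book3e_0x900 if irqs are soft-masked),
 * EXCEPTION_COMMON(0x900), ACK_DEC(r8), CHECK_NAPPING(), then a call to
 * timer_interrupt and a branch to interrupt_return.
 */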
0473 /* This value is used to mark exception frames on the stack. */
0474     .section    ".toc","aw"
0475 exception_marker:
0476     .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
0477 
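/*
 * For reference: EXCEPTION_COMMON_LVL() above loads this marker via
 * exception_marker@toc(r2) and stores it at STACK_FRAME_OVERHEAD-16(r1),
 * which is what the stack unwinder looks for to recognise an exception
 * (pt_regs) frame.
 */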
0478 
0479 /*
0480  * And here we have the exception vectors!
0481  */
0482 
0483     .text
0484     .balign 0x1000
0485     .globl interrupt_base_book3e
0486 interrupt_base_book3e:                  /* fake trap */
0487     EXCEPTION_STUB(0x000, machine_check)
0488     EXCEPTION_STUB(0x020, critical_input)       /* 0x0100 */
0489     EXCEPTION_STUB(0x040, debug_crit)       /* 0x0d00 */
0490     EXCEPTION_STUB(0x060, data_storage)     /* 0x0300 */
0491     EXCEPTION_STUB(0x080, instruction_storage)  /* 0x0400 */
0492     EXCEPTION_STUB(0x0a0, external_input)       /* 0x0500 */
0493     EXCEPTION_STUB(0x0c0, alignment)        /* 0x0600 */
0494     EXCEPTION_STUB(0x0e0, program)          /* 0x0700 */
0495     EXCEPTION_STUB(0x100, fp_unavailable)       /* 0x0800 */
0496     EXCEPTION_STUB(0x120, system_call)      /* 0x0c00 */
0497     EXCEPTION_STUB(0x140, ap_unavailable)       /* 0x0f20 */
0498     EXCEPTION_STUB(0x160, decrementer)      /* 0x0900 */
0499     EXCEPTION_STUB(0x180, fixed_interval)       /* 0x0980 */
0500     EXCEPTION_STUB(0x1a0, watchdog)         /* 0x09f0 */
0501     EXCEPTION_STUB(0x1c0, data_tlb_miss)
0502     EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
0503     EXCEPTION_STUB(0x200, altivec_unavailable)
0504     EXCEPTION_STUB(0x220, altivec_assist)
0505     EXCEPTION_STUB(0x260, perfmon)
0506     EXCEPTION_STUB(0x280, doorbell)
0507     EXCEPTION_STUB(0x2a0, doorbell_crit)
0508     EXCEPTION_STUB(0x2c0, guest_doorbell)
0509     EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
0510     EXCEPTION_STUB(0x300, hypercall)
0511     EXCEPTION_STUB(0x320, ehpriv)
0512     EXCEPTION_STUB(0x340, lrat_error)
0513 
0514     .globl __end_interrupts
0515 __end_interrupts:
0516 
0517 /* Critical Input Interrupt */
0518     START_EXCEPTION(critical_input);
0519     CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
0520                   PROLOG_ADDITION_NONE)
0521     EXCEPTION_COMMON_CRIT(0x100)
0522     bl  special_reg_save
0523     CHECK_NAPPING();
0524     addi    r3,r1,STACK_FRAME_OVERHEAD
0525     bl  unknown_nmi_exception
0526     b   ret_from_crit_except
0527 
0528 /* Machine Check Interrupt */
0529     START_EXCEPTION(machine_check);
0530     MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
0531                 PROLOG_ADDITION_NONE)
0532     EXCEPTION_COMMON_MC(0x000)
0533     bl  special_reg_save
0534     CHECK_NAPPING();
0535     addi    r3,r1,STACK_FRAME_OVERHEAD
0536     bl  machine_check_exception
0537     b   ret_from_mc_except
0538 
0539 /* Data Storage Interrupt */
0540     START_EXCEPTION(data_storage)
0541     NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
0542                 PROLOG_ADDITION_2REGS)
0543     mfspr   r14,SPRN_DEAR
0544     mfspr   r15,SPRN_ESR
0545     std r14,_DEAR(r1)
0546     std r15,_ESR(r1)
0547     ld  r14,PACA_EXGEN+EX_R14(r13)
0548     ld  r15,PACA_EXGEN+EX_R15(r13)
0549     EXCEPTION_COMMON(0x300)
0550     b   storage_fault_common
0551 
0552 /* Instruction Storage Interrupt */
0553     START_EXCEPTION(instruction_storage);
0554     NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE,
0555                 PROLOG_ADDITION_2REGS)
0556     li  r15,0
0557     mr  r14,r10
0558     std r14,_DEAR(r1)
0559     std r15,_ESR(r1)
0560     ld  r14,PACA_EXGEN+EX_R14(r13)
0561     ld  r15,PACA_EXGEN+EX_R15(r13)
0562     EXCEPTION_COMMON(0x400)
0563     b   storage_fault_common
0564 
0565 /* External Input Interrupt */
0566     MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
0567                external_input, do_IRQ, ACK_NONE)
0568 
0569 /* Alignment */
0570     START_EXCEPTION(alignment);
0571     NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
0572                 PROLOG_ADDITION_2REGS)
0573     mfspr   r14,SPRN_DEAR
0574     mfspr   r15,SPRN_ESR
0575     std r14,_DEAR(r1)
0576     std r15,_ESR(r1)
0577     ld  r14,PACA_EXGEN+EX_R14(r13)
0578     ld  r15,PACA_EXGEN+EX_R15(r13)
0579     EXCEPTION_COMMON(0x600)
0580     b   alignment_more  /* no room, go out of line */
0581 
0582 /* Program Interrupt */
0583     START_EXCEPTION(program);
0584     NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
0585                 PROLOG_ADDITION_1REG)
0586     mfspr   r14,SPRN_ESR
0587     std r14,_ESR(r1)
0588     ld  r14,PACA_EXGEN+EX_R14(r13)
0589     EXCEPTION_COMMON(0x700)
0590     addi    r3,r1,STACK_FRAME_OVERHEAD
0591     bl  program_check_exception
0592     REST_NVGPRS(r1)
0593     b   interrupt_return
0594 
0595 /* Floating Point Unavailable Interrupt */
0596     START_EXCEPTION(fp_unavailable);
0597     NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
0598                 PROLOG_ADDITION_NONE)
0599     /* we can probably do a shorter exception entry for that one... */
0600     EXCEPTION_COMMON(0x800)
0601     ld  r12,_MSR(r1)
0602     andi.   r0,r12,MSR_PR;
0603     beq-    1f
0604     bl  load_up_fpu
0605     b   fast_interrupt_return
0606 1:  addi    r3,r1,STACK_FRAME_OVERHEAD
0607     bl  kernel_fp_unavailable_exception
0608     b   interrupt_return
0609 
0610 /* Altivec Unavailable Interrupt */
0611     START_EXCEPTION(altivec_unavailable);
0612     NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
0613                 PROLOG_ADDITION_NONE)
0614     /* we can probably do a shorter exception entry for that one... */
0615     EXCEPTION_COMMON(0x200)
0616 #ifdef CONFIG_ALTIVEC
0617 BEGIN_FTR_SECTION
0618     ld  r12,_MSR(r1)
0619     andi.   r0,r12,MSR_PR;
0620     beq-    1f
0621     bl  load_up_altivec
0622     b   fast_interrupt_return
0623 1:
0624 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
0625 #endif
0626     addi    r3,r1,STACK_FRAME_OVERHEAD
0627     bl  altivec_unavailable_exception
0628     b   interrupt_return
0629 
0630 /* AltiVec Assist */
0631     START_EXCEPTION(altivec_assist);
0632     NORMAL_EXCEPTION_PROLOG(0x220,
0633                 BOOKE_INTERRUPT_ALTIVEC_ASSIST,
0634                 PROLOG_ADDITION_NONE)
0635     EXCEPTION_COMMON(0x220)
0636     addi    r3,r1,STACK_FRAME_OVERHEAD
0637 #ifdef CONFIG_ALTIVEC
0638 BEGIN_FTR_SECTION
0639     bl  altivec_assist_exception
0640 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
0641     REST_NVGPRS(r1)
0642 #else
0643     bl  unknown_exception
0644 #endif
0645     b   interrupt_return
0646 
0647 
0648 /* Decrementer Interrupt */
0649     MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
0650                decrementer, timer_interrupt, ACK_DEC)
0651 
0652 /* Fixed Interval Timer Interrupt */
0653     MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
0654                fixed_interval, unknown_exception, ACK_FIT)
0655 
0656 /* Watchdog Timer Interrupt */
0657     START_EXCEPTION(watchdog);
0658     CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
0659                   PROLOG_ADDITION_NONE)
0660     EXCEPTION_COMMON_CRIT(0x9f0)
0661     bl  special_reg_save
0662     CHECK_NAPPING();
0663     addi    r3,r1,STACK_FRAME_OVERHEAD
0664 #ifdef CONFIG_BOOKE_WDT
0665     bl  WatchdogException
0666 #else
0667     bl  unknown_nmi_exception
0668 #endif
0669     b   ret_from_crit_except
0670 
0671 /* System Call Interrupt */
0672     START_EXCEPTION(system_call)
0673     mr  r9,r13          /* keep a copy of userland r13 */
0674     mfspr   r11,SPRN_SRR0       /* get return address */
0675     mfspr   r12,SPRN_SRR1       /* get previous MSR */
0676     mfspr   r13,SPRN_SPRG_PACA  /* get our PACA */
0677     b   system_call_common
0678 
0679 /* Auxiliary Processor Unavailable Interrupt */
0680     START_EXCEPTION(ap_unavailable);
0681     NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
0682                 PROLOG_ADDITION_NONE)
0683     EXCEPTION_COMMON(0xf20)
0684     addi    r3,r1,STACK_FRAME_OVERHEAD
0685     bl  unknown_exception
0686     b   interrupt_return
0687 
0688 /* Debug exception as a critical interrupt */
0689     START_EXCEPTION(debug_crit);
0690     CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
0691                   PROLOG_ADDITION_2REGS)
0692 
0693     /*
0694      * If there is a single step or branch-taken exception in an
0695      * exception entry sequence, it was probably meant to apply to
0696      * the code where the exception occurred (since exception entry
0697      * doesn't turn off DE automatically).  We simulate the effect
0698      * of turning off DE on entry to an exception handler by turning
0699      * off DE in the CSRR1 value and clearing the debug status.
0700      */
0701 
0702     mfspr   r14,SPRN_DBSR       /* check single-step/branch taken */
0703     andis.  r15,r14,(DBSR_IC|DBSR_BT)@h
0704     beq+    1f
0705 
0706 #ifdef CONFIG_RELOCATABLE
0707     ld  r15,PACATOC(r13)
0708     ld  r14,interrupt_base_book3e@got(r15)
0709     ld  r15,__end_interrupts@got(r15)
0710     cmpld   cr0,r10,r14
0711     cmpld   cr1,r10,r15
0712 #else
0713     LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
0714     cmpld   cr0, r10, r14
0715     LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
0716     cmpld   cr1, r10, r14
0717 #endif
0718     blt+    cr0,1f
0719     bge+    cr1,1f
0720 
0721     /* here it looks like we got an inappropriate debug exception. */
0722     lis r14,(DBSR_IC|DBSR_BT)@h     /* clear the event */
0723     rlwinm  r11,r11,0,~MSR_DE   /* clear DE in the CSRR1 value */
0724     mtspr   SPRN_DBSR,r14
0725     mtspr   SPRN_CSRR1,r11
0726     lwz r10,PACA_EXCRIT+EX_CR(r13)  /* restore registers */
0727     ld  r1,PACA_EXCRIT+EX_R1(r13)
0728     ld  r14,PACA_EXCRIT+EX_R14(r13)
0729     ld  r15,PACA_EXCRIT+EX_R15(r13)
0730     mtcr    r10
0731     ld  r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */
0732     ld  r11,PACA_EXCRIT+EX_R11(r13)
0733     mfspr   r13,SPRN_SPRG_CRIT_SCRATCH
0734     rfci
0735 
0736     /* Normal debug exception */
0737     /* XXX We only handle interrupts coming from userspace for now, since
0738      *     we can't quite properly save an interrupted kernel state yet.
0739      */
0740 1:  andi.   r14,r11,MSR_PR;     /* check for userspace again */
0741     beq kernel_dbg_exc;     /* if from kernel mode */
0742 
0743     /* Now we mash things up to make it look like we are coming in on a
0744      * normal exception.
0745      */
0746     mfspr   r14,SPRN_DBSR
0747     std r14,_DSISR(r1)
0748     ld  r14,PACA_EXCRIT+EX_R14(r13)
0749     ld  r15,PACA_EXCRIT+EX_R15(r13)
0750     EXCEPTION_COMMON_CRIT(0xd00)
0751     addi    r3,r1,STACK_FRAME_OVERHEAD
0752     bl  DebugException
0753     REST_NVGPRS(r1)
0754     b   interrupt_return
0755 
0756 kernel_dbg_exc:
0757     b   .   /* NYI */
0758 
0759 /* Debug exception as a debug interrupt */
0760     START_EXCEPTION(debug_debug);
0761     DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
0762                          PROLOG_ADDITION_2REGS)
0763 
0764     /*
0765      * If there is a single step or branch-taken exception in an
0766      * exception entry sequence, it was probably meant to apply to
0767      * the code where the exception occurred (since exception entry
0768      * doesn't turn off DE automatically).  We simulate the effect
0769      * of turning off DE on entry to an exception handler by turning
0770      * off DE in the DSRR1 value and clearing the debug status.
0771      */
0772 
0773     mfspr   r14,SPRN_DBSR       /* check single-step/branch taken */
0774     andis.  r15,r14,(DBSR_IC|DBSR_BT)@h
0775     beq+    1f
0776 
0777 #ifdef CONFIG_RELOCATABLE
0778     ld  r15,PACATOC(r13)
0779     ld  r14,interrupt_base_book3e@got(r15)
0780     ld  r15,__end_interrupts@got(r15)
0781     cmpld   cr0,r10,r14
0782     cmpld   cr1,r10,r15
0783 #else
0784     LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
0785     cmpld   cr0, r10, r14
0786     LOAD_REG_IMMEDIATE_SYM(r14, r15,__end_interrupts)
0787     cmpld   cr1, r10, r14
0788 #endif
0789     blt+    cr0,1f
0790     bge+    cr1,1f
0791 
0792     /* here it looks like we got an inappropriate debug exception. */
0793     lis r14,(DBSR_IC|DBSR_BT)@h     /* clear the event */
0794     rlwinm  r11,r11,0,~MSR_DE   /* clear DE in the DSRR1 value */
0795     mtspr   SPRN_DBSR,r14
0796     mtspr   SPRN_DSRR1,r11
0797     lwz r10,PACA_EXDBG+EX_CR(r13)   /* restore registers */
0798     ld  r1,PACA_EXDBG+EX_R1(r13)
0799     ld  r14,PACA_EXDBG+EX_R14(r13)
0800     ld  r15,PACA_EXDBG+EX_R15(r13)
0801     mtcr    r10
0802     ld  r10,PACA_EXDBG+EX_R10(r13)  /* restore registers */
0803     ld  r11,PACA_EXDBG+EX_R11(r13)
0804     mfspr   r13,SPRN_SPRG_DBG_SCRATCH
0805     rfdi
0806 
0807     /* Normal debug exception */
0808     /* XXX We only handle interrupts coming from userspace for now, since
0809      *     we can't quite properly save an interrupted kernel state yet.
0810      */
0811 1:  andi.   r14,r11,MSR_PR;     /* check for userspace again */
0812     beq kernel_dbg_exc;     /* if from kernel mode */
0813 
0814     /* Now we mash things up to make it look like we are coming in on a
0815      * normal exception.
0816      */
0817     mfspr   r14,SPRN_DBSR
0818     std r14,_DSISR(r1)
0819     ld  r14,PACA_EXDBG+EX_R14(r13)
0820     ld  r15,PACA_EXDBG+EX_R15(r13)
0821     EXCEPTION_COMMON_DBG(0xd08)
0822     addi    r3,r1,STACK_FRAME_OVERHEAD
0823     bl  DebugException
0824     REST_NVGPRS(r1)
0825     b   interrupt_return
0826 
0827     START_EXCEPTION(perfmon);
0828     NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
0829                 PROLOG_ADDITION_NONE)
0830     EXCEPTION_COMMON(0x260)
0831     CHECK_NAPPING()
0832     addi    r3,r1,STACK_FRAME_OVERHEAD
0833     bl  performance_monitor_exception
0834     b   interrupt_return
0835 
0836 /* Doorbell interrupt */
0837     MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
0838                doorbell, doorbell_exception, ACK_NONE)
0839 
0840 /* Doorbell critical Interrupt */
0841     START_EXCEPTION(doorbell_crit);
0842     CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
0843                   PROLOG_ADDITION_NONE)
0844     EXCEPTION_COMMON_CRIT(0x2a0)
0845     bl  special_reg_save
0846     CHECK_NAPPING();
0847     addi    r3,r1,STACK_FRAME_OVERHEAD
0848     bl  unknown_nmi_exception
0849     b   ret_from_crit_except
0850 
0851 /*
0852  *  Guest doorbell interrupt
0853  *  This general exception uses the GSRRx save/restore registers
0854  */
0855     START_EXCEPTION(guest_doorbell);
0856     GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
0857                     PROLOG_ADDITION_NONE)
0858     EXCEPTION_COMMON(0x2c0)
0859     addi    r3,r1,STACK_FRAME_OVERHEAD
0860     bl  unknown_exception
0861     b   interrupt_return
0862 
0863 /* Guest Doorbell critical Interrupt */
0864     START_EXCEPTION(guest_doorbell_crit);
0865     CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
0866                   PROLOG_ADDITION_NONE)
0867     EXCEPTION_COMMON_CRIT(0x2e0)
0868     bl  special_reg_save
0869     CHECK_NAPPING();
0870     addi    r3,r1,STACK_FRAME_OVERHEAD
0871     bl  unknown_nmi_exception
0872     b   ret_from_crit_except
0873 
0874 /* Hypervisor call */
0875     START_EXCEPTION(hypercall);
0876     NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
0877                     PROLOG_ADDITION_NONE)
0878     EXCEPTION_COMMON(0x310)
0879     addi    r3,r1,STACK_FRAME_OVERHEAD
0880     bl  unknown_exception
0881     b   interrupt_return
0882 
0883 /* Embedded Hypervisor privilege exception */
0884     START_EXCEPTION(ehpriv);
0885     NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
0886                     PROLOG_ADDITION_NONE)
0887     EXCEPTION_COMMON(0x320)
0888     addi    r3,r1,STACK_FRAME_OVERHEAD
0889     bl  unknown_exception
0890     b   interrupt_return
0891 
0892 /* LRAT Error interrupt */
0893     START_EXCEPTION(lrat_error);
0894     NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR,
0895                     PROLOG_ADDITION_NONE)
0896     EXCEPTION_COMMON(0x340)
0897     addi    r3,r1,STACK_FRAME_OVERHEAD
0898     bl  unknown_exception
0899     b   interrupt_return
0900 
0901 .macro SEARCH_RESTART_TABLE
0902 #ifdef CONFIG_RELOCATABLE
0903     ld  r11,PACATOC(r13)
0904     ld  r14,__start___restart_table@got(r11)
0905     ld  r15,__stop___restart_table@got(r11)
0906 #else
0907     LOAD_REG_IMMEDIATE_SYM(r14, r11, __start___restart_table)
0908     LOAD_REG_IMMEDIATE_SYM(r15, r11, __stop___restart_table)
0909 #endif
0910 300:
0911     cmpd    r14,r15
0912     beq 302f
0913     ld  r11,0(r14)
0914     cmpld   r10,r11
0915     blt 301f
0916     ld  r11,8(r14)
0917     cmpld   r10,r11
0918     bge 301f
0919     ld  r11,16(r14)
0920     b   303f
0921 301:
0922     addi    r14,r14,24
0923     b   300b
0924 302:
0925     li  r11,0
0926 303:
0927 .endm
0928 
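/*
 * Sketch of the table layout as consumed above (each entry is 24 bytes):
 * a start address at offset 0, an end address at offset 8 and a restart
 * (fixup) address at offset 16. The loop leaves the matching restart
 * address in r11, or 0 if the interrupted NIP in r10 falls in no entry.
 */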
0929 /*
0930  * An interrupt came in while soft-disabled; we mark paca->irq_happened
0931  * accordingly and, if the interrupt is level sensitive, we hard disable.
0932  * Hard disabling (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK, so
0933  * keep these in sync.
0934  */
0935 
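/*
 * As a concrete example of the two flavours instantiated below: the
 * external input case (0x500) uses full_mask=1, so it ORs
 * PACA_IRQ_EE | PACA_IRQ_HARD_DIS into paca->irq_happened and clears
 * MSR_EE in the saved SRR1, since the level-sensitive line would
 * otherwise re-fire immediately. The decrementer case (0x900) uses
 * full_mask=0: it has already been acknowledged via ACK_DEC, so only
 * PACA_IRQ_DEC is recorded and MSR_EE is left alone. The recorded bits
 * are replayed later when interrupts are soft-enabled again.
 */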
0936 .macro masked_interrupt_book3e paca_irq full_mask
0937     std r14,PACA_EXGEN+EX_R14(r13)
0938     std r15,PACA_EXGEN+EX_R15(r13)
0939 
0940     lbz r10,PACAIRQHAPPENED(r13)
0941     .if \full_mask == 1
0942     ori r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
0943     .else
0944     ori r10,r10,\paca_irq
0945     .endif
0946     stb r10,PACAIRQHAPPENED(r13)
0947 
0948     .if \full_mask == 1
0949     xori    r11,r11,MSR_EE      /* clear MSR_EE */
0950     mtspr   SPRN_SRR1,r11
0951     .endif
0952 
0953     mfspr   r10,SPRN_SRR0
0954     SEARCH_RESTART_TABLE
0955     cmpdi   r11,0
0956     beq 1f
0957     mtspr   SPRN_SRR0,r11       /* return to restart address */
0958 1:
0959 
0960     lwz r11,PACA_EXGEN+EX_CR(r13)
0961     mtcr    r11
0962     ld  r10,PACA_EXGEN+EX_R10(r13)
0963     ld  r11,PACA_EXGEN+EX_R11(r13)
0964     ld  r14,PACA_EXGEN+EX_R14(r13)
0965     ld  r15,PACA_EXGEN+EX_R15(r13)
0966     mfspr   r13,SPRN_SPRG_GEN_SCRATCH
0967     rfi
0968     b   .
0969 .endm
0970 
0971 masked_interrupt_book3e_0x500:
0972     masked_interrupt_book3e PACA_IRQ_EE 1
0973 
0974 masked_interrupt_book3e_0x900:
0975     ACK_DEC(r10);
0976     masked_interrupt_book3e PACA_IRQ_DEC 0
0977 
0978 masked_interrupt_book3e_0x980:
0979     ACK_FIT(r10);
0980     masked_interrupt_book3e PACA_IRQ_DEC 0
0981 
0982 masked_interrupt_book3e_0x280:
0983 masked_interrupt_book3e_0x2c0:
0984     masked_interrupt_book3e PACA_IRQ_DBELL 0
0985 
0986 /*
0987  * This is called from the 0x300 and 0x400 handlers after the prologs, with
0988  * r14 and r15 containing the fault address and error code and the
0989  * original values stashed away in the PACA.
0990  */
0991 storage_fault_common:
0992     addi    r3,r1,STACK_FRAME_OVERHEAD
0993     bl  do_page_fault
0994     b   interrupt_return
0995 
0996 /*
0997  * Alignment exception doesn't fit entirely in the 0x100 bytes so it
0998  * continues here.
0999  */
1000 alignment_more:
1001     addi    r3,r1,STACK_FRAME_OVERHEAD
1002     bl  alignment_exception
1003     REST_NVGPRS(r1)
1004     b   interrupt_return
1005 
1006 /*
1007  * Trampolines used when spotting a bad kernel stack pointer in
1008  * the exception entry code.
1009  *
1010  * TODO: move some bits like SRR0 read to trampoline, pass PACA
1011  * index around, etc... to handle crit & mcheck
1012  */
1013 BAD_STACK_TRAMPOLINE(0x000)
1014 BAD_STACK_TRAMPOLINE(0x100)
1015 BAD_STACK_TRAMPOLINE(0x200)
1016 BAD_STACK_TRAMPOLINE(0x220)
1017 BAD_STACK_TRAMPOLINE(0x260)
1018 BAD_STACK_TRAMPOLINE(0x280)
1019 BAD_STACK_TRAMPOLINE(0x2a0)
1020 BAD_STACK_TRAMPOLINE(0x2c0)
1021 BAD_STACK_TRAMPOLINE(0x2e0)
1022 BAD_STACK_TRAMPOLINE(0x300)
1023 BAD_STACK_TRAMPOLINE(0x310)
1024 BAD_STACK_TRAMPOLINE(0x320)
1025 BAD_STACK_TRAMPOLINE(0x340)
1026 BAD_STACK_TRAMPOLINE(0x400)
1027 BAD_STACK_TRAMPOLINE(0x500)
1028 BAD_STACK_TRAMPOLINE(0x600)
1029 BAD_STACK_TRAMPOLINE(0x700)
1030 BAD_STACK_TRAMPOLINE(0x800)
1031 BAD_STACK_TRAMPOLINE(0x900)
1032 BAD_STACK_TRAMPOLINE(0x980)
1033 BAD_STACK_TRAMPOLINE(0x9f0)
1034 BAD_STACK_TRAMPOLINE(0xa00)
1035 BAD_STACK_TRAMPOLINE(0xb00)
1036 BAD_STACK_TRAMPOLINE(0xc00)
1037 BAD_STACK_TRAMPOLINE(0xd00)
1038 BAD_STACK_TRAMPOLINE(0xd08)
1039 BAD_STACK_TRAMPOLINE(0xe00)
1040 BAD_STACK_TRAMPOLINE(0xf00)
1041 BAD_STACK_TRAMPOLINE(0xf20)
1042 
1043     .globl  bad_stack_book3e
1044 bad_stack_book3e:
1045     /* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
1046     mfspr   r10,SPRN_SRR0;        /* read SRR0 before touching stack */
1047     ld  r1,PACAEMERGSP(r13)
1048     subi    r1,r1,64+INT_FRAME_SIZE
1049     std r10,_NIP(r1)
1050     std r11,_MSR(r1)
1051     ld  r10,PACA_EXGEN+EX_R1(r13) /* FIXME for crit & mcheck */
1052     lwz r11,PACA_EXGEN+EX_CR(r13) /* FIXME for crit & mcheck */
1053     std r10,GPR1(r1)
1054     std r11,_CCR(r1)
1055     mfspr   r10,SPRN_DEAR
1056     mfspr   r11,SPRN_ESR
1057     std r10,_DEAR(r1)
1058     std r11,_ESR(r1)
1059     std r0,GPR0(r1);        /* save r0 in stackframe */     \
1060     std r2,GPR2(r1);        /* save r2 in stackframe */     \
1061     SAVE_GPRS(3, 9, r1);        /* save r3 - r9 in stackframe */    \
1062     ld  r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */            \
1063     ld  r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */            \
1064     mfspr   r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \
1065     std r3,GPR10(r1);       /* save r10 to stackframe */        \
1066     std r4,GPR11(r1);       /* save r11 to stackframe */        \
1067     std r12,GPR12(r1);      /* save r12 in stackframe */        \
1068     std r5,GPR13(r1);       /* save it to stackframe */     \
1069     mflr    r10
1070     mfctr   r11
1071     mfxer   r12
1072     std r10,_LINK(r1)
1073     std r11,_CTR(r1)
1074     std r12,_XER(r1)
1075     SAVE_GPRS(14, 31, r1)
1076     lhz r12,PACA_TRAP_SAVE(r13)
1077     std r12,_TRAP(r1)
1078     addi    r11,r1,INT_FRAME_SIZE
1079     std r11,0(r1)
1080     li  r12,0
1081     std r12,0(r11)
1082     ld  r2,PACATOC(r13)
1083 1:  addi    r3,r1,STACK_FRAME_OVERHEAD
1084     bl  kernel_bad_stack
1085     b   1b
1086 
1087 /*
1088  * Set up the initial TLB for a core. The current implementation
1089  * assumes that whatever we are running from will not conflict with
1090  * the new mapping at PAGE_OFFSET.
1091  */
1092 _GLOBAL(initial_tlb_book3e)
1093 
1094     /* Look for the first TLB with IPROT set */
1095     mfspr   r4,SPRN_TLB0CFG
1096     andi.   r3,r4,TLBnCFG_IPROT
1097     lis r3,MAS0_TLBSEL(0)@h
1098     bne found_iprot
1099 
1100     mfspr   r4,SPRN_TLB1CFG
1101     andi.   r3,r4,TLBnCFG_IPROT
1102     lis r3,MAS0_TLBSEL(1)@h
1103     bne found_iprot
1104 
1105     mfspr   r4,SPRN_TLB2CFG
1106     andi.   r3,r4,TLBnCFG_IPROT
1107     lis r3,MAS0_TLBSEL(2)@h
1108     bne found_iprot
1109 
1110     lis r3,MAS0_TLBSEL(3)@h
1111     mfspr   r4,SPRN_TLB3CFG
1112     /* fall through */
1113 
1114 found_iprot:
1115     andi.   r5,r4,TLBnCFG_HES
1116     bne have_hes
1117 
1118     mflr    r8              /* save LR */
1119 /* 1. Find the index of the entry we're executing in
1120  *
1121  * r3 = MAS0_TLBSEL (for the iprot array)
1122  * r4 = SPRN_TLBnCFG
1123  */
1124     bcl 20,31,$+4           /* Find our address */
1125 invstr: mflr    r6              /* Make it accessible */
1126     mfmsr   r7
1127     rlwinm  r5,r7,27,31,31          /* extract MSR[IS] */
1128     mfspr   r7,SPRN_PID
1129     slwi    r7,r7,16
1130     or  r7,r7,r5
1131     mtspr   SPRN_MAS6,r7
1132     tlbsx   0,r6                /* search MSR[IS], SPID=PID */
1133 
1134     mfspr   r3,SPRN_MAS0
1135     rlwinm  r5,r3,16,20,31          /* Extract MAS0(Entry) */
1136 
1137     mfspr   r7,SPRN_MAS1            /* Ensure IPROT is set */
1138     oris    r7,r7,MAS1_IPROT@h
1139     mtspr   SPRN_MAS1,r7
1140     tlbwe
1141 
1142 /* 2. Invalidate all entries except the entry we're executing in
1143  *
1144  * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
1145  * r4 = SPRN_TLBnCFG
1146  * r5 = ESEL of entry we are running in
1147  */
1148     andi.   r4,r4,TLBnCFG_N_ENTRY       /* Extract # entries */
1149     li  r6,0                /* Set Entry counter to 0 */
1150 1:  mr  r7,r3               /* Set MAS0(TLBSEL) */
1151     rlwimi  r7,r6,16,4,15           /* Setup MAS0 = TLBSEL | ESEL(r6) */
1152     mtspr   SPRN_MAS0,r7
1153     tlbre
1154     mfspr   r7,SPRN_MAS1
1155     rlwinm  r7,r7,0,2,31            /* Clear MAS1 Valid and IPROT */
1156     cmpw    r5,r6
1157     beq skpinv              /* Don't touch the entry we are executing from */
1158     mtspr   SPRN_MAS1,r7
1159     tlbwe
1160     isync
1161 skpinv: addi    r6,r6,1             /* Increment */
1162     cmpw    r6,r4               /* Are we done? */
1163     bne 1b              /* If not, repeat */
1164 
1165     /* Invalidate all TLBs */
1166     PPC_TLBILX_ALL(0,R0)
1167     sync
1168     isync
1169 
1170 /* 3. Setup a temp mapping and jump to it
1171  *
1172  * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
1173  * r5 = ESEL of entry we are running in
1174  */
1175     andi.   r7,r5,0x1   /* Pick an unused, non-zero entry */
1176     addi    r7,r7,0x1
1177     mr  r4,r3       /* Set MAS0(TLBSEL) = 1 */
1178     mtspr   SPRN_MAS0,r4
1179     tlbre
1180 
1181     rlwimi  r4,r7,16,4,15   /* Setup MAS0 = TLBSEL | ESEL(r7) */
1182     mtspr   SPRN_MAS0,r4
1183 
1184     mfspr   r7,SPRN_MAS1
1185     xori    r6,r7,MAS1_TS       /* Setup TMP mapping in the other Address space */
1186     mtspr   SPRN_MAS1,r6
1187 
1188     tlbwe
1189 
1190     mfmsr   r6
1191     xori    r6,r6,MSR_IS
1192     mtspr   SPRN_SRR1,r6
1193     bcl 20,31,$+4   /* Find our address */
1194 1:  mflr    r6
1195     addi    r6,r6,(2f - 1b)
1196     mtspr   SPRN_SRR0,r6
1197     rfi
1198 2:
1199 
1200 /* 4. Clear out PIDs & Search info
1201  *
1202  * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
1203  * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
1204  * r5 = MAS3
1205  */
1206     li  r6,0
1207     mtspr   SPRN_MAS6,r6
1208     mtspr   SPRN_PID,r6
1209 
1210 /* 5. Invalidate mapping we started in
1211  *
1212  * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
1213  * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
1214  * r5 = MAS3
1215  */
1216     mtspr   SPRN_MAS0,r3
1217     tlbre
1218     mfspr   r6,SPRN_MAS1
1219     rlwinm  r6,r6,0,2,31    /* clear IPROT and VALID */
1220     mtspr   SPRN_MAS1,r6
1221     tlbwe
1222     sync
1223     isync
1224 
1225 /* 6. Setup KERNELBASE mapping in TLB[0]
1226  *
1227  * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
1228  * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
1229  * r5 = MAS3
1230  */
1231     rlwinm  r3,r3,0,16,3    /* clear ESEL */
1232     mtspr   SPRN_MAS0,r3
1233     lis r6,(MAS1_VALID|MAS1_IPROT)@h
1234     ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
1235     mtspr   SPRN_MAS1,r6
1236 
1237     LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | MAS2_M_IF_NEEDED)
1238     mtspr   SPRN_MAS2,r6
1239 
1240     rlwinm  r5,r5,0,0,25
1241     ori r5,r5,MAS3_SR | MAS3_SW | MAS3_SX
1242     mtspr   SPRN_MAS3,r5
1243     li  r5,-1
1244     rlwinm  r5,r5,0,0,25
1245 
1246     tlbwe
1247 
1248 /* 7. Jump to KERNELBASE mapping
1249  *
1250  * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
1251  */
1252     /* Now we branch to the new virtual address mapped by this entry */
1253     bcl 20,31,$+4   /* Find our address */
1254 1:  mflr    r6
1255     addi    r6,r6,(2f - 1b)
1256     tovirt(r6,r6)
1257     lis r7,MSR_KERNEL@h
1258     ori r7,r7,MSR_KERNEL@l
1259     mtspr   SPRN_SRR0,r6
1260     mtspr   SPRN_SRR1,r7
1261     rfi             /* start execution out of TLB1[0] entry */
1262 2:
1263 
1264 /* 8. Clear out the temp mapping
1265  *
1266  * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in
1267  */
1268     mtspr   SPRN_MAS0,r4
1269     tlbre
1270     mfspr   r5,SPRN_MAS1
1271     rlwinm  r5,r5,0,2,31    /* clear IPROT and VALID */
1272     mtspr   SPRN_MAS1,r5
1273     tlbwe
1274     sync
1275     isync
1276 
1277     /* We translate LR and return */
1278     tovirt(r8,r8)
1279     mtlr    r8
1280     blr
1281 
1282 have_hes:
1283     /* Set up MAS 0,1,2,3 and 7 for a tlbwe of a 1G entry that maps the
1284      * kernel linear mapping. We also set MAS8 once and for all here, though
1285      * that will eventually have to depend on whether we are running under
1286      * a hypervisor.
1287      */
1288 
1289     /* BEWARE, MAGIC
1290      * This code is called as an ordinary function on the boot CPU. But to
1291      * avoid duplication, this code is also used in SCOM bringup of
1292      * secondary CPUs. We read the code between the a2_tlbinit_code_start
1293      * and a2_tlbinit_code_end labels one instruction at a time and RAM it
1294      * into the new core via SCOM. That doesn't process branches, so there
1295      * must be none between those two labels. It also means if this code
1296      * ever takes any parameters, the SCOM code must also be updated to
1297      * provide them.
1298      */
1299     .globl a2_tlbinit_code_start
1300 a2_tlbinit_code_start:
1301 
1302     ori r11,r3,MAS0_WQ_ALLWAYS
1303     oris    r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
1304     mtspr   SPRN_MAS0,r11
1305     lis r3,(MAS1_VALID | MAS1_IPROT)@h
1306     ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
1307     mtspr   SPRN_MAS1,r3
1308     LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M)
1309     mtspr   SPRN_MAS2,r3
1310     li  r3,MAS3_SR | MAS3_SW | MAS3_SX
1311     mtspr   SPRN_MAS7_MAS3,r3
1312     li  r3,0
1313     mtspr   SPRN_MAS8,r3
1314 
1315     /* Write the TLB entry */
1316     tlbwe
1317 
1318     .globl a2_tlbinit_after_linear_map
1319 a2_tlbinit_after_linear_map:
1320 
1321     /* Now we branch to the new virtual address mapped by this entry */
1322 #ifdef CONFIG_RELOCATABLE
1323     ld  r5,PACATOC(r13)
1324     ld  r3,1f@got(r5)
1325 #else
1326     LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)
1327 #endif
1328     mtctr   r3
1329     bctr
1330 
1331 1:  /* We are now running at PAGE_OFFSET; clean the TLB of everything
1332      * else (including IPROTed things left by firmware).
1333      * r4 = TLBnCFG
1334      * r3 = current address (more or less)
1335      */
1336 
1337     li  r5,0
1338     mtspr   SPRN_MAS6,r5
1339     tlbsx   0,r3
1340 
1341     rlwinm  r9,r4,0,TLBnCFG_N_ENTRY
1342     rlwinm  r10,r4,8,0xff
1343     addi    r10,r10,-1  /* Get inner loop mask */
1344 
1345     li  r3,1
1346 
1347     mfspr   r5,SPRN_MAS1
1348     rlwinm  r5,r5,0,(~(MAS1_VALID|MAS1_IPROT))
1349 
1350     mfspr   r6,SPRN_MAS2
1351     rldicr  r6,r6,0,51      /* Extract EPN */
1352 
1353     mfspr   r7,SPRN_MAS0
1354     rlwinm  r7,r7,0,0xffff0fff  /* Clear HES and WQ */
1355 
1356     rlwinm  r8,r7,16,0xfff      /* Extract ESEL */
1357 
1358 2:  add r4,r3,r8
1359     and r4,r4,r10
1360 
1361     rlwimi  r7,r4,16,MAS0_ESEL_MASK
1362 
1363     mtspr   SPRN_MAS0,r7
1364     mtspr   SPRN_MAS1,r5
1365     mtspr   SPRN_MAS2,r6
1366     tlbwe
1367 
1368     addi    r3,r3,1
1369     and.    r4,r3,r10
1370 
1371     bne 3f
1372     addis   r6,r6,(1<<30)@h
1373 3:
1374     cmpw    r3,r9
1375     blt 2b
1376 
1377     .globl  a2_tlbinit_after_iprot_flush
1378 a2_tlbinit_after_iprot_flush:
1379 
1380     PPC_TLBILX(0,0,R0)
1381     sync
1382     isync
1383 
1384     .globl a2_tlbinit_code_end
1385 a2_tlbinit_code_end:
1386 
1387     /* We translate LR and return */
1388     mflr    r3
1389     tovirt(r3,r3)
1390     mtlr    r3
1391     blr
1392 
1393 /*
1394  * Main entry (boot CPU, thread 0)
1395  *
1396  * We enter here from head_64.S, possibly after the prom_init trampoline,
1397  * with r3 and r4 already saved to r31 and r30 respectively, and in 64-bit
1398  * mode. Anything else is as it was left by the bootloader.
1399  *
1400  * Initial requirements of this port:
1401  *
1402  * - Kernel loaded at 0 physical
1403  * - A good lump of memory mapped 0:0 by UTLB entry 0
1404  * - MSR:IS & MSR:DS set to 0
1405  *
1406  * Note that some of the above requirements will be relaxed in the future
1407  * as the kernel becomes smarter at dealing with different initial conditions,
1408  * but for now you have to be careful.
1409  */
1410 _GLOBAL(start_initialization_book3e)
1411     mflr    r28
1412 
1413     /* First, we need to set up some initial TLBs to map the kernel
1414      * text, data and bss at PAGE_OFFSET. We don't have a real mode
1415      * and always use AS 0, so we just set it up to match our link
1416      * address and never use 0-based addresses.
1417      */
1418     bl  initial_tlb_book3e
1419 
1420     /* Init global core bits */
1421     bl  init_core_book3e
1422 
1423     /* Init per-thread bits */
1424     bl  init_thread_book3e
1425 
1426     /* Return to common init code */
1427     tovirt(r28,r28)
1428     mtlr    r28
1429     blr
1430 
1431 
1432 /*
1433  * Secondary core/processor entry
1434  *
1435  * This is entered for thread 0 of a secondary core; all other threads
1436  * are expected to be stopped. It's similar to start_initialization_book3e
1437  * except that it's generally entered from the holding loop in head_64.S
1438  * after CPUs have been gathered by Open Firmware.
1439  *
1440  * We assume we are in 32-bit mode, running with whatever TLB entry was
1441  * set for us by the firmware or POR engine.
1442  */
1443 _GLOBAL(book3e_secondary_core_init_tlb_set)
1444     li  r4,1
1445     b   generic_secondary_smp_init
1446 
1447 _GLOBAL(book3e_secondary_core_init)
1448     mflr    r28
1449 
1450     /* Do we need to set up an initial TLB entry? */
1451     cmplwi  r4,0
1452     bne 2f
1453 
1454     /* Setup TLB for this core */
1455     bl  initial_tlb_book3e
1456 
1457     /* We can return from the above running at a different
1458      * address, so recalculate r2 (TOC)
1459      */
1460     bl  relative_toc
1461 
1462     /* Init global core bits */
1463 2:  bl  init_core_book3e
1464 
1465     /* Init per-thread bits */
1466 3:  bl  init_thread_book3e
1467 
1468     /* Return to common init code at proper virtual address.
1469      *
1470      * Due to various previous assumptions, we know we entered this
1471      * function at either the final PAGE_OFFSET mapping or using a
1472      * 1:1 mapping at 0, so we don't bother doing a complicated check
1473      * here, we just ensure the return address has the right top bits.
1474      *
1475      * Note that if we ever want to be smarter about where we can be
1476      * started from, we have to be careful that by the time we reach
1477      * the code below we may already be running at a different location
1478      * than the one we were called from since initial_tlb_book3e can
1479      * have moved us already.
1480      */
1481     cmpdi   cr0,r28,0
1482     blt 1f
1483     lis r3,PAGE_OFFSET@highest
1484     sldi    r3,r3,32
1485     or  r28,r28,r3
1486 1:  mtlr    r28
1487     blr
1488 
1489 _GLOBAL(book3e_secondary_thread_init)
1490     mflr    r28
1491     b   3b
1492 
1493     .globl init_core_book3e
1494 init_core_book3e:
1495     /* Establish the interrupt vector base */
1496     tovirt(r2,r2)
1497     LOAD_REG_ADDR(r3, interrupt_base_book3e)
1498     mtspr   SPRN_IVPR,r3
1499     sync
1500     blr
1501 
1502 init_thread_book3e:
1503     lis r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
1504     mtspr   SPRN_EPCR,r3
1505 
1506     /* Make sure interrupts are off */
1507     wrteei  0
1508 
1509     /* disable all timers and clear out status */
1510     li  r3,0
1511     mtspr   SPRN_TCR,r3
1512     mfspr   r3,SPRN_TSR
1513     mtspr   SPRN_TSR,r3
1514 
1515     blr
1516 
1517 _GLOBAL(__setup_base_ivors)
1518     SET_IVOR(0, 0x020) /* Critical Input */
1519     SET_IVOR(1, 0x000) /* Machine Check */
1520     SET_IVOR(2, 0x060) /* Data Storage */ 
1521     SET_IVOR(3, 0x080) /* Instruction Storage */
1522     SET_IVOR(4, 0x0a0) /* External Input */ 
1523     SET_IVOR(5, 0x0c0) /* Alignment */ 
1524     SET_IVOR(6, 0x0e0) /* Program */ 
1525     SET_IVOR(7, 0x100) /* FP Unavailable */ 
1526     SET_IVOR(8, 0x120) /* System Call */ 
1527     SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */ 
1528     SET_IVOR(10, 0x160) /* Decrementer */ 
1529     SET_IVOR(11, 0x180) /* Fixed Interval Timer */ 
1530     SET_IVOR(12, 0x1a0) /* Watchdog Timer */ 
1531     SET_IVOR(13, 0x1c0) /* Data TLB Error */ 
1532     SET_IVOR(14, 0x1e0) /* Instruction TLB Error */
1533     SET_IVOR(15, 0x040) /* Debug */
1534 
1535     sync
1536 
1537     blr
1538 
1539 _GLOBAL(setup_altivec_ivors)
1540     SET_IVOR(32, 0x200) /* AltiVec Unavailable */
1541     SET_IVOR(33, 0x220) /* AltiVec Assist */
1542     blr
1543 
1544 _GLOBAL(setup_perfmon_ivor)
1545     SET_IVOR(35, 0x260) /* Performance Monitor */
1546     blr
1547 
1548 _GLOBAL(setup_doorbell_ivors)
1549     SET_IVOR(36, 0x280) /* Processor Doorbell */
1550     SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
1551     blr
1552 
1553 _GLOBAL(setup_ehv_ivors)
1554     SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
1555     SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
1556     SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
1557     SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
1558     blr
1559 
1560 _GLOBAL(setup_lrat_ivor)
1561     SET_IVOR(42, 0x340) /* LRAT Error */
1562     blr