0001 /* SPDX-License-Identifier: GPL-2.0-or-later */
0002 /*
0003  * Kernel execution entry point code.
0004  *
0005  *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
0006  *  Initial PowerPC version.
0007  *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
0008  *  Rewritten for PReP
0009  *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
0010  *  Low-level exception handlers, MMU support, and rewrite.
0011  *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
0012  *  PowerPC 8xx modifications.
0013  *    Copyright (c) 1998-1999 TiVo, Inc.
0014  *  PowerPC 403GCX modifications.
0015  *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
0016  *  PowerPC 403GCX/405GP modifications.
0017  *    Copyright 2000 MontaVista Software Inc.
0018  *  PPC405 modifications
0019  *  PowerPC 403GCX/405GP modifications.
0020  *  Author: MontaVista Software, Inc.
0021  *      frank_rowand@mvista.com or source@mvista.com
0022  *      debbie_chu@mvista.com
0023  *    Copyright 2002-2004 MontaVista Software, Inc.
0024  *  PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
0025  *    Copyright 2004 Freescale Semiconductor, Inc
0026  *  PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
0027  */
0028 
0029 #include <linux/init.h>
0030 #include <linux/threads.h>
0031 #include <linux/pgtable.h>
0032 #include <asm/processor.h>
0033 #include <asm/page.h>
0034 #include <asm/mmu.h>
0035 #include <asm/cputable.h>
0036 #include <asm/thread_info.h>
0037 #include <asm/ppc_asm.h>
0038 #include <asm/asm-offsets.h>
0039 #include <asm/cache.h>
0040 #include <asm/ptrace.h>
0041 #include <asm/export.h>
0042 #include <asm/feature-fixups.h>
0043 #include "head_booke.h"
0044 
0045 /* As with the other PowerPC ports, it is expected that when code
0046  * execution begins here, the following registers contain valid, yet
0047  * optional, information:
0048  *
0049  *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
0050  *   r4 - Starting address of the init RAM disk
0051  *   r5 - Ending address of the init RAM disk
0052  *   r6 - Start of kernel command line string (e.g. "mem=128")
0053  *   r7 - End of kernel command line string
0054  *
0055  */
0056     __HEAD
0057 _GLOBAL(_stext);
0058 _GLOBAL(_start);
0059     /*
0060      * Reserve a word at a fixed location to store the address
0061      * of abatron_pteptrs
0062      */
0063     nop
0064 
0065     /* Translate device tree address to physical, save in r30/r31 */
0066     bl  get_phys_addr
0067     mr  r30,r3
0068     mr  r31,r4
0069 
0070     li  r25,0           /* phys kernel start (low) */
0071     li  r24,0           /* CPU number */
0072     li  r23,0           /* phys kernel start (high) */
0073 
0074 #ifdef CONFIG_RELOCATABLE
0075     LOAD_REG_ADDR_PIC(r3, _stext)   /* Get our current runtime base */
0076 
0077     /* Translate _stext address to physical, save in r23/r25 */
0078     bl  get_phys_addr
0079     mr  r23,r3
0080     mr  r25,r4
0081 
0082     bcl 20,31,$+4
0083 0:  mflr    r8
0084     addis   r3,r8,(is_second_reloc - 0b)@ha
0085     lwz r19,(is_second_reloc - 0b)@l(r3)
0086 
0087     /* Check if this is the second relocation. */
0088     cmpwi   r19,1
0089     bne 1f
0090 
0091     /*
0092      * For the second relocation, we already got the real memstart_addr
0093      * from the device tree, so we map PAGE_OFFSET to memstart_addr.
0094      * The virtual address of the kernel start is then:
0095      *          PAGE_OFFSET + (kernstart_addr - memstart_addr)
0096      * Since the offset between kernstart_addr and memstart_addr is
0097      * never beyond 1G, we can just use the lower 32 bits of each
0098      * for the calculation.
0099      */
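    /*
     * For example (values illustrative): with memstart_addr =
     * 0x20000000 and kernstart_addr = 0x24000000, the relocated
     * kernel runs at PAGE_OFFSET + 0x04000000.
     */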
0100     lis r3,PAGE_OFFSET@h
0101 
0102     addis   r4,r8,(kernstart_addr - 0b)@ha
0103     addi    r4,r4,(kernstart_addr - 0b)@l
0104     lwz r5,4(r4)
0105 
0106     addis   r6,r8,(memstart_addr - 0b)@ha
0107     addi    r6,r6,(memstart_addr - 0b)@l
0108     lwz r7,4(r6)
0109 
0110     subf    r5,r7,r5
0111     add r3,r3,r5
0112     b   2f
0113 
0114 1:
0115     /*
0116      * We have the runtime (virtual) address of our base.
0117      * We calculate the offset of that base within its 64M page.
0118      * We could map the 64M page we belong to at PAGE_OFFSET and
0119      * get going from there.
0120      */
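    /*
     * For example (illustrative): with KERNELBASE = 0xc0000000 and a
     * physical start of 0x01100000, r6 = 0x01100000 and r5 = 0, so the
     * required virtual base comes out to 0xc1100000.
     */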
0121     lis r4,KERNELBASE@h
0122     ori r4,r4,KERNELBASE@l
0123     rlwinm  r6,r25,0,0x3ffffff      /* r6 = PHYS_START % 64M */
0124     rlwinm  r5,r4,0,0x3ffffff       /* r5 = KERNELBASE % 64M */
0125     subf    r3,r5,r6            /* r3 = r6 - r5 */
0126     add r3,r4,r3            /* Required Virtual Address */
0127 
0128 2:  bl  relocate
0129 
0130     /*
0131      * For the second relocation, we already set the right tlb entries
0132      * for the kernel space, so skip the code in fsl_booke_entry_mapping.S
0133      */
0134     cmpwi   r19,1
0135     beq set_ivor
0136 #endif
0137 
0138 /* We try not to make any assumptions about how the boot loader
0139  * set up or used the TLBs.  We invalidate all mappings from the
0140  * boot loader and load a single entry in TLB1[0] to map the
0141  * first 64M of kernel memory.  Any boot info passed from the
0142  * bootloader needs to live in this first 64M.
0143  *
0144  * Requirement on bootloader:
0145  *  - The page we're executing in needs to reside in TLB1 and
0146  *    have IPROT=1.  If not, an invalidate broadcast could
0147  *    evict the entry we're currently executing in.
0148  *
0149  *  r3 = Index of TLB1 we're executing in
0150  *  r4 = Current MSR[IS]
0151  *  r5 = Index of TLB1 temp mapping
0152  *
0153  * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
0154  * if needed
0155  */
0156 
0157 _GLOBAL(__early_start)
0158     LOAD_REG_ADDR_PIC(r20, kernstart_virt_addr)
0159     lwz     r20,0(r20)
0160 
0161 #define ENTRY_MAPPING_BOOT_SETUP
0162 #include "fsl_booke_entry_mapping.S"
0163 #undef ENTRY_MAPPING_BOOT_SETUP
0164 
0165 set_ivor:
0166     /* Establish the interrupt vector offsets */
0167     SET_IVOR(0,  CriticalInput);
0168     SET_IVOR(1,  MachineCheck);
0169     SET_IVOR(2,  DataStorage);
0170     SET_IVOR(3,  InstructionStorage);
0171     SET_IVOR(4,  ExternalInput);
0172     SET_IVOR(5,  Alignment);
0173     SET_IVOR(6,  Program);
0174     SET_IVOR(7,  FloatingPointUnavailable);
0175     SET_IVOR(8,  SystemCall);
0176     SET_IVOR(9,  AuxillaryProcessorUnavailable);
0177     SET_IVOR(10, Decrementer);
0178     SET_IVOR(11, FixedIntervalTimer);
0179     SET_IVOR(12, WatchdogTimer);
0180     SET_IVOR(13, DataTLBError);
0181     SET_IVOR(14, InstructionTLBError);
0182     SET_IVOR(15, DebugCrit);
0183 
0184     /* Establish the interrupt vector base */
0185     lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
0186     mtspr   SPRN_IVPR,r4
0187 
0188     /* Setup the defaults for TLB entries */
0189     li  r2,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l
0190     mtspr   SPRN_MAS4, r2
0191 
0192 #if !defined(CONFIG_BDI_SWITCH)
0193     /*
0194      * The Abatron BDI JTAG debugger does not tolerate others
0195      * mucking with the debug registers.
0196      */
0197     lis r2,DBCR0_IDM@h
0198     mtspr   SPRN_DBCR0,r2
0199     isync
0200     /* clear any residual debug events */
0201     li  r2,-1
0202     mtspr   SPRN_DBSR,r2
0203 #endif
0204 
0205 #ifdef CONFIG_SMP
0206     /* Check to see if we're the second processor, and jump
0207      * to the secondary_start code if so
0208      */
0209     LOAD_REG_ADDR_PIC(r24, boot_cpuid)
0210     lwz r24, 0(r24)
0211     cmpwi   r24, -1
0212     mfspr   r24,SPRN_PIR
0213     bne __secondary_start
0214 #endif
0215 
0216     /*
0217      * This is where the main kernel code starts.
0218      */
0219 
0220     /* ptr to current */
0221     lis r2,init_task@h
0222     ori r2,r2,init_task@l
0223 
0224     /* ptr to current thread */
0225     addi    r4,r2,THREAD    /* init task's THREAD */
0226     mtspr   SPRN_SPRG_THREAD,r4
0227 
0228     /* stack */
0229     lis r1,init_thread_union@h
0230     ori r1,r1,init_thread_union@l
0231     li  r0,0
0232     stwu    r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
0233 
0234 #ifdef CONFIG_SMP
0235     stw r24, TASK_CPU(r2)
0236 #endif
0237 
0238     bl  early_init
0239 
0240 #ifdef CONFIG_KASAN
0241     bl  kasan_early_init
0242 #endif
0243 #ifdef CONFIG_RELOCATABLE
0244     mr  r3,r30
0245     mr  r4,r31
0246 #ifdef CONFIG_PHYS_64BIT
0247     mr  r5,r23
0248     mr  r6,r25
0249 #else
0250     mr  r5,r25
0251 #endif
0252     bl  relocate_init
0253 #endif
0254 
0255 #ifdef CONFIG_DYNAMIC_MEMSTART
0256     lis r3,kernstart_addr@ha
0257     la  r3,kernstart_addr@l(r3)
0258 #ifdef CONFIG_PHYS_64BIT
0259     stw r23,0(r3)
0260     stw r25,4(r3)
0261 #else
0262     stw r25,0(r3)
0263 #endif
0264 #endif
0265 
0266 /*
0267  * Decide what sort of machine this is and initialize the MMU.
0268  */
0269     mr  r3,r30
0270     mr  r4,r31
0271     bl  machine_init
0272     bl  MMU_init
0273 
0274     /* Setup PTE pointers for the Abatron bdiGDB */
0275     lis r6, swapper_pg_dir@h
0276     ori r6, r6, swapper_pg_dir@l
0277     lis r5, abatron_pteptrs@h
0278     ori r5, r5, abatron_pteptrs@l
0279     lis     r3, kernstart_virt_addr@ha
0280     lwz     r4, kernstart_virt_addr@l(r3)
0281     stw r5, 0(r4)   /* Save abatron_pteptrs at a fixed location */
0282     stw r6, 0(r5)
0283 
0284     /* Let's move on */
0285     lis r4,start_kernel@h
0286     ori r4,r4,start_kernel@l
0287     lis r3,MSR_KERNEL@h
0288     ori r3,r3,MSR_KERNEL@l
0289     mtspr   SPRN_SRR0,r4
0290     mtspr   SPRN_SRR1,r3
0291     rfi         /* change context and jump to start_kernel */
0292 
0293 /* Macros to hide the PTE size differences
0294  *
0295  * FIND_PTE -- walks the page tables given EA & pgdir pointer
0296  *   r10 -- EA of fault
0297  *   r11 -- PGDIR pointer
0298  *   r12 -- free
0299  *   label 2: is the bailout case
0300  *
0301  * if we find the pte (fall through):
0302  *   r11 is low pte word
0303  *   r12 is pointer to the pte
0304  *   r10 is the pshift from the PGD, if we're a hugepage
0305  */
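/*
 * Roughly, FIND_PTE performs this walk (an illustrative C sketch;
 * PGDIR_SHIFT, PAGE_MASK and pte_index() are schematic stand-ins, not
 * the exact kernel definitions for each PTE layout):
 *
 *      unsigned long ent = pgdir[ea >> PGDIR_SHIFT];
 *      if (!(ent & PAGE_MASK))
 *              goto bail;                      // label 2: above
 *      pte_t *pte = (pte_t *)(ent & PAGE_MASK) + pte_index(ea);
 *      pte_low = *pte;                         // ends up in r11
 *
 * The hugepage variant additionally checks a huge-page marker bit in
 * the PGD entry and, if set, derives the page shift from the entry
 * itself instead of descending to a PTE.
 */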
0306 #ifdef CONFIG_PTE_64BIT
0307 #ifdef CONFIG_HUGETLB_PAGE
0308 #define FIND_PTE    \
0309     rlwinm  r12, r10, 13, 19, 29;   /* Compute pgdir/pmd offset */  \
0310     lwzx    r11, r12, r11;      /* Get pgd/pmd entry */     \
0311     rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */   \
0312     blt 1000f;          /* Normal non-huge page */  \
0313     beq 2f;         /* Bail if no table */      \
0314     oris    r11, r11, PD_HUGE@h;    /* Put back address bit */  \
0315     andi.   r10, r11, HUGEPD_SHIFT_MASK@l; /* extract size field */ \
0316     xor r12, r10, r11;      /* drop size bits from pointer */ \
0317     b   1001f;                          \
0318 1000:   rlwimi  r12, r10, 23, 20, 28;   /* Compute pte address */   \
0319     li  r10, 0;         /* clear r10 */         \
0320 1001:   lwz r11, 4(r12);        /* Get pte entry */
0321 #else
0322 #define FIND_PTE    \
0323     rlwinm  r12, r10, 13, 19, 29;   /* Compute pgdir/pmd offset */  \
0324     lwzx    r11, r12, r11;      /* Get pgd/pmd entry */     \
0325     rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */   \
0326     beq 2f;         /* Bail if no table */      \
0327     rlwimi  r12, r10, 23, 20, 28;   /* Compute pte address */   \
0328     lwz r11, 4(r12);        /* Get pte entry */
0329 #endif /* HUGEPAGE */
0330 #else /* !PTE_64BIT */
0331 #define FIND_PTE    \
0332     rlwimi  r11, r10, 12, 20, 29;   /* Create L1 (pgdir/pmd) address */ \
0333     lwz r11, 0(r11);        /* Get L1 entry */          \
0334     rlwinm. r12, r11, 0, 0, 19; /* Extract L2 (pte) base address */ \
0335     beq 2f;         /* Bail if no table */          \
0336     rlwimi  r12, r10, 22, 20, 29;   /* Compute PTE address */       \
0337     lwz r11, 0(r12);        /* Get Linux PTE */
0338 #endif
0339 
0340 /*
0341  * Interrupt vector entry code
0342  *
0343  * The Book E MMUs are always on, so we don't need to handle
0344  * interrupts in real mode as with previous PPC processors. In
0345  * this case we handle interrupts in the kernel virtual address
0346  * space.
0347  *
0348  * Interrupt vectors are dynamically placed relative to the
0349  * interrupt prefix as determined by the address of interrupt_base.
0350  * The interrupt vector offsets are programmed using the labels
0351  * for each interrupt vector entry.
0352  *
0353  * Interrupt vectors must be aligned on a 16 byte boundary.
0354  * We align on a 32 byte cache line boundary for good measure.
0355  */
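    /*
     * For example (illustrative): if interrupt_base links at
     * 0xc0000000 and DataStorage sits 0x300 bytes in, then IVPR holds
     * 0xc0000000 and IVOR2 holds 0x300, so a Data Storage interrupt
     * vectors to 0xc0000300.
     */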
0356 
0357 interrupt_base:
0358     /* Critical Input Interrupt */
0359     CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
0360 
0361     /* Machine Check Interrupt */
0362     MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
0363 
0364     /* Data Storage Interrupt */
0365     START_EXCEPTION(DataStorage)
0366     NORMAL_EXCEPTION_PROLOG(0x300, DATA_STORAGE)
0367     mfspr   r5,SPRN_ESR     /* Grab the ESR, save it */
0368     stw r5,_ESR(r11)
0369     mfspr   r4,SPRN_DEAR        /* Grab the DEAR, save it */
0370     stw r4, _DEAR(r11)
0371     andis.  r10,r5,(ESR_ILK|ESR_DLK)@h
0372     bne 1f
0373     prepare_transfer_to_handler
0374     bl  do_page_fault
0375     b   interrupt_return
0376 1:
0377     prepare_transfer_to_handler
0378     bl  CacheLockingException
0379     b   interrupt_return
0380 
0381     /* Instruction Storage Interrupt */
0382     INSTRUCTION_STORAGE_EXCEPTION
0383 
0384     /* External Input Interrupt */
0385     EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ)
0386 
0387     /* Alignment Interrupt */
0388     ALIGNMENT_EXCEPTION
0389 
0390     /* Program Interrupt */
0391     PROGRAM_EXCEPTION
0392 
0393     /* Floating Point Unavailable Interrupt */
0394 #ifdef CONFIG_PPC_FPU
0395     FP_UNAVAILABLE_EXCEPTION
0396 #else
0397     EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception)
0398 #endif
0399 
0400     /* System Call Interrupt */
0401     START_EXCEPTION(SystemCall)
0402     SYSCALL_ENTRY   0xc00 BOOKE_INTERRUPT_SYSCALL SPRN_SRR1
0403 
0404     /* Auxiliary Processor Unavailable Interrupt */
0405     EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, unknown_exception)
0406 
0407     /* Decrementer Interrupt */
0408     DECREMENTER_EXCEPTION
0409 
0410     /* Fixed Interval Timer Interrupt */
0411     /* TODO: Add FIT support */
0412     EXCEPTION(0x3100, FIT, FixedIntervalTimer, unknown_exception)
0413 
0414     /* Watchdog Timer Interrupt */
0415 #ifdef CONFIG_BOOKE_WDT
0416     CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, WatchdogException)
0417 #else
0418     CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, unknown_exception)
0419 #endif
0420 
0421     /* Data TLB Error Interrupt */
0422     START_EXCEPTION(DataTLBError)
0423     mtspr   SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
0424     mfspr   r10, SPRN_SPRG_THREAD
0425     stw r11, THREAD_NORMSAVE(0)(r10)
0426 #ifdef CONFIG_KVM_BOOKE_HV
0427 BEGIN_FTR_SECTION
0428     mfspr   r11, SPRN_SRR1
0429 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
0430 #endif
0431     stw r12, THREAD_NORMSAVE(1)(r10)
0432     stw r13, THREAD_NORMSAVE(2)(r10)
0433     mfcr    r13
0434     stw r13, THREAD_NORMSAVE(3)(r10)
0435     DO_KVM  BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
0436 START_BTB_FLUSH_SECTION
0437     mfspr r11, SPRN_SRR1
0438     andi. r10,r11,MSR_PR
0439     beq 1f
0440     BTB_FLUSH(r10)
0441 1:
0442 END_BTB_FLUSH_SECTION
0443     mfspr   r10, SPRN_DEAR      /* Get faulting address */
0444 
0445     /* If we are faulting a kernel address, we have to use the
0446      * kernel page tables.
0447      */
0448     lis r11, PAGE_OFFSET@h
0449     cmplw   5, r10, r11
0450     blt 5, 3f
0451     lis r11, swapper_pg_dir@h
0452     ori r11, r11, swapper_pg_dir@l
0453 
0454     mfspr   r12,SPRN_MAS1       /* Set TID to 0 */
0455     rlwinm  r12,r12,0,16,1
0456     mtspr   SPRN_MAS1,r12
0457 
0458     b   4f
0459 
0460     /* Get the PGD for the current thread */
0461 3:
0462     mfspr   r11,SPRN_SPRG_THREAD
0463     lwz r11,PGDIR(r11)
0464 
0465 #ifdef CONFIG_PPC_KUAP
0466     mfspr   r12, SPRN_MAS1
0467     rlwinm. r12,r12,0,0x3fff0000
0468     beq 2f          /* KUAP fault */
0469 #endif
0470 
0471 4:
0472     /* Mask of required permission bits. Note that while we
0473      * do copy ESR:ST to the _PAGE_RW position, as trying to write
0474      * to an RO page is pretty common, we don't do it with
0475      * _PAGE_DIRTY. We could do it, but it's a fairly rare
0476      * event, so I'd rather take the overhead when it happens
0477      * than add an instruction here. We should measure whether
0478      * the whole thing is worth it in the first place, as we
0479      * could avoid loading SPRN_ESR completely in the first
0480      * place...
0481      *
0482      * TODO: Is it worth doing that mfspr & rlwimi in the first
0483      *       place or can we save a couple of instructions here ?
0484      */
0485     mfspr   r12,SPRN_ESR
0486 #ifdef CONFIG_PTE_64BIT
0487     li  r13,_PAGE_PRESENT
0488     oris    r13,r13,_PAGE_ACCESSED@h
0489 #else
0490     li  r13,_PAGE_PRESENT|_PAGE_ACCESSED
0491 #endif
0492     rlwimi  r13,r12,11,29,29    /* Copy ESR[ST] into the _PAGE_RW position */
0493 
0494     FIND_PTE
0495     andc.   r13,r13,r11     /* Check permission */
0496 
0497 #ifdef CONFIG_PTE_64BIT
0498 #ifdef CONFIG_SMP
0499     subf    r13,r11,r12     /* create false data dep */
0500     lwzx    r13,r11,r13     /* Get upper pte bits */
0501 #else
0502     lwz r13,0(r12)      /* Get upper pte bits */
0503 #endif
0504 #endif
0505 
0506     bne 2f          /* Bail if permission/valid mismatch */
0507 
0508     /* Jump to common tlb load */
0509     b   finish_tlb_load
0510 2:
0511     /* The bailout.  Restore registers to pre-exception conditions
0512      * and call the heavyweights to help us out.
0513      */
0514     mfspr   r10, SPRN_SPRG_THREAD
0515     lwz r11, THREAD_NORMSAVE(3)(r10)
0516     mtcr    r11
0517     lwz r13, THREAD_NORMSAVE(2)(r10)
0518     lwz r12, THREAD_NORMSAVE(1)(r10)
0519     lwz r11, THREAD_NORMSAVE(0)(r10)
0520     mfspr   r10, SPRN_SPRG_RSCRATCH0
0521     b   DataStorage
0522 
0523     /* Instruction TLB Error Interrupt */
0524     /*
0525      * Nearly the same as above, except we get our
0526      * information from different registers and bail out
0527      * to a different point.
0528      */
0529     START_EXCEPTION(InstructionTLBError)
0530     mtspr   SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
0531     mfspr   r10, SPRN_SPRG_THREAD
0532     stw r11, THREAD_NORMSAVE(0)(r10)
0533 #ifdef CONFIG_KVM_BOOKE_HV
0534 BEGIN_FTR_SECTION
0535     mfspr   r11, SPRN_SRR1
0536 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
0537 #endif
0538     stw r12, THREAD_NORMSAVE(1)(r10)
0539     stw r13, THREAD_NORMSAVE(2)(r10)
0540     mfcr    r13
0541     stw r13, THREAD_NORMSAVE(3)(r10)
0542     DO_KVM  BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
0543 START_BTB_FLUSH_SECTION
0544     mfspr r11, SPRN_SRR1
0545     andi. r10,r11,MSR_PR
0546     beq 1f
0547     BTB_FLUSH(r10)
0548 1:
0549 END_BTB_FLUSH_SECTION
0550 
0551     mfspr   r10, SPRN_SRR0      /* Get faulting address */
0552 
0553     /* If we are faulting a kernel address, we have to use the
0554      * kernel page tables.
0555      */
0556     lis r11, PAGE_OFFSET@h
0557     cmplw   5, r10, r11
0558     blt 5, 3f
0559     lis r11, swapper_pg_dir@h
0560     ori r11, r11, swapper_pg_dir@l
0561 
0562     mfspr   r12,SPRN_MAS1       /* Set TID to 0 */
0563     rlwinm  r12,r12,0,16,1
0564     mtspr   SPRN_MAS1,r12
0565 
0566     /* Make up the required permissions for kernel code */
0567 #ifdef CONFIG_PTE_64BIT
0568     li  r13,_PAGE_PRESENT | _PAGE_BAP_SX
0569     oris    r13,r13,_PAGE_ACCESSED@h
0570 #else
0571     li  r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
0572 #endif
0573     b   4f
0574 
0575     /* Get the PGD for the current thread */
0576 3:
0577     mfspr   r11,SPRN_SPRG_THREAD
0578     lwz r11,PGDIR(r11)
0579 
0580 #ifdef CONFIG_PPC_KUAP
0581     mfspr   r12, SPRN_MAS1
0582     rlwinm. r12,r12,0,0x3fff0000
0583     beq 2f          /* KUAP fault */
0584 #endif
0585 
0586     /* Make up the required permissions for user code */
0587 #ifdef CONFIG_PTE_64BIT
0588     li  r13,_PAGE_PRESENT | _PAGE_BAP_UX
0589     oris    r13,r13,_PAGE_ACCESSED@h
0590 #else
0591     li  r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
0592 #endif
0593 
0594 4:
0595     FIND_PTE
0596     andc.   r13,r13,r11     /* Check permission */
0597 
0598 #ifdef CONFIG_PTE_64BIT
0599 #ifdef CONFIG_SMP
0600     subf    r13,r11,r12     /* create false data dep */
0601     lwzx    r13,r11,r13     /* Get upper pte bits */
0602 #else
0603     lwz r13,0(r12)      /* Get upper pte bits */
0604 #endif
0605 #endif
0606 
0607     bne 2f          /* Bail if permission mismatch */
0608 
0609     /* Jump to common TLB load point */
0610     b   finish_tlb_load
0611 
0612 2:
0613     /* The bailout.  Restore registers to pre-exception conditions
0614      * and call the heavyweights to help us out.
0615      */
0616     mfspr   r10, SPRN_SPRG_THREAD
0617     lwz r11, THREAD_NORMSAVE(3)(r10)
0618     mtcr    r11
0619     lwz r13, THREAD_NORMSAVE(2)(r10)
0620     lwz r12, THREAD_NORMSAVE(1)(r10)
0621     lwz r11, THREAD_NORMSAVE(0)(r10)
0622     mfspr   r10, SPRN_SPRG_RSCRATCH0
0623     b   InstructionStorage
0624 
0625 /* Define SPE handlers for e500v2 */
0626 #ifdef CONFIG_SPE
0627     /* SPE Unavailable */
0628     START_EXCEPTION(SPEUnavailable)
0629     NORMAL_EXCEPTION_PROLOG(0x2010, SPE_UNAVAIL)
0630     beq 1f
0631     bl  load_up_spe
0632     b   fast_exception_return
0633 1:  prepare_transfer_to_handler
0634     bl  KernelSPE
0635     b   interrupt_return
0636 #elif defined(CONFIG_SPE_POSSIBLE)
0637     EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, unknown_exception)
0638 #endif /* CONFIG_SPE_POSSIBLE */
0639 
0640     /* SPE Floating Point Data */
0641 #ifdef CONFIG_SPE
0642     START_EXCEPTION(SPEFloatingPointData)
0643     NORMAL_EXCEPTION_PROLOG(0x2030, SPE_FP_DATA)
0644     prepare_transfer_to_handler
0645     bl  SPEFloatingPointException
0646     REST_NVGPRS(r1)
0647     b   interrupt_return
0648 
0649     /* SPE Floating Point Round */
0650     START_EXCEPTION(SPEFloatingPointRound)
0651     NORMAL_EXCEPTION_PROLOG(0x2050, SPE_FP_ROUND)
0652     prepare_transfer_to_handler
0653     bl  SPEFloatingPointRoundException
0654     REST_NVGPRS(r1)
0655     b   interrupt_return
0656 #elif defined(CONFIG_SPE_POSSIBLE)
0657     EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, unknown_exception)
0658     EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, unknown_exception)
0659 #endif /* CONFIG_SPE_POSSIBLE */
0660 
0661 
0662     /* Performance Monitor */
0663     EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
0664           performance_monitor_exception)
0665 
0666     EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception)
0667 
0668     CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \
0669                CriticalDoorbell, unknown_exception)
0670 
0671     /* Debug Interrupt */
0672     DEBUG_DEBUG_EXCEPTION
0673     DEBUG_CRIT_EXCEPTION
0674 
0675     GUEST_DOORBELL_EXCEPTION
0676 
0677     CRITICAL_EXCEPTION(0, GUEST_DBELL_CRIT, CriticalGuestDoorbell, \
0678                unknown_exception)
0679 
0680     /* Hypercall */
0681     EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception)
0682 
0683     /* Embedded Hypervisor Privilege */
0684     EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception)
0685 
0686 interrupt_end:
0687 
0688 /*
0689  * Local functions
0690  */
0691 
0692 /*
0693  * Both the instruction and data TLB miss get to this
0694  * point to load the TLB.
0695  *  r10 - tsize encoding (if HUGETLB_PAGE) or available to use
0696  *  r11 - TLB (info from Linux PTE)
0697  *  r12 - available to use
0698  *  r13 - upper bits of PTE (if PTE_64BIT) or available to use
0699  *  CR5 - results of addr >= PAGE_OFFSET
0700  *  MAS0, MAS1 - loaded with proper value when we get here
0701  *  MAS2, MAS3 - will need additional info from Linux PTE
0702  *  Upon exit, we reload everything and RFI.
0703  */
0704 finish_tlb_load:
0705 #ifdef CONFIG_HUGETLB_PAGE
0706     cmpwi   6, r10, 0           /* check for huge page */
0707     beq 6, finish_tlb_load_cont     /* !huge */
0708 
0709     /* Alas, we need more scratch registers for hugepages */
0710     mfspr   r12, SPRN_SPRG_THREAD
0711     stw r14, THREAD_NORMSAVE(4)(r12)
0712     stw r15, THREAD_NORMSAVE(5)(r12)
0713     stw r16, THREAD_NORMSAVE(6)(r12)
0714     stw r17, THREAD_NORMSAVE(7)(r12)
0715 
0716     /* Get the next_tlbcam_idx percpu var */
0717 #ifdef CONFIG_SMP
0718     lwz r15, TASK_CPU-THREAD(r12)
0719     lis     r14, __per_cpu_offset@h
0720     ori     r14, r14, __per_cpu_offset@l
0721     rlwinm  r15, r15, 2, 0, 29
0722     lwzx    r16, r14, r15
0723 #else
0724     li  r16, 0
0725 #endif
0726     lis     r17, next_tlbcam_idx@h
0727     ori r17, r17, next_tlbcam_idx@l
0728     add r17, r17, r16           /* r17 = *next_tlbcam_idx */
0729     lwz     r15, 0(r17)         /* r15 = next_tlbcam_idx */
0730 
0731     lis r14, MAS0_TLBSEL(1)@h       /* select TLB1 (TLBCAM) */
0732     rlwimi  r14, r15, 16, 4, 15     /* next_tlbcam_idx entry */
0733     mtspr   SPRN_MAS0, r14
0734 
0735     /* Extract TLB1CFG(NENTRY) */
0736     mfspr   r16, SPRN_TLB1CFG
0737     andi.   r16, r16, 0xfff
0738 
0739     /* Update next_tlbcam_idx, wrapping when necessary */
0740     addi    r15, r15, 1
0741     cmpw    r15, r16
0742     blt     100f
0743     lis r14, tlbcam_index@h
0744     ori r14, r14, tlbcam_index@l
0745     lwz r15, 0(r14)
0746 100:    stw r15, 0(r17)
0747 
0748     /*
0749      * Calc MAS1_TSIZE from r10 (which has pshift encoded)
0750      * tlb_enc = (pshift - 10).
0751      */
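    /*
     * For example (illustrative): a 4M huge page has pshift = 22, so
     * tlb_enc = 12, i.e. a page size of 1K << 12 = 4M.
     */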
0752     subi    r15, r10, 10
0753     mfspr   r16, SPRN_MAS1
0754     rlwimi  r16, r15, 7, 20, 24
0755     mtspr   SPRN_MAS1, r16
0756 
0757     /* copy the pshift for use later */
0758     mr  r14, r10
0759 
0760     /* fall through */
0761 
0762 #endif /* CONFIG_HUGETLB_PAGE */
0763 
0764     /*
0765      * We set execute, because we don't have the granularity to
0766      * properly set this at the page level (Linux problem).
0767      * Many of these bits are software only.  Bits we don't set
0768      * here we assume (as we properly should) have the appropriate value.
0769      */
0770 finish_tlb_load_cont:
0771 #ifdef CONFIG_PTE_64BIT
0772     rlwinm  r12, r11, 32-2, 26, 31  /* Move in perm bits */
0773     andi.   r10, r11, _PAGE_DIRTY
0774     bne 1f
0775     li  r10, MAS3_SW | MAS3_UW
0776     andc    r12, r12, r10
0777 1:  rlwimi  r12, r13, 20, 0, 11 /* grab RPN[32:43] */
0778     rlwimi  r12, r11, 20, 12, 19    /* grab RPN[44:51] */
0779 2:  mtspr   SPRN_MAS3, r12
0780 BEGIN_MMU_FTR_SECTION
0781     srwi    r10, r13, 12        /* grab RPN[12:31] */
0782     mtspr   SPRN_MAS7, r10
0783 END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
0784 #else
0785     li  r10, (_PAGE_EXEC | _PAGE_PRESENT)
0786     mr  r13, r11
0787     rlwimi  r10, r11, 31, 29, 29    /* extract _PAGE_DIRTY into SW */
0788     and r12, r11, r10
0789     andi.   r10, r11, _PAGE_USER    /* Test for _PAGE_USER */
0790     slwi    r10, r12, 1
0791     or  r10, r10, r12
0792     rlwinm  r10, r10, 0, ~_PAGE_EXEC    /* Clear SX on user pages */
0793     iseleq  r12, r12, r10
0794     rlwimi  r13, r12, 0, 20, 31 /* Get RPN from PTE, merge w/ perms */
0795     mtspr   SPRN_MAS3, r13
0796 #endif
0797 
0798     mfspr   r12, SPRN_MAS2
0799 #ifdef CONFIG_PTE_64BIT
0800     rlwimi  r12, r11, 32-19, 27, 31 /* extract WIMGE from pte */
0801 #else
0802     rlwimi  r12, r11, 26, 27, 31    /* extract WIMGE from pte */
0803 #endif
0804 #ifdef CONFIG_HUGETLB_PAGE
0805     beq 6, 3f           /* don't mask if page isn't huge */
0806     li  r13, 1
0807     slw r13, r13, r14
0808     subi    r13, r13, 1
0809     rlwinm  r13, r13, 0, 0, 19  /* bottom bits used for WIMGE/etc */
0810     andc    r12, r12, r13       /* mask off ea bits within the page */
0811 #endif
0812 3:  mtspr   SPRN_MAS2, r12
0813 
0814 tlb_write_entry:
0815     tlbwe
0816 
0817     /* Done...restore registers and get out of here.  */
0818     mfspr   r10, SPRN_SPRG_THREAD
0819 #ifdef CONFIG_HUGETLB_PAGE
0820     beq 6, 8f /* skip restore for 4k page faults */
0821     lwz r14, THREAD_NORMSAVE(4)(r10)
0822     lwz r15, THREAD_NORMSAVE(5)(r10)
0823     lwz r16, THREAD_NORMSAVE(6)(r10)
0824     lwz r17, THREAD_NORMSAVE(7)(r10)
0825 #endif
0826 8:  lwz r11, THREAD_NORMSAVE(3)(r10)
0827     mtcr    r11
0828     lwz r13, THREAD_NORMSAVE(2)(r10)
0829     lwz r12, THREAD_NORMSAVE(1)(r10)
0830     lwz r11, THREAD_NORMSAVE(0)(r10)
0831     mfspr   r10, SPRN_SPRG_RSCRATCH0
0832     rfi                 /* Force context change */
0833 
0834 #ifdef CONFIG_SPE
0835 /* Note that the SPE support is closely modeled after the AltiVec
0836  * support.  Changes to one are likely to be applicable to the
0837  * other!  */
0838 _GLOBAL(load_up_spe)
0839 /*
0840  * Disable SPE for the task which had SPE previously,
0841  * and save its SPE registers in its thread_struct.
0842  * Enables SPE for use in the kernel on return.
0843  * On SMP we know the SPE units are free, since we give them up every
0844  * switch.  -- Kumar
0845  */
0846     mfmsr   r5
0847     oris    r5,r5,MSR_SPE@h
0848     mtmsr   r5          /* enable use of SPE now */
0849     isync
0850     /* enable use of SPE after return */
0851     oris    r9,r9,MSR_SPE@h
0852     mfspr   r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
0853     li  r4,1
0854     li  r10,THREAD_ACC
0855     stw r4,THREAD_USED_SPE(r5)
0856     evlddx  evr4,r10,r5
0857     evmra   evr4,evr4
0858     REST_32EVRS(0,r10,r5,THREAD_EVR0)
0859     blr
0860 
0861 /*
0862  * SPE unavailable trap from kernel - print a message, but let
0863  * the task use SPE in the kernel until it returns to user mode.
0864  */
0865 KernelSPE:
0866     lwz r3,_MSR(r1)
0867     oris    r3,r3,MSR_SPE@h
0868     stw r3,_MSR(r1) /* enable use of SPE after return */
0869 #ifdef CONFIG_PRINTK
0870     lis r3,87f@h
0871     ori r3,r3,87f@l
0872     mr  r4,r2       /* current */
0873     lwz r5,_NIP(r1)
0874     bl  _printk
0875 #endif
0876     b   interrupt_return
0877 #ifdef CONFIG_PRINTK
0878 87: .string "SPE used in kernel  (task=%p, pc=%x)  \n"
0879 #endif
0880     .align  4,0
0881 
0882 #endif /* CONFIG_SPE */
0883 
0884 /*
0885  * Translate the effective address in r3 to a physical address. The result
0886  * is put in r3 (upper 32 bits) and r4 (lower 32 bits).
0887  */
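/*
 * For example (illustrative): if r3 = 0xc0001234 hits a 64M mapping
 * whose physical base is 0, the page offset is 0x1234 and the result
 * is r4 = 0x00001234 (with r3 taking the upper bits from MAS7 on
 * CONFIG_PHYS_64BIT).
 */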
0888 get_phys_addr:
0889     mfmsr   r8
0890     mfspr   r9,SPRN_PID
0891     rlwinm  r9,r9,16,0x3fff0000 /* turn PID into MAS6[SPID] */
0892     rlwimi  r9,r8,28,0x00000001 /* turn MSR[DS] into MAS6[SAS] */
0893     mtspr   SPRN_MAS6,r9
0894 
0895     tlbsx   0,r3            /* must succeed */
0896 
0897     mfspr   r8,SPRN_MAS1
0898     mfspr   r12,SPRN_MAS3
0899     rlwinm  r9,r8,25,0x1f       /* r9 = MAS1[TSIZE] */
0900     li  r10,1024
0901     slw r10,r10,r9      /* r10 = page size */
0902     addi    r10,r10,-1
0903     and r11,r3,r10      /* r11 = page offset */
0904     andc    r4,r12,r10      /* r4 = page base */
0905     or  r4,r4,r11       /* r4 = devtree phys addr */
0906 #ifdef CONFIG_PHYS_64BIT
0907     mfspr   r3,SPRN_MAS7
0908 #endif
0909     blr
0910 
0911 /*
0912  * Global functions
0913  */
0914 
0915 #ifdef CONFIG_E500
0916 #ifndef CONFIG_PPC_E500MC
0917 /* Adjust or setup IVORs for e500v1/v2 */
0918 _GLOBAL(__setup_e500_ivors)
0919     li  r3,DebugCrit@l
0920     mtspr   SPRN_IVOR15,r3
0921     li  r3,SPEUnavailable@l
0922     mtspr   SPRN_IVOR32,r3
0923     li  r3,SPEFloatingPointData@l
0924     mtspr   SPRN_IVOR33,r3
0925     li  r3,SPEFloatingPointRound@l
0926     mtspr   SPRN_IVOR34,r3
0927     li  r3,PerformanceMonitor@l
0928     mtspr   SPRN_IVOR35,r3
0929     sync
0930     blr
0931 #else
0932 /* Adjust or setup IVORs for e500mc */
0933 _GLOBAL(__setup_e500mc_ivors)
0934     li  r3,DebugDebug@l
0935     mtspr   SPRN_IVOR15,r3
0936     li  r3,PerformanceMonitor@l
0937     mtspr   SPRN_IVOR35,r3
0938     li  r3,Doorbell@l
0939     mtspr   SPRN_IVOR36,r3
0940     li  r3,CriticalDoorbell@l
0941     mtspr   SPRN_IVOR37,r3
0942     sync
0943     blr
0944 
0945 /* Setup IVORs for the embedded hypervisor */
0946 _GLOBAL(__setup_ehv_ivors)
0947     li  r3,GuestDoorbell@l
0948     mtspr   SPRN_IVOR38,r3
0949     li  r3,CriticalGuestDoorbell@l
0950     mtspr   SPRN_IVOR39,r3
0951     li  r3,Hypercall@l
0952     mtspr   SPRN_IVOR40,r3
0953     li  r3,Ehvpriv@l
0954     mtspr   SPRN_IVOR41,r3
0955     sync
0956     blr
0957 #endif /* CONFIG_PPC_E500MC */
0958 #endif /* CONFIG_E500 */
0959 
0960 #ifdef CONFIG_SPE
0961 /*
0962  * extern void __giveup_spe(struct task_struct *prev)
0963  *
0964  */
0965 _GLOBAL(__giveup_spe)
0966     addi    r3,r3,THREAD        /* want THREAD of task */
0967     lwz r5,PT_REGS(r3)
0968     cmpi    0,r5,0
0969     SAVE_32EVRS(0, r4, r3, THREAD_EVR0)
0970     evxor   evr6, evr6, evr6    /* clear out evr6 */
0971     evmwumiaa evr6, evr6, evr6  /* evr6 <- ACC = 0 * 0 + ACC */
0972     li  r4,THREAD_ACC
0973     evstddx evr6, r4, r3        /* save off accumulator */
0974     beq 1f
0975     lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
0976     lis r3,MSR_SPE@h
0977     andc    r4,r4,r3        /* disable SPE for previous task */
0978     stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
0979 1:
0980     blr
0981 #endif /* CONFIG_SPE */
0982 
0983 /*
0984  * extern void abort(void)
0985  *
0986  * At present, this routine just applies a system reset.
0987  */
0988 _GLOBAL(abort)
0989     li  r13,0
0990     mtspr   SPRN_DBCR0,r13      /* disable all debug events */
0991     isync
0992     mfmsr   r13
0993     ori r13,r13,MSR_DE@l    /* Enable Debug Events */
0994     mtmsr   r13
0995     isync
0996     mfspr   r13,SPRN_DBCR0
0997     lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
0998     mtspr   SPRN_DBCR0,r13
0999     isync
1000 
1001 #ifdef CONFIG_SMP
1002 /* When we get here, r24 needs to hold the CPU # */
1003     .globl __secondary_start
1004 __secondary_start:
1005     LOAD_REG_ADDR_PIC(r3, tlbcam_index)
1006     lwz r3,0(r3)
1007     mtctr   r3
1008     li  r26,0       /* r26 safe? */
1009 
1010     bl  switch_to_as1
1011     mr  r27,r3      /* tlb entry */
1012     /* Load each CAM entry */
1013 1:  mr  r3,r26
1014     bl  loadcam_entry
1015     addi    r26,r26,1
1016     bdnz    1b
1017     mr  r3,r27      /* tlb entry */
1018     LOAD_REG_ADDR_PIC(r4, memstart_addr)
1019     lwz r4,0(r4)
1020     mr  r5,r25      /* phys kernel start */
1021     rlwinm  r5,r5,0,~0x3ffffff  /* aligned 64M */
1022     subf    r4,r5,r4    /* memstart_addr - phys kernel start */
1023     lis r7,KERNELBASE@h
1024     ori r7,r7,KERNELBASE@l
1025     cmpw    r20,r7      /* if kernstart_virt_addr != KERNELBASE, randomized */
1026     beq 2f
1027     li  r4,0
1028 2:  li  r5,0        /* no device tree */
1029     li  r6,0        /* not boot cpu */
1030     bl  restore_to_as0
1031 
1032 
1033     lis r3,__secondary_hold_acknowledge@h
1034     ori r3,r3,__secondary_hold_acknowledge@l
1035     stw r24,0(r3)
1036 
1037     li  r3,0
1038     mr  r4,r24      /* Why? */
1039     bl  call_setup_cpu
1040 
1041     /* get current's stack and current */
1042     lis r2,secondary_current@ha
1043     lwz r2,secondary_current@l(r2)
1044     lwz r1,TASK_STACK(r2)
1045 
1046     /* stack */
1047     addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1048     li  r0,0
1049     stw r0,0(r1)
1050 
1051     /* ptr to current thread */
1052     addi    r4,r2,THREAD    /* address of our thread_struct */
1053     mtspr   SPRN_SPRG_THREAD,r4
1054 
1055     /* Setup the defaults for TLB entries */
1056     li  r4,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l
1057     mtspr   SPRN_MAS4,r4
1058 
1059     /* Jump to start_secondary */
1060     lis r4,MSR_KERNEL@h
1061     ori r4,r4,MSR_KERNEL@l
1062     lis r3,start_secondary@h
1063     ori r3,r3,start_secondary@l
1064     mtspr   SPRN_SRR0,r3
1065     mtspr   SPRN_SRR1,r4
1066     sync
1067     rfi
1068     sync
1069 
1070     .globl __secondary_hold_acknowledge
1071 __secondary_hold_acknowledge:
1072     .long   -1
1073 #endif
1074 
1075 /*
1076  * Create a 64M TLB1 entry for the given address at the given entry index
1077  * r3 - entry
1078  * r4 - virtual address
1079  * r5/r6 - physical address
1080  */
1081 _GLOBAL(create_kaslr_tlb_entry)
1082     lis     r7,0x1000               /* Set MAS0(TLBSEL) = 1 */
1083     rlwimi  r7,r3,16,4,15           /* Setup MAS0 = TLBSEL | ESEL(r3) */
1084     mtspr   SPRN_MAS0,r7            /* Write MAS0 */
1085 
1086     lis     r3,(MAS1_VALID|MAS1_IPROT)@h
1087     ori     r3,r3,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
1088     mtspr   SPRN_MAS1,r3            /* Write MAS1 */
1089 
1090     lis     r3,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@h
1091     ori     r3,r3,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@l
1092     and     r3,r3,r4
1093     ori r3,r3,MAS2_M_IF_NEEDED@l
1094     mtspr   SPRN_MAS2,r3            /* Write MAS2(EPN) */
1095 
1096 #ifdef CONFIG_PHYS_64BIT
1097     ori     r8,r6,(MAS3_SW|MAS3_SR|MAS3_SX)
1098     mtspr   SPRN_MAS3,r8            /* Write MAS3(RPN) */
1099     mtspr   SPRN_MAS7,r5
1100 #else
1101     ori     r8,r5,(MAS3_SW|MAS3_SR|MAS3_SX)
1102     mtspr   SPRN_MAS3,r8            /* Write MAS3(RPN) */
1103 #endif
1104 
1105     tlbwe                           /* Write TLB */
1106     isync
1107     sync
1108     blr
1109 
1110 /*
1111  * Return to the start of the relocated kernel and run again
1112  * r3 - virtual address of fdt
1113  * r4 - entry of the kernel
1114  */
1115 _GLOBAL(reloc_kernel_entry)
1116     mfmsr   r7
1117     rlwinm  r7, r7, 0, ~(MSR_IS | MSR_DS)
1118 
1119     mtspr   SPRN_SRR0,r4
1120     mtspr   SPRN_SRR1,r7
1121     rfi
1122 
1123 /*
1124  * Create a tlb entry with the same effective and physical address as
1125  * the tlb entry used by the currently running code, but with TS set to 1.
1126  * Then switch to address space 1. It returns with r3 set to
1127  * the ESEL of the newly created tlb entry.
1128  */
1129 _GLOBAL(switch_to_as1)
1130     mflr    r5
1131 
1132     /* Find an unused entry */
1133     mfspr   r3,SPRN_TLB1CFG
1134     andi.   r3,r3,0xfff
1135     mfspr   r4,SPRN_PID
1136     rlwinm  r4,r4,16,0x3fff0000 /* turn PID into MAS6[SPID] */
1137     mtspr   SPRN_MAS6,r4
1138 1:  lis r4,0x1000       /* Set MAS0(TLBSEL) = 1 */
1139     addi    r3,r3,-1
1140     rlwimi  r4,r3,16,4,15       /* Setup MAS0 = TLBSEL | ESEL(r3) */
1141     mtspr   SPRN_MAS0,r4
1142     tlbre
1143     mfspr   r4,SPRN_MAS1
1144     andis.  r4,r4,MAS1_VALID@h
1145     bne 1b
1146 
1147     /* Get the tlb entry used by the current running code */
1148     bcl 20,31,$+4
1149 0:  mflr    r4
1150     tlbsx   0,r4
1151 
1152     mfspr   r4,SPRN_MAS1
1153     ori r4,r4,MAS1_TS       /* Set the TS = 1 */
1154     mtspr   SPRN_MAS1,r4
1155 
1156     mfspr   r4,SPRN_MAS0
1157     rlwinm  r4,r4,0,~MAS0_ESEL_MASK
1158     rlwimi  r4,r3,16,4,15       /* Setup MAS0 = TLBSEL | ESEL(r3) */
1159     mtspr   SPRN_MAS0,r4
1160     tlbwe
1161     isync
1162     sync
1163 
1164     mfmsr   r4
1165     ori r4,r4,MSR_IS | MSR_DS
1166     mtspr   SPRN_SRR0,r5
1167     mtspr   SPRN_SRR1,r4
1168     sync
1169     rfi
1170 
1171 /*
1172  * Restore to address space 0 and invalidate the tlb entry created
1173  * by switch_to_as1.
1174  * r3 - the tlb entry which should be invalidated
1175  * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0)
1176  * r5 - device tree virtual address. If r4 is 0, r5 is ignored.
1177  * r6 - boot cpu
1178  */
1179 _GLOBAL(restore_to_as0)
1180     mflr    r0
1181 
1182     bcl 20,31,$+4
1183 0:  mflr    r9
1184     addi    r9,r9,1f - 0b
1185 
1186     /*
1187      * We may map the PAGE_OFFSET in AS0 to a different physical address,
1188      * so we need to calculate the right jump and device tree addresses based
1189      * on the offset passed by r4.
1190      */
1191     add r9,r9,r4
1192     add r5,r5,r4
1193     add r0,r0,r4
1194 
1195 2:  mfmsr   r7
1196     li  r8,(MSR_IS | MSR_DS)
1197     andc    r7,r7,r8
1198 
1199     mtspr   SPRN_SRR0,r9
1200     mtspr   SPRN_SRR1,r7
1201     sync
1202     rfi
1203 
1204     /* Invalidate the temporary tlb entry for AS1 */
1205 1:  lis r9,0x1000       /* Set MAS0(TLBSEL) = 1 */
1206     rlwimi  r9,r3,16,4,15       /* Setup MAS0 = TLBSEL | ESEL(r3) */
1207     mtspr   SPRN_MAS0,r9
1208     tlbre
1209     mfspr   r9,SPRN_MAS1
1210     rlwinm  r9,r9,0,2,31        /* Clear MAS1 Valid and IPROT */
1211     mtspr   SPRN_MAS1,r9
1212     tlbwe
1213     isync
1214 
1215     cmpwi   r4,0
1216     cmpwi   cr1,r6,0
1217     cror    eq,4*cr1+eq,eq
1218     bne 3f          /* offset != 0 && is_boot_cpu */
1219     mtlr    r0
1220     blr
1221 
1222     /*
1223      * PAGE_OFFSET will map to a different physical address, so
1224      * jump to _start to relocate again.
1225      */
1226 3:  mr  r3,r5
1227     bl  _start