Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-or-later */
0002 /*
0003  *  PowerPC version
0004  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
0005  *
0006  *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
0007  *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
0008  *  Adapted for Power Macintosh by Paul Mackerras.
0009  *  Low-level exception handlers and MMU support
0010  *  rewritten by Paul Mackerras.
0011  *    Copyright (C) 1996 Paul Mackerras.
0012  *
0013  *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
0014  *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
0015  *
0016  *  This file contains the entry point for the 64-bit kernel along
0017  *  with some early initialization code common to all 64-bit powerpc
0018  *  variants.
0019  */
0020 
0021 #include <linux/threads.h>
0022 #include <linux/init.h>
0023 #include <asm/reg.h>
0024 #include <asm/page.h>
0025 #include <asm/mmu.h>
0026 #include <asm/ppc_asm.h>
0027 #include <asm/head-64.h>
0028 #include <asm/asm-offsets.h>
0029 #include <asm/bug.h>
0030 #include <asm/cputable.h>
0031 #include <asm/setup.h>
0032 #include <asm/hvcall.h>
0033 #include <asm/thread_info.h>
0034 #include <asm/firmware.h>
0035 #include <asm/page_64.h>
0036 #include <asm/irqflags.h>
0037 #include <asm/kvm_book3s_asm.h>
0038 #include <asm/ptrace.h>
0039 #include <asm/hw_irq.h>
0040 #include <asm/cputhreads.h>
0041 #include <asm/ppc-opcode.h>
0042 #include <asm/export.h>
0043 #include <asm/feature-fixups.h>
0044 #ifdef CONFIG_PPC_BOOK3S
0045 #include <asm/exception-64s.h>
0046 #else
0047 #include <asm/exception-64e.h>
0048 #endif
0049 
0050 /* The physical memory is laid out such that the secondary processor
0051  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
0052  * using the layout described in exceptions-64s.S
0053  */
0054 
0055 /*
0056  * Entering into this code we make the following assumptions:
0057  *
0058  *  For pSeries or server processors:
0059  *   1. The MMU is off & open firmware is running in real mode.
0060  *   2. The primary CPU enters at __start.
0061  *   3. If the RTAS supports "query-cpu-stopped-state", then secondary
0062  *      CPUs will enter as directed by "start-cpu" RTAS call, which is
0063  *      generic_secondary_smp_init, with PIR in r3.
0064  *   4. Else the secondary CPUs will enter at secondary_hold (0x60) as
0065  *      directed by the "start-cpu" RTAS call, with PIR in r3.
0066  * -or- For OPAL entry:
0067  *   1. The MMU is off, processor in HV mode.
0068  *   2. The primary CPU enters at 0 with device-tree in r3, OPAL base
0069  *      in r8, and entry in r9 for debugging purposes.
0070  *   3. Secondary CPUs enter as directed by OPAL_START_CPU call, which
0071  *      is at generic_secondary_smp_init, with PIR in r3.
0072  *
0073  *  For Book3E processors:
0074  *   1. The MMU is on running in AS0 in a state defined in ePAPR
0075  *   2. The kernel is entered at __start
0076  */
0077 
0078 OPEN_FIXED_SECTION(first_256B, 0x0, 0x100)
0079 USE_FIXED_SECTION(first_256B)
0080     /*
0081      * Offsets are relative from the start of fixed section, and
0082      * first_256B starts at 0. Offsets are a bit easier to use here
0083      * than the fixed section entry macros.
0084      */
0085     . = 0x0
0086 _GLOBAL(__start)
0087     /* NOP this out unconditionally */
0088 BEGIN_FTR_SECTION
0089     FIXUP_ENDIAN
0090     b   __start_initialization_multiplatform
0091 END_FTR_SECTION(0, 1)
0092 
0093     /* Catch branch to 0 in real mode */
0094     trap
0095 
0096     /* Secondary processors spin on this value until it becomes non-zero.
0097      * When non-zero, it contains the real address of the function the cpu
0098      * should jump to.
0099      */
0100     .balign 8
0101     .globl  __secondary_hold_spinloop
0102 __secondary_hold_spinloop:
0103     .8byte  0x0
0104 
0105     /* Secondary processors write this value with their cpu # */
0106     /* after they enter the spin loop immediately below.      */
0107     .globl  __secondary_hold_acknowledge
0108 __secondary_hold_acknowledge:
0109     .8byte  0x0
0110 
0111 #ifdef CONFIG_RELOCATABLE
0112     /* This flag is set to 1 by a loader if the kernel should run
0113      * at the loaded address instead of the linked address.  This
0114      * is used by kexec-tools to keep the kdump kernel in the
0115      * crash_kernel region.  The loader is responsible for
0116      * observing the alignment requirement.
0117      */
0118 
0119 #ifdef CONFIG_RELOCATABLE_TEST
0120 #define RUN_AT_LOAD_DEFAULT 1       /* Test relocation, do not copy to 0 */
0121 #else
0122 #define RUN_AT_LOAD_DEFAULT 0x72756e30  /* "run0" -- relocate to 0 by default */
0123 #endif
0124 
0125     /* Do not move this variable as kexec-tools knows about it. */
0126     . = 0x5c
0127     .globl  __run_at_load
0128 __run_at_load:
0129 DEFINE_FIXED_SYMBOL(__run_at_load, first_256B)
0130     .long   RUN_AT_LOAD_DEFAULT
0131 #endif
0132 
0133     . = 0x60
0134 /*
0135  * The following code is used to hold secondary processors
0136  * in a spin loop after they have entered the kernel, but
0137  * before the bulk of the kernel has been relocated.  This code
0138  * is relocated to physical address 0x60 before prom_init is run.
0139  * All of it must fit below the first exception vector at 0x100.
0140  * Use .globl here not _GLOBAL because we want __secondary_hold
0141  * to be the actual text address, not a descriptor.
0142  */
0143     .globl  __secondary_hold
0144 __secondary_hold:
0145     FIXUP_ENDIAN
0146 #ifndef CONFIG_PPC_BOOK3E
0147     mfmsr   r24
0148     ori r24,r24,MSR_RI
0149     mtmsrd  r24         /* RI on */
0150 #endif
0151     /* Grab our physical cpu number */
0152     mr  r24,r3
0153     /* stash r4 for book3e */
0154     mr  r25,r4
0155 
0156     /* Tell the master cpu we're here */
0157     /* Relocation is off & we are located at an address less */
0158     /* than 0x100, so only need to grab low order offset.    */
0159     std r24,(ABS_ADDR(__secondary_hold_acknowledge, first_256B))(0)
0160     sync
0161 
0162     li  r26,0           /* address base: 0 in real mode */
0163 #ifdef CONFIG_PPC_BOOK3E
0164     tovirt(r26,r26)
0165 #endif
0166     /* All secondary cpus wait here until told to start. */
0167 100:    ld  r12,(ABS_ADDR(__secondary_hold_spinloop, first_256B))(r26)
0168     cmpdi   0,r12,0
0169     beq 100b
0170 
0171 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
0172 #ifdef CONFIG_PPC_BOOK3E
0173     tovirt(r12,r12)
0174 #endif
0175     mtctr   r12
0176     mr  r3,r24
0177     /*
0178      * it may be the case that other platforms have r4 right to
0179      * begin with, this gives us some safety in case it is not
0180      */
0181 #ifdef CONFIG_PPC_BOOK3E
0182     mr  r4,r25
0183 #else
0184     li  r4,0
0185 #endif
0186     /* Make sure that patched code is visible */
0187     isync
0188     bctr
0189 #else
0190 0:  trap                /* no SMP/kexec: a released secondary is a bug */
0191     EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
0192 #endif
0193 CLOSE_FIXED_SECTION(first_256B)
0194 
0195 /* This value is used to mark exception frames on the stack. */
0196     .section ".toc","aw"
0197 /* Placed in the TOC so it is addressable via r2. */
0198 exception_marker:
0199     .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
0200     .previous
0201 
0202 /*
0203  * On server, we include the exception vectors code here as it
0204  * relies on absolute addressing which is only possible within
0205  * this compilation unit
0206  */
0207 #ifdef CONFIG_PPC_BOOK3S
0208 #include "exceptions-64s.S"
0209 #else
0210 OPEN_TEXT_SECTION(0x100)
0211 #endif
0212 
0213 USE_TEXT_SECTION()
0214 
0215 #include "interrupt_64.S"
0216 
0217 #ifdef CONFIG_PPC_BOOK3E
0218 /*
0219  * The booting_thread_hwid holds the thread id we want to boot in cpu
0220  * hotplug case. It is set by cpu hotplug code, and is invalid by default.
0221  * The thread id is the same as the initial value of SPRN_PIR[THREAD_ID]
0222  * bit field.
0223  */
0224     .globl  booting_thread_hwid
0225 booting_thread_hwid:
0226     .long  INVALID_THREAD_HWID
0227     .align 3
0228 /*
0229  * start a thread in the same core
0230  * input parameters:
0231  * r3 = the thread physical id (only 0 and 1 are handled)
0232  * r4 = the entry point where thread starts
0233  */
0234 _GLOBAL(book3e_start_thread)
0235     LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
0236     cmpwi   r3, 0
0237     beq 10f
0238     cmpwi   r3, 1
0239     beq 11f
0240     /* If the thread id is invalid, just exit. */
0241     b   13f
0242 10:
0243     MTTMR(TMRN_IMSR0, 5)    /* thread 0 initial MSR = MSR_KERNEL (r5) */
0244     MTTMR(TMRN_INIA0, 4)    /* thread 0 initial next-instruction addr = r4 */
0245     b   12f
0246 11:
0247     MTTMR(TMRN_IMSR1, 5)    /* thread 1 initial MSR = MSR_KERNEL (r5) */
0248     MTTMR(TMRN_INIA1, 4)    /* thread 1 initial next-instruction addr = r4 */
0249 12:
0250     isync
0251     li  r6, 1
0252     sld r6, r6, r3      /* r6 = 1 << thread id */
0253     mtspr   SPRN_TENS, r6   /* thread-enable set: release the thread */
0254 13:
0255     blr
0256 
0257 /*
0258  * stop a thread in the same core
0259  * input parameter:
0260  * r3 = the thread physical id (only 0 and 1 are handled)
0261  */
0262 _GLOBAL(book3e_stop_thread)
0263     cmpwi   r3, 0
0264     beq 10f
0265     cmpwi   r3, 1
0266     beq 10f
0267     /* If the thread id is invalid, just exit. */
0268     b   13f
0269 10:
0270     li  r4, 1
0271     sld r4, r4, r3      /* r4 = 1 << thread id */
0272     mtspr   SPRN_TENC, r4   /* thread-enable clear: stop the thread */
0273 13:
0274     blr
0275 
0276 _GLOBAL(fsl_secondary_thread_init)
0277     mfspr   r4,SPRN_BUCSR   /* stash old BUCSR to detect a prior run */
0278 
0279     /* Enable branch prediction */
0280     lis     r3,BUCSR_INIT@h
0281     ori     r3,r3,BUCSR_INIT@l
0282     mtspr   SPRN_BUCSR,r3
0283     isync
0284 
0285     /*
0286      * Fix PIR to match the linear numbering in the device tree.
0287      *
0288      * On e6500, the reset value of PIR uses the low three bits for
0289      * the thread within a core, and the upper bits for the core
0290      * number.  There are two threads per core, so shift everything
0291      * but the low bit right by two bits so that the cpu numbering is
0292      * continuous.
0293      *
0294      * If the old value of BUCSR is non-zero, this thread has run
0295      * before.  Thus, we assume we are coming from kexec or a similar
0296      * scenario, and PIR is already set to the correct value.  This
0297      * is a bit of a hack, but there are limited opportunities for
0298      * getting information into the thread and the alternatives
0299      * seemed like they'd be overkill.  We can't tell just by looking
0300      * at the old PIR value which state it's in, since the same value
0301      * could be valid for one thread out of reset and for a different
0302      * thread in Linux.
0303      */
0304 
0305     mfspr   r3, SPRN_PIR
0306     cmpwi   r4,0
0307     bne 1f          /* BUCSR was non-zero: PIR already fixed up */
0308     rlwimi  r3, r3, 30, 2, 30   /* insert PIR>>2 into bits 2-30, keep low thread bit */
0309     mtspr   SPRN_PIR, r3
0310 1:
0311     mr  r24,r3
0312 
0313     /* turn on 64-bit mode */
0314     bl  enable_64b_mode
0315 
0316     /* get a valid TOC pointer, wherever we're mapped at */
0317     bl  relative_toc
0318     tovirt(r2,r2)
0319 
0320     /* Book3E initialization */
0321     mr  r3,r24
0322     bl  book3e_secondary_thread_init
0323     b   generic_secondary_common_init
0324 
0325 #endif /* CONFIG_PPC_BOOK3E */
0326 
0327 /*
0328  * On pSeries and most other platforms, secondary processors spin
0329  * in the following code.
0330  * At entry, r3 = this processor's number (physical cpu id)
0331  *
0332  * On Book3E, r4 = 1 to indicate that the initial TLB entry for
0333  * this core already exists (setup via some other mechanism such
0334  * as SCOM before entry).
0335  */
0336 _GLOBAL(generic_secondary_smp_init)
0337     FIXUP_ENDIAN
0338     mr  r24,r3
0339     mr  r25,r4
0340 
0341     /* turn on 64-bit mode */
0342     bl  enable_64b_mode
0343 
0344     /* get a valid TOC pointer, wherever we're mapped at */
0345     bl  relative_toc
0346     tovirt(r2,r2)
0347 
0348 #ifdef CONFIG_PPC_BOOK3E
0349     /* Book3E initialization */
0350     mr  r3,r24
0351     mr  r4,r25
0352     bl  book3e_secondary_core_init
0353 
0354 /*
0355  * After common core init has finished, check if the current thread is the
0356  * one we wanted to boot. If not, start the specified thread and stop the
0357  * current thread.
0358  */
0359     LOAD_REG_ADDR(r4, booting_thread_hwid)
0360     lwz     r3, 0(r4)
0361     li  r5, INVALID_THREAD_HWID
0362     cmpw    r3, r5
0363     beq 20f
0364 
0365     /*
0366      * The value of booting_thread_hwid has been stored in r3,
0367      * so make it invalid.
0368      */
0369     stw r5, 0(r4)
0370 
0371     /*
0372      * Get the current thread id and check if it is the one we wanted.
0373      * If not, start the one specified in booting_thread_hwid and stop
0374      * the current thread.
0375      */
0376     mfspr   r8, SPRN_TIR
0377     cmpw    r3, r8
0378     beq 20f
0379 
0380     /* start the specified thread */
0381     LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
0382     ld  r4, 0(r5)       /* presumably a function descriptor: load entry addr — verify for this ABI */
0383     bl  book3e_start_thread
0384 
0385     /* stop the current thread */
0386     mr  r3, r8
0387     bl  book3e_stop_thread
0388 10:
0389     b   10b         /* spin; this thread was just disabled via TENC */
0390 20:
0391 #endif
0392 
0393 generic_secondary_common_init:
0394     /* Set up a paca value for this processor. Since we have the
0395      * physical cpu id in r24, we need to search the pacas to find
0396      * which logical id maps to our physical one.
0397      */
0398 #ifndef CONFIG_SMP
0399     b   kexec_wait      /* wait for next kernel if !SMP  */
0400 #else
0401     LOAD_REG_ADDR(r8, paca_ptrs)    /* Load paca_ptrs pointer    */
0402     ld  r8,0(r8)        /* Get base vaddr of array   */
0403     LOAD_REG_ADDR(r7, nr_cpu_ids)   /* Load nr_cpu_ids address       */
0404     lwz r7,0(r7)        /* also the max paca allocated   */
0405     li  r5,0            /* logical cpu id                */
0406 1:
0407     sldi    r9,r5,3         /* get paca_ptrs[] index from cpu id */
0408     ldx r13,r9,r8       /* r13 = paca_ptrs[cpu id]       */
0409     lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca      */
0410     cmpw    r6,r24          /* Compare to our id             */
0411     beq 2f
0412     addi    r5,r5,1
0413     cmpw    r5,r7           /* Check if more pacas exist     */
0414     blt 1b
0415 
0416     mr  r3,r24          /* not found, copy phys to r3    */
0417     b   kexec_wait      /* next kernel might do better   */
0418 
0419 2:  SET_PACA(r13)
0420 #ifdef CONFIG_PPC_BOOK3E
0421     addi    r12,r13,PACA_EXTLB  /* and TLB exc frame in another  */
0422     mtspr   SPRN_SPRG_TLB_EXFRAME,r12
0423 #endif
0424 
0425     /* From now on, r24 is expected to be logical cpuid */
0426     mr  r24,r5
0427 
0428     /* Create a temp kernel stack for use before relocation is on.  */
0429     ld  r1,PACAEMERGSP(r13)
0430     subi    r1,r1,STACK_FRAME_OVERHEAD
0431 
0432     /* See if we need to call a cpu state restore handler */
0433     LOAD_REG_ADDR(r23, cur_cpu_spec)
0434     ld  r23,0(r23)
0435     ld  r12,CPU_SPEC_RESTORE(r23)
0436     cmpdi   0,r12,0
0437     beq 3f
0438 #ifdef CONFIG_PPC64_ELF_ABI_V1
0439     ld  r12,0(r12)      /* ABIv1: dereference the function descriptor */
0440 #endif
0441     mtctr   r12
0442     bctrl
0443 
0444 3:  LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */
0445     lwarx   r4,0,r3         /* atomic decrement via lwarx/stwcx. loop */
0446     subi    r4,r4,1
0447     stwcx.  r4,0,r3
0448     bne 3b          /* reservation lost: retry */
0449     isync
0450 
0451 4:  HMT_LOW
0452     lbz r23,PACAPROCSTART(r13)  /* Test if this processor should */
0453                     /* start.            */
0454     cmpwi   0,r23,0
0455     beq 4b          /* Loop until told to go     */
0456 
0457     sync                /* order paca.run and cur_cpu_spec */
0458     isync               /* In case code patching happened */
0459 
0460     b   __secondary_start
0461 #endif /* SMP */
0462 
0463 /*
0464  * Turn the MMU off.
0465  * Assumes we're mapped EA == RA if the MMU is on.
0466  * Clobbers r0, r3, r4; returns to the caller with IR/DR cleared.
0467  */
0467 #ifdef CONFIG_PPC_BOOK3S
0468 __mmu_off:
0469     mfmsr   r3
0470     andi.   r0,r3,MSR_IR|MSR_DR
0471     beqlr               /* MMU already off: plain return */
0472     mflr    r4
0473     andc    r3,r3,r0        /* clear IR and DR in the MSR image */
0474     mtspr   SPRN_SRR0,r4        /* "return" to our caller ... */
0475     mtspr   SPRN_SRR1,r3        /* ... with translation disabled */
0476     sync
0477     rfid
0478     b   .   /* prevent speculative execution */
0479 #endif
0480 
0481 
0482 /*
0483  * Here is our main kernel entry point. We support currently 2 kind of entries
0484  * depending on the value of r5.
0485  *
0486  *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
0487  *                 in r3...r7
0488  *   
0489  *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
0490  *                 DT block, r4 is a physical pointer to the kernel itself
0491  *
0492  */
0493 __start_initialization_multiplatform:
0494     /* Make sure we are running in 64 bits mode */
0495     bl  enable_64b_mode
0496 
0497     /* Get TOC pointer (current runtime address) */
0498     bl  relative_toc
0499 
0500     /* find out where we are now */
0501     bcl 20,31,$+4
0502 0:  mflr    r26         /* r26 = runtime addr here */
0503     addis   r26,r26,(_stext - 0b)@ha
0504     addi    r26,r26,(_stext - 0b)@l /* current runtime base addr */
0505 
0506     /*
0507      * Are we booted from a PROM Of-type client-interface ?
0508      */
0509     cmpldi  cr0,r5,0
0510     beq 1f
0511     b   __boot_from_prom        /* yes -> prom */
0512 1:
0513     /* Save parameters */
0514     mr  r31,r3
0515     mr  r30,r4
0516 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
0517     /* Save OPAL entry */
0518     mr  r28,r8
0519     mr  r29,r9
0520 #endif
0521 
0522 #ifdef CONFIG_PPC_BOOK3E
0523     bl  start_initialization_book3e
0524     b   __after_prom_start
0525 #else
0526     /* Setup some critical 970 SPRs before switching MMU off */
0527     mfspr   r0,SPRN_PVR
0528     srwi    r0,r0,16        /* PVR upper half = processor version */
0529     cmpwi   r0,0x39     /* 970 */
0530     beq 1f
0531     cmpwi   r0,0x3c     /* 970FX */
0532     beq 1f
0533     cmpwi   r0,0x44     /* 970MP */
0534     beq 1f
0535     cmpwi   r0,0x45     /* 970GX */
0536     bne 2f
0537 1:  bl  __cpu_preinit_ppc970
0538 2:
0539 
0540     /* Switch off MMU if not already off */
0541     bl  __mmu_off
0542     b   __after_prom_start
0543 #endif /* CONFIG_PPC_BOOK3E */
0544 
0545 __REF
0546 __boot_from_prom:
0547 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
0548     /* Save parameters (r3-r7: legacy OF client-interface arguments) */
0549     mr  r31,r3
0550     mr  r30,r4
0551     mr  r29,r5
0552     mr  r28,r6
0553     mr  r27,r7
0554 
0555     /*
0556      * Align the stack to 16-byte boundary
0557      * Depending on the size and layout of the ELF sections in the initial
0558      * boot binary, the stack pointer may be unaligned on PowerMac
0559      */
0560     rldicr  r1,r1,0,59      /* clear low 4 bits of r1 */
0561 
0562 #ifdef CONFIG_RELOCATABLE
0563     /* Relocate code for where we are now */
0564     mr  r3,r26
0565     bl  relocate
0566 #endif
0567 
0568     /* Restore parameters */
0569     mr  r3,r31
0570     mr  r4,r30
0571     mr  r5,r29
0572     mr  r6,r28
0573     mr  r7,r27
0574 
0575     /* Do all of the interaction with OF client interface */
0576     mr  r8,r26          /* r8 = our runtime base address */
0577     bl  prom_init
0578 #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */
0579 
0580     /* We never return. We also hit that trap if trying to boot
0581      * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
0582     trap
0583     .previous
0584 
0585 __after_prom_start:
0586 #ifdef CONFIG_RELOCATABLE
0587     /* process relocations for the final address of the kernel */
0588     lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
0589     sldi    r25,r25,32
0590 #if defined(CONFIG_PPC_BOOK3E)
0591     tovirt(r26,r26)     /* on booke, we already run at PAGE_OFFSET */
0592 #endif
0593     lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)  /* loader's run-in-place flag */
0594 #if defined(CONFIG_PPC_BOOK3E)
0595     tophys(r26,r26)
0596 #endif
0597     cmplwi  cr0,r7,1    /* flagged to stay where we are ? */
0598     bne 1f
0599     add r25,r25,r26     /* run-in-place: final base = virt base + load addr */
0600 1:  mr  r3,r25
0601     bl  relocate
0602 #if defined(CONFIG_PPC_BOOK3E)
0603     /* IVPR needs to be set after relocation. */
0604     bl  init_core_book3e
0605 #endif
0606 #endif
0607 
0608 /*
0609  * We need to run with _stext at physical address PHYSICAL_START.
0610  * This will leave some code in the first 256B of
0611  * real memory, which are reserved for software use.
0612  *
0613  * Note: This process overwrites the OF exception vectors.
0614  */
0615     li  r3,0            /* target addr */
0616 #ifdef CONFIG_PPC_BOOK3E
0617     tovirt(r3,r3)       /* on booke, we already run at PAGE_OFFSET */
0618 #endif
0619     mr. r4,r26          /* In some cases the loader may  */
0620 #if defined(CONFIG_PPC_BOOK3E)
0621     tovirt(r4,r4)
0622 #endif
0623     beq 9f          /* have already put us at zero */
0624     li  r6,0x100        /* Start offset, the first 0x100 */
0625                     /* bytes were copied earlier.    */
0626 
0627 #ifdef CONFIG_RELOCATABLE
0628 /*
0629  * Check if the kernel has to be running as relocatable kernel based on the
0630  * variable __run_at_load, if it is set the kernel is treated as relocatable
0631  * kernel, otherwise it will be moved to PHYSICAL_START
0632  */
0633 #if defined(CONFIG_PPC_BOOK3E)
0634     tovirt(r26,r26)     /* on booke, we already run at PAGE_OFFSET */
0635 #endif
0636     lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
0637     cmplwi  cr0,r7,1
0638     bne 3f
0639 
0640 #ifdef CONFIG_PPC_BOOK3E
0641     LOAD_REG_ADDR(r5, __end_interrupts)
0642     LOAD_REG_ADDR(r11, _stext)
0643     sub r5,r5,r11       /* r5 = size of the interrupt vectors */
0644 #else
0645     /* just copy interrupts */
0646     LOAD_REG_IMMEDIATE_SYM(r5, r11, FIXED_SYMBOL_ABS_ADDR(__end_interrupts))
0647 #endif
0648     b   5f
0649 3:
0650 #endif
0651     /* # bytes of memory to copy */
0652     lis r5,(ABS_ADDR(copy_to_here, text))@ha
0653     addi    r5,r5,(ABS_ADDR(copy_to_here, text))@l
0654 
0655     bl  copy_and_flush      /* copy the first n bytes    */
0656                     /* this includes the code being  */
0657                     /* executed here.        */
0658     /* Jump to the copy of this code that we just made */
0659     addis   r8,r3,(ABS_ADDR(4f, text))@ha
0660     addi    r12,r8,(ABS_ADDR(4f, text))@l   /* r12 = relocated address of label 4 */
0661     mtctr   r12
0662     bctr
0663 
0664 .balign 8
0665 p_end: .8byte _end - copy_to_here
0666 
0667 4:
0668     /*
0669      * Now copy the rest of the kernel up to _end, add
0670      * _end - copy_to_here to the copy limit and run again.
0671      */
0672     addis   r8,r26,(ABS_ADDR(p_end, text))@ha
0673     ld      r8,(ABS_ADDR(p_end, text))@l(r8)
0674     add r5,r5,r8
0675 5:  bl  copy_and_flush      /* copy the rest */
0676 
0677 9:  b   start_here_multiplatform
0678 
0679 /*
0680  * Copy routine used to copy the kernel to start at physical address 0
0681  * and flush and invalidate the caches as needed.
0682  * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
0683  * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
0684  *
0685  * Note: this routine *only* clobbers r0, r6 and lr
0686  */
0687 _GLOBAL(copy_and_flush)
0688     addi    r5,r5,-8
0689     addi    r6,r6,-8
0690 4:  li  r0,8            /* Use the smallest common  */
0691                     /* denominator cache line   */
0692                     /* size.  This results in   */
0693                     /* extra cache line flushes */
0694                     /* but operation is correct.    */
0695                     /* Can't get cache line size    */
0696                     /* from NACA as it is being */
0697                     /* moved too.           */
0698 
0699     mtctr   r0          /* put # words/line in ctr  */
0700 3:  addi    r6,r6,8         /* copy a cache line        */
0701     ldx r0,r6,r4        /* (8 doublewords = 64 bytes per outer pass) */
0702     stdx    r0,r6,r3
0703     bdnz    3b
0704     dcbst   r6,r3           /* write it to memory       */
0705     sync
0706     icbi    r6,r3           /* flush the icache line    */
0707     cmpld   0,r6,r5
0708     blt 4b
0709     sync
0710     addi    r5,r5,8         /* restore caller's r5/r6 convention */
0711     addi    r6,r6,8
0712     isync
0713     blr
0714 
0715 _ASM_NOKPROBE_SYMBOL(copy_and_flush); /* Called in real mode */
0716 
0717 .align 8
0718 copy_to_here:
0719 
0720 #ifdef CONFIG_SMP
0721 #ifdef CONFIG_PPC_PMAC
0722 /*
0723  * On PowerMac, secondary processors start from the reset vector, which
0724  * is temporarily turned into a call to one of the functions below.
0725  */
0726     .section ".text";
0727     .align 2 ;
0728 
0729     .globl  __secondary_start_pmac_0
0730 __secondary_start_pmac_0:
0731     /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
0732     li  r24,0
0733     b   1f
0734     li  r24,1
0735     b   1f
0736     li  r24,2
0737     b   1f
0738     li  r24,3
0739 1:
0740     
0741 _GLOBAL(pmac_secondary_start)
0742     /* turn on 64-bit mode */
0743     bl  enable_64b_mode
0744 
0745     li  r0,0
0746     mfspr   r3,SPRN_HID4
0747     rldimi  r3,r0,40,23 /* clear bit 23 (rm_ci) */
0748     sync
0749     mtspr   SPRN_HID4,r3
0750     isync
0751     sync
0752     slbia
0753 
0754     /* get TOC pointer (real address) */
0755     bl  relative_toc
0756     tovirt(r2,r2)
0757 
0758     /* Copy some CPU settings from CPU 0 */
0759     bl  __restore_cpu_ppc970
0760 
0761     /* pSeries do that early though I don't think we really need it */
0762     mfmsr   r3
0763     ori r3,r3,MSR_RI
0764     mtmsrd  r3          /* RI on */
0765 
0766     /* Set up a paca value for this processor. */
0767     LOAD_REG_ADDR(r4,paca_ptrs) /* Load paca pointer        */
0768     ld  r4,0(r4)        /* Get base vaddr of paca_ptrs array */
0769     sldi    r5,r24,3        /* get paca_ptrs[] index from cpu id */
0770     ldx r13,r5,r4       /* r13 = paca_ptrs[cpu id]       */
0771     SET_PACA(r13)           /* Save vaddr of paca in an SPRG*/
0772 
0773     /* Mark interrupts soft and hard disabled (they might be enabled
0774      * in the PACA when doing hotplug)
0775      */
0776     li  r0,IRQS_DISABLED
0777     stb r0,PACAIRQSOFTMASK(r13)
0778     li  r0,PACA_IRQ_HARD_DIS
0779     stb r0,PACAIRQHAPPENED(r13)
0780 
0781     /* Create a temp kernel stack for use before relocation is on.  */
0782     ld  r1,PACAEMERGSP(r13)
0783     subi    r1,r1,STACK_FRAME_OVERHEAD
0784 
0785     b   __secondary_start
0786 
0787 #endif /* CONFIG_PPC_PMAC */
0788 
0789 /*
0790  * This function is called after the master CPU has released the
0791  * secondary processors.  The execution environment is relocation off.
0792  * The paca for this processor has the following fields initialized at
0793  * this point:
0794  *   1. Processor number
0795  *   2. Segment table pointer (virtual address)
0796  * On entry the following are set:
0797  *   r1        = stack pointer (real addr of temp stack)
0798  *   r24       = cpu# (in Linux terms)
0799  *   r13       = paca virtual address
0800  *   SPRG_PACA = paca virtual address
0801  */
0802     .section ".text";
0803     .align 2 ;
0804 
0805     .globl  __secondary_start
0806 __secondary_start:
0807     /* Set thread priority to MEDIUM */
0808     HMT_MEDIUM
0809 
0810     /*
0811      * Do early setup for this CPU, in particular initialising the MMU so we
0812      * can turn it on below. This is a call to C, which is OK, we're still
0813      * running on the emergency stack.
0814      */
0815     bl  early_setup_secondary
0816 
0817     /*
0818      * The primary has initialized our kernel stack for us in the paca, grab
0819      * it and put it in r1. We must *not* use it until we turn on the MMU
0820      * below, because it may not be inside the RMO.
0821      */
0822     ld  r1, PACAKSAVE(r13)
0823 
0824     /* Clear backchain so we get nice backtraces */
0825     li  r7,0
0826     mtlr    r7
0827 
0828     /* Mark interrupts soft and hard disabled (they might be enabled
0829      * in the PACA when doing hotplug)
0830      */
0831     li  r7,IRQS_DISABLED
0832     stb r7,PACAIRQSOFTMASK(r13)
0833     li  r0,PACA_IRQ_HARD_DIS
0834     stb r0,PACAIRQHAPPENED(r13)
0835 
0836     /* enable MMU and jump to start_secondary */
0837     LOAD_REG_ADDR(r3, start_secondary_prolog)
0838     LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)  /* target MSR: MMU-on kernel state */
0839 
0840     mtspr   SPRN_SRR0,r3
0841     mtspr   SPRN_SRR1,r4
0842     RFI_TO_KERNEL
0843     b   .   /* prevent speculative execution */
0844 
0845 /* 
0846  * Running with relocation on at this point.  All we want to do is
0847  * zero the stack back-chain pointer and get the TOC virtual address
0848  * before going into C code.  r1 already holds the kernel stack
0849  * (loaded from PACAKSAVE by __secondary_start).
0850  */
0850 start_secondary_prolog:
0851     ld  r2,PACATOC(r13)
0852     li  r3,0
0853     std r3,0(r1)        /* Zero the stack frame pointer */
0854     bl  start_secondary
0855     b   .
0856 /*
0857  * Reset stack pointer and call start_secondary
0858  * to continue with online operation when woken up
0859  * from cede in cpu offline.
0860  */
0861 _GLOBAL(start_secondary_resume)
0862     ld  r1,PACAKSAVE(r13)   /* Reload kernel stack pointer */
0863     li  r3,0
0864     std r3,0(r1)        /* Zero the stack frame pointer */
0865     bl  start_secondary
0866     b   .           /* start_secondary does not return */
0867 #endif
0868 
0869 /*
0870  * Switch the CPU into 64-bit mode.
0871  * This subroutine clobbers r11 and r12
0872  */
0872 enable_64b_mode:
0873     mfmsr   r11         /* grab the current MSR */
0874 #ifdef CONFIG_PPC_BOOK3E
0875     oris    r11,r11,0x8000      /* CM bit set, we'll set ICM later */
0876     mtmsr   r11
0877 #else /* CONFIG_PPC_BOOK3E */
0878     LOAD_REG_IMMEDIATE(r12, MSR_64BIT)  /* 64-bit mode bit(s) for server CPUs */
0879     or  r11,r11,r12
0880     mtmsrd  r11
0881     isync
0882 #endif
0883     blr
0884 
0885 /*
0886  * This puts the TOC pointer into r2, offset by 0x8000 (as expected
0887  * by the toolchain).  It computes the correct value for wherever we
0888  * are running at the moment, using position-independent code.
0889  *
0890  * Note: The compiler constructs pointers using offsets from the
0891  * TOC in -mcmodel=medium mode. After we relocate to 0 but before
0892  * the MMU is on we need our TOC to be a virtual address otherwise
0893  * these pointers will be real addresses which may get stored and
0894  * accessed later with the MMU on. We use tovirt() at the call
0895  * sites to handle this.
0896  */
0897 _GLOBAL(relative_toc)
0898     mflr    r0
0899     bcl 20,31,$+4       /* NOTE(review): bcl 20,31 form presumably chosen to avoid link-stack pollution — confirm */
0900 0:  mflr    r11         /* r11 = runtime address of label 0 */
0901     ld  r2,(p_toc - 0b)(r11)
0902     add r2,r2,r11       /* r2 = runtime TOC address */
0903     mtlr    r0
0904     blr
0905 
0906 .balign 8
0907 p_toc:  .8byte  .TOC. - 0b
0908 
0909 /*
0910  * This is where the main kernel code starts.
0911  */
0912 __REF
0913 start_here_multiplatform:
0914     /* set up the TOC */
0915     bl      relative_toc
0916     tovirt(r2,r2)
0917 
0918     /* Clear out the BSS. It may have been done in prom_init,
0919      * already but that's irrelevant since prom_init will soon
0920      * be detached from the kernel completely. Besides, we need
0921      * to clear it now for kexec-style entry.
0922      */
0923     LOAD_REG_ADDR(r11,__bss_stop)
0924     LOAD_REG_ADDR(r8,__bss_start)
0925     sub r11,r11,r8      /* bss size         */
0926     addi    r11,r11,7       /* round up to an even double word */
0927     srdi.   r11,r11,3       /* shift right by 3     */
0928     beq 4f          /* empty BSS: nothing to clear */
0929     addi    r8,r8,-8        /* pre-bias for stdu below */
0930     li  r0,0
0931     mtctr   r11         /* zero this many doublewords   */
0932 3:  stdu    r0,8(r8)
0933     bdnz    3b
0934 4:
0935 
0936 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
0937     /* Setup OPAL entry */
0938     LOAD_REG_ADDR(r11, opal)
0939     std r28,0(r11);
0940     std r29,8(r11);
0941 #endif
0942 
0943 #ifndef CONFIG_PPC_BOOK3E
0944     mfmsr   r6
0945     ori r6,r6,MSR_RI
0946     mtmsrd  r6          /* RI on */
0947 #endif
0948 
0949 #ifdef CONFIG_RELOCATABLE
0950     /* Save the physical address we're running at in kernstart_addr */
0951     LOAD_REG_ADDR(r4, kernstart_addr)
0952     clrldi  r0,r25,2        /* drop top 2 bits of the kernel base (r25) */
0953     std r0,0(r4)
0954 #endif
0955 
0956     /* set up a stack pointer */
0957     LOAD_REG_ADDR(r3,init_thread_union)
0958     LOAD_REG_IMMEDIATE(r1,THREAD_SIZE)
0959     add r1,r3,r1        /* r1 = top of init task's stack */
0960     li  r0,0
0961     stdu    r0,-STACK_FRAME_OVERHEAD(r1)
0962 
0963     /*
0964      * Do very early kernel initializations, including initial hash table
0965      * and SLB setup before we turn on relocation.
0966      */
0967 
0968 #ifdef CONFIG_KASAN
0969     bl  kasan_early_init
0970 #endif
0971     /* Restore parameters passed from prom_init/kexec */
0972     mr  r3,r31
0973     LOAD_REG_ADDR(r12, DOTSYM(early_setup))
0974     mtctr   r12
0975     bctrl       /* also sets r13 and SPRG_PACA */
0976 
0977     LOAD_REG_ADDR(r3, start_here_common)
0978     ld  r4,PACAKMSR(r13)
0979     mtspr   SPRN_SRR0,r3
0980     mtspr   SPRN_SRR1,r4
0981     RFI_TO_KERNEL       /* turn the MMU on, enter start_here_common */
0982     b   .   /* prevent speculative execution */
0983 
0984     /* This is where all platforms converge execution */
0985 
0986 start_here_common:
0987     /* relocation is on at this point */
0988     std r1,PACAKSAVE(r13)
0989 
0990     /* Load the TOC (virtual address) */
0991     ld  r2,PACATOC(r13)
0992 
0993     /* Mark interrupts soft and hard disabled (they might be enabled
0994      * in the PACA when doing hotplug)
0995      */
0996     li  r0,IRQS_DISABLED
0997     stb r0,PACAIRQSOFTMASK(r13)
0998     li  r0,PACA_IRQ_HARD_DIS
0999     stb r0,PACAIRQHAPPENED(r13)
1000 
1001     /* Generic kernel entry */
1002     bl  start_kernel
1003 
1004     /* Not reached: start_kernel never returns */
1005 0:  trap
1006     EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
1007     .previous