/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/cache.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/kup.h>
/*
 * System calls.
 */
    .section    ".text"

#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE   \
1:  nop;            \
    patch_site 1b, patch__call_flush_branch_caches1; \
1:  nop;            \
    patch_site 1b, patch__call_flush_branch_caches2; \
1:  nop;            \
    patch_site 1b, patch__call_flush_branch_caches3
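
/*
 * Each nop above is a patch site: patch_site records the nop's address
 * under the named patch__* symbol, so that at boot the code-patching
 * machinery can overwrite the nop with a "bl flush_branch_caches" on
 * CPUs where the branch-cache flush mitigation is enabled, and leave
 * it a no-op otherwise.
 */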

.macro nops number
    .rept \number
    nop
    .endr
.endm

.balign 32
.global flush_branch_caches
flush_branch_caches:
    /* Save LR into r9 */
    mflr    r9

    // Flush the link stack
    .rept 64
    bl  .+4
    .endr
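    // Each bl .+4 is a branch-and-link to the very next instruction:
    // it changes no architectural state beyond LR (saved in r9 and
    // restored below), but it pushes an entry onto the hardware's
    // return-address (link stack) predictor, so 64 of them displace
    // any entries a hostile context may have trained.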
    b   1f
    nops    6

    .balign 32
    /* Restore LR */
1:  mtlr    r9

    // If we're just flushing the link stack, return here
3:  nop
    patch_site 3b patch__flush_link_stack_return

    li  r9,0x7fff
    mtctr   r9

    PPC_BCCTR_FLUSH
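    // Loading ctr with 0x7fff and issuing the special bcctr encoded by
    // PPC_BCCTR_FLUSH acts as the count-cache flush trigger on CPUs
    // whose firmware supports it; in that case the site below is
    // patched to return early.  Otherwise we fall through to the
    // software loop that displaces the count cache entry by entry.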

2:  nop
    patch_site 2b patch__flush_count_cache_return

    nops    3

    .rept 278
    .balign 32
    PPC_BCCTR_FLUSH
    nops    7
    .endr

    blr
#else
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via interrupt_return.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
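/*
 * Caller-side sketch (approximate; see __switch_to() in
 * arch/powerpc/kernel/process.c for the real thing):
 *
 *    struct task_struct *__switch_to(struct task_struct *prev,
 *                                    struct task_struct *new)
 *    {
 *        ...
 *        last = _switch(old_thread, new_thread); // &prev->thread,
 *        ...                                     // &new->thread
 *        return last;
 *    }
 *
 * i.e. r3/r4 carry the two thread_struct pointers in, and r3 carries
 * the previous task's task_struct pointer back out (computed near the
 * end of this routine by subtracting THREAD from r3).
 */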
    .align  7
_GLOBAL(_switch)
    mflr    r0
    std r0,16(r1)
    stdu    r1,-SWITCH_FRAME_SIZE(r1)
    /* r3-r13 are caller saved -- Cort */
    SAVE_NVGPRS(r1)
    std r0,_NIP(r1) /* Return to switch caller */
    mfcr    r23
    std r23,_CCR(r1)
    std r1,KSP(r3)  /* Set old stack pointer */

    kuap_check_amr r9, r10

    FLUSH_COUNT_CACHE   /* Clobbers r9, ctr */

    /*
     * On SMP kernels, care must be taken because a task may be
     * scheduled off CPUx and on to CPUy. Memory ordering must be
     * considered.
     *
     * Cacheable stores on CPUx will be visible when the task is
     * scheduled on CPUy by virtue of the core scheduler barriers
     * (see "Notes on Program-Order guarantees on SMP systems." in
     * kernel/sched/core.c).
     *
     * Uncacheable stores in the case of involuntary preemption must
     * be taken care of. The smp_mb__after_spinlock() in __schedule()
     * is implemented as hwsync on powerpc, which orders MMIO too. So
     * long as there is an hwsync in the context switch path, it will
     * be executed on the source CPU after the task has performed
     * all MMIO ops on that CPU, and on the destination CPU before the
     * task performs any MMIO ops there.
     */
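
    /*
     * Sketch of that argument (hwsync here = smp_mb__after_spinlock()):
     *
     *   CPUx: task's MMIO stores; hwsync in __schedule(); rq unlock
     *   CPUy: rq lock; hwsync in __schedule(); task's MMIO stores
     *
     * The unlock/lock pairing on the rq lock orders the two hwsyncs,
     * so every MMIO op the task issued on CPUx is performed before
     * any MMIO op it issues on CPUy.
     */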

    /*
     * The kernel context switch path must contain a spin_lock,
     * which contains larx/stcx, which will clear any reservation
     * of the task being switched.
     */
#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch and will stop the HW from creating streams itself
 */
    DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
#endif

    addi    r6,r4,-THREAD   /* Convert THREAD to 'current' */
    std r6,PACACURRENT(r13) /* Set new 'current' */
#if defined(CONFIG_STACKPROTECTOR)
    ld  r6, TASK_CANARY(r6)
    std r6, PACA_CANARY(r13)
#endif

    ld  r8,KSP(r4)  /* new stack pointer */
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
    b   2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
    clrrdi  r6,r8,28    /* get its ESID */
    clrrdi  r9,r1,28    /* get current sp ESID */
FTR_SECTION_ELSE
    clrrdi  r6,r8,40    /* get its 1T ESID */
    clrrdi  r9,r1,40    /* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
    clrldi. r0,r6,2     /* is new ESID c00000000? */
    cmpd    cr1,r6,r9   /* or is new ESID the same as current ESID? */
    cror    eq,4*cr1+eq,eq
    beq 2f      /* if yes, don't slbie it */
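    /*
     * Rationale for the skip: the first kernel segment (ESID
     * c00000000) is bolted into the SLB at all times, and a stack in
     * the same segment as the current stack already has its SLB entry
     * in place, so in either case there is nothing to invalidate or
     * insert.
     */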

    /* Bolt in the new stack SLB entry */
    ld  r7,KSP_VSID(r4) /* Get new stack's VSID */
    oris    r0,r6,(SLB_ESID_V)@h
    ori r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
    li  r9,MMU_SEGSIZE_1T   /* insert B field */
    oris    r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
    rldimi  r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

    /* Update the last bolted SLB.  No write barriers are needed
     * here, provided we only update the current CPU's SLB shadow
     * buffer.
     */
    ld  r9,PACA_SLBSHADOWPTR(r13)
    li  r12,0
    std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
    li  r12,SLBSHADOW_STACKVSID
    STDX_BE r7,r12,r9           /* Save VSID */
    li  r12,SLBSHADOW_STACKESID
    STDX_BE r0,r12,r9           /* Save ESID */

    /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
     * we have 1TB segments, the only CPUs known to have the erratum
     * only support less than 1TB of system memory and we'll never
     * actually hit this code path.
     */

    isync
    slbie   r6
BEGIN_FTR_SECTION
    slbie   r6      /* Workaround POWER5 < DD2.1 issue */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
    slbmte  r7,r0
    isync
2:
#endif /* CONFIG_PPC_64S_HASH_MMU */

    clrrdi  r7, r8, THREAD_SHIFT    /* base of new stack */
    /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
       because we don't need to leave the 288-byte ABI gap at the
       top of the kernel stack. */
    addi    r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

    /*
     * PMU interrupts in radix may come in here. They will use r1, not
     * PACAKSAVE, so this stack switch will not cause a problem. They
     * will store to the process stack, which may then be migrated to
     * another CPU. However the rq lock release on this CPU paired with
     * the rq lock acquire on the new CPU before the stack becomes
     * active on the new CPU, will order those stores.
     */
    mr  r1,r8       /* start using new stack pointer */
    std r7,PACAKSAVE(r13)

    ld  r6,_CCR(r1)
    mtcrf   0xFF,r6

    /* r3-r13 are destroyed -- Cort */
    REST_NVGPRS(r1)

    /* convert old thread to its task_struct for return value */
    addi    r3,r3,-THREAD
    ld  r7,_NIP(r1) /* Return to _switch caller in new task */
    mtlr    r7
    addi    r1,r1,SWITCH_FRAME_SIZE
    blr

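/*
 * enter_prom(struct prom_args *args, unsigned long entry)
 *
 * Transfer control to Open Firmware and come back.  Per the caller in
 * arch/powerpc/kernel/prom_init.c, r3 carries the argument block
 * (passed through to OF untouched) and r4 is the OF entry point,
 * which is installed in SRR0 below.
 */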
_GLOBAL(enter_prom)
    mflr    r0
    std r0,16(r1)
    stdu    r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */

    /* Because PROM is running in 32b mode, it clobbers the high order half
     * of all registers that it saves.  We therefore save those registers
     * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
     */
    SAVE_GPR(2, r1)
    SAVE_GPR(13, r1)
    SAVE_NVGPRS(r1)
    mfcr    r10
    mfmsr   r11
    std r10,_CCR(r1)
    std r11,_MSR(r1)

    /* Put PROM address in SRR0 */
    mtsrr0  r4

    /* Setup our trampoline return addr in LR */
    bcl 20,31,$+4
0:  mflr    r4
    addi    r4,r4,(1f - 0b)
    mtlr    r4
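    /*
     * bcl 20,31,$+4 is the usual PC-discovery idiom: a branch-and-link
     * to the very next instruction, so LR now holds the address of
     * label 0.  Adding (1f - 0b) turns that into the absolute address
     * of the return label 1 below, which OF will branch to when it is
     * done.
     */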

    /* Prepare a 32-bit mode big endian MSR */
#ifdef CONFIG_PPC_BOOK3E
    rlwinm  r11,r11,0,1,31
    mtsrr1  r11
    rfi
#else /* CONFIG_PPC_BOOK3E */
    LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_LE)
    andc    r11,r11,r12
    mtsrr1  r11
    RFI_TO_KERNEL
#endif /* CONFIG_PPC_BOOK3E */
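    /*
     * Either path returns "from interrupt" into OF: SRR0 holds the OF
     * entry point installed above and SRR1 supplies the new MSR, with
     * the 64-bit mode bit (and, on Book3S, MSR_LE) cleared so that OF
     * runs as 32-bit big-endian code.
     */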

1:  /* Return from OF */
    FIXUP_ENDIAN

    /* Just make sure that r1 top 32 bits didn't get
     * corrupted by OF
     */
    rldicl  r1,r1,0,32

    /* Restore the MSR (back to 64 bits) */
    ld  r0,_MSR(r1)
    MTMSRD(r0)
    isync

    /* Restore other registers */
    REST_GPR(2, r1)
    REST_GPR(13, r1)
    REST_NVGPRS(r1)
    ld  r4,_CCR(r1)
    mtcr    r4

    addi    r1,r1,SWITCH_FRAME_SIZE
    ld  r0,16(r1)
    mtlr    r0
    blr