/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low level CPU setup functions.
 * Kumar Gala <galak@kernel.crashing.org>
 * Copyright 2009 Freescale Semiconductor, Inc.
 *
 * Based on cpu_setup_6xx code by
 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
 */

#include <asm/page.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/nohash/mmu-book3e.h>
#include <asm/asm-offsets.h>
#include <asm/mpc85xx.h>

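/*
 * Enable the L1 instruction cache: return early if it is already enabled,
 * otherwise flash-invalidate it, clear its lock bits and turn it on with
 * parity checking (CPE) in a single L1CSR1 write.
 */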
_GLOBAL(__e500_icache_setup)
    mfspr   r0, SPRN_L1CSR1
    andi.   r3, r0, L1CSR1_ICE
    bnelr               /* Already enabled */
    oris    r0, r0, L1CSR1_CPE@h
    ori r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE)
    mtspr   SPRN_L1CSR1, r0     /* Enable I-Cache */
    isync
    blr

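/*
 * Enable the L1 data cache: return early if it is already enabled,
 * otherwise disable it, flash-invalidate it and clear the lock bits, wait
 * for the lock-bit clear to complete, then re-enable it with parity
 * checking (CPE).
 */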
_GLOBAL(__e500_dcache_setup)
    mfspr   r0, SPRN_L1CSR0
    andi.   r3, r0, L1CSR0_DCE
    bnelr               /* Already enabled */
    msync
    isync
    li  r0, 0
    mtspr   SPRN_L1CSR0, r0     /* Disable */
    msync
    isync
    li  r0, (L1CSR0_DCFI | L1CSR0_CLFC)
    mtspr   SPRN_L1CSR0, r0     /* Invalidate */
    isync
1:  mfspr   r0, SPRN_L1CSR0
    andi.   r3, r0, L1CSR0_CLFC
    bne+    1b          /* Wait for lock bits reset */
    oris    r0, r0, L1CSR0_CPE@h
    ori r0, r0, L1CSR0_DCE
    msync
    isync
    mtspr   SPRN_L1CSR0, r0     /* Enable */
    isync
    blr

/*
 * FIXME - we haven't yet done testing to determine a reasonable default
 * value for PW20_WAIT_IDLE_BIT.
 */
#define PW20_WAIT_IDLE_BIT      50 /* 1 ms, TB frequency is 41.66 MHz */
_GLOBAL(setup_pw20_idle)
    mfspr   r3, SPRN_PWRMGTCR0

    /* Set the PW20_WAIT bit to enable the PW20 state */
    ori r3, r3, PWRMGTCR0_PW20_WAIT
    li  r11, PW20_WAIT_IDLE_BIT

    /* Set Automatic PW20 Core Idle Count */
    rlwimi  r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT

    mtspr   SPRN_PWRMGTCR0, r3

    blr

/*
 * FIXME - we haven't yet done testing to determine a reasonable default
 * value for AV_WAIT_IDLE_BIT.
 */
#define AV_WAIT_IDLE_BIT        50 /* 1 ms, TB frequency is 41.66 MHz */
_GLOBAL(setup_altivec_idle)
    mfspr   r3, SPRN_PWRMGTCR0

    /* Enable AltiVec idle */
    oris    r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h
    li  r11, AV_WAIT_IDLE_BIT

    /* Set Automatic AltiVec Idle Count */
    rlwimi  r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT

    mtspr   SPRN_PWRMGTCR0, r3

    blr

#ifdef CONFIG_PPC_E500MC
_GLOBAL(__setup_cpu_e6500)
    mflr    r6
#ifdef CONFIG_PPC64
    bl  setup_altivec_ivors
    /* Touch IVOR42 only if the CPU supports E.HV category */
    mfspr   r10,SPRN_MMUCFG
    rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
    beq 1f
    bl  setup_lrat_ivor
1:
#endif
    bl  setup_pw20_idle
    bl  setup_altivec_idle
    bl  __setup_cpu_e5500
    mtlr    r6
    blr
#endif /* CONFIG_PPC_E500MC */

#ifdef CONFIG_PPC32
#ifdef CONFIG_E500
#ifndef CONFIG_PPC_E500MC
_GLOBAL(__setup_cpu_e500v1)
_GLOBAL(__setup_cpu_e500v2)
    mflr    r4
    bl  __e500_icache_setup
    bl  __e500_dcache_setup
    bl  __setup_e500_ivors
#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI)
    /*
     * Ensure that RFXE is set, so that external read faults (e.g. from
     * RapidIO or PCI) raise a machine check exception.
     */
    mfspr   r3,SPRN_HID1
    oris    r3,r3,HID1_RFXE@h
    mtspr   SPRN_HID1,r3
#endif
    mtlr    r4
    blr
#else /* CONFIG_PPC_E500MC */
_GLOBAL(__setup_cpu_e500mc)
_GLOBAL(__setup_cpu_e5500)
    mflr    r5
    bl  __e500_icache_setup
    bl  __e500_dcache_setup
    bl  __setup_e500mc_ivors
    /*
     * We only want to touch IVOR38-41 if we're running on hardware
     * that supports category E.HV.  The architectural way to determine
     * this is MMUCFG[LPIDSIZE].
     */
    mfspr   r3, SPRN_MMUCFG
    rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE
    beq 1f
    bl  __setup_ehv_ivors
    b   2f
1:
    lwz r3, CPU_SPEC_FEATURES(r4)
    /*
     * We need this check as cpu_setup is also called for
     * the secondary cores. So, if we have already cleared
     * the feature on the primary core, avoid doing it on the
     * secondary core.
     */
    andi.   r6, r3, CPU_FTR_EMB_HV
    beq 2f
    rlwinm  r3, r3, 0, ~CPU_FTR_EMB_HV
    stw r3, CPU_SPEC_FEATURES(r4)
2:
    mtlr    r5
    blr
#endif /* CONFIG_PPC_E500MC */
#endif /* CONFIG_E500 */
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC_BOOK3E_64
_GLOBAL(__restore_cpu_e6500)
    mflr    r5
    bl  setup_altivec_ivors
    /* Touch IVOR42 only if the CPU supports E.HV category */
    mfspr   r10,SPRN_MMUCFG
    rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
    beq 1f
    bl  setup_lrat_ivor
1:
    bl  setup_pw20_idle
    bl  setup_altivec_idle
    bl  __restore_cpu_e5500
    mtlr    r5
    blr

_GLOBAL(__restore_cpu_e5500)
    mflr    r4
    bl  __e500_icache_setup
    bl  __e500_dcache_setup
    bl  __setup_base_ivors
    bl  setup_perfmon_ivor
    bl  setup_doorbell_ivors
    /*
     * We only want to touch IVOR38-41 if we're running on hardware
     * that supports category E.HV.  The architectural way to determine
     * this is MMUCFG[LPIDSIZE].
     */
    mfspr   r10,SPRN_MMUCFG
    rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
    beq 1f
    bl  setup_ehv_ivors
1:
    mtlr    r4
    blr

_GLOBAL(__setup_cpu_e5500)
    mflr    r5
    bl  __e500_icache_setup
    bl  __e500_dcache_setup
    bl  __setup_base_ivors
    bl  setup_perfmon_ivor
    bl  setup_doorbell_ivors
    /*
     * We only want to touch IVOR38-41 if we're running on hardware
     * that supports category E.HV.  The architectural way to determine
     * this is MMUCFG[LPIDSIZE].
     */
    mfspr   r10,SPRN_MMUCFG
    rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
    beq 1f
    bl  setup_ehv_ivors
    b   2f
1:
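    /* No E.HV support: clear CPU_FTR_EMB_HV from the advertised CPU features */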
    ld  r10,CPU_SPEC_FEATURES(r4)
    LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
    andc    r10,r10,r9
    std r10,CPU_SPEC_FEATURES(r4)
2:
    mtlr    r5
    blr
#endif

/* Flush the L1 data cache; applies to e500v2, e500mc and e5500. */
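/*
 * This is a displacement flush: with HID0[DCFA] (data cache flush assist)
 * set so that victim selection ignores invalid entries, enough cache lines
 * are loaded from KERNELBASE to displace every line in the cache, and the
 * same range is then swept with dcbf to write back and invalidate the
 * lines brought in by the loads.
 */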
_GLOBAL(flush_dcache_L1)
    mfmsr   r10
    wrteei  0

    mfspr   r3,SPRN_L1CFG0
    rlwinm  r5,r3,9,3   /* Extract cache block size */
    twlgti  r5,1        /* Only 32 and 64 byte cache blocks
                 * are currently defined.
                 */
    li  r4,32
    subfic  r6,r5,2     /* r6 = log2(1KiB / cache block size) -
                 *      log2(number of ways)
                 */
    slw r5,r4,r5    /* r5 = cache block size */

    rlwinm  r7,r3,0,0xff    /* Extract number of KiB in the cache */
    mulli   r7,r7,13    /* An 8-way cache will require 13
                 * loads per set.
                 */
    slw r7,r7,r6
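    /*
     * E.g. for a 32 KiB, 8-way L1 with 64-byte lines: the block size code
     * is 1, so r5 = 64, r6 = 2 - 1 = 1 and r7 = 32 * 13 << 1 = 832; 832
     * sequential lines hit each of the 64 sets 13 times, enough to
     * displace all 8 ways of every set.
     */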

    /* save off HID0 and set DCFA */
    mfspr   r8,SPRN_HID0
    ori r9,r8,HID0_DCFA@l
    mtspr   SPRN_HID0,r9
    isync

    LOAD_REG_IMMEDIATE(r6, KERNELBASE)
    mr  r4, r6
    mtctr   r7

1:  lwz r3,0(r4)    /* Load... */
    add r4,r4,r5
    bdnz    1b

    msync
    mr  r4, r6
    mtctr   r7

1:  dcbf    0,r4        /* ...and flush. */
    add r4,r4,r5
    bdnz    1b

    /* restore HID0 */
    mtspr   SPRN_HID0,r8
    isync

    wrtee r10

    blr

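/*
 * Return r3 = 1 if this SoC has a backside L2 cache, r3 = 0 otherwise
 * (P2040/P2040E); clobbers r4.
 */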
has_L2_cache:
    /* P2040/P2040E have no L2 cache, skip it */
    mfspr   r3, SPRN_SVR
    /* shift right by 8 bits and clear the E bit of SVR */
    rlwinm  r4, r3, 24, ~0x800
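    /*
     * r4 now holds the upper 24 bits of the SVR with the security (E) bit
     * cleared, so P2040 and P2040E compare equal below.
     */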

    lis r3, SVR_P2040@h
    ori r3, r3, SVR_P2040@l
    cmpw    r4, r3
    beq 1f

    li  r3, 1
    blr
1:
    li  r3, 0
    blr

/* Flush the backside L2 cache, if the SoC has one */
flush_backside_L2_cache:
    mflr    r10
    bl  has_L2_cache
    mtlr    r10
    cmpwi   r3, 0
    beq 2f

    /* Flush the L2 cache */
    mfspr   r3, SPRN_L2CSR0
    ori r3, r3, L2CSR0_L2FL@l
    msync
    isync
    mtspr   SPRN_L2CSR0,r3
    isync

    /* wait for the flush to complete */
1:  mfspr   r3,SPRN_L2CSR0
    andi.   r3, r3, L2CSR0_L2FL@l
    bne 1b
2:
    blr

_GLOBAL(cpu_down_flush_e500v2)
    mflr r0
    bl  flush_dcache_L1
    mtlr r0
    blr

_GLOBAL(cpu_down_flush_e500mc)
_GLOBAL(cpu_down_flush_e5500)
    mflr r0
    bl  flush_dcache_L1
    bl  flush_backside_L2_cache
    mtlr r0
    blr

/* The e6500 L1 data cache contains no modified data; no flush is required. */
_GLOBAL(cpu_down_flush_e6500)
    blr