Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-or-later */
0002 /*
0003     L2CR functions
0004     Copyright © 1997-1998 by PowerLogix R & D, Inc.
0005 
0006 */
0007 /*
0008     Thur, Dec. 12, 1998.
0009     - First public release, contributed by PowerLogix.
0010     ***********
0011     Sat, Aug. 7, 1999.
0012     - Terry: Made sure code disabled interrupts before running. (Previously
0013             it was assumed interrupts were already disabled).
0014     - Terry: Updated for tentative G4 support.  4MB of memory is now flushed
0015             instead of 2MB.  (Prob. only 3 is necessary).
0016     - Terry: Updated for workaround to HID0[DPM] processor bug
0017             during global invalidates.
0018     ***********
0019     Thu, July 13, 2000.
0020     - Terry: Added isync to correct for an errata.
0021 
0022     22 August 2001.
0023     - DanM: Finally added the 7450 patch I've had for the past
0024         several months.  The L2CR is similar, but I'm going
0025         to assume the user of these functions knows what they
0026         are doing.
0027 
0028     Author: Terry Greeniaus (tgree@phys.ualberta.ca)
0029     Please e-mail updates to this file to me, thanks!
0030 */
0031 #include <asm/processor.h>
0032 #include <asm/cputable.h>
0033 #include <asm/ppc_asm.h>
0034 #include <asm/cache.h>
0035 #include <asm/page.h>
0036 #include <asm/feature-fixups.h>
0037 
0038 /* Usage:
0039 
0040     When setting the L2CR register, you must do a few special
0041     things.  If you are enabling the cache, you must perform a
0042     global invalidate.  If you are disabling the cache, you must
0043     flush the cache contents first.  This routine takes care of
0044     doing these things.  When first enabling the cache, make sure
0045     you pass in the L2CR you want, as well as passing in the
0046     global invalidate bit set.  A global invalidate will only be
0047     performed if the L2I bit is set in applyThis.  When enabling
0048     the cache, you should also set the L2E bit in applyThis.  If
0049     you want to modify the L2CR contents after the cache has been
0050     enabled, the recommended procedure is to first call
0051     _set_L2CR(0) to disable the cache and then call it again with
0052     the new values for L2CR.  Examples:
0053 
0054     _set_L2CR(0)     - disables the cache
0055     _set_L2CR(0xB3A04000)    - enables my G3 upgrade card:
0056                 - L2E set to turn on the cache
0057                 - L2SIZ set to 1MB
0058                 - L2CLK set to 1:1
0059                 - L2RAM set to pipelined synchronous late-write
0060                 - L2I set to perform a global invalidation
0061                 - L2OH set to 0.5 nS
0062                 - L2DF set because this upgrade card
0063                   requires it
0064 
0065     A similar call should work for your card.  You need to know
0066     the correct setting for your card and then place them in the
0067     fields I have outlined above.  Other fields support optional
0068     features, such as L2DO which caches only data, or L2TS which
0069     causes cache pushes from the L1 cache to go to the L2 cache
0070     instead of to main memory.
0071 
0072 IMPORTANT:
0073     Starting with the 7450, the bits in this register have moved
0074     or behave differently.  The Enable, Parity Enable, Size,
0075     and L2 Invalidate are the only bits that have not moved.
0076     The size is read-only for these processors with internal L2
0077     cache, and the invalidate is a control as well as status.
0078         -- Dan
0079 
0080 */
0081 /*
0082  * Summary: this procedure ignores the L2I bit in the value passed in,
0083  * flushes the cache if it was already enabled, always invalidates the
0084  * cache, then enables the cache if the L2E bit is set in the value
0085  * passed in.
0086  *   -- paulus.
0087  */
_GLOBAL(_set_L2CR)
	/*
	 * _set_L2CR(u32 val)
	 * In:    r3 = desired L2CR value (L2I bit in it is ignored; see summary above)
	 * Out:   r3 = -1 if this CPU has no L2CR, otherwise the value written
	 * Clobbers: r0, r4, r5, r7, r8, r9, ctr, cr0
	 *
	 * Make sure this is a 750 or 7400 chip; bail out with -1 otherwise.
	 */
BEGIN_FTR_SECTION
	li	r3,-1
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)

	mflr	r9			/* Save LR: we branch around below */

	/* Stop DST streams before touching the cache configuration */
BEGIN_FTR_SECTION
	PPC_DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	/* Turn off interrupts and data relocation. */
	mfmsr	r7			/* Save MSR in r7 */
	rlwinm	r4,r7,0,17,15		/* Turn off EE bit */
	rlwinm	r4,r4,0,28,26		/* Turn off DR bit */
	sync
	mtmsr	r4
	isync

	/* Before we perform the global invalidation, we must disable dynamic
	 * power management via HID0[DPM] to work around a processor bug where
	 * DPM can possibly interfere with the state machine in the processor
	 * that invalidates the L2 cache tags.
	 */
	mfspr	r8,SPRN_HID0		/* Save HID0 in r8 */
	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
	sync
	mtspr	SPRN_HID0,r4		/* Disable DPM */
	sync

	/* Get the current enable bit of the L2CR into r4 */
	mfspr	r4,SPRN_L2CR

	/* Tweak some bits */
	rlwinm	r5,r3,0,0,0		/* r5 contains the new enable bit */
	rlwinm	r3,r3,0,11,9		/* Turn off the invalidate bit */
	rlwinm	r3,r3,0,1,31		/* Turn off the enable bit */

	/* Check to see if we need to flush (only if L2 was enabled) */
	rlwinm.	r4,r4,0,0,0
	beq	2f

	/* Flush the cache. First, read the first 4MB of memory (physical) to
	 * put new data in the cache.  (Actually we only need
	 * the size of the L2 cache plus the size of the L1 cache, but 4MB will
	 * cover everything just to be safe).
	 */

	 /**** Might be a good idea to set L2DO here - to prevent instructions
	       from getting into the cache.  But since we invalidate
	       the next time we enable the cache it doesn't really matter.
	       Don't do this unless you accommodate all processor variations.
	       The bit moved on the 7450.....
	  ****/

BEGIN_FTR_SECTION
	/* Disable L2 prefetch on some 745x and try to ensure
	 * L2 prefetch engines are idle. As explained by errata
	 * text, we can't be sure they are, we just hope very hard
	 * that will be enough (sic !). At least I noticed Apple
	 * doesn't even bother doing the dcbf's here...
	 */
	mfspr	r4,SPRN_MSSCR0
	rlwinm	r4,r4,0,0,29		/* Clear MSSCR0 L2 prefetch enable bits */
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	lis	r4,KERNELBASE@h
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)

	/* TODO: use HW flush assist when available */

	lis	r4,0x0002		/* ctr = 0x20000 lines * 32B = 4MB */
	mtctr	r4
	li	r4,0
1:
	lwzx	r0,0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

2:
	/* Set up the L2CR configuration bits (and switch L2 off) */
	/* CPU errata: Make sure the mtspr below is already in the
	 * L1 icache
	 */
	b	20f
	.balign	L1_CACHE_BYTES
22:
	sync
	mtspr	SPRN_L2CR,r3
	sync
	b	23f
20:
	b	21f
21:	sync
	isync
	b	22b

23:
	/* Perform a global invalidation */
	oris	r3,r3,0x0020		/* Set L2CR[L2I] */
	sync
	mtspr	SPRN_L2CR,r3
	sync
	isync				/* For errata */

BEGIN_FTR_SECTION
	/* On the 7450, we wait for the L2I bit to clear......
	*/
10:	mfspr	r3,SPRN_L2CR
	andis.	r4,r3,0x0020
	bne	10b
	b	11f
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)

	/* Wait for the invalidation to complete (L2IP, bit 31, on 750/7400) */
3:	mfspr	r3,SPRN_L2CR
	rlwinm.	r4,r3,0,31,31
	bne	3b

11:	rlwinm	r3,r3,0,11,9		/* Turn off the L2I bit */
	sync
	mtspr	SPRN_L2CR,r3
	sync

	/* See if we need to enable the cache */
	cmplwi	r5,0
	beq	4f

	/* Enable the cache */
	oris	r3,r3,0x8000		/* Set L2CR[L2E] */
	mtspr	SPRN_L2CR,r3
	sync

	/* Enable L2 HW prefetch on 744x/745x */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_MSSCR0
	ori	r3,r3,3
	sync
	mtspr	SPRN_MSSCR0,r3
	sync
	isync
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
4:

	/* Restore HID0[DPM] to whatever it was before */
	sync
	mtspr	SPRN_HID0,r8		/* symbolic name for SPR 1008, matching the mfspr above */
	sync

	/* Restore MSR (restores EE and DR bits to original state) */
	mtmsr	r7
	isync

	mtlr	r9
	blr
0264 
_GLOBAL(_get_L2CR)
	/* Return the L2CR contents in r3, or 0 if this CPU has no L2CR
	 * (the mfspr is patched out via the CPU feature section).
	 */
	li	r3,0
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L2CR
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	blr
0272 
0273 
0274 /*
0275  * Here is a similar routine for dealing with the L3 cache
0276  * on the 745x family of chips
0277  */
0278 
_GLOBAL(_set_L3CR)
	/*
	 * _set_L3CR(u32 val) -- L3 cache (745x only) counterpart of _set_L2CR.
	 * In:    r3 = desired L3CR value
	 * Out:   r3 = -1 if this CPU has no L3CR, otherwise the value written
	 * Clobbers: r0, r4, r5, r7, ctr, cr0
	 *
	 * Make sure this is a 745x chip; bail out with -1 otherwise.
	 */
BEGIN_FTR_SECTION
	li	r3,-1
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)

	/* Turn off interrupts and data relocation. */
	mfmsr	r7		/* Save MSR in r7 */
	rlwinm	r4,r7,0,17,15	/* Turn off EE bit */
	rlwinm	r4,r4,0,28,26	/* Turn off DR bit */
	sync
	mtmsr	r4
	isync

	/* Stop DST streams */
	PPC_DSSALL
	sync

	/* Get the current enable bit of the L3CR into r4 */
	mfspr	r4,SPRN_L3CR

	/* Tweak some bits */
	rlwinm	r5,r3,0,0,0		/* r5 contains the new enable bit */
	rlwinm	r3,r3,0,22,20		/* Turn off the invalidate bit */
	rlwinm	r3,r3,0,2,31		/* Turn off the enable & PE bits */
	rlwinm	r3,r3,0,5,3		/* Turn off the clken bit */
	/* Check to see if we need to flush (only if L3 was enabled) */
	rlwinm.	r4,r4,0,0,0
	beq	2f

	/* Flush the cache by reading 16MB (0x80000 lines * 32B)
	 * and flushing each line as we go.
	 */

	/* TODO: use HW flush assist */

	lis	r4,0x0008
	mtctr	r4
	li	r4,0
1:
	lwzx	r0,0,r4
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

2:
	/* Set up the L3CR configuration bits (and switch L3 off) */
	sync
	mtspr	SPRN_L3CR,r3
	sync

	/* Power-up sequence: reserved bit, then clock enable, each with
	 * a sync in between; the order matters.
	 */
	oris	r3,r3,L3CR_L3RES@h		/* Set reserved bit 5 */
	mtspr	SPRN_L3CR,r3
	sync
	oris	r3,r3,L3CR_L3CLKEN@h		/* Set clken */
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize (short busy-wait delay loop) */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* Perform a global invalidation */
	ori	r3,r3,0x0400		/* Set L3CR[L3I] */
	sync
	mtspr	SPRN_L3CR,r3
	sync
	isync

	/* We wait for the L3I bit to clear...... */
10:	mfspr	r3,SPRN_L3CR
	andi.	r4,r3,0x0400
	bne	10b

	/* Clear CLKEN */
	rlwinm	r3,r3,0,5,3		/* Turn off the clken bit */
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* See if we need to enable the cache */
	cmplwi	r5,0
	beq	4f

	/* Enable the cache (enable + clock enable together) */
	oris	r3,r3,(L3CR_L3E | L3CR_L3CLKEN)@h
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* Restore MSR (restores EE and DR bits to original state) */
4:
	mtmsr	r7
	isync
	blr
0383 
_GLOBAL(_get_L3CR)
	/* Return the L3CR contents in r3, or 0 if this CPU has no L3CR
	 * (the mfspr is patched out via the CPU feature section).
	 */
	li	r3,0
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L3CR
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
	blr
0391 
0392 /* --- End of PowerLogix code ---
0393  */
0394 
0395 
0396 /* flush_disable_L1()   - Flush and disable L1 cache
0397  *
0398  * clobbers r0, r3, ctr, cr0
0399  * Must be called with interrupts disabled and MMU enabled.
0400  */
_GLOBAL(__flush_disable_L1)
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	sync

	/* Load counter to 0x4000 cache lines (512k) and
	 * load cache with data by reading from KERNELBASE.
	 * NOTE(review): 512kB is a hard-coded upper bound on the L1 size
	 * here, not a per-CPU value.
	 */
	li	r3,0x4000	/* 512kB / 32B */
	mtctr	r3
	lis	r3,KERNELBASE@h
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020	/* Go to start of next cache line */
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines back to memory */
	li	r3,0x4000	/* 512kB / 32B */
	mtctr	r3
	lis	r3,KERNELBASE@h
1:
	dcbf	0,r3
	addi	r3,r3,0x0020	/* Go to start of next cache line */
	bdnz	1b
	sync

	/* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,18,15	/* Clear bits 16-17 (ICE and DCE) */
	mtspr	SPRN_HID0,r3
	sync
	isync
	blr
0438 
0439 /* inval_enable_L1  - Invalidate and enable L1 cache
0440  *
0441  * Assumes L1 is already disabled and MSR:EE is off
0442  *
0443  * clobbers r3
0444  */
_GLOBAL(__inval_enable_L1)
	/* Enable and then Flash inval the instruction & data cache:
	 * set ICE/DCE together with the flash-invalidate bits ICFI/DCI,
	 * then clear ICFI/DCI again with a second mtspr (the invalidate
	 * bits are not self-clearing pulses on all cores).
	 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI
	sync
	isync
	mtspr	SPRN_HID0,r3
	xori	r3,r3, HID0_ICFI|HID0_DCI	/* Clear the invalidate bits */
	mtspr	SPRN_HID0,r3
	sync

	blr
_ASM_NOKPROBE_SYMBOL(__inval_enable_L1)
0458 
0459