/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wb.S
 *
 *  Copyright (C) 1997-2002 Russell King
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE 32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE    16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE    8192
#else
# error Unknown cache size
#endif

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 *  Size  Clean (ticks) Dirty (ticks)
 *   4096   21  20  21    53  55  54
 *   8192   40  41  40   106 100 102
 *  16384   77  77  76   140 140 138
 *  32768  150 149 150   214 216 212 <---
 *  65536  296 297 296   351 358 361
 * 131072  591 591 591   656 657 651
 *  Whole  132 136 132   221 217 207 <---
 */
#define CACHE_DLIMIT    (CACHE_DSIZE * 4)
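
/*
 * For example: with CONFIG_CPU_SA110 (CACHE_DSIZE = 16384) this gives
 * CACHE_DLIMIT = 65536, and with CONFIG_CPU_SA1100 (CACHE_DSIZE = 8192)
 * it gives 32768.  Ranges of at least CACHE_DLIMIT bytes are handled by
 * __flush_whole_cache below instead of per-line maintenance, which
 * matches the table above: cleaning 65536 bytes line by line costs
 * roughly 296/351 ticks, against roughly 132/221 ticks for cleaning the
 * whole cache.
 */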

    .data
    .align  2
flush_base:
    .long   FLUSH_BASE
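
/*
 * flush_base above holds the (virtual, cacheable) address that
 * __flush_whole_cache reads from in order to evict every data cache line
 * by read allocation.  The address is toggled between two
 * CACHE_DSIZE-sized windows on each use (the eor with CACHE_DSIZE below)
 * so that the window being read is not already resident from the
 * previous whole-cache flush.
 */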
    .text

/*
 *  flush_icache_all()
 *
 *  Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wb_flush_icache_all)
    mov r0, #0
    mcr p15, 0, r0, c7, c5, 0       @ invalidate I cache
    ret lr
ENDPROC(v4wb_flush_icache_all)

/*
 *  flush_user_cache_all()
 *
 *  Clean and invalidate all cache entries in a particular address
 *  space.
 */
ENTRY(v4wb_flush_user_cache_all)
    /* FALLTHROUGH */
/*
 *  flush_kern_cache_all()
 *
 *  Clean and invalidate the entire cache.
 */
ENTRY(v4wb_flush_kern_cache_all)
    mov ip, #0
    mcr p15, 0, ip, c7, c5, 0       @ invalidate I cache
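
/*
 * These CPUs have no "clean entire D cache" coprocessor operation, so the
 * whole-cache path below cleans the cache by reading CACHE_DSIZE bytes of
 * unrelated data from the flush_base window: each load allocates a line,
 * displacing (and writing back, if dirty) whatever was resident before.
 * The same trick is repeated for the 512-byte minicache when
 * FLUSH_BASE_MINICACHE is defined.
 */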
__flush_whole_cache:
    ldr r3, =flush_base
    ldr r1, [r3, #0]
    eor r1, r1, #CACHE_DSIZE
    str r1, [r3, #0]
    add r2, r1, #CACHE_DSIZE
1:  ldr r3, [r1], #32
    cmp r1, r2
    blo 1b
#ifdef FLUSH_BASE_MINICACHE
    add r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
    sub r1, r2, #512            @ only 512 bytes
1:  ldr r3, [r1], #32
    cmp r1, r2
    blo 1b
#endif
    mcr p15, 0, ip, c7, c10, 4      @ drain write buffer
    ret lr

/*
 *  flush_user_cache_range(start, end, flags)
 *
 *  Clean and invalidate a range of cache entries in the specified
 *  address space.
 *
 *  - start - start address (inclusive, page aligned)
 *  - end   - end address (exclusive, page aligned)
 *  - flags - vm_area_struct flags describing address space
 */
ENTRY(v4wb_flush_user_cache_range)
    mov ip, #0
    sub r3, r1, r0          @ calculate total size
    tst r2, #VM_EXEC            @ executable region?
    mcrne   p15, 0, ip, c7, c5, 0       @ invalidate I cache

    cmp r3, #CACHE_DLIMIT       @ total size >= limit?
    bhs __flush_whole_cache     @ flush whole D cache

1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
    mcr p15, 0, r0, c7, c6, 1       @ invalidate D entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
    tst r2, #VM_EXEC
    mcrne   p15, 0, ip, c7, c10, 4      @ drain write buffer
    ret lr
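
/*
 * For example, on an SA-1100 (CACHE_DLIMIT = 32768) a 64 KiB range takes
 * the __flush_whole_cache path above, while an 8 KiB range is cleaned
 * and invalidated one CACHE_DLINESIZE (32-byte) line at a time, i.e. 256
 * iterations of the loop.  The I cache is only invalidated when the
 * region has VM_EXEC set.
 */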

/*
 *  flush_kern_dcache_area(void *addr, size_t size)
 *
 *  Ensure no D cache aliasing occurs, either with itself or
 *  the I cache
 *
 *  - addr  - kernel address
 *  - size  - region size
 */
ENTRY(v4wb_flush_kern_dcache_area)
    add r1, r0, r1
    /* fall through */
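    /*
     * The add above turns (addr, size) into the (start, end) pair that
     * v4wb_coherent_kern_range/_user_range below expect, so this entry
     * point simply shares their clean + invalidate loop and the final
     * I-cache invalidate.
     */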

/*
 *  coherent_kern_range(start, end)
 *
 *  Ensure coherency between the Icache and the Dcache in the
 *  region described by start and end.  If you have non-snooping
 *  Harvard caches, you need to implement this function.
 *
 *  - start  - virtual start address
 *  - end    - virtual end address
 */
ENTRY(v4wb_coherent_kern_range)
    /* fall through */

/*
 *  coherent_user_range(start, end)
 *
 *  Ensure coherency between the Icache and the Dcache in the
 *  region described by start and end.  If you have non-snooping
 *  Harvard caches, you need to implement this function.
 *
 *  - start  - virtual start address
 *  - end    - virtual end address
 */
ENTRY(v4wb_coherent_user_range)
    bic r0, r0, #CACHE_DLINESIZE - 1
1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
    mcr p15, 0, r0, c7, c6, 1       @ invalidate D entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
    mov r0, #0
    mcr p15, 0, r0, c7, c5, 0       @ invalidate I cache
    mcr p15, 0, r0, c7, c10, 4      @ drain WB
    ret lr
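
/*
 * These routines are typically used after the kernel has written
 * instructions through the D cache (module loading, signal trampolines,
 * and the like): each line in the range is cleaned to memory and
 * invalidated, then the entire I cache is invalidated (rather than
 * individual lines) and the write buffer drained before the new
 * instructions are fetched.  The mov r0, #0 supplies the zero operand
 * for the cp15 operations and leaves 0 in r0 on return.
 */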


/*
 *  dma_inv_range(start, end)
 *
 *  Invalidate (discard) the specified virtual address range.
 *  May not write back any entries.  If 'start' or 'end'
 *  are not cache line aligned, those lines must be written
 *  back.
 *
 *  - start  - virtual start address
 *  - end    - virtual end address
 */
v4wb_dma_inv_range:
    tst r0, #CACHE_DLINESIZE - 1
    bic r0, r0, #CACHE_DLINESIZE - 1
    mcrne   p15, 0, r0, c7, c10, 1      @ clean D entry
    tst r1, #CACHE_DLINESIZE - 1
    mcrne   p15, 0, r1, c7, c10, 1      @ clean D entry
1:  mcr p15, 0, r0, c7, c6, 1       @ invalidate D entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
    mcr p15, 0, r0, c7, c10, 4      @ drain write buffer
    ret lr
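
/*
 * Worked example, with CACHE_DLINESIZE = 32: for start = 0x1004 and
 * end = 0x1044, the partially covered lines at 0x1000 and 0x1040 are
 * cleaned first (so bytes outside the range are not lost), and the loop
 * then invalidates the lines at 0x1000, 0x1020 and 0x1040 before the
 * write buffer is drained.
 */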

/*
 *  dma_clean_range(start, end)
 *
 *  Clean (write back) the specified virtual address range.
 *
 *  - start  - virtual start address
 *  - end    - virtual end address
 */
v4wb_dma_clean_range:
    bic r0, r0, #CACHE_DLINESIZE - 1
1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
    mcr p15, 0, r0, c7, c10, 4      @ drain write buffer
    ret lr

/*
 *  dma_flush_range(start, end)
 *
 *  Clean and invalidate the specified virtual address range.
 *
 *  - start  - virtual start address
 *  - end    - virtual end address
 *
 *  This is actually the same as v4wb_coherent_kern_range()
 */
    .globl  v4wb_dma_flush_range
    .set    v4wb_dma_flush_range, v4wb_coherent_kern_range

/*
 *  dma_map_area(start, size, dir)
 *  - start - kernel virtual start address
 *  - size  - size of region
 *  - dir   - DMA direction
 */
ENTRY(v4wb_dma_map_area)
    add r1, r1, r0
    cmp r2, #DMA_TO_DEVICE
    beq v4wb_dma_clean_range
    bcs v4wb_dma_inv_range
    b   v4wb_dma_flush_range
ENDPROC(v4wb_dma_map_area)
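
/*
 * The single cmp dispatches on dir (DMA_BIDIRECTIONAL = 0,
 * DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2): DMA_TO_DEVICE takes the
 * clean-only path, anything greater (DMA_FROM_DEVICE) is invalidated,
 * and anything lower (DMA_BIDIRECTIONAL) is cleaned and invalidated via
 * v4wb_dma_flush_range.  The add first converts (start, size) into the
 * (start, end) arguments those range routines expect.
 */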

/*
 *  dma_unmap_area(start, size, dir)
 *  - start - kernel virtual start address
 *  - size  - size of region
 *  - dir   - DMA direction
 */
ENTRY(v4wb_dma_unmap_area)
    ret lr
ENDPROC(v4wb_dma_unmap_area)
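
/*
 * Unmap is a no-op here: all maintenance is done up front in
 * v4wb_dma_map_area, and this CPU does not speculatively allocate cache
 * lines, so as long as the CPU leaves the buffer alone nothing can
 * become stale or dirty while the device owns it.
 */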

    .globl  v4wb_flush_kern_cache_louis
    .equ    v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all

    __INITDATA

    @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
    define_cache_functions v4wb
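
/*
 * flush_kern_cache_louis (flush to the Level of Unification Inner
 * Shareable) is simply aliased to the whole-cache flush above, as this
 * CPU has a single cache level.  define_cache_functions v4wb then emits
 * the per-CPU function-pointer table; roughly, it expands to a table
 * laid out like struct cpu_cache_fns (see proc-macros.S and
 * <asm/cacheflush.h> for the authoritative definition):
 *
 *      ENTRY(v4wb_cache_fns)
 *          .long   v4wb_flush_icache_all
 *          .long   v4wb_flush_kern_cache_all
 *          .long   v4wb_flush_kern_cache_louis
 *          .long   v4wb_flush_user_cache_all
 *          .long   v4wb_flush_user_cache_range
 *          .long   v4wb_coherent_kern_range
 *          .long   v4wb_coherent_user_range
 *          .long   v4wb_flush_kern_dcache_area
 *          .long   v4wb_dma_map_area
 *          .long   v4wb_dma_unmap_area
 *          .long   v4wb_dma_flush_range
 */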