/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARMv4 write through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE 32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS 8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES  64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT    16384
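
/*
 * Note that CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE
 * = 8 * 64 * 32 = 16384 bytes, i.e. the limit above corresponds to
 * the full size of the data cache described by these constants.
 */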

/*
 *  flush_icache_all()
 *
 *  Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wt_flush_icache_all)
    mov r0, #0
    mcr p15, 0, r0, c7, c5, 0       @ invalidate I cache
    ret lr
ENDPROC(v4wt_flush_icache_all)

/*
 *  flush_user_cache_all()
 *
 *  Invalidate all cache entries in a particular address
 *  space.
 */
ENTRY(v4wt_flush_user_cache_all)
    /* FALLTHROUGH */
/*
 *  flush_kern_cache_all()
 *
 *  Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
    mov r2, #VM_EXEC
    mov ip, #0
__flush_whole_cache:
    tst r2, #VM_EXEC
    mcrne   p15, 0, ip, c7, c5, 0       @ invalidate I cache
    mcr p15, 0, ip, c7, c6, 0       @ invalidate D cache
    ret lr
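
/*
 * Since the cache is write through, there is never dirty data to
 * write back: "clean and invalidate" therefore reduces to the two
 * whole-cache invalidate operations above.
 */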

/*
 *  flush_user_cache_range(start, end, flags)
 *
 *  Clean and invalidate a range of cache entries in the specified
 *  address space.
 *
 *  - start - start address (inclusive, page aligned)
 *  - end   - end address (exclusive, page aligned)
 *  - flags - vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
    sub r3, r1, r0          @ calculate total size
    cmp r3, #CACHE_DLIMIT
    bhs __flush_whole_cache
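    @ (__flush_whole_cache tests the VM_EXEC bit in r2, which still
    @ holds the flags argument at this point)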

1:  mcr p15, 0, r0, c7, c6, 1       @ invalidate D entry
    tst r2, #VM_EXEC
    mcrne   p15, 0, r0, c7, c5, 1       @ invalidate I entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
    ret lr

/*
 *  coherent_kern_range(start, end)
 *
 *  Ensure coherency between the Icache and the Dcache in the
 *  region described by start, end.  If you have non-snooping
 *  Harvard caches, you need to implement this function.
 *
 *  - start  - virtual start address
 *  - end    - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
    /* FALLTHROUGH */

/*
 *  coherent_user_range(start, end)
 *
 *  Ensure coherency between the Icache and the Dcache in the
 *  region described by start, end.  If you have non-snooping
 *  Harvard caches, you need to implement this function.
 *
 *  - start  - virtual start address
 *  - end    - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
    bic r0, r0, #CACHE_DLINESIZE - 1
1:  mcr p15, 0, r0, c7, c5, 1       @ invalidate I entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
    mov r0, #0
    ret lr
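
/*
 * Only the I cache needs invalidating here: a write-through D cache
 * is always coherent with memory.  The mov above sets the return
 * value (0 for success) expected of coherent_user_range().
 */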

/*
 *  flush_kern_dcache_area(void *addr, size_t size)
 *
 *  Ensure no D cache aliasing occurs, either with itself or
 *  the I cache.
 *
 *  - addr  - kernel address
 *  - size  - region size
 */
ENTRY(v4wt_flush_kern_dcache_area)
    mov r2, #0
    mcr p15, 0, r2, c7, c5, 0       @ invalidate I cache
    add r1, r0, r1          @ convert size to end address
    /* fallthrough */

/*
 *  dma_inv_range(start, end)
 *
 *  Invalidate (discard) the specified virtual address range.
 *  May not write back any entries.  If 'start' or 'end'
 *  are not cache line aligned, those lines must be written
 *  back.
 *
 *  - start  - virtual start address
 *  - end    - virtual end address
 */
v4wt_dma_inv_range:
    bic r0, r0, #CACHE_DLINESIZE - 1
1:  mcr p15, 0, r0, c7, c6, 1       @ invalidate D entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
    ret lr
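
/*
 * On a write-through cache the "must be written back" requirement
 * above is met trivially: no line is ever dirty, so invalidating
 * partially covered lines at the boundaries is always safe.
 */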

/*
 *  dma_flush_range(start, end)
 *
 *  Clean and invalidate the specified virtual address range.
 *
 *  - start  - virtual start address
 *  - end    - virtual end address
 */
    .globl  v4wt_dma_flush_range
    .equ    v4wt_dma_flush_range, v4wt_dma_inv_range
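
/*
 * Cleaning is a no-op on a write-through cache, so "clean and
 * invalidate" is the same operation as "invalidate" and the two
 * entry points can share one implementation via the .equ above.
 */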

/*
 *  dma_unmap_area(start, size, dir)
 *  - start - kernel virtual start address
 *  - size  - size of region
 *  - dir   - DMA direction
 */
ENTRY(v4wt_dma_unmap_area)
    add r1, r1, r0          @ convert size to end address
    teq r2, #DMA_TO_DEVICE
    bne v4wt_dma_inv_range
    /* FALLTHROUGH */

/*
 *  dma_map_area(start, size, dir)
 *  - start - kernel virtual start address
 *  - size  - size of region
 *  - dir   - DMA direction
 */
ENTRY(v4wt_dma_map_area)
    ret lr
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)
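
/*
 * With a write-through cache, memory always holds current data, so
 * nothing needs cleaning when a buffer is mapped for DMA and
 * dma_map_area() can simply return.  Stale cache lines only need
 * invalidating once the device may have written to memory, which is
 * why dma_unmap_area() invalidates for every direction except
 * DMA_TO_DEVICE.
 */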

    .globl  v4wt_flush_kern_cache_louis
    .equ    v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all
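
/*
 * flush_kern_cache_louis() flushes to the Level of Unification
 * Inner Shareable; with the single cache level assumed here, that
 * is simply the whole cache, hence the alias above.
 */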

    __INITDATA

    @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
    define_cache_functions v4wt