Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  *  linux/arch/arm/mm/cache-v4.S
0004  *
0005  *  Copyright (C) 1997-2002 Russell king
0006  */
0007 #include <linux/linkage.h>
0008 #include <linux/init.h>
0009 #include <asm/assembler.h>
0010 #include <asm/page.h>
0011 #include "proc-macros.S"
0012 
0013 /*
0014  *  flush_icache_all()
0015  *
0016  *  Unconditionally clean and invalidate the entire icache.
0017  */
0018 ENTRY(v4_flush_icache_all)
@ Intentionally a no-op: on the v4 cores served by this file the I-cache
@ is not maintained separately here -- the combined "flush ID cache"
@ operation in v4_flush_kern_cache_all covers it.  NOTE(review): assumes
@ a unified/combined cache model on these cores; confirm per CPU variant.
0019     ret lr
0020 ENDPROC(v4_flush_icache_all)
0021 
0022 /*
0023  *  flush_user_cache_all()
0024  *
0025  *  Invalidate all cache entries in a particular address
0026  *  space.
0027  *
0028  *  - mm    - mm_struct describing address space
0029  */
0030 ENTRY(v4_flush_user_cache_all)
@ The mm argument (r0) is ignored: the whole cache is invalidated below,
@ which trivially covers any single address space.
0031     /* FALLTHROUGH */
0032 /*
0033  *  flush_kern_cache_all()
0034  *
0035  *  Clean and invalidate the entire cache.
0036  */
0037 ENTRY(v4_flush_kern_cache_all)
0038 #ifdef CONFIG_CPU_CP15
0039     mov r0, #0
@ CP15 c7,c7,0 invalidates the entire (I+D) cache in one operation.
@ There is no separate "clean" step -- NOTE(review): presumably these v4
@ caches are write-through, making clean unnecessary; confirm per core.
0040     mcr p15, 0, r0, c7, c7, 0       @ flush ID cache
0041     ret lr
0042 #else
@ No CP15: nothing to do; fall through to the next entry point, which is
@ likewise empty in this configuration and supplies the "ret lr".
0043     /* FALLTHROUGH */
0044 #endif
0045 
0046 /*
0047  *  flush_user_cache_range(start, end, flags)
0048  *
0049  *  Invalidate a range of cache entries in the specified
0050  *  address space.
0051  *
0052  *  - start - start address (may not be aligned)
0053  *  - end   - end address (exclusive, may not be aligned)
0054  *  - flags - vma_area_struct flags describing address space
0055  */
0056 ENTRY(v4_flush_user_cache_range)
0057 #ifdef CONFIG_CPU_CP15
@ The start/end/flags arguments (r0-r2) are ignored: v4 has no per-line
@ maintenance here, so the entire cache is invalidated instead.  This is
@ coarser than required but correct.  ip (r12) is used as scratch so the
@ argument registers are left untouched.
0058     mov ip, #0
0059     mcr p15, 0, ip, c7, c7, 0       @ flush ID cache
0060     ret lr
0061 #else
@ No CP15: fall through to v4_coherent_kern_range, which returns.
0062     /* FALLTHROUGH */
0063 #endif
0064 
0065 /*
0066  *  coherent_kern_range(start, end)
0067  *
0068  *  Ensure coherency between the Icache and the Dcache in the
0069  *  region described by start.  If you have non-snooping
0070  *  Harvard caches, you need to implement this function.
0071  *
0072  *  - start  - virtual start address
0073  *  - end    - virtual end address
0074  */
0075 ENTRY(v4_coherent_kern_range)
@ Identical to the user variant below; share its implementation.
0076     /* FALLTHROUGH */
0077 
0078 /*
0079  *  coherent_user_range(start, end)
0080  *
0081  *  Ensure coherency between the Icache and the Dcache in the
0082  *  region described by start.  If you have non-snooping
0083  *  Harvard caches, you need to implement this function.
0084  *
0085  *  - start  - virtual start address
0086  *  - end    - virtual end address
0087  */
0088 ENTRY(v4_coherent_user_range)
@ No-op apart from the return value: nothing to synchronise on this
@ cache model.  r0 = 0 is the status returned to the caller --
@ NOTE(review): presumably 0 means success; verify against the
@ coherent_user_range prototype in <asm/cacheflush.h>.
0089     mov r0, #0
0090     ret lr
0091 
0092 /*
0093  *  flush_kern_dcache_area(void *addr, size_t size)
0094  *
0095  *  Ensure no D cache aliasing occurs, either with itself or
0096  *  the I cache
0097  *
0098  *  - addr  - kernel address
0099  *  - size  - region size
0100  */
0101 ENTRY(v4_flush_kern_dcache_area)
@ Shares the implementation below.  The argument conventions differ
@ ((addr, size) here vs (start, end) below), but that is harmless since
@ the fallthrough target ignores its arguments entirely.
0102     /* FALLTHROUGH */
0103 
0104 /*
0105  *  dma_flush_range(start, end)
0106  *
0107  *  Clean and invalidate the specified virtual address range.
0108  *
0109  *  - start  - virtual start address
0110  *  - end    - virtual end address
0111  */
0112 ENTRY(v4_dma_flush_range)
0113 #ifdef CONFIG_CPU_CP15
@ No per-line maintenance on v4: invalidate the whole I+D cache instead
@ of just the requested range.  Without CP15 this is a plain no-op.
0114     mov r0, #0
0115     mcr p15, 0, r0, c7, c7, 0       @ flush ID cache
0116 #endif
0117     ret lr
0118 
0119 /*
0120  *  dma_unmap_area(start, size, dir)
0121  *  - start - kernel virtual start address
0122  *  - size  - size of region
0123  *  - dir   - DMA direction
0124  */
0125 ENTRY(v4_dma_unmap_area)
@ For DMA_FROM_DEVICE / DMA_BIDIRECTIONAL the device may have written
@ memory behind the cache, so tail-branch to the flush (bne never
@ returns here -- v4_dma_flush_range does the "ret lr").  For
@ DMA_TO_DEVICE there is nothing to do; fall through to the no-op below.
0126     teq r2, #DMA_TO_DEVICE
0127     bne v4_dma_flush_range
0128     /* FALLTHROUGH */
0129 
0130 /*
0131  *  dma_map_area(start, size, dir)
0132  *  - start - kernel virtual start address
0133  *  - size  - size of region
0134  *  - dir   - DMA direction
0135  */
0136 ENTRY(v4_dma_map_area)
@ No-op: no maintenance required when mapping on this cache model.
0137     ret lr
@ ENDPROC(v4_dma_unmap_area) is deliberately placed after the shared
@ "ret lr" because unmap falls through into map above.
0138 ENDPROC(v4_dma_unmap_area)
0139 ENDPROC(v4_dma_map_area)
0140 
@ The "louis" (Level of Unification Inner Shareable) flush is simply an
@ alias for the full cache flush: v4 has no cache-level distinctions, so
@ flushing to any level means flushing everything.
0141     .globl  v4_flush_kern_cache_louis
0142     .equ    v4_flush_kern_cache_louis, v4_flush_kern_cache_all
0143 
0144     __INITDATA
0145 
0146     @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
@ The macro below (from proc-macros.S) emits the v4_cache_fns function-
@ pointer table, wiring up all the v4_* entry points defined above so
@ the kernel can dispatch cache operations for this CPU family.
0147     define_cache_functions v4