/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *  MM Cache Management
 *  ===================
 *
 *  The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *  implement these methods.
 *
 *  Start addresses are inclusive and end addresses are exclusive;
 *  start addresses should be rounded down, end addresses up.
 *
 *  See Documentation/core-api/cachetlb.rst for more information.
 *  Please note that the implementation of these, and the required
 *  effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *  flush_icache_all()
 *
 *      Unconditionally clean and invalidate the entire icache.
 *      Currently only needed for cache-v6.S and cache-v7.S, see
 *      __flush_icache_all for the generic implementation.
 *
 *  flush_kern_all()
 *
 *      Unconditionally clean and invalidate the entire cache.
 *
 *  flush_kern_louis()
 *
 *      Flush data cache levels up to the level of unification
 *      inner shareable and invalidate the I-cache.
 *      Only needed from v7 onwards, falls back to flush_cache_all()
 *      for all other processor versions.
 *
 *  flush_user_all()
 *
 *      Clean and invalidate all user space cache entries
 *      before a change of page tables.
 *
 *  flush_user_range(start, end, flags)
 *
 *      Clean and invalidate a range of cache entries in the
 *      specified address space before a change of page tables.
 *      - start - user start address (inclusive, page aligned)
 *      - end   - user end address   (exclusive, page aligned)
 *      - flags - vma->vm_flags field
 *
 *  coherent_kern_range(start, end)
 *
 *      Ensure coherency between the Icache and the Dcache in the
 *      region described by start, end.  If you have non-snooping
 *      Harvard caches, you need to implement this function.
 *      - start  - virtual start address
 *      - end    - virtual end address
 *
 *  coherent_user_range(start, end)
 *
 *      Ensure coherency between the Icache and the Dcache in the
 *      region described by start, end.  If you have non-snooping
 *      Harvard caches, you need to implement this function.
 *      - start  - virtual start address
 *      - end    - virtual end address
 *
 *  flush_kern_dcache_area(kaddr, size)
 *
 *      Ensure that the data held in the page is written back.
 *      - kaddr  - page address
 *      - size   - region size
 *
 *  DMA Cache Coherency
 *  ===================
 *
 *  dma_flush_range(start, end)
 *
 *      Clean and invalidate the specified virtual address range.
 *      - start  - virtual start address
 *      - end    - virtual end address
 */

struct cpu_cache_fns {
    void (*flush_icache_all)(void);
    void (*flush_kern_all)(void);
    void (*flush_kern_louis)(void);
    void (*flush_user_all)(void);
    void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

    void (*coherent_kern_range)(unsigned long, unsigned long);
    int  (*coherent_user_range)(unsigned long, unsigned long);
    void (*flush_kern_dcache_area)(void *, size_t);

    void (*dma_map_area)(const void *, size_t, int);
    void (*dma_unmap_area)(const void *, size_t, int);

    void (*dma_flush_range)(const void *, const void *);
} __no_randomize_layout;
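
/*
 * Purely illustrative sketch of how a per-CPU-type implementation fills in
 * this table.  In the real kernel the tables are generated from assembly
 * descriptors in arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S; every
 * "example_*" name below is hypothetical, so the sketch is guarded out.
 */
#if 0	/* sketch only */
extern void example_flush_icache_all(void);
extern void example_flush_kern_all(void);
extern void example_flush_kern_louis(void);
extern void example_flush_user_all(void);
extern void example_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void example_coherent_kern_range(unsigned long, unsigned long);
extern int  example_coherent_user_range(unsigned long, unsigned long);
extern void example_flush_kern_dcache_area(void *, size_t);
extern void example_dma_map_area(const void *, size_t, int);
extern void example_dma_unmap_area(const void *, size_t, int);
extern void example_dma_flush_range(const void *, const void *);

static const struct cpu_cache_fns example_cache_fns = {
    .flush_icache_all       = example_flush_icache_all,
    .flush_kern_all         = example_flush_kern_all,
    .flush_kern_louis       = example_flush_kern_louis,
    .flush_user_all         = example_flush_user_all,
    .flush_user_range       = example_flush_user_range,
    .coherent_kern_range    = example_coherent_kern_range,
    .coherent_user_range    = example_coherent_user_range,
    .flush_kern_dcache_area = example_flush_kern_dcache_area,
    .dma_map_area           = example_dma_map_area,
    .dma_unmap_area         = example_dma_unmap_area,
    .dma_flush_range        = example_dma_flush_range,
};
#endif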

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all     cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all       cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis     cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all       cpu_cache.flush_user_all
#define __cpuc_flush_user_range     cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range  cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range  cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area    cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_flush_range        cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_flush_range(const void *, const void *);

#endif
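
/*
 * Illustrative sketch: whether MULTI_CACHE routes the __cpuc_* names
 * through the cpu_cache table or binds them directly to the single
 * implementation's symbols, callers use them identically.
 * "example_publish_buffer" is a hypothetical helper, not a kernel API.
 */
static inline void example_publish_buffer(void *buf, size_t len)
{
    /* write back (and invalidate) the kernel-mapping cache lines for buf */
    __cpuc_flush_dcache_area(buf, len);
}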

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
    unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
    do {                            \
        memcpy(dst, src, len);              \
    } while (0)
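
/*
 * Illustrative sketch: roughly how a ptrace-style writer (compare
 * __access_remote_vm()) patches another process's page through the kernel
 * mapping while keeping the caches coherent.  "example_poke_page" is a
 * hypothetical helper, and page_address() assumes a lowmem page.
 */
static inline void example_poke_page(struct vm_area_struct *vma,
                     struct page *page, unsigned long uaddr,
                     const void *src, unsigned long len)
{
    void *kaddr = page_address(page) + (uaddr & ~PAGE_MASK);

    copy_to_user_page(vma, page, uaddr, kaddr, src, len);
}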

/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#define __flush_icache_all_generic()                    \
    asm("mcr    p15, 0, %0, c7, c5, 0"              \
        : : "r" (0));

/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp()                 \
    asm("mcr    p15, 0, %0, c7, c1, 0"              \
        : : "r" (0));

/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) && \
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
    defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred    __cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred    __flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred    __cpuc_flush_icache_all
#else
#define __flush_icache_preferred    __flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
    __flush_icache_preferred();
    dsb(ishst);
}

/*
 * Flush caches up to Level of Unification Inner Shareable
 */
#define flush_cache_louis()     __cpuc_flush_kern_louis()

#define flush_cache_all()       __cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
    if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
        __cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
    struct mm_struct *mm = vma->vm_mm;

    if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
        __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                    vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
    struct mm_struct *mm = vma->vm_mm;

    if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
        unsigned long addr = user_addr & PAGE_MASK;
        __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
    }
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
        vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
        vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
        vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif
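
/*
 * Illustrative sketch: code that is about to rewrite the page tables
 * covering a user range (as the mprotect/change_protection paths do)
 * flushes the user cache entries for that range first, so a VIVT cache
 * cannot write back stale data through the old translation.
 * "example_prepare_pte_rewrite" is a hypothetical helper.
 */
static inline void example_prepare_pte_rewrite(struct vm_area_struct *vma,
                           unsigned long start, unsigned long end)
{
    flush_cache_range(vma, start, end);
    /* ... the caller may now safely modify the PTEs for start..end ... */
}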

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_icache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_icache_user_range(s,e)    __cpuc_coherent_user_range(s,e)

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)     __cpuc_coherent_kern_range(s,e)
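
/*
 * Illustrative sketch: the classic flush_icache_range() user writes
 * instructions into RAM (module loader, kprobes, JIT-style trampolines)
 * and must make them visible to the instruction stream before jumping to
 * them.  "example_install_insns" is a hypothetical helper; memcpy() is
 * assumed to be available at the call site.
 */
static inline void example_install_insns(void *dst, const void *insns, size_t len)
{
    memcpy(dst, insns, len);
    flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}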

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)   cpu_dcache_clean_area(start, size)
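
/*
 * Illustrative sketch: a typical user is page table allocation (compare
 * pgd_alloc() in arch/arm/mm/pgd.c), where freshly written descriptors
 * must be cleaned so that a hardware table walker that does not snoop the
 * D-cache sees them.  "example_publish_descriptors" is hypothetical;
 * cpu_dcache_clean_area() is assumed to be declared via <asm/proc-fns.h>
 * at the call site.
 */
static inline void example_publish_descriptors(void *table, const void *src,
                           size_t size)
{
    memcpy(table, src, size);
    clean_dcache_area(table, size);
}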

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
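
/*
 * Illustrative sketch: a driver or filesystem that fills a page cache page
 * through its kernel mapping calls flush_dcache_page() so that user
 * mappings of the page (possibly at a different cache colour) see the new
 * data.  "example_fill_page" is hypothetical; page_address() assumes a
 * lowmem page and memcpy() is assumed to be available.
 */
static inline void example_fill_page(struct page *page, const void *src,
                     size_t len)
{
    memcpy(page_address(page), src, len);
    flush_dcache_page(page);
}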

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
    if ((cache_is_vivt() || cache_is_vipt_aliasing()))
        __cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
    if ((cache_is_vivt() || cache_is_vipt_aliasing()))
        __cpuc_flush_dcache_area(addr, (size_t)size);
}
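
/*
 * Illustrative sketch: the vmap I/O pattern described in
 * Documentation/core-api/cachetlb.rst.  Data the kernel wrote through a
 * vmap alias is flushed before the device reads the underlying pages;
 * after the device has written the pages, stale or speculatively loaded
 * lines are dropped before the kernel reads through the alias.  The
 * "example_*" helpers are hypothetical and the I/O submission is only
 * indicated by comments.
 */
static inline void example_vmap_write_out(void *vaddr, int size)
{
    flush_kernel_vmap_range(vaddr, size);
    /* ... submit I/O that reads the underlying physical pages ... */
}

static inline void example_vmap_read_in(void *vaddr, int size)
{
    /* ... wait for I/O that wrote the underlying physical pages ... */
    invalidate_kernel_vmap_range(vaddr, size);
    /* ... now safe to read the data through the vmap alias ... */
}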

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
             struct page *page, unsigned long vmaddr)
{
    extern void __flush_anon_page(struct vm_area_struct *vma,
                struct page *, unsigned long);
    if (PageAnon(page))
        __flush_anon_page(vma, page, vmaddr);
}

#define flush_dcache_mmap_lock(mapping)     xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)   xa_unlock_irq(&mapping->i_pages)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page) do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
    if (!cache_is_vipt_nonaliasing())
        flush_cache_all();
    else
        /*
         * set_pte_at() called from vmap_pte_range() does not
         * have a DSB after cleaning the cache line.
         */
        dsb(ishst);
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
    if (!cache_is_vipt_nonaliasing())
        flush_cache_all();
}

/*
 * Memory synchronization helpers for mixed cached vs non-cached accesses.
 *
 * Some synchronization algorithms have to set states in memory with the
 * cache enabled or disabled depending on the code path.  It is crucial
 * to always ensure proper cache maintenance to update main memory right
 * away in that case.
 *
 * Any cached write must be followed by a cache clean operation.
 * Any cached read must be preceded by a cache invalidate operation.
 * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
 * operation is needed to avoid discarding possible concurrent writes to the
 * accessed memory.
 *
 * Also, in order to prevent a cached writer from interfering with an
 * adjacent non-cached writer, each state variable must be located in
 * a separate cache line.
 */

/*
 * This needs to be >= the max cache writeback size of all
 * supported platforms included in the current kernel configuration.
 * This is used to align state variables to their own cache lines.
 */
#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
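
/*
 * Illustrative sketch: giving each state variable its own writeback
 * granule, in the style of the MCPM low-level synchronisation structures
 * (see arch/arm/include/asm/mcpm.h).  "example_sync_state" is a
 * hypothetical type, not part of the kernel.
 */
struct example_sync_state {
    int cpu_ready   __aligned(__CACHE_WRITEBACK_GRANULE);
    int cluster_up  __aligned(__CACHE_WRITEBACK_GRANULE);
};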

/*
 * There is no __cpuc_clean_dcache_area but we use it anyway for
 * code intent clarity, and alias it to __cpuc_flush_dcache_area.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area

/*
 * Ensure preceding writes to *p by this CPU are visible to
 * subsequent reads by other CPUs:
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
    char *_p = (char *)p;

    __cpuc_clean_dcache_area(_p, size);
    outer_clean_range(__pa(_p), __pa(_p + size));
}

/*
 * Ensure preceding writes to *p by other CPUs are visible to
 * subsequent reads by this CPU.  We must be careful not to
 * discard data simultaneously written by another CPU, hence the
 * usage of flush rather than invalidate operations.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
    char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
    if (outer_cache.flush_range) {
        /*
         * Ensure dirty data migrated from other CPUs into our cache
         * are cleaned out safely before the outer cache is cleaned:
         */
        __cpuc_clean_dcache_area(_p, size);

        /* Clean and invalidate stale data for *p from outer ... */
        outer_flush_range(__pa(_p), __pa(_p + size));
    }
#endif

    /* ... and inner cache: */
    __cpuc_flush_dcache_area(_p, size);
}

#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
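
/*
 * Illustrative sketch: a cached CPU handing a flag to an observer whose
 * caches are off (compare how secondary_data is published with
 * sync_cache_w() in arch/arm/kernel/smp.c), and later picking up a value
 * that the uncached side wrote back.  The "example_*" helpers are
 * hypothetical.
 */
static inline void example_signal_ready(unsigned long *flag)
{
    *flag = 1;
    sync_cache_w(flag);     /* push the cached write out to RAM */
}

static inline unsigned long example_read_ack(unsigned long *ack)
{
    sync_cache_r(ack);      /* pick up a write done with caches off */
    return *ack;
}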

/*
 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
 * To do so we must:
 *
 * - Clear the SCTLR.C bit to prevent further cache allocations
 * - Flush the desired level of cache
 * - Clear the ACTLR "SMP" bit to disable local coherency
 *
 * ... and do so without any intervening memory access,
 * not even to the stack.
 *
 * WARNING -- After this has been called:
 *
 * - ldrex/strex (and similar) instructions must not be used.
 * - The CPU is obviously no longer coherent with the other CPUs.
 * - This is unlikely to work as expected if Linux is running non-secure.
 *
 * Note:
 *
 * - This is known to apply to several ARMv7 processor implementations,
 *   however some exceptions may exist.  Caveat emptor.
 *
 * - The clobber list is dictated by the call to v7_flush_dcache_*.
 */
#define v7_exit_coherency_flush(level) \
    asm volatile( \
    ".arch  armv7-a \n\t" \
    "mrc    p15, 0, r0, c1, c0, 0   @ get SCTLR \n\t" \
    "bic    r0, r0, #"__stringify(CR_C)" \n\t" \
    "mcr    p15, 0, r0, c1, c0, 0   @ set SCTLR \n\t" \
    "isb    \n\t" \
    "bl v7_flush_dcache_"__stringify(level)" \n\t" \
    "mrc    p15, 0, r0, c1, c0, 1   @ get ACTLR \n\t" \
    "bic    r0, r0, #(1 << 6)   @ disable local coherency \n\t" \
    "mcr    p15, 0, r0, c1, c0, 1   @ set ACTLR \n\t" \
    "isb    \n\t" \
    "dsb" \
    : : : "r0","r1","r2","r3","r4","r5","r6", \
          "r9","r10","ip","lr","memory" )
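
/*
 * Illustrative sketch: a platform CPU power-down path, in the style of the
 * MCPM / big.LITTLE backends, takes the dying CPU out of coherency before
 * it is powered off.  "example_cpu_powerdown" is hypothetical; the real
 * callers additionally include <asm/cp15.h> and <linux/stringify.h> for
 * CR_C and __stringify().
 */
static inline void example_cpu_powerdown(void)
{
    /* no memory accesses (not even the stack) between here and power-off */
    v7_exit_coherency_flush(louis);
    /* ... enter WFI or call the platform/firmware power-off hook ... */
}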

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
                 void *kaddr, unsigned long len);


#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid);
#else
static inline void check_cpu_icache_size(int cpuid) { }
#endif

#endif