0001
0002
0003
0004
0005
0006
0007 #ifndef _ASMARM_CACHEFLUSH_H
0008 #define _ASMARM_CACHEFLUSH_H
0009
0010 #include <linux/mm.h>
0011
0012 #include <asm/glue-cache.h>
0013 #include <asm/shmparam.h>
0014 #include <asm/cachetype.h>
0015 #include <asm/outercache.h>
0016
/*
 * CACHE_COLOUR - page-sized "colour" of a virtual address within an
 * SHMLBA-sized window, used to detect aliasing between virtual mappings
 * on aliasing caches.
 *
 * @vaddr is fully parenthesised so that arguments containing operators of
 * lower precedence than '&' (e.g. ?:) expand correctly.
 */
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
0018
/*
 * Per-page flag: set when the page's D-cache state is clean with respect
 * to its kernel mapping, so a flush can be skipped.
 * NOTE(review): exact protocol inferred from the name and typical
 * PG_arch_1 usage — confirm against arch/arm/mm/flush.c.
 */
#define PG_dcache_clean PG_arch_1
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
/*
 * Function table describing the cache maintenance operations of one
 * CPU/cache implementation.  With MULTI_CACHE an instance of this table
 * is selected at runtime (see the cpu_cache dispatch below).
 *
 * Range arguments are (start, end) virtual addresses unless noted;
 * flush_user_range() additionally takes the vma->vm_flags of the mapping
 * (see vivt_flush_cache_range()).
 */
struct cpu_cache_fns {
	void (*flush_icache_all)(void);	/* invalidate the entire I-cache */
	void (*flush_kern_all)(void);	/* flush the entire kernel cache */
	void (*flush_kern_louis)(void);	/* flush to LoUIS (Level of
					 * Unification Inner Shareable) */
	void (*flush_user_all)(void);	/* flush all user-space mappings */
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	/* int return: the user variant can report failure (e.g. a fault
	 * on the user address — NOTE(review): confirm exact contract) */
	int (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	/* private to the DMA-mapping API; the int is presumably the DMA
	 * direction — confirm against the dma-mapping callers */
	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
} __no_randomize_layout;	/* fixed layout; presumably the offsets are
				 * known to asm/setup code — confirm */
0117
0118
0119
0120
/*
 * Binding of the __cpuc_* / dmac_* cache entry points.
 *
 * MULTI_CACHE (from <asm/glue-cache.h>) indicates that more than one cache
 * implementation may be compiled in, so every call is dispatched through
 * the cpu_cache function table.  Otherwise a single implementation is
 * built in and the names resolve directly to its out-of-line functions.
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all cpu_cache.flush_user_all
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area

/*
 * This is private to the DMA-mapping API; do not use directly.
 * Its purpose is to make data held in the cache visible to DMA, or data
 * written by DMA visible to the CPU.
 */
#define dmac_flush_range cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/* Private to the DMA-mapping API — see the MULTI_CACHE note above. */
extern void dmac_flush_range(const void *, const void *);

#endif
0162
0163
0164
0165
0166
0167
/*
 * Copy data into a page that may be mapped into user space: the
 * out-of-line implementation performs whatever cache maintenance is
 * needed to keep the user-visible mapping coherent with the write.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);

/*
 * Copy data from a user page: a plain memcpy — no cache maintenance is
 * done here (NOTE(review): presumably the caller/core MM ensures the
 * page is coherent before reading — confirm).
 */
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
	} while (0)
0174
0175
0176
0177
0178
0179
/*
 * __flush_icache_all_generic()
 *	Invalidate the entire I-cache: CP15 c7, c5, 0 (ICIALLU),
 *	local CPU only.
 */
#define __flush_icache_all_generic() \
	asm("mcr p15, 0, %0, c7, c5, 0" \
	    : : "r" (0));

/*
 * __flush_icache_all_v7_smp()
 *	Invalidate the entire I-cache inner shareable: CP15 c7, c1, 0
 *	(ICIALLUIS), broadcast to all CPUs in the inner-shareable domain.
 */
#define __flush_icache_all_v7_smp() \
	asm("mcr p15, 0, %0, c7, c1, 0" \
	    : : "r" (0));

/*
 * Choose the I-cache invalidation method:
 *  - a mixed v6/v7 kernel, or SMP-on-UP, cannot decide at build time,
 *    so it must go through the per-CPU function pointer;
 *  - pure v7 SMP can use the inner-shareable broadcast op inline;
 *  - erratum 411920 (CONFIG_ARM_ERRATA_411920) requires the special
 *    out-of-line sequence;
 *  - everything else uses the plain local CP15 op.
 */
#if (defined(CONFIG_CPU_V7) && \
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred __cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred __flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred __cpuc_flush_icache_all
#else
#define __flush_icache_preferred __flush_icache_all_generic
#endif
0204
/*
 * Invalidate the entire I-cache using the method selected above, then
 * issue dsb(ishst) — an inner-shareable store barrier — to ensure
 * completion before subsequent memory operations.
 */
static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
	dsb(ishst);
}
0210
0211
0212
0213
/* Flush caches up to the Level of Unification Inner Shareable only. */
#define flush_cache_louis() __cpuc_flush_kern_louis()

/* Flush the entire cache hierarchy known to this CPU implementation. */
#define flush_cache_all() __cpuc_flush_kern_all()
0217
/*
 * VIVT: flush all user-space cache entries, but only if this CPU has the
 * mm's state cached (mm_cpumask() tracks which CPUs the mm has run on).
 */
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}
0223
/*
 * VIVT: flush the user cache for [start, end) in @vma, rounded out to
 * whole pages.  vm_flags is passed so the implementation can tell e.g.
 * executable mappings apart.  Skipped when the vma's mm has never run on
 * this CPU; the !mm check presumably covers vmas without an owning mm —
 * NOTE(review): confirm which callers pass such vmas.
 */
static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}
0233
/*
 * VIVT: flush the single page containing @user_addr (the @pfn argument is
 * unused here — kept for interface parity with the VIPT variant).  Same
 * mm_cpumask()/!mm gating as vivt_flush_cache_range().
 */
static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}
0244
/*
 * Non-VIPT configurations use the cheap inline VIVT helpers above; VIPT
 * configurations use out-of-line implementations (presumably to cope with
 * cache aliasing — see arch/arm/mm/flush.c).
 */
#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

/* Flushing at fork time has the same requirements as flushing the mm. */
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
0259
0260
0261
0262
0263
0264
/*
 * flush_icache_user_range(start, end)
 *	Make the I-cache coherent with the D-cache for a user address
 *	range.  Returns int: __cpuc_coherent_user_range can report failure
 *	(see its prototype above).
 */
#define flush_icache_user_range(s,e) __cpuc_coherent_user_range(s,e)

/*
 * flush_icache_range(start, end)
 *	As above, for kernel addresses (e.g. after writing instructions —
 *	NOTE(review): typical use inferred, confirm callers).
 */
#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e)

/*
 * clean_dcache_area(start, size)
 *	Clean (write back) a kernel D-cache area via the per-CPU
 *	cpu_dcache_clean_area() helper.  Note: (start, size), not
 *	(start, end).
 */
#define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size)
0278
0279
0280
0281
0282
0283
0284
0285
0286
0287
0288
0289
0290
/* ARM provides its own flush_dcache_page() implementation. */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
/*
 * Write back a vmap'd kernel range so its data is visible through other
 * (aliasing) mappings of the same pages.  Only aliasing caches — VIVT or
 * aliasing VIPT — need any work; otherwise this is a no-op.
 */
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
0300 static inline void invalidate_kernel_vmap_range(void *addr, int size)
0301 {
0302 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
0303 __cpuc_flush_dcache_area(addr, (size_t)size);
0304 }
0305
#define ARCH_HAS_FLUSH_ANON_PAGE
/*
 * Keep the kernel mapping of an anonymous page coherent with its user
 * mapping at @vmaddr before the kernel accesses it.  Only PageAnon pages
 * are handed to the out-of-line __flush_anon_page(); other pages are a
 * no-op here (NOTE(review): presumably covered by flush_dcache_page() —
 * confirm).
 */
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}
0315
/*
 * IRQ-safe locking of the address_space's page-cache xarray while its
 * pages are walked for D-cache maintenance.
 */
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)

/*
 * flush_icache_page() is a no-op on ARM; I/D coherency for mapped pages
 * is handled elsewhere (NOTE(review): presumably via flush_dcache_page()
 * and the PG_dcache_clean protocol — confirm).
 */
#define flush_icache_page(vma,page) do { } while (0)
0324
0325
0326
0327
0328
0329
0330
0331
/*
 * Called after a new kernel mapping is created with vmap()/ioremap().
 * Aliasing caches must flush everything, since the new mapping may alias
 * existing data; a non-aliasing VIPT cache needs no cache maintenance but
 * still requires a barrier for the page-table writes.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb(ishst);
}
0343
/*
 * Called before a vmap()/ioremap() mapping is torn down: on aliasing
 * caches, flush everything so no stale lines for the dying mapping
 * remain.  Nothing to do on a non-aliasing VIPT cache.
 */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cache_is_vipt_nonaliasing())
		return;

	flush_cache_all();
}
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358
0359
0360
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
0373
/*
 * Granule (2^6 = 64 bytes) used when sizing/aligning memory that is
 * subject to cache write-back.  NOTE(review): presumably the maximum
 * cache line size across supported CPUs/outer caches — confirm.
 */
#define __CACHE_WRITEBACK_ORDER 6
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)

/*
 * There is no clean-only entry in cpu_cache_fns, so "clean" is mapped
 * onto the full flush — correct, though it may do more work than a pure
 * clean would.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
0382
0383
0384
0385
0386
/*
 * Make writes to [p, p + size) visible to a non-coherent observer:
 * clean the inner (CPU) D-cache for the range, then clean the outer
 * cache for the corresponding physical range.
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}
0394
0395
0396
0397
0398
0399
0400
/*
 * Make data written to [p, p + size) by a non-coherent agent visible to
 * CPU reads.  Ordering matters: when an outer cache is present, the
 * inner cache is cleaned first so no dirty line can be written back over
 * the freshly flushed outer-cache contents.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Ensure dirty data migrated from other CPUs into our
		 * cache are cleaned out safely before the outer cache is
		 * cleaned.
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* Clean and invalidate stale data for *p from the outer cache. */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* Clean and invalidate stale data for *p from the inner cache. */
	__cpuc_flush_dcache_area(_p, size);
}
0421
/* Single-object wrappers: sync one *ptr for writing to / reading from a
 * non-coherent agent.  Note sizeof *(ptr) — pass a pointer to the object. */
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
0424
0425
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
/*
 * v7_exit_coherency_flush(level) - take this ARMv7 CPU out of coherency,
 * e.g. on the CPU hot-unplug / low-power path:
 *
 *   1. clear SCTLR.C so no new D-cache allocations occur;
 *   2. flush the D-cache by calling v7_flush_dcache_<level> — @level is
 *      token-pasted, so callers pass e.g. louis or all (NOTE(review):
 *      confirm the available callees);
 *   3. clear ACTLR bit 6 to disable local coherency;
 *   4. isb/dsb barriers between the steps.
 *
 * The called flush routine may clobber many registers, hence the large
 * clobber list; "memory" prevents the compiler caching values across it.
 */
#define v7_exit_coherency_flush(level) \
	asm volatile( \
	".arch armv7-a \n\t" \
	"mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
	"bic r0, r0, #"__stringify(CR_C)" \n\t" \
	"mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
	"isb \n\t" \
	"bl v7_flush_dcache_"__stringify(level)" \n\t" \
	"mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
	"bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
	"mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
	"isb \n\t" \
	"dsb" \
	: : : "r0","r1","r2","r3","r4","r5","r6", \
	      "r9","r10","ip","lr","memory" )
0464
/*
 * Cache maintenance for a uprobes XOL (execute-out-of-line) slot after an
 * instruction has been copied into it; implemented out of line.
 * NOTE(review): purpose inferred from the name — confirm in
 * arch/arm/mm/flush.c.
 */
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len);

/*
 * Optional workaround for systems whose CPUs have mismatched I-cache
 * geometry; compiles to an empty stub when not configured.
 */
#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid);
#else
static inline void check_cpu_icache_size(int cpuid) { }
#endif
0474
0475 #endif