/* Viewed via the OSCL-LXR source cross-referencer; web navigation chrome removed. */
0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  *  arch/arm/include/asm/tlbflush.h
0004  *
0005  *  Copyright (C) 1999-2003 Russell King
0006  */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H

#ifndef __ASSEMBLY__
# include <linux/mm_types.h>
#endif

#ifdef CONFIG_MMU

#include <asm/glue.h>

/*
 * Capability bits, collected per TLB model into <model>_tlb_flags below
 * and tested at run/compile time via tlb_flag()/tlb_op().
 * Naming: U = unified TLB, D = data TLB, I = instruction TLB;
 * PAGE = invalidate-by-address, FULL = invalidate-all,
 * ASID = invalidate-by-ASID (the operand is an ASID value).
 */
#define TLB_V4_U_PAGE	(1 << 1)
#define TLB_V4_D_PAGE	(1 << 2)
#define TLB_V4_I_PAGE	(1 << 3)
#define TLB_V6_U_PAGE	(1 << 4)
#define TLB_V6_D_PAGE	(1 << 5)
#define TLB_V6_I_PAGE	(1 << 6)

#define TLB_V4_U_FULL	(1 << 9)
#define TLB_V4_D_FULL	(1 << 10)
#define TLB_V4_I_FULL	(1 << 11)
#define TLB_V6_U_FULL	(1 << 12)
#define TLB_V6_D_FULL	(1 << 13)
#define TLB_V6_I_FULL	(1 << 14)

#define TLB_V6_U_ASID	(1 << 16)
#define TLB_V6_D_ASID	(1 << 17)
#define TLB_V6_I_ASID	(1 << 18)

/* Branch predictor maintenance available (see *_flush_bp_all()). */
#define TLB_V6_BP	(1 << 19)

/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
#define TLB_V7_UIS_PAGE	(1 << 20)
#define TLB_V7_UIS_FULL (1 << 21)
#define TLB_V7_UIS_ASID (1 << 22)
#define TLB_V7_UIS_BP	(1 << 23)

#define TLB_BARRIER	(1 << 28)	/* DSB/ISB required after TLB ops */
#define TLB_L2CLEAN_FR	(1 << 29)	/* Feroceon: L2 clean for PMD writes */
#define TLB_DCLEAN	(1 << 30)	/* clean D-cache line on PMD update */
#define TLB_WB		(1 << 31)	/* drain write buffer before TLB ops */
0048 
/*
 *	MMU TLB Model
 *	=============
 *
 *	We have the following to choose from:
 *	  v4    - ARMv4 without write buffer
 *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 *	  fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
 *	  fa    - Faraday (v4 with write buffer with UTLB)
 *	  v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 *	  v7wbi - identical to v6wbi
 *
 * Each configured model contributes <model>_possible_flags (bits it may
 * have) and <model>_always_flags (bits it is guaranteed to have) to the
 * possible/always masks built further down; an unconfigured model
 * contributes 0 and -1UL so it drops out of both masks.  The first
 * configured model names the low-level implementation via _TLB (glued
 * onto _flush_{user,kern}_tlb_range); any additional model - or
 * CONFIG_SMP_ON_UP - defines MULTI_TLB, which switches the range-flush
 * entry points to indirect calls through cpu_tlb.
 */
#undef _TLB
#undef MULTI_TLB

#ifdef CONFIG_SMP_ON_UP
#define MULTI_TLB 1
#endif

#define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags	v4_tlb_flags
# define v4_always_flags	v4_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4
# endif
#else
# define v4_possible_flags	0
# define v4_always_flags	(-1UL)
#endif

#define fa_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
			 TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_FA
# define fa_possible_flags	fa_tlb_flags
# define fa_always_flags	fa_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB fa
# endif
#else
# define fa_possible_flags	0
# define fa_always_flags	(-1UL)
#endif

#define v4wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags	v4wbi_tlb_flags
# define v4wbi_always_flags	v4wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags	0
# define v4wbi_always_flags	(-1UL)
#endif

#define fr_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_FEROCEON
# define fr_possible_flags	fr_tlb_flags
# define fr_always_flags	fr_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
/* Feroceon reuses the v4wbi low-level TLB range functions. */
#  define _TLB v4wbi
# endif
#else
# define fr_possible_flags	0
# define fr_always_flags	(-1UL)
#endif

#define v4wb_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags	v4wb_tlb_flags
# define v4wb_always_flags	v4wb_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wb
# endif
#else
# define v4wb_possible_flags	0
# define v4wb_always_flags	(-1UL)
#endif

#define v6wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
			 TLB_V6_I_ASID | TLB_V6_D_ASID | \
			 TLB_V6_BP)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags	v6wbi_tlb_flags
# define v6wbi_always_flags	v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags	0
# define v6wbi_always_flags	(-1UL)
#endif

/* v7 SMP uses the unified inner-shareable ops; v7 UP the v6-style ones. */
#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_BARRIER | \
				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
				 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
#define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
				 TLB_V6_U_FULL | TLB_V6_U_PAGE | \
				 TLB_V6_U_ASID | TLB_V6_BP)

#ifdef CONFIG_CPU_TLB_V7

# ifdef CONFIG_SMP_ON_UP
/* SMP kernel that may boot on UP hardware: either flag set is possible. */
#  define v7wbi_possible_flags	(v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
#  define v7wbi_always_flags	(v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
# elif defined(CONFIG_SMP)
#  define v7wbi_possible_flags	v7wbi_tlb_flags_smp
#  define v7wbi_always_flags	v7wbi_tlb_flags_smp
# else
#  define v7wbi_possible_flags	v7wbi_tlb_flags_up
#  define v7wbi_always_flags	v7wbi_tlb_flags_up
# endif
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v7wbi
# endif
#else
# define v7wbi_possible_flags	0
# define v7wbi_always_flags	(-1UL)
#endif

/* At least one CONFIG_CPU_TLB_* option must have been selected. */
#ifndef _TLB
#error Unknown TLB model
#endif
0202 
#ifndef __ASSEMBLY__

#include <linux/sched.h>

/*
 * Per-TLB-model operations, used when more than one model is compiled
 * in (MULTI_TLB).  The range-flush methods are documented below under
 * "TLB Management".
 */
struct cpu_tlb_fns {
	/* Invalidate user range [start, end) for the vma's address space. */
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	/* Invalidate kernel range [start, end). */
	void (*flush_kern_range)(unsigned long, unsigned long);
	/* TLB_* capability bits of the running CPU's TLB. */
	unsigned long tlb_flags;
};

/*
 * Select the calling method
 */
#ifdef MULTI_TLB

/* Several TLB models configured: dispatch indirectly through cpu_tlb. */
#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range

#else

/* Single TLB model: bind directly to <model>_flush_*_tlb_range. */
#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif

extern struct cpu_tlb_fns cpu_tlb;

#define __cpu_tlb_flags		cpu_tlb.tlb_flags
0234 
/*
 *	TLB Management
 *	==============
 *
 *	The arch/arm/mm/tlb-*.S files implement these methods.
 *
 *	The TLB specific code is expected to perform whatever tests it
 *	needs to determine if it should invalidate the TLB for each
 *	call.  Start addresses are inclusive and end addresses are
 *	exclusive; it is safe to round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address
 *		space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified
 *		address space.
 *		- vma	- vm_area_struct describing address range
 *		- start - start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma, uaddr)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vma	- vm_area_struct describing address range
 *		- vaddr - virtual address (may not be aligned)
 */

/*
 * We optimise the code below by:
 *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 *  - if we're going to need __cpu_tlb_flags, access it once and only once
 *
 * This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal given the compiler constraints for multi-CPU
 * case.  We could do better for the multi-CPU case if the compiler
 * implemented the "%?" method, but this has been discontinued due to too
 * many people getting it wrong.
 */
#define possible_tlb_flags	(v4_possible_flags | \
				 v4wbi_possible_flags | \
				 fr_possible_flags | \
				 v4wb_possible_flags | \
				 fa_possible_flags | \
				 v6wbi_possible_flags | \
				 v7wbi_possible_flags)

#define always_tlb_flags	(v4_always_flags & \
				 v4wbi_always_flags & \
				 fr_always_flags & \
				 v4wb_always_flags & \
				 fa_always_flags & \
				 v6wbi_always_flags & \
				 v7wbi_always_flags)

/*
 * True if flag(s) f are present: either guaranteed at compile time, or
 * possible and set in the runtime flags.  Requires a local __tlb_flag
 * (a cached copy of __cpu_tlb_flags) to be in scope.
 */
#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))

/*
 * Issue "mcr <insnarg>" with operand arg when flag(s) f apply:
 *  - always present  -> unconditional MCR;
 *  - only possible   -> TST of __tlb_flag plus a conditional MCRNE;
 *  - never possible  -> no code emitted at all.
 */
#define __tlb_op(f, insnarg, arg)					\
	do {								\
		if (always_tlb_flags & (f))				\
			asm("mcr " insnarg				\
			    : : "r" (arg) : "cc");			\
		else if (possible_tlb_flags & (f))			\
			asm("tst %1, %2\n\t"				\
			    "mcrne " insnarg				\
			    : : "r" (arg), "r" (__tlb_flag), "Ir" (f)	\
			    : "cc");					\
	} while (0)

/* CP15 maintenance op (opcode_1 = 0). */
#define tlb_op(f, regs, arg)	__tlb_op(f, "p15, 0, %0, " regs, arg)
/* CP15 opcode_1 = 1 variant, used for the Feroceon L2 clean op. */
#define tlb_l2_op(f, regs, arg)	__tlb_op(f, "p15, 1, %0, " regs, arg)
0315 
/*
 * Invalidate the entire TLB on this CPU only (unified, data and
 * instruction TLBs as the hardware provides them).  No barriers here;
 * callers supply the surrounding DSB/ISB.
 */
static inline void __local_flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);	/* unified */
	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);	/* data */
	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);	/* instruction */
}
0325 
/*
 * Invalidate the entire TLB on the calling CPU, with local-only
 * (non-shareable domain) barriers: drain the write buffer first when
 * the TLB model requires it, and DSB+ISB afterwards.
 */
static inline void local_flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb(nshst);

	__local_flush_tlb_all();
	/* v7: local (non-broadcast) invalidate-all encoding. */
	tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);

	if (tlb_flag(TLB_BARRIER)) {
		dsb(nsh);
		isb();
	}
}
0342 
/*
 * Invalidate the entire TLB, inner-shareable variant: barriers use the
 * inner-shareable domain, and on v7 MP the c8, c3 encoding broadcasts
 * the invalidation to all CPUs in the inner-shareable domain.
 */
static inline void __flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb(ishst);

	__local_flush_tlb_all();
	tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);	/* inner-shareable */

	if (tlb_flag(TLB_BARRIER)) {
		dsb(ish);
		isb();
	}
}
0359 
/*
 * Invalidate this CPU's TLB entries for address space @mm.
 * v4-style TLBs have no ASID tagging, so they need a full invalidate -
 * but only if this CPU has actually run @mm (mm_cpumask test).
 * v6-style TLBs invalidate by ASID.  No barriers; callers supply them.
 */
static inline void __local_flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
			tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
			tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
			tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
		}
	}

	/* ASID-based invalidate (opc2 = 2). */
	tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
	tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
	tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
}
0378 
/*
 * Invalidate the calling CPU's TLB entries for @mm, with local-only
 * (non-shareable) barriers.  Note: only a DSB follows, no ISB.
 */
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb(nshst);

	__local_flush_tlb_mm(mm);
	/* v7: local by-ASID invalidate. */
	tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);

	if (tlb_flag(TLB_BARRIER))
		dsb(nsh);
}
0393 
/*
 * Inner-shareable (broadcast) flush of @mm's TLB entries.
 * With CONFIG_ARM_ERRATA_720789 the broadcast by-ASID operation is
 * replaced by an inner-shareable invalidate-all as the erratum
 * workaround.
 */
static inline void __flush_tlb_mm(struct mm_struct *mm)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb(ishst);

	__local_flush_tlb_mm(mm);
#ifdef CONFIG_ARM_ERRATA_720789
	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0);	/* invalidate-all IS */
#else
	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));	/* by-ASID IS */
#endif

	if (tlb_flag(TLB_BARRIER))
		dsb(ish);
}
0411 
/*
 * Invalidate this CPU's TLB entry for @uaddr in @vma's address space.
 * The page address is combined with the mm's ASID to form the by-MVA
 * operand.  v4-style TLBs are touched only if this CPU has run the mm;
 * a v4 I-TLB without a by-page op falls back to a full I-TLB
 * invalidate.  No barriers; callers supply them.
 */
static inline void
__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
	    cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
		tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
		tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}

	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
}
0433 
/*
 * Invalidate the calling CPU's TLB entry for @uaddr in @vma's address
 * space, with local-only (non-shareable) barriers.
 */
static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		dsb(nshst);

	__local_flush_tlb_page(vma, uaddr);
	/* v7: local by-MVA invalidate. */
	tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr);

	if (tlb_flag(TLB_BARRIER))
		dsb(nsh);
}
0450 
/*
 * Inner-shareable (broadcast) invalidate of @uaddr's TLB entry in
 * @vma's address space.  With CONFIG_ARM_ERRATA_720789 the by-MVA+ASID
 * broadcast is replaced by the all-ASID by-MVA encoding (opc2 = 3),
 * with the ASID bits masked off the operand.
 */
static inline void
__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		dsb(ishst);

	__local_flush_tlb_page(vma, uaddr);
#ifdef CONFIG_ARM_ERRATA_720789
	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
#else
	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);
#endif

	if (tlb_flag(TLB_BARRIER))
		dsb(ish);
}
0471 
/*
 * Invalidate this CPU's TLB entry for kernel address @kaddr (callers
 * pass a page-aligned address; no ASID is ORed in).  As with user
 * pages, a v4 I-TLB without a by-page op gets a full I-TLB invalidate
 * instead.  No barriers; callers supply them.
 */
static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
	tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
	tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
}
0487 
/*
 * Invalidate the calling CPU's TLB entry for kernel address @kaddr,
 * with local-only (non-shareable) barriers including a trailing ISB.
 */
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		dsb(nshst);

	__local_flush_tlb_kernel_page(kaddr);
	/* v7: local by-MVA invalidate. */
	tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr);

	if (tlb_flag(TLB_BARRIER)) {
		dsb(nsh);
		isb();
	}
}
0505 
/*
 * Inner-shareable (broadcast) invalidate of the TLB entry for kernel
 * address @kaddr, with inner-shareable barriers and a trailing ISB.
 */
static inline void __flush_tlb_kernel_page(unsigned long kaddr)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		dsb(ishst);

	__local_flush_tlb_kernel_page(kaddr);
	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);	/* inner-shareable */

	if (tlb_flag(TLB_BARRIER)) {
		dsb(ish);
		isb();
	}
}
0523 
/*
 * Branch predictor maintenance is paired with full TLB invalidation, so
 * there is no need for any barriers here.
 */
static inline void __local_flush_bp_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	/* v6: invalidate the entire branch predictor array. */
	if (tlb_flag(TLB_V6_BP))
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
}
0536 
/*
 * Local branch predictor invalidation; on v7 the same local encoding
 * is used, gated by the UIS capability bit.
 */
static inline void local_flush_bp_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	__local_flush_bp_all();
	if (tlb_flag(TLB_V7_UIS_BP))
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
}
0546 
/*
 * Branch predictor invalidation, inner-shareable (broadcast) variant:
 * v7 MP uses the c7, c1, 6 encoding to reach all CPUs in the domain.
 */
static inline void __flush_bp_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	__local_flush_bp_all();
	if (tlb_flag(TLB_V7_UIS_BP))
		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
}
0556 
/*
 *	flush_pmd_entry
 *
 *	Flush a PMD entry (word aligned, or double-word aligned) to
 *	RAM if the TLB for the CPU we are running on requires this.
 *	This is typically used when we are creating PMD entries.
 *
 *	clean_pmd_entry
 *
 *	Clean (but don't drain the write buffer) if the CPU requires
 *	these operations.  This is typically used when we are removing
 *	PMD entries.
 */
static inline void flush_pmd_entry(void *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	/* Clean the D-cache line holding the PMD so the walker sees it. */
	tlb_op(TLB_DCLEAN, "c7, c10, 1	@ flush_pmd", pmd);
	/* Feroceon additionally needs an L2 clean of the same line. */
	tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);

	/* Unlike clean_pmd_entry(), also drain the write buffer. */
	if (tlb_flag(TLB_WB))
		dsb(ishst);
}
0580 
/*
 * Clean the cache line(s) holding @pmd without draining the write
 * buffer afterwards (see the comment above flush_pmd_entry()).
 */
static inline void clean_pmd_entry(void *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	tlb_op(TLB_DCLEAN, "c7, c10, 1	@ flush_pmd", pmd);
	tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);
}
0588 
/* The helper macros are internal to the inline functions above. */
#undef tlb_op
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags

/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
#define local_flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)

#ifndef CONFIG_SMP
/* UP: the global flushes are simply the local ones. */
#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
#define flush_tlb_range		local_flush_tlb_range
#define flush_tlb_kernel_range	local_flush_tlb_kernel_range
#define flush_bp_all		local_flush_bp_all
#else
/* SMP: out-of-line implementations, provided elsewhere. */
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_bp_all(void);
#endif
0617 
/*
 * If PG_dcache_clean is not set for the page, we need to ensure that any
 * cache entries for the kernels virtual memory range are written
 * back to the page. On ARMv6 and later, the cache coherency is handled via
 * the set_pte_at() function.
 */
#if __LINUX_ARM_ARCH__ < 6
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep);
#else
/* ARMv6+: coherency is handled in set_pte_at(), nothing to do here. */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
}
#endif

/* No PMD-level equivalent is needed on ARM. */
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#endif	/* __ASSEMBLY__ */
0637 
#elif defined(CONFIG_SMP)	/* !CONFIG_MMU */

#ifndef __ASSEMBLY__
/*
 * No MMU: there are no TLBs, so all local maintenance is a no-op.
 * The cross-CPU entry points are still declared (defined elsewhere).
 */
static inline void local_flush_tlb_all(void)									{ }
static inline void local_flush_tlb_mm(struct mm_struct *mm)							{ }
static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)			{ }
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)						{ }
static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)	{ }
static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)				{ }
static inline void local_flush_bp_all(void)									{ }

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_bp_all(void);
#endif	/* __ASSEMBLY__ */

#endif	/* CONFIG_MMU */
0659 
#ifndef __ASSEMBLY__
#ifdef CONFIG_ARM_ERRATA_798181
extern void erratum_a15_798181_init(void);
#else
/* Workaround compiled out: init is a no-op. */
static inline void erratum_a15_798181_init(void) {}
#endif
/*
 * Runtime hook for the Cortex-A15 798181 TLB erratum workaround;
 * presumably installed by erratum_a15_798181_init() when the running
 * CPU is affected — confirm at the definition site.
 */
extern bool (*erratum_a15_798181_handler)(void);
0667 
0668 static inline bool erratum_a15_798181(void)
0669 {
0670     if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
0671         erratum_a15_798181_handler))
0672         return erratum_a15_798181_handler();
0673     return false;
0674 }
0675 #endif
0676 
0677 #endif