Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
0003 #define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
0004 
0005 #define MMU_NO_CONTEXT  ~0UL
0006 
0007 #include <linux/mm_types.h>
0008 #include <asm/book3s/64/tlbflush-hash.h>
0009 #include <asm/book3s/64/tlbflush-radix.h>
0010 
/*
 * TLB flush actions. Used as the argument to tlbiel_all() and friends
 * to select how much of the TLB is invalidated.
 */
enum {
	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
};
0016 
0017 static inline void tlbiel_all(void)
0018 {
0019     /*
0020      * This is used for host machine check and bootup.
0021      *
0022      * This uses early_radix_enabled and implementations use
0023      * early_cpu_has_feature etc because that works early in boot
0024      * and this is the machine check path which is not performance
0025      * critical.
0026      */
0027     if (early_radix_enabled())
0028         radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
0029     else
0030         hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
0031 }
0032 
0033 static inline void tlbiel_all_lpid(bool radix)
0034 {
0035     /*
0036      * This is used for guest machine check.
0037      */
0038     if (radix)
0039         radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
0040     else
0041         hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
0042 }
0043 
0044 
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
/* Flush TLB entries covering a PMD-mapped range of @vma. */
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled()) {
		radix__flush_pmd_tlb_range(vma, start, end);
		return;
	}
	hash__flush_tlb_range(vma, start, end);
}
0053 
#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/* Flush TLB entries covering a hugetlb range of @vma. */
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled()) {
		radix__flush_hugetlb_tlb_range(vma, start, end);
		return;
	}
	hash__flush_tlb_range(vma, start, end);
}
0063 
/* Flush TLB entries for the user range @start..@end within @vma. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_range(vma, start, end);
	else
		hash__flush_tlb_range(vma, start, end);
}
0071 
/* Flush TLB entries for a kernel virtual address range. */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_kernel_range(start, end);
	else
		hash__flush_tlb_kernel_range(start, end);
}
0079 
/* Flush this CPU's TLB entries for address space @mm. */
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__local_flush_tlb_mm(mm);
	else
		hash__local_flush_tlb_mm(mm);
}
0086 
/* Flush this CPU's TLB entry for the page at @vmaddr in @vma. */
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		radix__local_flush_tlb_page(vma, vmaddr);
	else
		hash__local_flush_tlb_page(vma, vmaddr);
}
0094 
/* Flush all of this CPU's translations for address space @mm. */
static inline void local_flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__local_flush_all_mm(mm);
	else
		hash__local_flush_all_mm(mm);
}
0101 
/* mmu_gather finalisation: flush whatever @tlb accumulated. */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		radix__tlb_flush(tlb);
	else
		hash__tlb_flush(tlb);
}
0108 
#ifdef CONFIG_SMP
/* SMP: broadcast flushes so every CPU's cached translations are dropped. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__flush_tlb_mm(mm);
	else
		hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		radix__flush_tlb_page(vma, vmaddr);
	else
		hash__flush_tlb_page(vma, vmaddr);
}

static inline void flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__flush_all_mm(mm);
	else
		hash__flush_all_mm(mm);
}
#else
/* UP: only this CPU can hold stale entries, so the local variants suffice. */
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#define flush_all_mm(mm)		local_flush_all_mm(mm)
#endif /* CONFIG_SMP */
0136 
#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
/* Intentionally a no-op on Book3S 64; the rationale is explained below. */
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address)
{
	/*
	 * Book3S 64 does not require spurious fault flushes because the PTE
	 * must be re-fetched in case of an access permission problem. So the
	 * only reason for a spurious fault should be concurrent modification
	 * to the PTE, in which case the PTE will eventually be re-fetched by
	 * the MMU when it attempts the access again.
	 *
	 * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
	 * Entry, Setting a Reference or Change Bit or Upgrading Access
	 * Authority (PTE Subject to Atomic Hardware Updates):
	 *
	 * "If the only change being made to a valid PTE that is subject to
	 *  atomic hardware updates is to set the Reference or Change bit to
	 *  1 or to upgrade access authority, a simpler sequence suffices
	 *  because the translation hardware will refetch the PTE if an
	 *  access is attempted for which the only problems were reference
	 *  and/or change bits needing to be set or insufficient access
	 *  authority."
	 *
	 * The nest MMU in POWER9 does not perform this PTE re-fetch, but
	 * it avoids the spurious fault problem by flushing the TLB before
	 * upgrading PTE permissions, see radix__ptep_set_access_flags.
	 */
}
0165 
/*
 * Flags controlling use of the global "tlbie" instruction for TLB flushes.
 * NOTE(review): the precise capable/enabled distinction is set up in the
 * defining .c file — confirm there; from this header we only see that
 * cputlb_use_tlbie() reports the runtime tlbie_enabled setting.
 */
extern bool tlbie_capable;
extern bool tlbie_enabled;

/* Should TLB flush code use the broadcast tlbie instruction right now? */
static inline bool cputlb_use_tlbie(void)
{
	return tlbie_enabled;
}
0173 
0174 #endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */