/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H

/*
 * TLB flushing for 64-bit hash-MMU CPUs.
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct ppc64_tlb_batch {
	int			active;		/* batching enabled on this CPU */
	unsigned long		index;		/* number of entries queued below */
	struct mm_struct	*mm;		/* address space the entries belong to */
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vpn[PPC64_TLB_BATCH_NR];	/* virtual page numbers */
	unsigned int		psize;		/* base page size of the entries */
	int			ssize;		/* segment size (256M or 1T) */
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

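/*
 * Illustrative sketch only: the hash PTE update path (see
 * arch/powerpc/mm/book3s64/hash_tlb.c) queues invalidations into the
 * per-CPU batch roughly as below and drains it once full. The helper
 * name queue_hpte_flush() is hypothetical, a simplified stand-in for
 * the real batching logic:
 *
 *	static void queue_hpte_flush(struct mm_struct *mm, unsigned long vpn,
 *				     real_pte_t rpte, int psize, int ssize)
 *	{
 *		struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 *		unsigned long i = batch->index;
 *
 *		batch->mm = mm;
 *		batch->psize = psize;
 *		batch->ssize = ssize;
 *		batch->pte[i] = rpte;
 *		batch->vpn[i] = vpn;
 *		batch->index = ++i;
 *		if (i >= PPC64_TLB_BATCH_NR)
 *			__flush_tlb_pending(batch);
 *	}
 */
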
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	/* Radix flushes translations directly; batching is hash-only. */
	if (radix_enabled())
		return;
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	if (radix_enabled())
		return;
	batch = this_cpu_ptr(&ppc64_tlb_batch);

	/* Drain anything still queued before turning batching off. */
	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()	do {} while (0)

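/*
 * Usage sketch (assumed caller, not defined in this header): generic mm
 * code brackets a run of PTE updates with these hooks, so the individual
 * hash invalidations accumulate in the per-CPU batch and are flushed in
 * one go:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		ptep_get_and_clear(mm, addr, ptep);	// queued, not flushed
 *	arch_leave_lazy_mmu_mode();			// drains the batch
 */
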
extern void hash__tlbiel_all(unsigned int action);

extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
			    int ssize, unsigned long flags);
extern void flush_hash_range(unsigned long number, int local);
extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
				pmd_t *pmdp, unsigned int psize, int ssize,
				unsigned long flags);
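
/*
 * Sketch of how __flush_tlb_pending() plausibly ties these together; the
 * real implementation lives in arch/powerpc/mm/book3s64/hash_tlb.c, and
 * mm_is_thread_local() is assumed from <asm/tlb.h>:
 *
 *	void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 *	{
 *		int i = batch->index;
 *		int local = mm_is_thread_local(batch->mm);
 *
 *		if (i == 1)
 *			flush_hash_page(batch->vpn[0], batch->pte[0],
 *					batch->psize, batch->ssize, local);
 *		else
 *			flush_hash_range(i, local);
 *		batch->index = 0;
 *	}
 */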
/*
 * The per-mm and per-page flush hooks below are no-ops on hash:
 * translations are invalidated when the hash PTEs themselves are
 * updated, via the batching machinery above.
 */
static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__local_flush_all_mm(struct mm_struct *mm)
{
	/*
	 * There's no Page Walk Cache for hash, so what is needed is
	 * the same as flush_tlb_mm(), which doesn't really make sense
	 * with hash. The only option would be to flush the entire
	 * LPID, so warn instead: nothing should be calling this.
	 */
	WARN_ON_ONCE(1);
}

static inline void hash__flush_all_mm(struct mm_struct *mm)
{
	/*
	 * There's no Page Walk Cache for hash, so what is needed is
	 * the same as flush_tlb_mm(), which doesn't really make sense
	 * with hash. The only option would be to flush the entire
	 * LPID, so warn instead: nothing should be calling this.
	 */
	WARN_ON_ONCE(1);
}

static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end)
{
}

static inline void hash__flush_tlb_kernel_range(unsigned long start,
						unsigned long end)
{
}

struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);
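/*
 * Sketch (assumption, mirroring the batching above): hash__tlb_flush()
 * typically just drains whatever the mmu_gather left in the per-CPU
 * batch:
 *
 *	void hash__tlb_flush(struct mmu_gather *tlb)
 *	{
 *		struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
 *
 *		if (batch->index)
 *			__flush_tlb_pending(batch);
 *		put_cpu_var(ppc64_tlb_batch);
 *	}
 */
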
#ifdef CONFIG_PPC_64S_HASH_MMU
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(unsigned long start, unsigned long end);
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr);
#else
static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { }
#endif /* CONFIG_PPC_64S_HASH_MMU */
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */