0001
0002 #ifndef _ASM_POWERPC_NOHASH_TLBFLUSH_H
0003 #define _ASM_POWERPC_NOHASH_TLBFLUSH_H
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
/* Opaque forward declarations — only pointers to these types are used here. */
struct vm_area_struct;
struct mm_struct;

/* Sentinel context id meaning "this mm has no MMU context assigned". */
#define MMU_NO_CONTEXT ((unsigned int)-1)

/* Flush user translations for [start, end) in @vma's address space. */
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
0033
0034 #ifdef CONFIG_PPC_8xx
0035 static inline void local_flush_tlb_mm(struct mm_struct *mm)
0036 {
0037 unsigned int pid = READ_ONCE(mm->context.id);
0038
0039 if (pid != MMU_NO_CONTEXT)
0040 asm volatile ("sync; tlbia; isync" : : : "memory");
0041 }
0042
/*
 * Flush the single TLB entry mapping @vmaddr on the local CPU (8xx).
 * tlbie invalidates one translation; the trailing sync orders its
 * completion.  @vma is unused on 8xx but kept for the common prototype.
 */
static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	asm volatile ("tlbie %0; sync" : : "r" (vmaddr) : "memory");
}
0047
0048 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
0049 {
0050 start &= PAGE_MASK;
0051
0052 if (end - start <= PAGE_SIZE)
0053 asm volatile ("tlbie %0; sync" : : "r" (start) : "memory");
0054 else
0055 asm volatile ("sync; tlbia; isync" : : : "memory");
0056 }
0057 #else
/* Non-8xx nohash CPUs: flushes are implemented out of line elsewhere. */
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

/*
 * Low-level single-entry flush.  tsize/ind presumably select the MMU
 * page-size class and direct vs. indirect entry — TODO confirm against
 * the out-of-line definition.
 */
extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
				   int tsize, int ind);
0064 #endif
0065
#ifdef CONFIG_SMP
/* SMP: flushes must reach other CPUs, so they are implemented out of line. */
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			     int tsize, int ind);
#else
/* UP: only this CPU's TLB exists, so the local flush is sufficient. */
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
#define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i)
#endif
0076
0077 #endif