#ifndef _SPARC64_TLBFLUSH_H
#define _SPARC64_TLBFLUSH_H

#include <asm/mmu_context.h>

/* TSB flush operations. */

#define TLB_BATCH_NR	192
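
/*
 * Roughly: user virtual addresses whose TLB and TSB entries still need
 * invalidating are queued in a tlb_batch and drained in one go by
 * flush_tlb_pending(), typically when the batch fills up or when lazy MMU
 * mode is left (the batching itself lives in arch/sparc/mm/tlb.c).
 */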
struct tlb_batch {
	unsigned int hugepage_shift;
	struct mm_struct *mm;
	unsigned long tlb_nr;
	unsigned long active;
	unsigned long vaddrs[TLB_BATCH_NR];
};

void flush_tsb_kernel_range(unsigned long start, unsigned long end);
void flush_tsb_user(struct tlb_batch *tb);
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
			 unsigned int hugepage_shift);
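
/*
 * The TSB (Translation Storage Buffer) is the in-memory table consulted by
 * the sparc64 TLB-miss handlers, so tearing down a translation means
 * clearing its TSB entry as well as the TLB entry.  The helpers above
 * handle the TSB side for a kernel range, a user batch and a single user
 * page respectively.
 */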

/* TLB flush operations. */
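
/*
 * These generic hooks are deliberately empty: on sparc64, user TLB/TSB
 * invalidation is driven from the page table update paths through the
 * tlb_batch mechanism above (and from MMU context management) rather
 * than from flush_tlb_mm()/flush_tlb_page()/flush_tlb_range().
 */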
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode()	do {} while (0)
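
/*
 * Usage sketch (illustrative, simplified): core mm code brackets a run of
 * page table updates with the lazy MMU hooks, e.g.
 *
 *	arch_enter_lazy_mmu_mode();
 *	... pte updates queue vaddrs into the per-cpu tlb_batch ...
 *	arch_leave_lazy_mmu_mode();	-- drains the batch via flush_tlb_pending()
 *
 * so individual invalidations can be coalesced into a single flush (and a
 * single cross-call on SMP).
 */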

/* Local cpu only. */
void __flush_tlb_all(void);
void __flush_tlb_page(unsigned long context, unsigned long vaddr);
void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
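
/*
 * global_flush_tlb_page() removes one user page's translation from every
 * CPU: on UP builds that is just the local __flush_tlb_page() with the
 * mm's hardware context bits, on SMP it cross-calls via smp_flush_tlb_page().
 */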
#ifndef CONFIG_SMP

static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}

#else /* CONFIG_SMP */

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);

#define global_flush_tlb_page(mm, vaddr) \
	smp_flush_tlb_page(mm, vaddr)

#endif /* !CONFIG_SMP */

#endif /* _SPARC64_TLBFLUSH_H */