0001
0002 #ifndef _ALPHA_TLBFLUSH_H
0003 #define _ALPHA_TLBFLUSH_H
0004
0005 #include <linux/mm.h>
0006 #include <linux/sched.h>
0007 #include <asm/compiler.h>
0008
0009 #ifndef __EXTERN_INLINE
0010 #define __EXTERN_INLINE extern inline
0011 #define __MMU_EXTERN_INLINE
0012 #endif
0013
0014 extern void __load_new_mm_context(struct mm_struct *);
0015
0016
0017
0018
0019
/*
 * Flush the current (active) user mapping on EV4-class CPUs.
 * Loads a fresh mm context, then issues tbiap() to invalidate all
 * per-process TLB entries.  NOTE(review): the full tbiap appears to be
 * needed because EV4 cannot rely on ASNs alone to retire stale
 * translations — confirm against the EV5 variant below, which skips it.
 */
__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
	tbiap();
}
0026
/*
 * Flush the current (active) user mapping on EV5-class CPUs.
 * Loading a new mm context is sufficient here: no explicit TLB
 * invalidate instruction is issued (contrast with the EV4 variant,
 * which additionally calls tbiap()).
 */
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
}
0032
0033
0034
0035
0036
0037 __EXTERN_INLINE void
0038 ev4_flush_tlb_current_page(struct mm_struct * mm,
0039 struct vm_area_struct *vma,
0040 unsigned long addr)
0041 {
0042 int tbi_flag = 2;
0043 if (vma->vm_flags & VM_EXEC) {
0044 __load_new_mm_context(mm);
0045 tbi_flag = 3;
0046 }
0047 tbi(tbi_flag, addr);
0048 }
0049
0050 __EXTERN_INLINE void
0051 ev5_flush_tlb_current_page(struct mm_struct * mm,
0052 struct vm_area_struct *vma,
0053 unsigned long addr)
0054 {
0055 if (vma->vm_flags & VM_EXEC)
0056 __load_new_mm_context(mm);
0057 else
0058 tbi(2, addr);
0059 }
0060
0061
0062 #ifdef CONFIG_ALPHA_GENERIC
0063 # define flush_tlb_current alpha_mv.mv_flush_tlb_current
0064 # define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
0065 #else
0066 # ifdef CONFIG_ALPHA_EV4
0067 # define flush_tlb_current ev4_flush_tlb_current
0068 # define flush_tlb_current_page ev4_flush_tlb_current_page
0069 # else
0070 # define flush_tlb_current ev5_flush_tlb_current
0071 # define flush_tlb_current_page ev5_flush_tlb_current_page
0072 # endif
0073 #endif
0074
0075 #ifdef __MMU_EXTERN_INLINE
0076 #undef __EXTERN_INLINE
0077 #undef __MMU_EXTERN_INLINE
0078 #endif
0079
0080
/* Flush the TLB for the mapping currently active on this CPU. */
static inline void
flush_tlb(void)
{
	flush_tlb_current(current->active_mm);
}
0086
0087
/*
 * Flush a mapping that is NOT active on this CPU: zero this CPU's
 * slot in the mm's context array so a fresh context (and hence a
 * TLB flush) is forced the next time the mm is activated here.
 */
static inline void
flush_tlb_other(struct mm_struct *mm)
{
	unsigned long *mmc = &mm->context[smp_processor_id()];

	/* Test before storing: skips the write (and any resulting
	   cacheline traffic) when the slot is already zero. */
	if (*mmc) *mmc = 0;
}
0096
0097 #ifndef CONFIG_SMP
0098
0099
/* Flush everything: tbia() invalidates all TLB entries on this CPU
   (UP build — the SMP variant is an extern declared below). */
static inline void flush_tlb_all(void)
{
	tbia();
}
0104
0105
0106 static inline void
0107 flush_tlb_mm(struct mm_struct *mm)
0108 {
0109 if (mm == current->active_mm)
0110 flush_tlb_current(mm);
0111 else
0112 flush_tlb_other(mm);
0113 }
0114
0115
0116 static inline void
0117 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
0118 {
0119 struct mm_struct *mm = vma->vm_mm;
0120
0121 if (mm == current->active_mm)
0122 flush_tlb_current_page(mm, vma, addr);
0123 else
0124 flush_tlb_other(mm);
0125 }
0126
0127
0128
/*
 * Flush a range of the user mapping (UP build).  No per-page loop:
 * the whole mm is flushed via flush_tlb_mm(), which is evidently
 * considered cheaper than iterating tbi over the range on Alpha.
 */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
0135
0136 #else
0137
0138 extern void flush_tlb_all(void);
0139 extern void flush_tlb_mm(struct mm_struct *);
0140 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
0141 extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
0142 unsigned long);
0143
0144 #endif
0145
/* Flush a kernel-address range: falls back to a full flush_tlb_all(),
   as no range-granular kernel invalidate is implemented here. */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
0151
0152 #endif