/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>

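/*
 * Monotonic source of the per-mm ctx_id handed out in init_new_context()
 * below.  The TLB-flush code uses ctx_id (together with the mm's tlb_gen)
 * to tell mms apart without relying on mm_struct pointers, which can be
 * reused.
 */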
extern atomic64_t last_mm_ctx_id;

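/*
 * Without PARAVIRT_XXL there is no hypervisor hook for activating an mm,
 * so provide an empty stub here; with PARAVIRT_XXL, paravirt_activate_mm()
 * comes from <asm/paravirt.h>.  In the stub case, activate_mm() below
 * reduces to a plain switch_mm().
 */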
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif

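/*
 * These static keys mirror perf's "rdpmc" sysfs setting; cr4_update_pce()
 * recomputes CR4.PCE on a CPU accordingly, which gates whether user space
 * may execute RDPMC directly.  (The unused void * argument lets it double
 * as an on_each_cpu() callback.)
 */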
#ifdef CONFIG_PERF_EVENTS
DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
void cr4_update_pce(void *ignored);
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL

/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live: with the exception of the "slot" field, an
 * ldt_struct is immutable once installed.  LDT updates allocate a new
 * ldt_struct and swap the mm's pointer, so readers only need to keep
 * the old one from being freed out from under them.
 */
struct ldt_struct {
	/*
	 * Xen PV requires LDTs to be page-aligned and mapped read-only,
	 * so the descriptor entries live in their own allocation rather
	 * than inline in this struct.
	 */
	struct desc_struct	*entries;
	unsigned int		nr_entries;

	/*
	 * If PTI is enabled, the entries array is not mapped while we are
	 * in user mode.  Instead, the whole array is aliased into the
	 * user-visible page tables at LDT_BASE_ADDR.  "slot" selects which
	 * of the alias slots (0 or 1) is in use, or is -1 if this LDT has
	 * no alias mapping.
	 */
	int			slot;
};

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else	/* !CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
extern void load_mm_ldt(struct mm_struct *mm);
extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
#else
static inline void load_mm_ldt(struct mm_struct *mm)
{
	clear_LDT();
}
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	DEBUG_LOCKS_WARN_ON(preemptible());
}
#endif

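/*
 * The "#define name name" lines in this header tell the generic
 * <asm-generic/mmu_context.h> (included at the bottom) that x86 overrides
 * the hook: the generic header only emits its no-op fallback when the
 * macro is undefined, roughly:
 *
 *	#ifndef enter_lazy_tlb
 *	static inline void enter_lazy_tlb(struct mm_struct *mm,
 *					  struct task_struct *tsk) { }
 *	#endif
 */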
#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	init_new_context_ldt(mm);
	return 0;
}

#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

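/*
 * activate_mm() is invoked on the execve() path, where the new mm has
 * never run before; there is no user task to switch from, hence the
 * NULL tsk argument to switch_mm().
 */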
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

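/*
 * deactivate_mm() runs during exec to reset the user segment registers,
 * so that stale FS/GS selectors (e.g. ones pointing into the old LDT or
 * TLS slots) do not leak into the new program.
 */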
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	loadsegment(gs, 0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
#endif
}

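/*
 * Called during fork(), after the VMAs have been copied; a nonzero
 * return (from duplicating the LDT) aborts the fork.
 */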
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_arch_dup_mmap(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}

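/*
 * The teardown counterpart: called when the address space is being torn
 * down, giving the hypervisor and the LDT code a chance to release their
 * per-mm state.
 */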
static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}

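/*
 * Whether the mm is known to run only 64-bit code.  With IA32 emulation
 * enabled, MM_CONTEXT_UPROBE_IA32 is set on the mm of a 32-bit program at
 * exec time, so instruction decoders (notably uprobes) treat it as 32-bit.
 */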
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.flags & MM_CONTEXT_UPROBE_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

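/*
 * No-op on x86; this hook was historically used by MPX and is kept to
 * satisfy the core-mm arch interface.
 */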
static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
}

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell which PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

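/*
 * Computes the current CR3 value from software state (the loaded mm plus
 * its ASID) rather than reading the register; only valid with preemption
 * disabled, so the loaded mm cannot change under the caller.
 */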
unsigned long __get_current_cr3_fast(void);

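/* Generic fallbacks for every hook not overridden above. */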
#include <asm-generic/mmu_context.h>

#endif /* _ASM_X86_MMU_CONTEXT_H */