/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>

void __flush_tlb_all(void);

#define TLB_FLUSH_ALL		-1UL
#define TLB_GENERATION_INVALID	0

void cr4_update_irqsoff(unsigned long set, unsigned long clear);
unsigned long cr4_read_shadow(void);

/* Set bits in this CPU's CR4; interrupts must already be disabled. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
        cr4_update_irqsoff(mask, 0);
}

/* Clear bits in this CPU's CR4; interrupts must already be disabled. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
        cr4_update_irqsoff(0, mask);
}

/* Set bits in this CPU's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
        unsigned long flags;

        local_irq_save(flags);
        cr4_set_bits_irqsoff(mask);
        local_irq_restore(flags);
}

/* Clear bits in this CPU's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
        unsigned long flags;

        local_irq_save(flags);
        cr4_clear_bits_irqsoff(mask);
        local_irq_restore(flags);
}
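
/*
 * Usage sketch (illustrative, not part of this header): toggling a CR4
 * feature bit from a context where interrupts may be enabled:
 *
 *      cr4_set_bits(X86_CR4_TSD);
 *      ...
 *      cr4_clear_bits(X86_CR4_TSD);
 *
 * Callers already running with interrupts disabled should use the
 * *_irqsoff() variants directly.
 */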

#ifndef MODULE

/*
 * 6 dynamic ASIDs should be plenty, and it keeps struct tlb_state small
 * enough to fit in two cache lines.
 */
#define TLB_NR_DYN_ASIDS	6

struct tlb_context {
        u64 ctx_id;	/* matches the mm's mm_context_t.ctx_id */
        u64 tlb_gen;	/* tlb_gen this CPU has caught up to for that mm */
};

struct tlb_state {
        /*
         * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
         * are on.  This means that it may not match current->active_mm,
         * which will contain the previous user mm when we're in lazy TLB
         * mode even if we've already switched back to swapper_pg_dir.
         *
         * During switch_mm_irqs_off(), loaded_mm will be set to
         * LOADED_MM_SWITCHING during the brief interrupts-off window
         * when CR3 and loaded_mm would otherwise be inconsistent.  This
         * is for nmi_uaccess_okay()'s benefit.
         */
        struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

        /* Last user mm for optimizing IBPB */
        union {
                struct mm_struct *last_user_mm;
                unsigned long last_user_mm_spec;
        };

        u16 loaded_mm_asid;
        u16 next_asid;

        /*
         * If set, we changed the page tables in a way that requires
         * invalidating all contexts (aka. PCIDs / ASIDs).  This tells us
         * to go invalidate all the non-loaded ctxs[] on the next
         * context switch.
         *
         * The current ctx was kept up to date as it ran and does not
         * need to be invalidated.
         */
        bool invalidate_other;

        /*
         * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate that
         * the corresponding user PCID needs a flush the next time we
         * switch to it; see SWITCH_TO_USER_CR3.
         */
        unsigned short user_pcid_flush_mask;

        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
         */
        unsigned long cr4;

        /*
         * This is a list of all contexts that might exist in the TLB.
         * There is one per ASID that we use, and the ASID (what the
         * CPU calls PCID) is the index into ctxs.
         *
         * For each context, ctx_id indicates which mm the TLB's user
         * entries came from.  As an invariant, the TLB will never
         * contain entries that are out-of-date with respect to the
         * tlb_gen recorded here for that mm.
         *
         * To be clear, this means that it's legal for the TLB code to
         * flush the TLB without updating tlb_gen.
         *
         * NB: context 0 is a bit special, since it's also used by
         * various bits of init code.  This is fine -- code that
         * isn't aware of PCID will end up harmlessly flushing
         * context 0.
         */
        struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate);
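
/*
 * Illustrative sketch (not part of this header): with preemption
 * disabled, this CPU's TLB state can be inspected through the usual
 * per-CPU accessors, e.g.:
 *
 *      struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 *      u16 asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 */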

struct tlb_state_shared {
        /*
         * We can be in one of several states:
         *
         *  - Actively using an mm.  Our CPU's bit will be set in
         *    mm_cpumask(loaded_mm) and is_lazy == false;
         *
         *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
         *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
         *
         *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
         *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
         *    We're heuristically guessing that the CR3 load we
         *    skipped more than makes up for the overhead added by
         *    lazy mode.
         */
        bool is_lazy;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
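
/*
 * Illustrative sketch: a remote flusher may skip CPUs that are lazily
 * using an mm, since they revalidate against tlb_gen at the next mm
 * switch (a simplified version of what arch/x86/mm/tlb.c does):
 *
 *      if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
 *              continue;       // no flush IPI needed for this CPU
 */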

bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay

/* Initialize the CR4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

extern void initialize_tlbstate_and_flush(void);

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_multi(cpumask, info) flushes TLBs on multiple cpus
 */
struct flush_tlb_info {
        /*
         * We support several kinds of flushes.
         *
         * - Fully flush a single mm.  .mm will be set, .end will be
         *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
         *   which the IPI sender is trying to catch us up.
         *
         * - Partially flush a single mm.  .mm will be set, .start and
         *   .end will indicate the range, and .new_tlb_gen will be set
         *   such that the changes between generation .new_tlb_gen-1 and
         *   .new_tlb_gen are entirely contained in the indicated range.
         *
         * - Fully flush all mms whose tlb_gens have been updated.  .mm
         *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
         *   will be zero.
         */
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
        u64 new_tlb_gen;
        unsigned int initiating_cpu;
        u8 stride_shift;
        u8 freed_tables;
};
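
/*
 * Example (hypothetical values; the real initialization lives in
 * arch/x86/mm/tlb.c): a full flush of a single mm would be described as:
 *
 *      struct flush_tlb_info info = {
 *              .mm             = mm,
 *              .start          = 0,
 *              .end            = TLB_FLUSH_ALL,
 *              .new_tlb_gen    = inc_mm_tlb_gen(mm),
 *              .stride_shift   = PAGE_SHIFT,
 *      };
 */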

void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_multi(const struct cpumask *cpumask,
                     const struct flush_tlb_info *info);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#define flush_tlb_mm(mm)						\
	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end)				\
	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
			   ((vma)->vm_flags & VM_HUGETLB)		\
				? huge_page_shift(hstate_vma(vma))	\
				: PAGE_SHIFT, false)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                               unsigned long end, unsigned int stride_shift,
                               bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
        flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}
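
/*
 * Usage sketch (illustrative; assumes a valid vma and user addresses
 * within it):
 *
 *      flush_tlb_page(vma, addr);              // a single page
 *      flush_tlb_range(vma, start, end);       // stride derived from the VMA
 *      flush_tlb_mm(vma->vm_mm);               // everything for this mm
 */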

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
        /*
         * Bump the generation count.  This also serves as a full barrier
         * that synchronizes with switch_mm(): callers are required to order
         * their read of mm_cpumask after their writes to the paging
         * structures.
         */
        return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
                                        struct mm_struct *mm)
{
        inc_mm_tlb_gen(mm);
        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
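
/*
 * Batching sketch (simplified; the real users are the reclaim unmap
 * paths): accumulate the CPUs of several mms, then issue one combined
 * flush rather than flushing once per mm:
 *
 *      struct arch_tlbflush_unmap_batch batch = { };
 *
 *      arch_tlbbatch_add_mm(&batch, mm1);
 *      arch_tlbbatch_add_mm(&batch, mm2);
 *      arch_tlbbatch_flush(&batch);
 */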

static inline bool pte_flags_need_flush(unsigned long oldflags,
                                        unsigned long newflags,
                                        bool ignore_access)
{
        /*
         * Flags that require a flush when cleared but not when they are set.
         * Only include flags that would not trigger spurious page-faults.
         * Non-present entries are not cached.  Hardware sets the dirty and
         * accessed bits if needed, without taking a fault.
         */
        const pteval_t flush_on_clear = _PAGE_DIRTY | _PAGE_PRESENT |
                                        _PAGE_ACCESSED;
        const pteval_t software_flags = _PAGE_SOFTW1 | _PAGE_SOFTW2 |
                                        _PAGE_SOFTW3 | _PAGE_SOFTW4;
        const pteval_t flush_on_change = _PAGE_RW | _PAGE_USER | _PAGE_PWT |
                        _PAGE_PCD | _PAGE_PSE | _PAGE_GLOBAL | _PAGE_PAT |
                        _PAGE_PAT_LARGE | _PAGE_PKEY_BIT0 | _PAGE_PKEY_BIT1 |
                        _PAGE_PKEY_BIT2 | _PAGE_PKEY_BIT3 | _PAGE_NX;
        unsigned long diff = oldflags ^ newflags;

        BUILD_BUG_ON(flush_on_clear & software_flags);
        BUILD_BUG_ON(flush_on_clear & flush_on_change);
        BUILD_BUG_ON(flush_on_change & software_flags);

        /* Ignore software flags */
        diff &= ~software_flags;

        if (ignore_access)
                diff &= ~_PAGE_ACCESSED;

        /*
         * If any of the 'flush_on_clear' flags was cleared, a flush is
         * mandated.
         */
        if (diff & oldflags & flush_on_clear)
                return true;

        /* Flush on modified flags. */
        if (diff & flush_on_change)
                return true;

        /* Ensure there are no flags that were left behind */
        if (IS_ENABLED(CONFIG_DEBUG_VM) &&
            (diff & ~(flush_on_clear | software_flags | flush_on_change))) {
                VM_WARN_ON_ONCE(1);
                return true;
        }

        return false;
}
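
/*
 * Worked example: write-protecting a present PTE clears _PAGE_RW, a
 * 'flush_on_change' bit, so a flush is required; setting _PAGE_ACCESSED
 * (a 'flush_on_clear' bit going 0 -> 1) needs none, since hardware sets
 * the accessed bit without faulting:
 *
 *      pte_flags_need_flush(_PAGE_PRESENT | _PAGE_RW,
 *                           _PAGE_PRESENT, false);             // true
 *      pte_flags_need_flush(_PAGE_PRESENT,
 *                           _PAGE_PRESENT | _PAGE_ACCESSED,
 *                           false);                            // false
 */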

/*
 * pte_needs_flush() checks whether permissions were demoted and require a
 * flush.  It should only be used for userspace PTEs.
 */
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
        /* !PRESENT -> * ; no need for flush */
        if (!(pte_flags(oldpte) & _PAGE_PRESENT))
                return false;

        /* PFN changed ; needs flush */
        if (pte_pfn(oldpte) != pte_pfn(newpte))
                return true;

        /*
         * Check PTE flags; ignore the access bit, see the comment in
         * ptep_clear_flush_young().
         */
        return pte_flags_need_flush(pte_flags(oldpte), pte_flags(newpte),
                                    true);
}
#define pte_needs_flush pte_needs_flush
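
/*
 * Usage sketch (hypothetical mprotect-style path, heavily simplified):
 *
 *      pte_t oldpte = ptep_modify_prot_start(vma, addr, ptep);
 *      pte_t newpte = pte_wrprotect(oldpte);
 *
 *      ptep_modify_prot_commit(vma, addr, ptep, oldpte, newpte);
 *      if (pte_needs_flush(oldpte, newpte))
 *              flush_tlb_page(vma, addr);
 */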

/*
 * huge_pmd_needs_flush() checks whether permissions were demoted and
 * require a flush.  It should only be used for userspace huge PMDs.
 */
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
        /* !PRESENT -> * ; no need for flush */
        if (!(pmd_flags(oldpmd) & _PAGE_PRESENT))
                return false;

        /* PFN changed ; needs flush */
        if (pmd_pfn(oldpmd) != pmd_pfn(newpmd))
                return true;

        /*
         * Check PMD flags; do not ignore the access bit, see
         * pmdp_clear_flush_young().
         */
        return pte_flags_need_flush(pmd_flags(oldpmd), pmd_flags(newpmd),
                                    false);
}
#define huge_pmd_needs_flush huge_pmd_needs_flush

#endif /* !MODULE */

/*
 * Flush the entire TLB, global pages included, by toggling CR4.PGE:
 * writing CR4 with PGE flipped invalidates all entries, and the second
 * write restores the original value.
 */
static inline void __native_tlb_flush_global(unsigned long cr4)
{
        native_write_cr4(cr4 ^ X86_CR4_PGE);
        native_write_cr4(cr4);
}
#endif /* _ASM_X86_TLBFLUSH_H */