/* SPDX-License-Identifier: GPL-2.0 */
/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Not every architecture can blindly access user memory from NMI context;
 * e.g. the NMI may interrupt a context switch while the page tables and the
 * notion of the current mm disagree. nmi_uaccess_okay() reports whether user
 * accesses are currently safe; the generic fallback assumes they always are.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no
 * live translations left to it; otherwise it would be possible to observe
 * (or worse, change) the page content after it has been reused.
 *
 * The basic API:
 *
 *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
 *
 *    start and finish a mmu_gather; the _fullmm variant is for when the
 *    whole address space is going away (exit/execve), which permits extra
 *    optimizations (see mmu_gather::fullmm).
 *
 *  - tlb_start_vma() / tlb_end_vma()
 *
 *    mark the start and end of a VMA; used to track the VMA properties
 *    that affect the flush (VM_EXEC, VM_HUGETLB, VM_PFNMAP) and to bound
 *    intermediate flushes at VMA boundaries where required.
 *
 *  - tlb_remove_tlb_entry()
 *
 *    record that a PTE was unmapped, growing the range that needs
 *    flushing; _pmd/_pud/huge variants exist for larger entries.
 *
 *  - tlb_remove_page() / tlb_remove_page_size() and the __ variants
 *
 *    queue a page to be freed once the TLB has been flushed; the __
 *    variants return true when the batch is full and the caller must
 *    issue tlb_flush_mmu() before queueing more pages.
 *
 *  - tlb_change_page_size()
 *
 *    flush the batch when the gathered page size changes, for
 *    architectures that cannot mix page sizes in one invalidation
 *    (CONFIG_MMU_GATHER_PAGE_SIZE).
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    flush the TLB for the accumulated range; the former also frees the
 *    queued pages afterwards.
 *
 *  - pte_free_tlb() / pmd_free_tlb() / pud_free_tlb() / p4d_free_tlb()
 *
 *    free page-table pages through the gather, setting
 *    mmu_gather::freed_tables so the architecture can also invalidate its
 *    page-walk caches.
 *
 * The architecture is expected to provide tlb_flush(); when it does not, a
 * default is used: flush_tlb_mm() under CONFIG_MMU_GATHER_NO_RANGE,
 * otherwise an implementation based on flush_tlb_range().
 *
 * Opt-in features, selected by the architecture's Kconfig:
 *
 *  CONFIG_MMU_GATHER_TABLE_FREE
 *
 *    provides tlb_remove_table(), batching the freeing of page-table
 *    pages separately from regular pages.
 *
 *  CONFIG_MMU_GATHER_RCU_TABLE_FREE
 *
 *    defers the freeing of page-table pages with RCU-like semantics, so
 *    that lockless GUP (get_user_pages_fast(), which walks the tables
 *    with only interrupts disabled) stays safe on architectures that do
 *    not broadcast TLB invalidation via IPIs.
 *
 *  CONFIG_MMU_GATHER_PAGE_SIZE
 *
 *    tracks the page size of the gathered pages and forces a flush
 *    whenever it changes.
 *
 *  CONFIG_MMU_GATHER_NO_RANGE
 *
 *    for architectures without efficient range invalidation; any
 *    non-empty range is flushed with flush_tlb_mm().
 *
 *  CONFIG_MMU_GATHER_NO_GATHER
 *
 *    no page batching at all; the architecture provides its own
 *    __tlb_remove_page_size() and frees pages itself.
 *
 *  CONFIG_MMU_GATHER_MERGE_VMAS
 *
 *    allows the accumulated range to span multiple VMAs, avoiding a
 *    flush at every VMA boundary (VM_PFNMAP ranges excepted, see
 *    tlb_end_vma()).
 *
 *  CONFIG_MMU_GATHER_NO_FLUSH_CACHE
 *
 *    skips the flush_cache_range() call in tlb_start_vma().
 */

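/*
 * Illustrative only -- a rough sketch of the typical caller sequence, in
 * the style of the unmap paths in mm/memory.c (tlb_gather_mmu() and
 * tlb_finish_mmu() are defined in mm/mmu_gather.c, not in this header):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);			// start a gather for mm
 *	tlb_start_vma(&tlb, vma);			// per-VMA setup
 *	...						// clear PTEs, then:
 *	tlb_remove_tlb_entry(&tlb, ptep, addr);		// 2) log the invalidate
 *	tlb_remove_page(&tlb, page);			// 3) defer the free
 *	tlb_end_vma(&tlb, vma);				// maybe flush at VMA end
 *	tlb_finish_mmu(&tlb);				// final flush + free
 */
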
#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page
 * based page directories and we can use the normal page batching to free
 * them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
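
/*
 * Illustrative only -- an architecture's __pte_free_tlb() typically funnels
 * the table page into tlb_remove_table(). A hypothetical sketch (the dtor
 * call and exact types vary per architecture and kernel version):
 *
 *	#define __pte_free_tlb(tlb, pte, address)	\
 *	do {						\
 *		pgtable_pte_page_dtor(pte);		\
 *		tlb_remove_table((tlb), (pte));		\
 *	} while (0)
 */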

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLB invalidate when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB | VM_PFNMAP in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;
	unsigned int		vma_pfn  : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch	*active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int		page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}
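
/*
 * Worked example (illustrative, with made-up addresses): starting from a
 * reset range (start = TASK_SIZE, end = 0), adjusting for a PTE at 0x1000
 * and later a PMD entry at 0x200000 leaves:
 *
 *	__tlb_adjust_range(tlb, 0x1000, PAGE_SIZE);	// start 0x1000, end 0x2000
 *	__tlb_adjust_range(tlb, 0x200000, PMD_SIZE);	// start 0x1000,
 *							// end 0x200000 + PMD_SIZE
 *
 * i.e. one conservative range covering both clears, including the unused
 * gap between them.
 */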

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep
 * the range small. We equally don't have to worry about page granularity or
 * other things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush
/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * flush.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
	tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
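
/*
 * Illustrative only -- how a zap loop typically pairs these calls, in the
 * style of zap_pte_range() in mm/memory.c (locking and details elided):
 *
 *	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); // 1) unhook
 *	tlb_remove_tlb_entry(tlb, pte, addr);	// 2) log the TLB invalidate
 *	page = vm_normal_page(vma, addr, ptent);
 *	if (page)
 *		tlb_remove_page(tlb, page);	// 3) free only after the flush
 */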

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
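
/*
 * Illustrative only -- an architecture's tlb_flush() can use the shift or
 * size above to pick an invalidation granule; e.g. arm64 does something
 * along these lines, feeding the unmap size into __flush_tlb_range() as
 * the stride (sketch only, the details are per-arch):
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		unsigned long stride = tlb_get_unmap_size(tlb);
 *		...
 *		__flush_tlb_range(&vma, tlb->start, tlb->end, stride, ...);
 *	}
 */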

/*
 * tlb_start_vma() and tlb_end_vma() bracket the unmap of a single VMA; they
 * let us track the VMA properties that affect the flush and, where required,
 * issue intermediate flushes at VMA boundaries.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
#endif
}

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * VM_PFNMAP is more fragile because the core mm will not track the
	 * page mapcount -- there might not be page-frames for these PFNs
	 * after all. Force flush TLBs for such ranges to avoid munmap() vs
	 * unmap_mapping_range() races.
	 */
	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
		/*
		 * Do a TLB flush and reset the range at VMA boundaries; this
		 * avoids the ranges growing with the unused space between
		 * consecutive VMAs.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

/*
 * tlb_flush_{pte,pmd,pud,p4d}_range() adjust tlb->start and tlb->end and set
 * the corresponding cleared_* bit, recording at which level entries were
 * cleared.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.   This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page-table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), flushing an individual page had better flush
 * the page-table caches behind it as well; if your architecture does not
 * work that way, define your own __p??_free_tlb() functions instead of
 * using the generic implementations below.
 *
 * Freeing a pte table is a modification of the pmd entry above it (and
 * likewise one level up for pmd/pud tables), so each helper records the
 * clear at the parent level and sets freed_tables; tlb_flush() can then
 * also invalidate page-walk caches, and the table page itself is freed
 * only after the flush (see tlb_remove_table()).
 */
#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
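
/*
 * Illustrative only -- how the free_tlb helpers are used when tearing down
 * page tables, roughly as in free_pte_range() in mm/memory.c:
 *
 *	pgtable_t token = pmd_pgtable(*pmd);
 *	pmd_clear(pmd);				// unhook the pte table
 *	pte_free_tlb(tlb, token, addr);		// defer free, mark freed_tables
 *	mm_dec_nr_ptes(tlb->mm);
 */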

/*
 * pte_needs_flush() / huge_pmd_needs_flush() report whether a PTE/PMD change
 * requires a TLB flush; architectures (e.g. x86) can override them to elide
 * the flush for permission-relaxing changes. The generic fallbacks
 * conservatively always flush.
 */
#ifndef pte_needs_flush
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return true;
}
#endif

#ifndef huge_pmd_needs_flush
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return true;
}
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */