/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PGTABLE_H
#define _LINUX_PGTABLE_H

#include <linux/pfn.h>
#include <asm/pgtable.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/page_table_check.h>

#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
	defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
#endif

/*
 * Upper ceiling passed to free_pgtables(): user page tables above this
 * address are never freed.  0 means "no ceiling"; architectures whose
 * user page tables must survive above some address override this in
 * their <asm/pgtable.h>.
 */
#ifndef USER_PGTABLES_CEILING
#define USER_PGTABLES_CEILING	0UL
#endif

/*
 * This defines the first usable user address.  Platforms can override
 * its value with a custom FIRST_USER_ADDRESS defined in their
 * respective <asm/pgtable.h>.
 */
#ifndef FIRST_USER_ADDRESS
#define FIRST_USER_ADDRESS	0UL
#endif

/*
 * This defines the generic helper for accessing PMD page table pages.
 * Platforms can override it in their respective <asm/pgtable.h>.
 */
#ifndef pmd_pgtable
#define pmd_pgtable(pmd) pmd_page(pmd)
#endif

/*
 * A page table page can be thought of as an array like this:
 * pXd_t[PTRS_PER_PxD]
 *
 * The pXd_index() functions return the index of the entry in the page
 * table page which would control the given virtual address.
 *
 * As these functions may be used by the same code for different levels
 * of the page table folding, they are always available, regardless of
 * CONFIG_PGTABLE_LEVELS: for the folded levels they simply return 0, so
 * that the code looks the same whether or not the level is folded.
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
#define pte_index pte_index

#ifndef pmd_index
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
#define pmd_index pmd_index
#endif

#ifndef pud_index
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
#define pud_index pud_index
#endif

#ifndef pgd_index
/* Must be a compile-time constant, so implement it as a macro */
#define pgd_index(a)  (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#endif

#ifndef pte_offset_kernel
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
#define pte_offset_kernel pte_offset_kernel
#endif

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address)			\
	((pte_t *)kmap_atomic(pmd_page(*(dir))) +	\
	 pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte))
#else
#define pte_offset_map(dir, address)	pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))
#endif
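
/*
 * Illustrative sketch, not part of this header's API: every
 * pte_offset_map() must be paired with pte_unmap(), because with
 * CONFIG_HIGHPTE the PTE page is only temporarily mapped via
 * kmap_atomic().  A typical lookup under the page table lock:
 *
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	pte_t entry = *pte;
 *	... inspect or modify entry ...
 *	pte_unmap(pte);
 */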

#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return pud_pgtable(*pud) + pmd_index(address);
}
#define pmd_offset pmd_offset
#endif

#ifndef pud_offset
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return p4d_pgtable(*p4d) + pud_index(address);
}
#define pud_offset pud_offset
#endif

static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
{
	return (pgd + pgd_index(address));
}

/*
 * A shortcut to get a pgd_t in a given mm.
 */
#ifndef pgd_offset
#define pgd_offset(mm, address)		pgd_offset_pgd((mm)->pgd, (address))
#endif

/*
 * A shortcut which implies the use of the kernel's pgd, instead
 * of a process's.
 */
#ifndef pgd_offset_k
#define pgd_offset_k(address)		pgd_offset(&init_mm, (address))
#endif

/*
 * In many cases it is known that a virtual address is mapped at PMD or PTE
 * level, so instead of traversing all the page table levels, we can get a
 * pointer to the PMD entry in user or kernel page tables or translate a
 * virtual address to the pointer in the PTE in the kernel page tables with
 * simple helpers.
 */
static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
}

static inline pmd_t *pmd_off_k(unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	pmd_t *pmd = pmd_off_k(vaddr);

	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}
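
/*
 * Illustrative sketch, not an API defined here: virt_to_kpte() lets
 * arch code reach the PTE backing a kernel virtual address without an
 * explicit five-level walk, e.g.:
 *
 *	pte_t *ptep = virt_to_kpte(vaddr);
 *
 *	if (ptep && pte_present(*ptep))
 *		... use the entry ...
 *
 * It returns NULL when no PTE level exists (pmd_none()), so callers
 * must check the result before dereferencing it.
 */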

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);
#else
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
static inline int pudp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp,
					pud_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = *ptep;
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif
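
/*
 * Illustrative sketch of the intended use (reclaim-style page aging,
 * not code from this header): the reference bit is harvested and
 * cleared in one step, so a later access marks the page young again:
 *
 *	if (ptep_test_and_clear_young(vma, addr, ptep))
 *		referenced = true;
 *
 * Note the generic version does not flush the TLB; callers that need
 * the cleared bit to be observed immediately use
 * ptep_clear_flush_young() instead.
 */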

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
#else
/*
 * Despite being relevant to THP only, this API is called from generic
 * rmap code under PageTransHuge(), hence it needs a dummy definition
 * for !THP builds.
 */
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, address, ptep);
	page_table_check_pte_clear(mm, address, pte);
	return pte;
}
#endif

static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	ptep_get_and_clear(mm, addr, ptep);
}

#ifndef __HAVE_ARCH_PTEP_GET
static inline pte_t ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
#endif

#ifdef CONFIG_GUP_GET_PTE_LOW_HIGH
/*
 * For walking the pagetables without holding any locks.  Some
 * architectures (eg x86-32 PAE) cannot load the entries atomically
 * without using expensive instructions.  We are guaranteed that a PTE
 * will only either go from not present to present, or present to not
 * present -- it will not switch to a completely different present page
 * without a TLB flush in between; which we are blocking by holding
 * interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *   ptep->pte_high = h;
 *   smp_wmb();
 *   ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *   ptep->pte_low = 0;
 *   smp_wmb();
 *   ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' IFF pte_high
 * sees 'h'.  We load pte_high *after* loading pte_low, which ensures we
 * don't see an older value of pte_high.  *Then* we recheck pte_low,
 * which ensures that we haven't picked up a changed pte high.  We might
 * have gotten rubbish values from pte_low and pte_high, but we are
 * guaranteed that pte_low will not have the present bit set *unless* it
 * is 'l'.  Because get_user_pages_fast() only operates on present ptes,
 * we're safe.
 */
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = ptep->pte_low;
		smp_rmb();
		pte.pte_high = ptep->pte_high;
		smp_rmb();
	} while (unlikely(pte.pte_low != ptep->pte_low));

	return pte;
}
#else /* CONFIG_GUP_GET_PTE_LOW_HIGH */
/*
 * We require that the PTE can be read atomically.
 */
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	return ptep_get(ptep);
}
#endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */
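
/*
 * Illustrative sketch, modelled on the gup_pte_range()-style pattern
 * (not code from this header): a lockless walker snapshots the PTE,
 * pins the page, then re-reads the PTE to verify nothing changed in
 * between:
 *
 *	pte_t pte = ptep_get_lockless(ptep);
 *	... pin the page that pte points to ...
 *	if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
 *		... unpin and bail out: the PTE changed under us ...
 *	}
 */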

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd_clear(pmdp);
	page_table_check_pmd_clear(mm, address, pmd);

	return pmd;
}
#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pud_t *pudp)
{
	pud_t pud = *pudp;

	pud_clear(pudp);
	page_table_check_pud_clear(mm, address, pud);

	return pud;
}
#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long address, pmd_t *pmdp,
						 int full)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#endif

#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long address, pud_t *pudp,
						 int full)
{
	return pudp_huge_get_and_clear(mm, address, pudp);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	return pte;
}
#endif

/*
 * If two threads concurrently fault at the same page, the thread that
 * won the race updates the PTE and its local TLB/Cache.  The thread
 * that lost the race still needs to update its local TLB with the new
 * PTE value written by the winner, so architectures get a dedicated
 * hook for that, defaulting to a no-op.
 */
#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
static inline void update_mmu_tlb(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep)
{
}
#define __HAVE_ARCH_UPDATE_MMU_TLB
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTE's which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
				   unsigned long address,
				   pmd_t *pmdp);
extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
				   unsigned long address,
				   pud_t *pudp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

/*
 * On some architectures the hardware does not set the accessed bit when
 * a page is accessed; maintaining it is the responsibility of software,
 * at the cost of an extra page fault per page.  On such architectures
 * the bit can simply be set during the page fault itself.  To
 * distinguish it from pte_mkyoung(), this helper is a no-op wherever
 * the hardware maintains the access bit.
 */
#ifndef pte_sw_mkyoung
static inline pte_t pte_sw_mkyoung(pte_t pte)
{
	return pte;
}
#define pte_sw_mkyoung	pte_sw_mkyoung
#endif

#ifndef pte_savedwrite
#define pte_savedwrite pte_write
#endif

#ifndef pte_mk_savedwrite
#define pte_mk_savedwrite pte_mkwrite
#endif

#ifndef pte_clear_savedwrite
#define pte_clear_savedwrite pte_wrprotect
#endif

#ifndef pmd_savedwrite
#define pmd_savedwrite pmd_write
#endif

#ifndef pmd_mk_savedwrite
#define pmd_mk_savedwrite pmd_mkwrite
#endif

#ifndef pmd_clear_savedwrite
#define pmd_clear_savedwrite pmd_wrprotect
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	pud_t old_pud = *pudp;

	set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
}
#else
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	BUILD_BUG();
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif

#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#else
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	BUILD_BUG();
	return *pmdp;
}
#define pmdp_collapse_flush pmdp_collapse_flush
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is an implementation of pmdp_establish() that is only suitable
 * for an architecture that doesn't have hardware dirty/accessed bits.
 * In this case we can't race with CPUs setting those bits, so a
 * non-atomic read-then-write is fine.
 */
static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	return old_pmd;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
/*
 * pmdp_invalidate_ad() invalidates the PMD while changing a transparent
 * hugepage mapping in the page tables.  This function is intended to be
 * used by architectures that need special handling for the access and
 * dirty bits.
 */
extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PTE_UNUSED
/*
 * Some architectures provide facilities to virtualization guests
 * allowing them to flag allocated pages as unused.  This lets the host
 * transparently reclaim unused pages.  This function returns whether
 * the pte's page is unused.
 */
static inline int pte_unused(pte_t pte)
{
	return 0;
}
#endif

#ifndef pte_access_permitted
#define pte_access_permitted(pte, write) \
	(pte_present(pte) && (!(write) || pte_write(pte)))
#endif

#ifndef pmd_access_permitted
#define pmd_access_permitted(pmd, write) \
	(pmd_present(pmd) && (!(write) || pmd_write(pmd)))
#endif

#ifndef pud_access_permitted
#define pud_access_permitted(pud, write) \
	(pud_present(pud) && (!(write) || pud_write(pud)))
#endif

#ifndef p4d_access_permitted
#define p4d_access_permitted(p4d, write) \
	(p4d_present(p4d) && (!(write) || p4d_write(p4d)))
#endif

#ifndef pgd_access_permitted
#define pgd_access_permitted(pgd, write) \
	(pgd_present(pgd) && (!(write) || pgd_write(pgd)))
#endif

#ifndef __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}

static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
	return pud_val(pud_a) == pud_val(pud_b);
}
#endif

#ifndef __HAVE_ARCH_P4D_SAME
static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
	return p4d_val(p4d_a) == p4d_val(p4d_b);
}
#endif

#ifndef __HAVE_ARCH_PGD_SAME
static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
	return pgd_val(pgd_a) == pgd_val(pgd_b);
}
#endif

/*
 * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
 * TLB flush will be required as a result of the "set".  For example,
 * use in scenarios where it is known ahead of time that the routine is
 * setting non-present entries, or re-setting an existing entry to the
 * same value.  Otherwise, use the typical "set" helpers and flush the
 * TLB.
 */
#define set_pte_safe(ptep, pte) \
({ \
	WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
	set_pte(ptep, pte); \
})

#define set_pmd_safe(pmdp, pmd) \
({ \
	WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
	set_pmd(pmdp, pmd); \
})

#define set_pud_safe(pudp, pud) \
({ \
	WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
	set_pud(pudp, pud); \
})

#define set_p4d_safe(p4dp, p4d) \
({ \
	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
	set_p4d(p4dp, p4d); \
})

#define set_pgd_safe(pgdp, pgd) \
({ \
	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
	set_pgd(pgdp, pgd); \
})
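
/*
 * Illustrative sketch with a hypothetical helper (not defined here):
 * set_pte_safe() suits code that only ever populates entries which are
 * either not present or already identical, e.g. when building an
 * identity mapping at early boot, so no TLB flush is needed:
 *
 *	static void ident_map_page(pte_t *ptep, unsigned long pfn)
 *	{
 *		set_pte_safe(ptep, pfn_pte(pfn, PAGE_KERNEL));
 *	}
 *
 * If the entry were live with a different value, the WARN_ON_ONCE() in
 * set_pte_safe() would fire.
 */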

#ifndef __HAVE_ARCH_DO_SWAP_PAGE
/*
 * Some architectures support metadata associated with a page.  When a
 * page is being swapped out, this metadata must be saved so it can be
 * restored when the page is swapped back in.  SPARC M7 and newer
 * processors, for example, support an ADI (Application Data Integrity)
 * tag as per-page metadata.  arch_do_swap_page() can restore this
 * metadata when a page is swapped back in.
 */
static inline void arch_do_swap_page(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     pte_t pte, pte_t oldpte)
{

}
#endif

#ifndef __HAVE_ARCH_UNMAP_ONE
/*
 * The counterpart to arch_do_swap_page(): arch_unmap_one() can save
 * per-page metadata (such as the SPARC ADI tag) when a page is being
 * swapped out, returning 0 on success.
 */
static inline int arch_unmap_one(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long addr,
				 pte_t orig_pte)
{
	return 0;
}
#endif

/*
 * Allow architectures to preserve additional metadata associated with
 * swapped-out pages.  The corresponding __HAVE_ARCH_SWAP_* macros and
 * function prototypes must be defined in the arch-specific
 * asm/pgtable.h file.
 */
#ifndef __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	return 0;
}
#endif

#ifndef __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
}

static inline void arch_swap_invalidate_area(int type)
{
}
#endif

#ifndef __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
}
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef pte_accessible
# define pte_accessible(mm, pte)	((void)(pte), 1)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef p4d_addr_end
#define p4d_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif
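
/*
 * Illustrative sketch of the canonical walker loop these macros are
 * designed for (compare the walkers in mm/memory.c, not code from this
 * header): pmd_addr_end() clamps each step to the next PMD boundary or
 * to the range end, whichever comes first, without overflowing at the
 * top of the address space:
 *
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	do {
 *		unsigned long next = pmd_addr_end(addr, end);
 *
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		... process [addr, next) ...
 *	} while (pmd++, addr = next, addr != end);
 */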

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *);
#else
#define p4d_clear_bad(p4d)        do { } while (0)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *);
#else
#define pud_clear_bad(pud)        do { } while (0)
#endif

void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int p4d_none_or_clear_bad(p4d_t *p4d)
{
	if (p4d_none(*p4d))
		return 1;
	if (unlikely(p4d_bad(*p4d))) {
		p4d_clear_bad(p4d);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time.  The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(vma, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
					   unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	__ptep_modify_prot_commit(vma, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
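
/*
 * Illustrative sketch of a protection change using the transaction API
 * (modelled on the pattern in mm/mprotect.c, not code from this
 * header); the PTE lock must be held across start/commit:
 *
 *	pte_t oldpte = ptep_modify_prot_start(vma, addr, ptep);
 *	pte_t newpte = pte_modify(oldpte, newprot);
 *
 *	ptep_modify_prot_commit(vma, addr, ptep, oldpte, newpte);
 *
 * Hardware A/D updates that would land between start and commit are not
 * lost, because start makes the entry non-present (or the architecture
 * provides an equivalent guarantee).
 */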
#endif /* CONFIG_MMU */

/*
 * No-op macros that just return the current protection value.  Defined
 * here because these macros can be used even if CONFIG_MMU is not
 * defined.
 */

#ifndef pgprot_nx
#define pgprot_nx(prot)	(prot)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

#ifndef pgprot_writethrough
#define pgprot_writethrough pgprot_noncached
#endif

#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif

#ifndef pgprot_mhp
#define pgprot_mhp(prot)	(prot)
#endif

#ifdef CONFIG_MMU
#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
		newprot = pgprot_noncached(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
		newprot = pgprot_writecombine(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
		newprot = pgprot_device(newprot);
	return newprot;
}
#endif
#endif /* CONFIG_MMU */

#ifndef pgprot_encrypted
#define pgprot_encrypted(prot)	(prot)
#endif

#ifndef pgprot_decrypted
#define pgprot_decrypted(prot)	(prot)
#endif

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may
 * batch the PTE updates which happen during this window.  Note that
 * using this interface requires that read hazards be removed from the
 * code: a read hazard could result in the direct mode hypervisor case,
 * since the actual write to the page tables may not yet have taken
 * place, so reads through a raw PTE pointer after it has been modified
 * are not guaranteed to be up to date.  This mode can only be entered
 * and left under the protection of the page table locks for all page
 * tables which may be modified.  In the UP case, this is required so
 * that preemption is disabled, and in the SMP case, it must synchronize
 * the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
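
/*
 * Illustrative sketch, not code from this header: batched PTE updates
 * bracketed by the lazy MMU hooks, with all affected page table locks
 * held for the duration:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr != end; ptep++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, ptep, pte_mkold(*ptep));
 *	arch_leave_lazy_mmu_mode();
 *
 * Reading a PTE through a raw pointer inside the window may return a
 * stale value on paravirt; that is the "read hazard" described above.
 */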

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests.  By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exits should always be
 * paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.  In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

#ifndef __HAVE_ARCH_PTE_SWP_EXCLUSIVE
/*
 * When replacing an anonymous page by a real (!non) swap entry, we
 * clear PG_anon_exclusive from the page and instead remember whether
 * the flag was set in the swp pte.  During fork(), we have to mark the
 * entry as !exclusive (and copy-on-write).  On swapin, we use that
 * information to restore PG_anon_exclusive, which is very helpful in
 * cases where we might have additional references on a page and
 * wouldn't be able to detect exclusivity otherwise.
 *
 * These functions don't apply to non-swap entries (e.g., migration,
 * hwpoison, ...).
 */
static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return pte;
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return false;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return pte;
}
#endif

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif
#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
static inline int pte_soft_dirty(pte_t pte)
{
	return 0;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return 0;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interfaces that can be used by architecture code to keep track of
 * the memory type of pfn mappings specified by remap_pfn_range() and
 * vmf_insert_pfn().
 */

/*
 * track_pfn_remap is called when a _new_ pfn mapping is being established
 * by remap_pfn_range() for the physical range indicated by pfn and size.
 */
static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
				  unsigned long pfn, unsigned long addr,
				  unsigned long size)
{
	return 0;
}

/*
 * track_pfn_insert is called when a _new_ single pfn is established
 * by vmf_insert_pfn().
 */
static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
				    pfn_t pfn)
{
}

/*
 * track_pfn_copy is called when a vma that is covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size
 * or for the entire vma (in which case pfn and size are zero).
 */
static inline void untrack_pfn(struct vm_area_struct *vma,
			       unsigned long pfn, unsigned long size)
{
}

/*
 * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
 */
static inline void untrack_pfn_moved(struct vm_area_struct *vma)
{
}
#else
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
			   unsigned long pfn, unsigned long addr,
			   unsigned long size);
extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
			     pfn_t pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size);
extern void untrack_pfn_moved(struct vm_area_struct *vma);
#endif
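
/*
 * Illustrative sketch of how the tracking hooks line up with the pfnmap
 * life cycle (the real callers live in mm/memory.c and mm/mremap.c, not
 * here): remap_pfn_range() calls track_pfn_remap() before installing
 * the entries, and untrack_pfn() on failure or at unmap time:
 *
 *	err = track_pfn_remap(vma, &prot, pfn, addr, size);
 *	if (err)
 *		return err;
 *	... install the PTEs with prot ...
 *	if (failed)
 *		untrack_pfn(vma, pfn, size);
 */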

#ifdef CONFIG_MMU
#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#else
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	return pfn == zero_pfn;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	extern unsigned long zero_pfn;
	return zero_pfn;
}
#endif
#else
static inline int is_zero_pfn(unsigned long pfn)
{
	return 0;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_MMU

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
#ifndef pmd_write
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* pmd_write */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif /* pud_write */

#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
	(defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
	 !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
static inline int pud_trans_huge(pud_t pud)
{
	return 0;
}
#endif

/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
{
	pud_t pudval = READ_ONCE(*pud);

	if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
		return 1;
	if (unlikely(pud_bad(pudval))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

/* See pmd_trans_unstable for discussion. */
static inline int pud_trans_unstable(pud_t *pud)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
	return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
#else
	return 0;
#endif
}

#ifndef pmd_read_atomic
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	/*
	 * Depend on the compiler for an atomic pmd read.  This is only
	 * sufficient where a pmd cannot change under the reader in a way
	 * that yields a torn value; 32-bit architectures with 64-bit,
	 * possibly-huge pmds must override this with a genuinely atomic
	 * read.
	 */
	return *pmdp;
}
#endif

#ifndef arch_needs_pgtable_deposit
#define arch_needs_pgtable_deposit() (false)
#endif

/*
 * This function is meant to be used by sites walking pagetables with
 * the mmap_lock held in read mode to protect against MADV_DONTNEED and
 * transhuge page faults.  MADV_DONTNEED can convert a transhuge pmd
 * into a null pmd and the transhuge page fault can convert a null pmd
 * into an hugepmd or into a regular pmd (if the hugepage allocation
 * fails).  While holding the mmap_lock in read mode the pmd becomes
 * stable and stops changing under us only if it's not null and not a
 * transhuge pmd.  When those races occur and this function makes a
 * difference vs the standard pmd_none_or_clear_bad, the result is
 * undefined, so behaving as if the pmd was none is safe (because it
 * can return none anyway).  The compiler level barrier() is critically
 * important to compute the two checks atomically on the same pmdval.
 *
 * For 32bit kernels with a 64bit large pmd_t this automatically takes
 * care of reading the pmd atomically to avoid SMP race conditions
 * against pmd_populate() when the mmap_lock is held for reading by the
 * caller (a special atomic read not done by "gcc" as in the generic
 * version above, is also needed when THP is enabled in the kernel).
 */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);
	/*
	 * The barrier will stabilize the pmdval in a register or on
	 * the stack so that it will stop changing under the code.
	 *
	 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
	 * pmd_read_atomic is allowed to return a not atomic pmdval
	 * (for example pointing to an hugepage that has never been
	 * mapped in the pmd).  The below checks will only care about
	 * the low part of the pmd with 32bit PAE x86 anyway, with the
	 * exception of pmd_none().  So the important thing is that if
	 * the low part of the pmd is found null, the high part will
	 * be also null or the pmd_none() check below would be
	 * confused.
	 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif
	/*
	 * !pmd_present() checks for pmd migration entries.
	 *
	 * The complete check uses is_pmd_migration_entry() in linux/swapops.h,
	 * but using that would require moving this function and
	 * pmd_trans_unstable() to linux/swapops.h to resolve the dependency,
	 * which is too much code movement.
	 *
	 * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
	 * because !pmd_present() pages can only be under migration, not
	 * swapped out.
	 *
	 * pmd_none() is kept for future condition checks on pmd migration
	 * entries and to avoid confusion with this function's name, although
	 * it is redundant with !pmd_present().
	 */
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
		(IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

/*
 * This is a noop if Transparent Hugepage Support is not built into
 * the kernel.  Otherwise it is equivalent to
 * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
 * places that already verified the pmd is not none and they want to
 * walk ptes while holding the mmap lock in read mode (write mode
 * doesn't need this).  If THP is not enabled, the pmd can't go away
 * under the code even if MADV_DONTNEED runs, but if THP is enabled we
 * need to run a pmd_trans_unstable before walking the ptes after
 * split_huge_pmd returns (because it may have run while the pmd became
 * null, but then a page fault can map in a THP and not a regular page).
 */
static inline int pmd_trans_unstable(pmd_t *pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return pmd_none_or_trans_huge_or_clear_bad(pmd);
#else
	return 0;
#endif
}

/*
 * The ordering of these checks is important for pmds with _page_devmap
 * set.  If we check pmd_trans_unstable() first we will trip the
 * bad_pmd() check inside of pmd_none_or_trans_huge_or_clear_bad(); this
 * will end up correctly returning 1, but not before it spams dmesg with
 * the pmd_clear_bad output.
 */
static inline int pmd_devmap_trans_unstable(pmd_t *pmd)
{
	return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
}
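
/*
 * Illustrative sketch of the intended call site (modelled on the fault
 * path, not code from this header): a walker that is about to map the
 * PTE level re-validates the pmd first, treating an unstable pmd as
 * "retry or bail", never as a pointer to a PTE page:
 *
 *	if (pmd_devmap_trans_unstable(pmd))
 *		return 0;
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 * Checking pmd_devmap() before pmd_trans_unstable(), as the helper
 * does, avoids the spurious pmd_clear_bad() noise described above.
 */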

#ifndef CONFIG_NUMA_BALANCING
/*
 * Technically a PTE can be PROTNONE even when not doing NUMA balancing,
 * but the only case the core kernel cares about is NUMA hinting faults,
 * and the bit is only ever set when the VMA is accessible.  Without
 * NUMA balancing there is nothing to detect, so these are constant 0.
 */
static inline int pte_protnone(pte_t pte)
{
	return 0;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#endif /* CONFIG_MMU */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP

#ifndef __PAGETABLE_P4D_FOLDED
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
void p4d_clear_huge(p4d_t *p4d);
#else
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline void p4d_clear_huge(p4d_t *p4d) { }
#endif /* !__PAGETABLE_P4D_FOLDED */

int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
int pud_free_pmd_page(pud_t *pud, unsigned long addr);
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline void p4d_clear_huge(p4d_t *p4d) { }
static inline int pud_clear_huge(pud_t *pud)
{
	return 0;
}
static inline int pmd_clear_huge(pmd_t *pmd)
{
	return 0;
}
static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;
}
static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	return 0;
}
static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Architectures with special requirements for evicting THP backing TLB
 * entries can implement this.  It can also help optimize the normal TLB
 * flush in the THP regime: a stock flush_tlb_range() typically nukes
 * the entire TLB when the flush span exceeds a threshold, which will
 * likely be true for a single huge page, so a single THP flush would
 * needlessly invalidate the entire TLB.
 * See e.g. arch/arc: flush_pmd_tlb_range.
 */
#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#define flush_pud_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#else
#define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
#define flush_pud_tlb_range(vma, addr, end)	BUILD_BUG()
#endif
#endif

struct file;
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
			unsigned long size, pgprot_t *vma_prot);

#ifndef CONFIG_X86_ESPFIX64
static inline void init_espfix_bsp(void) { }
#endif

extern void __init pgtable_cache_init(void);

#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
	return true;
}

static inline bool arch_has_pfn_modify_check(void)
{
	return false;
}
#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */

/*
 * Architecture PAGE_KERNEL_* fallbacks
 *
 * Some architectures don't define certain PAGE_KERNEL_* flags.  This is
 * either because they really don't support them, or the port needs to
 * be updated to reflect the required functionality.  Below are a set of
 * relatively safe fallbacks, as best effort, which we can count on in
 * lieu of the architectures not defining them on their own yet.
 */
#ifndef PAGE_KERNEL_RO
# define PAGE_KERNEL_RO PAGE_KERNEL
#endif

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/*
 * Page Table Modification bits for pgtbl_mod_mask.
 *
 * These are used by the p?d_alloc_track*() set of functions and in the
 * generic vmalloc/ioremap code to track at which page-table levels
 * entries have been modified.  Based on that, the code can better
 * decide when page-table changes need to be synchronized to other page
 * tables in the system.
 */
#define		__PGTBL_PGD_MODIFIED	0
#define		__PGTBL_P4D_MODIFIED	1
#define		__PGTBL_PUD_MODIFIED	2
#define		__PGTBL_PMD_MODIFIED	3
#define		__PGTBL_PTE_MODIFIED	4

#define		PGTBL_PGD_MODIFIED	BIT(__PGTBL_PGD_MODIFIED)
#define		PGTBL_P4D_MODIFIED	BIT(__PGTBL_P4D_MODIFIED)
#define		PGTBL_PUD_MODIFIED	BIT(__PGTBL_PUD_MODIFIED)
#define		PGTBL_PMD_MODIFIED	BIT(__PGTBL_PMD_MODIFIED)
#define		PGTBL_PTE_MODIFIED	BIT(__PGTBL_PTE_MODIFIED)

/* Page-Table Modification Mask */
typedef unsigned int pgtbl_mod_mask;

#endif /* !__ASSEMBLY__ */

#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
#ifdef CONFIG_PHYS_ADDR_T_64BIT
/*
 * ZSMALLOC needs to know the highest PFN on 32-bit architectures with
 * physical address space extension; an architecture using 64-bit
 * physical addresses on a 32-bit kernel must therefore define the limit
 * itself, while plain 32-bit physical addressing can default to 32.
 */
#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
#else
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
#endif

#ifndef has_transparent_hugepage
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
#else
#define has_transparent_hugepage() 0
#endif
#endif

/*
 * On some architectures it depends on the mm if the p4d/pud or pmd
 * layer of the page table hierarchy is folded or not.
 */
#ifndef mm_p4d_folded
#define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
#endif

#ifndef mm_pud_folded
#define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
#endif

#ifndef mm_pmd_folded
#define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
#endif

#ifndef p4d_offset_lockless
#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
#endif
#ifndef pud_offset_lockless
#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
#endif
#ifndef pmd_offset_lockless
#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
#endif

/*
 * p?d_leaf() - true if this entry is a final mapping to a physical page.
 * This differs from p?d_huge() by the fact that they are always available
 * (if the architecture supports large pages at the appropriate level)
 * even if CONFIG_HUGETLB_PAGE is not defined.  Only meaningful when
 * called on a valid entry.
 */
#ifndef pgd_leaf
#define pgd_leaf(x)	0
#endif
#ifndef p4d_leaf
#define p4d_leaf(x)	0
#endif
#ifndef pud_leaf
#define pud_leaf(x)	0
#endif
#ifndef pmd_leaf
#define pmd_leaf(x)	0
#endif
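
/*
 * Illustrative sketch, not code from this header: a generic walker can
 * use the p?d_leaf() tests to stop descending at a huge mapping:
 *
 *	if (pud_leaf(*pud)) {
 *		... the PUD maps a PUD_SIZE block; no pmd level below ...
 *	} else {
 *		pmd = pmd_offset(pud, addr);
 *		...
 *	}
 *
 * Any accessor used on the leaf entry itself (e.g. a pud_pfn()) is
 * arch-provided on architectures that support leaves at that level; it
 * is not defined by this header.
 */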

#ifndef pgd_leaf_size
#define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
#endif
#ifndef p4d_leaf_size
#define p4d_leaf_size(x) P4D_SIZE
#endif
#ifndef pud_leaf_size
#define pud_leaf_size(x) PUD_SIZE
#endif
#ifndef pmd_leaf_size
#define pmd_leaf_size(x) PMD_SIZE
#endif
#ifndef pte_leaf_size
#define pte_leaf_size(x) PAGE_SIZE
#endif

/*
 * Some architectures have MMUs that are configurable or selectable at
 * boot time.  These lead to variable PTRS_PER_x.  For statically
 * allocated arrays it helps to have a static maximum value.
 */
#ifndef MAX_PTRS_PER_PTE
#define MAX_PTRS_PER_PTE PTRS_PER_PTE
#endif

#ifndef MAX_PTRS_PER_PMD
#define MAX_PTRS_PER_PMD PTRS_PER_PMD
#endif

#ifndef MAX_PTRS_PER_PUD
#define MAX_PTRS_PER_PUD PTRS_PER_PUD
#endif

#ifndef MAX_PTRS_PER_P4D
#define MAX_PTRS_PER_P4D PTRS_PER_P4D
#endif

/*
 * DECLARE_VM_GET_PAGE_PROT instantiates the generic vm_get_page_prot()
 * for an architecture: it maps a vma's VM_READ/VM_WRITE/VM_EXEC/
 * VM_SHARED flag combination to a page protection value via the
 * arch-provided protection_map[] array.  Architectures with special
 * requirements define vm_get_page_prot() themselves instead of using
 * this macro.
 */
#define DECLARE_VM_GET_PAGE_PROT					\
pgprot_t vm_get_page_prot(unsigned long vm_flags)			\
{									\
		return protection_map[vm_flags &			\
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];	\
}									\
EXPORT_SYMBOL(vm_get_page_prot);
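
/*
 * Illustrative sketch of how an architecture adopts the generic
 * definition (modelled on the arch conversions, not code from this
 * header): the arch provides its protection_map[] and then
 * instantiates vm_get_page_prot() in one of its C files:
 *
 *	pgprot_t protection_map[16] __ro_after_init = {
 *		[VM_NONE]	= PAGE_NONE,
 *		[VM_READ]	= PAGE_READONLY,
 *		...
 *	};
 *	DECLARE_VM_GET_PAGE_PROT
 */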

#endif /* _LINUX_PGTABLE_H */