/*
 * Sanity-check architecture page table helpers. Each test below builds
 * page table entries from known protection values and pfns, then uses
 * WARN_ON() to verify that the generic pxx_*() accessors and modifiers
 * keep the semantics that core MM code expects from them.
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)
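
/*
 * Bits that the random values ORed into entries below must leave alone:
 * on s390 the low four bits of an entry encode its type, and 64-bit
 * powerpc reserves bit 62, so RANDOM_ORVALUE is built to avoid both.
 */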
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE	GENMASK(7, 0)
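
/*
 * Everything the individual tests need: the mm and vma used by the
 * "advanced" tests, pointers into each level of the test page table,
 * and the pfns backing the entries under test.
 */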
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);
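
	/*
	 * Check the freshly created entry first: write protecting a clean
	 * PTE must not make it report dirty, even where the architecture
	 * tracks the dirty state through the writable bit.
	 */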
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;
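
	/*
	 * The advanced tests drive the ptep_*() helpers against a live
	 * mapping, so they are skipped when no backing page was allocated
	 * for args->pte_pfn in init_args().
	 */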
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}

static void __init pte_savedwrite_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);
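
	/*
	 * Check the freshly created entry first: write protecting a clean
	 * PMD must not make it report dirty (see the matching PTE check).
	 */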
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
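
	/*
	 * A huge PMD does not point to a lower level page table, so it
	 * must qualify as pmd_bad().
	 */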
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PMD advanced\n");
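
	/* Operate on a huge-page-aligned address. */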
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_young(pmd));
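
	/* Clear the entry and withdraw the deposited page table. */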
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}

static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
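
	/* A PMD based huge page is a leaf entry. */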
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

static void __init pmd_savedwrite_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD saved write\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none);
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);
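
	/*
	 * Check the freshly created entry first: write protecting a clean
	 * PUD must not make it report dirty (see the matching PTE check).
	 */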
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;
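
	/*
	 * A huge PUD does not point to a lower level page table, so it
	 * must qualify as pud_bad().
	 */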
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	unsigned long vaddr = args->vaddr;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PUD advanced\n");
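
	/* Operate on a huge-page-aligned address. */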
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
#endif
	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(args->mm, vaddr, args->pudp, 1);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
#endif

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkyoung(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
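
	/* A PUD based huge page is a leaf entry. */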
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif
#else
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pmd_savedwrite_tests(struct pgtable_debug_args *args) { }
#endif

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot))
		return;

	pr_debug("Validating PMD huge\n");
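
	/*
	 * Clear the entry first: pmd_set_huge() can refuse to overwrite a
	 * populated non-leaf entry on some architectures (x86, for one).
	 */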
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(args->page_prot))
		return;

	pr_debug("Validating PUD huge\n");
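
	/*
	 * Clear the entry first: pud_set_huge() can refuse to overwrite a
	 * populated non-leaf entry on some architectures.
	 */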
	WRITE_ONCE(*args->pudp, __pud(0));
	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
	WARN_ON(!pud_clear_huge(args->pudp));
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
}
#else
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
#endif

static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
	pud_t pud = READ_ONCE(*args->pudp);

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pudp, pud);
	pud_clear(args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD populate\n");
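
	/*
	 * The populated entry points to a lower level page table page, so
	 * it must not qualify as pud_bad().
	 */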
	pud_populate(args->mm, args->pudp, args->start_pmdp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_bad(pud));
}
#else
static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#endif

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d = READ_ONCE(*args->p4dp);

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->p4dp, p4d);
	p4d_clear(args->p4dp);
	p4d = READ_ONCE(*args->p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D populate\n");
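
	/*
	 * The populated entry points to a lower level page table page, so
	 * it must not qualify as p4d_bad(). Start from known-clear entries.
	 */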
	pud_clear(args->pudp);
	p4d_clear(args->p4dp);
	p4d_populate(args->mm, args->p4dp, args->start_pudp);
	p4d = READ_ONCE(*args->p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd = READ_ONCE(*(args->pgdp));

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pgdp, pgd);
	pgd_clear(args->pgdp);
	pgd = READ_ONCE(*args->pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD populate\n");
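
	/*
	 * The populated entry points to a lower level page table page, so
	 * it must not qualify as pgd_bad(). Start from known-clear entries.
	 */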
	p4d_clear(args->p4dp);
	pgd_clear(args->pgdp);
	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
	pgd = READ_ONCE(*args->pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else
static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
#endif

static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	barrier();
	ptep_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = READ_ONCE(*args->pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pmdp, pmd);
	pmd_clear(args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
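
	/*
	 * The populated entry points to a pte page table page, so it must
	 * not qualify as pmd_bad().
	 */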
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif
#else
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif
#else
static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif

static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif

static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
{
#ifdef __HAVE_ARCH_PTE_SWP_EXCLUSIVE
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE swap exclusive\n");
	pte = pte_swp_mkexclusive(pte);
	WARN_ON(!pte_swp_exclusive(pte));
	pte = pte_swp_clear_exclusive(pte);
	WARN_ON(pte_swp_exclusive(pte));
#endif
}

static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
}
#else
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif

static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");
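
	/*
	 * A dedicated test page is used here because migration entries may
	 * only be created from a page that is currently locked.
	 */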
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	page = pfn_to_page(args->fixed_pmd_pfn);
	pte = mk_huge_pte(page, args->page_prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif
}
#else
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
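
	/*
	 * A huge PMD must register as trans huge. Where the architecture
	 * does not provide its own pmdp_invalidate(), pmd_mkinvalid() must
	 * also leave the entry reporting trans huge and present, so a THP
	 * being split is never mistaken for a pointer to a page table.
	 */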
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));
}
#else
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif
#else
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif

static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;
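
	/* Free the (huge) pages that back the test mappings. */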
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    has_transparent_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_pages(page, 0);

		args->pte_pfn = ULONG_MAX;
	}
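
	/* Free the page table pages that were saved in init_args(). */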
	if (args->start_ptep) {
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp)
		p4d_free(args->mm, args->start_p4dp);
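
	/* Release the vma and the mm itself. */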
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmdrop(args->mm);
}

static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

#ifdef CONFIG_CONTIG_ALLOC
	if (order >= MAX_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order < MAX_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}

static int __init init_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;
	phys_addr_t phys;
	int ret = 0;
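
	/*
	 * Start from a clean slate. page_prot_none carries the PROT_NONE
	 * protection needed by the pxx_protnone_tests(), and every pfn is
	 * marked invalid until it has actually been set up below.
	 */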
	memset(args, 0, sizeof(*args));
	args->vaddr = get_random_vaddr();
	args->page_prot = vm_get_page_prot(VMFLAGS);
	args->page_prot_none = vm_get_page_prot(VM_NONE);
	args->is_contiguous_page = false;
	args->pud_pfn = ULONG_MAX;
	args->pmd_pfn = ULONG_MAX;
	args->pte_pfn = ULONG_MAX;
	args->fixed_pgd_pfn = ULONG_MAX;
	args->fixed_p4d_pfn = ULONG_MAX;
	args->fixed_pud_pfn = ULONG_MAX;
	args->fixed_pmd_pfn = ULONG_MAX;
	args->fixed_pte_pfn = ULONG_MAX;

	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}
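
	/*
	 * Allocate the page table entries for the test address and save the
	 * first entry of each level so everything can be freed again in
	 * destroy_args().
	 */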
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
	WARN_ON(!args->start_ptep);
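
	/*
	 * The fixed pfns are derived from a kernel text symbol. The pfns
	 * for the higher levels are produced by masking low bits off the
	 * real one, so they may not all be valid, which is fine: the basic
	 * tests never dereference their backing pages.
	 */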
	phys = __pa_symbol(&start_kernel);
	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));
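
	/*
	 * Allocate real (huge) pages for the advanced tests, which access
	 * the backing memory. Tests whose page could not be allocated are
	 * simply skipped later on.
	 */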
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_pages(GFP_KERNEL, 0);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}

static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;
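
	/*
	 * Run the basic tests once for every protection value a vma can
	 * request, from VM_NONE up to VM_SHARED | VM_EXEC | VM_WRITE |
	 * VM_READ, so the checks hold regardless of the starting protection.
	 */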
#define VM_FLAGS_START	(VM_NONE)
#define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)

	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}
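
	/*
	 * The P4D and PGD level tests only check pxx_same() and do not
	 * depend on the protection value, so they stay out of the loop.
	 */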
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_savedwrite_tests(&args);
	pmd_savedwrite_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_devmap_tests(&args);
	pmd_devmap_tests(&args);
	pud_devmap_tests(&args);

	pte_soft_dirty_tests(&args);
	pmd_soft_dirty_tests(&args);
	pte_swap_soft_dirty_tests(&args);
	pmd_swap_soft_dirty_tests(&args);

	pte_swap_exclusive_tests(&args);

	pte_swap_tests(&args);
	pmd_swap_tests(&args);

	swap_migration_tests(&args);

	pmd_thp_tests(&args);
	pud_thp_tests(&args);

	hugetlb_basic_tests(&args);
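
	/*
	 * Page table modifying tests follow. They need to hold the
	 * appropriate page table locks while touching live entries.
	 */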
	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
	pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(args.mm, args.pmdp);
	pmd_clear_tests(&args);
	pmd_advanced_tests(&args);
	pmd_huge_tests(&args);
	pmd_populate_tests(&args);
	spin_unlock(ptl);

	ptl = pud_lock(args.mm, args.pudp);
	pud_clear_tests(&args);
	pud_advanced_tests(&args);
	pud_huge_tests(&args);
	pud_populate_tests(&args);
	spin_unlock(ptl);

	spin_lock(&(args.mm->page_table_lock));
	p4d_clear_tests(&args);
	pgd_clear_tests(&args);
	p4d_populate_tests(&args);
	pgd_populate_tests(&args);
	spin_unlock(&(args.mm->page_table_lock));

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);