Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 
0003 #include <linux/pagewalk.h>
0004 #include <linux/ptdump.h>
0005 #include <linux/kasan.h>
0006 
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * Fast path for KASAN builds: every KASAN shadow page table ultimately
 * resolves to kasan_early_shadow_page, so report a single PTE-level
 * entry here and tell the walker not to descend any further. Walking
 * the shadow tables entry by entry costs dozens of seconds (minutes
 * with a 5-level configuration) when checking for W+X mappings or
 * reading the kernel_page_tables debugfs file.
 */
static inline int note_kasan_page_table(struct mm_walk *walk,
					unsigned long addr)
{
	struct ptdump_state *state = walk->private;

	state->note_page(state, addr, 4, pte_val(kasan_early_shadow_pte[0]));

	walk->action = ACTION_CONTINUE;

	return 0;
}
#endif
0027 
0028 static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
0029                 unsigned long next, struct mm_walk *walk)
0030 {
0031     struct ptdump_state *st = walk->private;
0032     pgd_t val = READ_ONCE(*pgd);
0033 
0034 #if CONFIG_PGTABLE_LEVELS > 4 && \
0035         (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
0036     if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
0037         return note_kasan_page_table(walk, addr);
0038 #endif
0039 
0040     if (st->effective_prot)
0041         st->effective_prot(st, 0, pgd_val(val));
0042 
0043     if (pgd_leaf(val)) {
0044         st->note_page(st, addr, 0, pgd_val(val));
0045         walk->action = ACTION_CONTINUE;
0046     }
0047 
0048     return 0;
0049 }
0050 
0051 static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
0052                 unsigned long next, struct mm_walk *walk)
0053 {
0054     struct ptdump_state *st = walk->private;
0055     p4d_t val = READ_ONCE(*p4d);
0056 
0057 #if CONFIG_PGTABLE_LEVELS > 3 && \
0058         (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
0059     if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
0060         return note_kasan_page_table(walk, addr);
0061 #endif
0062 
0063     if (st->effective_prot)
0064         st->effective_prot(st, 1, p4d_val(val));
0065 
0066     if (p4d_leaf(val)) {
0067         st->note_page(st, addr, 1, p4d_val(val));
0068         walk->action = ACTION_CONTINUE;
0069     }
0070 
0071     return 0;
0072 }
0073 
0074 static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
0075                 unsigned long next, struct mm_walk *walk)
0076 {
0077     struct ptdump_state *st = walk->private;
0078     pud_t val = READ_ONCE(*pud);
0079 
0080 #if CONFIG_PGTABLE_LEVELS > 2 && \
0081         (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
0082     if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
0083         return note_kasan_page_table(walk, addr);
0084 #endif
0085 
0086     if (st->effective_prot)
0087         st->effective_prot(st, 2, pud_val(val));
0088 
0089     if (pud_leaf(val)) {
0090         st->note_page(st, addr, 2, pud_val(val));
0091         walk->action = ACTION_CONTINUE;
0092     }
0093 
0094     return 0;
0095 }
0096 
0097 static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
0098                 unsigned long next, struct mm_walk *walk)
0099 {
0100     struct ptdump_state *st = walk->private;
0101     pmd_t val = READ_ONCE(*pmd);
0102 
0103 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
0104     if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
0105         return note_kasan_page_table(walk, addr);
0106 #endif
0107 
0108     if (st->effective_prot)
0109         st->effective_prot(st, 3, pmd_val(val));
0110     if (pmd_leaf(val)) {
0111         st->note_page(st, addr, 3, pmd_val(val));
0112         walk->action = ACTION_CONTINUE;
0113     }
0114 
0115     return 0;
0116 }
0117 
0118 static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
0119                 unsigned long next, struct mm_walk *walk)
0120 {
0121     struct ptdump_state *st = walk->private;
0122     pte_t val = ptep_get(pte);
0123 
0124     if (st->effective_prot)
0125         st->effective_prot(st, 4, pte_val(val));
0126 
0127     st->note_page(st, addr, 4, pte_val(val));
0128 
0129     return 0;
0130 }
0131 
0132 static int ptdump_hole(unsigned long addr, unsigned long next,
0133                int depth, struct mm_walk *walk)
0134 {
0135     struct ptdump_state *st = walk->private;
0136 
0137     st->note_page(st, addr, depth, 0);
0138 
0139     return 0;
0140 }
0141 
/* Page-walk callbacks: one per table level, plus a handler for holes. */
static const struct mm_walk_ops ptdump_ops = {
	.pgd_entry	= ptdump_pgd_entry,
	.p4d_entry	= ptdump_p4d_entry,
	.pud_entry	= ptdump_pud_entry,
	.pmd_entry	= ptdump_pmd_entry,
	.pte_entry	= ptdump_pte_entry,
	.pte_hole	= ptdump_hole,
};
0150 
0151 void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
0152 {
0153     const struct ptdump_range *range = st->range;
0154 
0155     mmap_write_lock(mm);
0156     while (range->start != range->end) {
0157         walk_page_range_novma(mm, range->start, range->end,
0158                       &ptdump_ops, pgd, st);
0159         range++;
0160     }
0161     mmap_write_unlock(mm);
0162 
0163     /* Flush out the last page */
0164     st->note_page(st, 0, -1, 0);
0165 }