0001 // SPDX-License-Identifier: GPL-2.0
0002 
0003 /*
0004  * Copyright (c) 2021, Google LLC.
0005  * Pasha Tatashin <pasha.tatashin@soleen.com>
0006  */
0007 #include <linux/mm.h>
0008 #include <linux/page_table_check.h>
0009 
0010 #undef pr_fmt
0011 #define pr_fmt(fmt) "page_table_check: " fmt
0012 
/*
 * Per-page state kept in page_ext: counts of user page-table mappings of the
 * page, split by mapping type. A page must never be mapped as anonymous and
 * file-backed at the same time.
 */
struct page_table_check {
	atomic_t anon_map_count;	/* user mappings as anonymous memory */
	atomic_t file_map_count;	/* user mappings as file-backed memory */
};
0017 
/*
 * Boot-time enablement flag. Defaults to the Kconfig enforcement setting and
 * may be overridden by the "page_table_check" early parameter below.
 * __initdata: only read during early init (need/init callbacks).
 */
static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

/* Checking stays disabled until init_page_table_check() flips this key. */
DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);
0023 
/* Parse "page_table_check=on|off" (and strtobool-accepted spellings). */
static int __init early_page_table_check_param(char *buf)
{
	return strtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);
0030 
0031 static bool __init need_page_table_check(void)
0032 {
0033     return __page_table_check_enabled;
0034 }
0035 
0036 static void __init init_page_table_check(void)
0037 {
0038     if (!__page_table_check_enabled)
0039         return;
0040     static_branch_disable(&page_table_check_disabled);
0041 }
0042 
/* Registration with the page_ext framework: per-page state size and hooks. */
struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
};
0048 
0049 static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
0050 {
0051     BUG_ON(!page_ext);
0052     return (void *)(page_ext) + page_table_check_ops.offset;
0053 }
0054 
0055 /*
0056  * An enty is removed from the page table, decrement the counters for that page
0057  * verify that it is of correct type and counters do not become negative.
0058  */
0059 static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
0060                    unsigned long pfn, unsigned long pgcnt)
0061 {
0062     struct page_ext *page_ext;
0063     struct page *page;
0064     unsigned long i;
0065     bool anon;
0066 
0067     if (!pfn_valid(pfn))
0068         return;
0069 
0070     page = pfn_to_page(pfn);
0071     page_ext = lookup_page_ext(page);
0072     anon = PageAnon(page);
0073 
0074     for (i = 0; i < pgcnt; i++) {
0075         struct page_table_check *ptc = get_page_table_check(page_ext);
0076 
0077         if (anon) {
0078             BUG_ON(atomic_read(&ptc->file_map_count));
0079             BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
0080         } else {
0081             BUG_ON(atomic_read(&ptc->anon_map_count));
0082             BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
0083         }
0084         page_ext = page_ext_next(page_ext);
0085     }
0086 }
0087 
0088 /*
0089  * A new enty is added to the page table, increment the counters for that page
0090  * verify that it is of correct type and is not being mapped with a different
0091  * type to a different process.
0092  */
0093 static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
0094                  unsigned long pfn, unsigned long pgcnt,
0095                  bool rw)
0096 {
0097     struct page_ext *page_ext;
0098     struct page *page;
0099     unsigned long i;
0100     bool anon;
0101 
0102     if (!pfn_valid(pfn))
0103         return;
0104 
0105     page = pfn_to_page(pfn);
0106     page_ext = lookup_page_ext(page);
0107     anon = PageAnon(page);
0108 
0109     for (i = 0; i < pgcnt; i++) {
0110         struct page_table_check *ptc = get_page_table_check(page_ext);
0111 
0112         if (anon) {
0113             BUG_ON(atomic_read(&ptc->file_map_count));
0114             BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
0115         } else {
0116             BUG_ON(atomic_read(&ptc->anon_map_count));
0117             BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
0118         }
0119         page_ext = page_ext_next(page_ext);
0120     }
0121 }
0122 
0123 /*
0124  * page is on free list, or is being allocated, verify that counters are zeroes
0125  * crash if they are not.
0126  */
0127 void __page_table_check_zero(struct page *page, unsigned int order)
0128 {
0129     struct page_ext *page_ext = lookup_page_ext(page);
0130     unsigned long i;
0131 
0132     BUG_ON(!page_ext);
0133     for (i = 0; i < (1ul << order); i++) {
0134         struct page_table_check *ptc = get_page_table_check(page_ext);
0135 
0136         BUG_ON(atomic_read(&ptc->anon_map_count));
0137         BUG_ON(atomic_read(&ptc->file_map_count));
0138         page_ext = page_ext_next(page_ext);
0139     }
0140 }
0141 
0142 void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
0143                   pte_t pte)
0144 {
0145     if (&init_mm == mm)
0146         return;
0147 
0148     if (pte_user_accessible_page(pte)) {
0149         page_table_check_clear(mm, addr, pte_pfn(pte),
0150                        PAGE_SIZE >> PAGE_SHIFT);
0151     }
0152 }
0153 EXPORT_SYMBOL(__page_table_check_pte_clear);
0154 
0155 void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
0156                   pmd_t pmd)
0157 {
0158     if (&init_mm == mm)
0159         return;
0160 
0161     if (pmd_user_accessible_page(pmd)) {
0162         page_table_check_clear(mm, addr, pmd_pfn(pmd),
0163                        PMD_SIZE >> PAGE_SHIFT);
0164     }
0165 }
0166 EXPORT_SYMBOL(__page_table_check_pmd_clear);
0167 
0168 void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
0169                   pud_t pud)
0170 {
0171     if (&init_mm == mm)
0172         return;
0173 
0174     if (pud_user_accessible_page(pud)) {
0175         page_table_check_clear(mm, addr, pud_pfn(pud),
0176                        PUD_SIZE >> PAGE_SHIFT);
0177     }
0178 }
0179 EXPORT_SYMBOL(__page_table_check_pud_clear);
0180 
0181 void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
0182                 pte_t *ptep, pte_t pte)
0183 {
0184     if (&init_mm == mm)
0185         return;
0186 
0187     __page_table_check_pte_clear(mm, addr, *ptep);
0188     if (pte_user_accessible_page(pte)) {
0189         page_table_check_set(mm, addr, pte_pfn(pte),
0190                      PAGE_SIZE >> PAGE_SHIFT,
0191                      pte_write(pte));
0192     }
0193 }
0194 EXPORT_SYMBOL(__page_table_check_pte_set);
0195 
0196 void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
0197                 pmd_t *pmdp, pmd_t pmd)
0198 {
0199     if (&init_mm == mm)
0200         return;
0201 
0202     __page_table_check_pmd_clear(mm, addr, *pmdp);
0203     if (pmd_user_accessible_page(pmd)) {
0204         page_table_check_set(mm, addr, pmd_pfn(pmd),
0205                      PMD_SIZE >> PAGE_SHIFT,
0206                      pmd_write(pmd));
0207     }
0208 }
0209 EXPORT_SYMBOL(__page_table_check_pmd_set);
0210 
0211 void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
0212                 pud_t *pudp, pud_t pud)
0213 {
0214     if (&init_mm == mm)
0215         return;
0216 
0217     __page_table_check_pud_clear(mm, addr, *pudp);
0218     if (pud_user_accessible_page(pud)) {
0219         page_table_check_set(mm, addr, pud_pfn(pud),
0220                      PUD_SIZE >> PAGE_SHIFT,
0221                      pud_write(pud));
0222     }
0223 }
0224 EXPORT_SYMBOL(__page_table_check_pud_set);
0225 
0226 void __page_table_check_pte_clear_range(struct mm_struct *mm,
0227                     unsigned long addr,
0228                     pmd_t pmd)
0229 {
0230     if (&init_mm == mm)
0231         return;
0232 
0233     if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
0234         pte_t *ptep = pte_offset_map(&pmd, addr);
0235         unsigned long i;
0236 
0237         for (i = 0; i < PTRS_PER_PTE; i++) {
0238             __page_table_check_pte_clear(mm, addr, *ptep);
0239             addr += PAGE_SIZE;
0240             ptep++;
0241         }
0242         pte_unmap(ptep - PTRS_PER_PTE);
0243     }
0244 }