// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN shadow initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgalloc.h>

#include "kasan.h"

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory: the entire shadow region is
 *     populated with this page before we are able to set up the normal
 *     shadow memory.
 *   - Later it is reused as the zero shadow to cover large ranges of
 *     memory that may be accessed but are not tracked by KASAN
 *     (vmalloc/vmemmap ...).
 */
unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;

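/*
 * The statically allocated tables below act as shared, read-only early
 * shadow page tables: every level of an early shadow mapping points
 * into them. The kasan_*_table() predicates let the teardown code tell
 * such a shared table apart from a normally allocated one, since shared
 * tables must be unlinked, never freed.
 */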
#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
    return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
    return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
    return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
    return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
    return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
    return false;
}
#endif
pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS]
    __page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
    return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
}

static inline bool kasan_early_shadow_page_entry(pte_t pte)
{
    return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));
}

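/*
 * Boot-time allocator: returns zeroed, naturally aligned memory taken
 * straight from memblock, for use before the slab allocator is up.
 * Failure is unrecoverable this early, hence the panic().
 */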
static __init void *early_alloc(size_t size, int node)
{
    void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
                       MEMBLOCK_ALLOC_ACCESSIBLE, node);

    if (!ptr)
        panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
              __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));

    return ptr;
}

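/*
 * Point every pte in [addr, end) at the shared early shadow page.
 * The mapping is created write-protected: zero shadow must read as
 * all zeroes ("not poisoned") and must never be written through.
 */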
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
                unsigned long end)
{
    pte_t *pte = pte_offset_kernel(pmd, addr);
    pte_t zero_pte;

    zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_early_shadow_page)),
                PAGE_KERNEL);
    zero_pte = pte_wrprotect(zero_pte);

    while (addr + PAGE_SIZE <= end) {
        set_pte_at(&init_mm, addr, pte, zero_pte);
        addr += PAGE_SIZE;
        pte = pte_offset_kernel(pmd, addr);
    }
}

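/*
 * Populate [addr, end) at the pmd level. A whole, aligned PMD-sized
 * chunk is handled by pointing the pmd at the shared early shadow pte
 * table; a partial chunk gets a real pte table (from slab once
 * available, otherwise from memblock) filled in by zero_pte_populate().
 */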
static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
                unsigned long end)
{
    pmd_t *pmd = pmd_offset(pud, addr);
    unsigned long next;

    do {
        next = pmd_addr_end(addr, end);

        if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
            pmd_populate_kernel(&init_mm, pmd,
                    lm_alias(kasan_early_shadow_pte));
            continue;
        }

        if (pmd_none(*pmd)) {
            pte_t *p;

            if (slab_is_available())
                p = pte_alloc_one_kernel(&init_mm);
            else
                p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
            if (!p)
                return -ENOMEM;

            pmd_populate_kernel(&init_mm, pmd, p);
        }
        zero_pte_populate(pmd, addr, next);
    } while (pmd++, addr = next, addr != end);

    return 0;
}

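/*
 * The same scheme one level up: an aligned PUD-sized chunk reuses the
 * shared early shadow pmd (and through it the shared pte table), while
 * a partial chunk gets a real pmd table and recurses into
 * zero_pmd_populate().
 */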
static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
                unsigned long end)
{
    pud_t *pud = pud_offset(p4d, addr);
    unsigned long next;

    do {
        next = pud_addr_end(addr, end);
        if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
            pmd_t *pmd;

            pud_populate(&init_mm, pud,
                    lm_alias(kasan_early_shadow_pmd));
            pmd = pmd_offset(pud, addr);
            pmd_populate_kernel(&init_mm, pmd,
                    lm_alias(kasan_early_shadow_pte));
            continue;
        }

        if (pud_none(*pud)) {
            pmd_t *p;

            if (slab_is_available()) {
                p = pmd_alloc(&init_mm, pud, addr);
                if (!p)
                    return -ENOMEM;
            } else {
                pud_populate(&init_mm, pud,
                    early_alloc(PAGE_SIZE, NUMA_NO_NODE));
            }
        }
        zero_pmd_populate(pud, addr, next);
    } while (pud++, addr = next, addr != end);

    return 0;
}

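/*
 * And once more at the p4d level: an aligned P4D-sized chunk is wired
 * to the shared early shadow pud/pmd/pte tables, while a partial chunk
 * gets a real pud table and recurses into zero_pud_populate().
 */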
static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
                unsigned long end)
{
    p4d_t *p4d = p4d_offset(pgd, addr);
    unsigned long next;

    do {
        next = p4d_addr_end(addr, end);
        if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
            pud_t *pud;
            pmd_t *pmd;

            p4d_populate(&init_mm, p4d,
                    lm_alias(kasan_early_shadow_pud));
            pud = pud_offset(p4d, addr);
            pud_populate(&init_mm, pud,
                    lm_alias(kasan_early_shadow_pmd));
            pmd = pmd_offset(pud, addr);
            pmd_populate_kernel(&init_mm, pmd,
                    lm_alias(kasan_early_shadow_pte));
            continue;
        }

        if (p4d_none(*p4d)) {
            pud_t *p;

            if (slab_is_available()) {
                p = pud_alloc(&init_mm, p4d, addr);
                if (!p)
                    return -ENOMEM;
            } else {
                p4d_populate(&init_mm, p4d,
                    early_alloc(PAGE_SIZE, NUMA_NO_NODE));
            }
        }
        zero_pud_populate(p4d, addr, next);
    } while (p4d++, addr = next, addr != end);

    return 0;
}

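/*
 * For orientation: in generic KASAN the shadow address for a memory
 * address is computed roughly as below (see kasan_mem_to_shadow();
 * KASAN_SHADOW_OFFSET is architecture-defined):
 *
 *        shadow = (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
 *                        + KASAN_SHADOW_OFFSET;
 *
 * One shadow byte thus covers 2^KASAN_SHADOW_SCALE_SHIFT bytes of
 * memory, which is why sizes in this file are scaled by that shift.
 */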
/**
 * kasan_populate_early_shadow - populate a shadow memory region with
 *                               kasan_early_shadow_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 *
 * Return: 0 on success, -ENOMEM if a page table could not be allocated.
 */
int __ref kasan_populate_early_shadow(const void *shadow_start,
                    const void *shadow_end)
{
    unsigned long addr = (unsigned long)shadow_start;
    unsigned long end = (unsigned long)shadow_end;
    pgd_t *pgd = pgd_offset_k(addr);
    unsigned long next;

    do {
        next = pgd_addr_end(addr, end);

        if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            /*
             * kasan_early_shadow_pud should already be populated with
             * pmds at this point.
             * The [pud,pmd]_populate*() calls below are needed only for
             * 3- and 2-level page tables, where puds/pmds don't exist
             * and pgd_populate()/pud_populate() are no-ops.
             */
            pgd_populate(&init_mm, pgd,
                    lm_alias(kasan_early_shadow_p4d));
            p4d = p4d_offset(pgd, addr);
            p4d_populate(&init_mm, p4d,
                    lm_alias(kasan_early_shadow_pud));
            pud = pud_offset(p4d, addr);
            pud_populate(&init_mm, pud,
                    lm_alias(kasan_early_shadow_pmd));
            pmd = pmd_offset(pud, addr);
            pmd_populate_kernel(&init_mm, pmd,
                    lm_alias(kasan_early_shadow_pte));
            continue;
        }

        if (pgd_none(*pgd)) {
            p4d_t *p;

            if (slab_is_available()) {
                p = p4d_alloc(&init_mm, pgd, addr);
                if (!p)
                    return -ENOMEM;
            } else {
                pgd_populate(&init_mm, pgd,
                    early_alloc(PAGE_SIZE, NUMA_NO_NODE));
            }
        }
        zero_p4d_populate(pgd, addr, next);
    } while (pgd++, addr = next, addr != end);

    return 0;
}

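/*
 * The kasan_free_*() helpers below each free one page-table page: if
 * every entry in the table is none, the page is returned to the
 * allocator and the upper-level entry pointing at it is cleared;
 * otherwise the table is still in use and left untouched.
 */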
static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
    pte_t *pte;
    int i;

    for (i = 0; i < PTRS_PER_PTE; i++) {
        pte = pte_start + i;
        if (!pte_none(*pte))
            return;
    }

    pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
    pmd_clear(pmd);
}

static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
{
    pmd_t *pmd;
    int i;

    for (i = 0; i < PTRS_PER_PMD; i++) {
        pmd = pmd_start + i;
        if (!pmd_none(*pmd))
            return;
    }

    pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
    pud_clear(pud);
}

static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
{
    pud_t *pud;
    int i;

    for (i = 0; i < PTRS_PER_PUD; i++) {
        pud = pud_start + i;
        if (!pud_none(*pud))
            return;
    }

    pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
    p4d_clear(p4d);
}

static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
{
    p4d_t *p4d;
    int i;

    for (i = 0; i < PTRS_PER_P4D; i++) {
        p4d = p4d_start + i;
        if (!p4d_none(*p4d))
            return;
    }

    p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
    pgd_clear(pgd);
}

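/*
 * The kasan_remove_*_table() walkers below undo the population above:
 * an entry that maps a whole, aligned chunk via one of the shared early
 * shadow tables is simply cleared, while a partially covered entry is
 * descended into so that individual zero shadow ptes can be removed and
 * any now-empty tables freed.
 */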
static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
                unsigned long end)
{
    unsigned long next;

    for (; addr < end; addr = next, pte++) {
        next = (addr + PAGE_SIZE) & PAGE_MASK;
        if (next > end)
            next = end;

        if (!pte_present(*pte))
            continue;

        if (WARN_ON(!kasan_early_shadow_page_entry(*pte)))
            continue;
        pte_clear(&init_mm, addr, pte);
    }
}

static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
                unsigned long end)
{
    unsigned long next;

    for (; addr < end; addr = next, pmd++) {
        pte_t *pte;

        next = pmd_addr_end(addr, end);

        if (!pmd_present(*pmd))
            continue;

        if (kasan_pte_table(*pmd)) {
            if (IS_ALIGNED(addr, PMD_SIZE) &&
                IS_ALIGNED(next, PMD_SIZE)) {
                pmd_clear(pmd);
                continue;
            }
        }
        pte = pte_offset_kernel(pmd, addr);
        kasan_remove_pte_table(pte, addr, next);
        kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
    }
}

static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
                unsigned long end)
{
    unsigned long next;

    for (; addr < end; addr = next, pud++) {
        pmd_t *pmd, *pmd_base;

        next = pud_addr_end(addr, end);

        if (!pud_present(*pud))
            continue;

        if (kasan_pmd_table(*pud)) {
            if (IS_ALIGNED(addr, PUD_SIZE) &&
                IS_ALIGNED(next, PUD_SIZE)) {
                pud_clear(pud);
                continue;
            }
        }
        pmd = pmd_offset(pud, addr);
        pmd_base = pmd_offset(pud, 0);
        kasan_remove_pmd_table(pmd, addr, next);
        kasan_free_pmd(pmd_base, pud);
    }
}

static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
                unsigned long end)
{
    unsigned long next;

    for (; addr < end; addr = next, p4d++) {
        pud_t *pud;

        next = p4d_addr_end(addr, end);

        if (!p4d_present(*p4d))
            continue;

        if (kasan_pud_table(*p4d)) {
            if (IS_ALIGNED(addr, P4D_SIZE) &&
                IS_ALIGNED(next, P4D_SIZE)) {
                p4d_clear(p4d);
                continue;
            }
        }
        pud = pud_offset(p4d, addr);
        kasan_remove_pud_table(pud, addr, next);
        kasan_free_pud(pud_offset(p4d, 0), p4d);
    }
}

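/*
 * kasan_remove_zero_shadow - tear down the zero shadow for a memory
 * range. Both start and size must be multiples of
 * KASAN_MEMORY_PER_SHADOW_PAGE, so that the range maps to whole shadow
 * pages; misaligned requests are rejected with a warning.
 */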
void kasan_remove_zero_shadow(void *start, unsigned long size)
{
    unsigned long addr, end, next;
    pgd_t *pgd;

    addr = (unsigned long)kasan_mem_to_shadow(start);
    end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

    if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
        WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
        return;

    for (; addr < end; addr = next) {
        p4d_t *p4d;

        next = pgd_addr_end(addr, end);

        pgd = pgd_offset_k(addr);
        if (!pgd_present(*pgd))
            continue;

        if (kasan_p4d_table(*pgd)) {
            if (IS_ALIGNED(addr, PGDIR_SIZE) &&
                IS_ALIGNED(next, PGDIR_SIZE)) {
                pgd_clear(pgd);
                continue;
            }
        }

        p4d = p4d_offset(pgd, addr);
        kasan_remove_p4d_table(p4d, addr, next);
        kasan_free_p4d(p4d_offset(pgd, 0), pgd);
    }
}

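/*
 * kasan_add_zero_shadow - back a memory range with zero shadow mappings,
 * undoing any partial work on failure. The same alignment requirements
 * as for removal apply. Callers are typically runtime memory-adding
 * paths (memory hotplug and similar), where the new range needs shadow
 * before it is touched.
 */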
int kasan_add_zero_shadow(void *start, unsigned long size)
{
    int ret;
    void *shadow_start, *shadow_end;

    shadow_start = kasan_mem_to_shadow(start);
    shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

    if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
        WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
        return -EINVAL;

    ret = kasan_populate_early_shadow(shadow_start, shadow_end);
    if (ret)
        kasan_remove_zero_shadow(start, size);
    return ret;
}