// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */

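/*
 * Illustrative note (not from the original file): the early helpers below
 * therefore populate tables with the pattern
 *
 *	__pmd_populate(pmdp, __pa_symbol(kasan_early_shadow_pte), PMD_TYPE_TABLE);
 *
 * rather than something like pmd_populate_kernel(&init_mm, pmdp, ptep), which
 * would translate the table address through virt_to_phys() and is therefore
 * not safe on kernel-image symbols this early in boot.
 */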
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					 __pa(MAX_DMA_ADDRESS),
					 MEMBLOCK_ALLOC_NOLEAKTRACE, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

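/*
 * Like kasan_alloc_zeroed_page(), but the page is not zeroed by memblock.
 * Its caller in this file, kasan_pte_populate(), overwrites the page with
 * KASAN_SHADOW_INIT anyway, so zeroing it here would be redundant work.
 */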
static phys_addr_t __init kasan_alloc_raw_page(int node)
{
	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
					     __pa(MAX_DMA_ADDRESS),
					     MEMBLOCK_ALLOC_NOLEAKTRACE,
					     node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

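/*
 * The kasan_p*_offset() helpers walk the shadow page tables one level at a
 * time, installing a next-level table where none exists yet.  In the early
 * pass they wire in the statically allocated kasan_early_shadow_* tables via
 * __pa_symbol(); in the late pass they allocate fresh zeroed pages from
 * memblock.  The *_kimg offset variants are used early because
 * pte_offset_kernel() and friends go through the linear map, which is not
 * available yet at that point.
 */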
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
				      bool early)
{
	if (p4d_none(READ_ONCE(*p4dp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

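/*
 * The kasan_p*_populate() walkers map shadow memory for [addr, end) top-down.
 * In the early pass every PTE points at the single kasan_early_shadow_page of
 * zeroes; in the late pass each PTE gets its own freshly allocated page,
 * initialised to KASAN_SHADOW_INIT.
 */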
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_raw_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = p4d_offset(pgdp, addr);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end);
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

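/*
 * For reference (generic KASAN shadow layout, assumed from the common KASAN
 * code rather than stated in this file): a kernel address maps to its shadow
 * byte as
 *
 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * The BUILD_BUG_ON()s below check that KASAN_SHADOW_OFFSET was chosen so the
 * shadow of the top of the address space lands exactly at KASAN_SHADOW_END,
 * and that the shadow region is PGDIR_SIZE aligned at both ends.
 */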
/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used here
	 * because it is a no-op on 2- and 3-level page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

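/*
 * Replace the early zero shadow (everything mapped to kasan_early_shadow_page)
 * with real shadow memory: temporarily run on tmp_pg_dir so instrumented code
 * keeps a valid shadow, unmap the early shadow from swapper_pg_dir, populate
 * real shadow for the kernel image and the linear map (leaving other regions
 * on the early zero shadow where appropriate), then switch back to
 * swapper_pg_dir.
 */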
static void __init kasan_init_shadow(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	u64 vmalloc_shadow_end;
	phys_addr_t pa_start, pa_end;
	u64 i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First, the early shadow must be unmapped (the clear_pgds() call
	 * below), but instrumented code cannot run without shadow memory.
	 * tmp_pg_dir is used to keep the early shadow mapped until the full
	 * shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir), idmap_pg_dir);

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
				    (void *)mod_shadow_start);

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		BUILD_BUG_ON(VMALLOC_START != MODULES_END);
		kasan_populate_early_shadow((void *)vmalloc_shadow_end,
					    (void *)KASAN_SHADOW_END);
	} else {
		kasan_populate_early_shadow((void *)kimg_shadow_end,
					    (void *)KASAN_SHADOW_END);
		if (kimg_shadow_start > mod_shadow_end)
			kasan_populate_early_shadow((void *)mod_shadow_end,
						    (void *)kimg_shadow_start);
	}

	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)__phys_to_virt(pa_start);
		void *end = (void *)__phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KAsan may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
}

static void __init kasan_init_depth(void)
{
	init_task.kasan_depth = 0;
}

#ifdef CONFIG_KASAN_VMALLOC
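/*
 * Map real shadow for a vm area that is registered early, before the vmalloc
 * shadow is handled by the generic on-demand hooks.  (The call site lives
 * outside this file; the generic kernel is believed to provide a __weak no-op
 * default that this definition overrides. Treat the exact caller as an
 * assumption, it is not shown here.)
 */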
void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
{
	unsigned long shadow_start, shadow_end;

	if (!is_vmalloc_or_module_addr(start))
		return;

	shadow_start = (unsigned long)kasan_mem_to_shadow(start);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow(start + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
	kasan_map_populate(shadow_start, shadow_end, NUMA_NO_NODE);
}
#endif

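/*
 * Top-level KASAN setup, called from the arm64 setup_arch() path (assumed
 * call site, not shown in this file).  kasan_init_depth() resets
 * init_task.kasan_depth to 0, which enables KASAN reporting; it starts out
 * non-zero so that reports are suppressed while only the early shadow is in
 * place.
 */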
void __init kasan_init(void)
{
	kasan_init_shadow();
	kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
	/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
	pr_info("KernelAddressSanitizer initialized (generic)\n");
#endif
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */