Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * KASAN for 64-bit Book3S powerpc
0004  *
0005  * Copyright 2019-2022, Daniel Axtens, IBM Corporation.
0006  */
0007 
0008 /*
0009  * ppc64 turns on virtual memory late in boot, after calling into generic code
0010  * like the device-tree parser, so it uses this in conjunction with a hook in
0011  * outline mode to avoid invalid access early in boot.
0012  */
0013 
0014 #define DISABLE_BRANCH_PROFILING
0015 
#include <linux/kasan.h>
#include <linux/memblock.h>
#include <linux/panic.h>
#include <linux/printk.h>
#include <linux/sched/task.h>
#include <asm/pgalloc.h>
0021 
/*
 * Consulted by the outline-mode check hook (see comment at the top of this
 * file): shadow accesses are suppressed until kasan_init() flips this key
 * via static_branch_inc() once the shadow mappings exist.
 */
DEFINE_STATIC_KEY_FALSE(powerpc_kasan_enabled_key);
0023 
0024 static void __init kasan_init_phys_region(void *start, void *end)
0025 {
0026     unsigned long k_start, k_end, k_cur;
0027     void *va;
0028 
0029     if (start >= end)
0030         return;
0031 
0032     k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE);
0033     k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE);
0034 
0035     va = memblock_alloc(k_end - k_start, PAGE_SIZE);
0036     for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE)
0037         map_kernel_page(k_cur, __pa(va), PAGE_KERNEL);
0038 }
0039 
/*
 * Build the KASAN shadow for radix: real shadow pages over all physical
 * memory, and the shared early-shadow/zero page over the iomap and vmemmap
 * regions. Bails out (with a warning) if the radix MMU is not in use.
 */
void __init kasan_init(void)
{
    /*
     * We want to do the following things:
     *  1) Map real memory into the shadow for all physical memblocks
     *     This takes us from c000... to c008...
     *  2) Leave a hole over the shadow of vmalloc space. KASAN_VMALLOC
     *     will manage this for us.
     *     This takes us from c008... to c00a...
     *  3) Map the 'early shadow'/zero page over iomap and vmemmap space.
     *     This takes us up to where we start at c00e...
     */

    void *k_start = kasan_mem_to_shadow((void *)RADIX_VMALLOC_END);
    void *k_end = kasan_mem_to_shadow((void *)RADIX_VMEMMAP_END);
    phys_addr_t start, end;
    u64 i;
    /* Writable for now; downgraded to RO further below after the wipe. */
    pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL);

    /* Everything below assumes radix page-table layout and flags. */
    if (!early_radix_enabled()) {
        pr_warn("KASAN not enabled as it requires radix!");
        return;
    }

    /* Step 1: real shadow memory for every physical memblock range. */
    for_each_mem_range(i, &start, &end)
        kasan_init_phys_region((void *)start, (void *)end);

    /*
     * Prepare the early-shadow page tables: every PTE points at the single
     * shared zero page, every PMD at that PTE table, every PUD at that
     * PMD table.
     */
    for (i = 0; i < PTRS_PER_PTE; i++)
        __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
                 &kasan_early_shadow_pte[i], zero_pte, 0);

    for (i = 0; i < PTRS_PER_PMD; i++)
        pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i],
                    kasan_early_shadow_pte);

    for (i = 0; i < PTRS_PER_PUD; i++)
        pud_populate(&init_mm, &kasan_early_shadow_pud[i],
                 kasan_early_shadow_pmd);

    /* map the early shadow over the iomap and vmemmap space */
    kasan_populate_early_shadow(k_start, k_end);

    /* mark early shadow region as RO and wipe it */
    zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO);
    for (i = 0; i < PTRS_PER_PTE; i++)
        __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
                 &kasan_early_shadow_pte[i], zero_pte, 0);

    /*
     * clear_page relies on some cache info that hasn't been set up yet.
     * It ends up looping ~forever and blows up other data.
     * Use memset instead.
     */
    memset(kasan_early_shadow_page, 0, PAGE_SIZE);

    /* Let the outline-mode hook start checking shadow memory. */
    static_branch_inc(&powerpc_kasan_enabled_key);

    /* Enable error messages */
    init_task.kasan_depth = 0;
    pr_info("KASAN init done\n");
}
0101 
0102 void __init kasan_early_init(void) { }
0103 
0104 void __init kasan_late_init(void) { }