#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/memblock.h>
#include <mm/mmu_decl.h>

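/*
 * Map the KASAN shadow for the region [start, start + size): back as much
 * of the shadow as possible with BAT (Block Address Translation) entries,
 * then map whatever is left with ordinary page table entries.
 */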
int __init kasan_init_region(void *start, size_t size)
{
	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
	unsigned long k_nobat = k_start;
	unsigned long k_cur;
	phys_addr_t phys;
	int ret;

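	/*
	 * Grab free BATs and naturally aligned blocks of physical memory
	 * until the shadow range is covered or no suitable BAT is left.
	 */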
	while (k_nobat < k_end) {
		unsigned int k_size = bat_block_size(k_nobat, k_end);
		int idx = find_free_bat();

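		/* Stop if no BAT is free or the block is too small for a BAT mapping. */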
		if (idx == -1)
			break;
		if (k_size < SZ_128K)
			break;
		phys = memblock_phys_alloc_range(k_size, k_size, 0,
						 MEMBLOCK_ALLOC_ANYWHERE);
		if (!phys)
			break;

		setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL);
		k_nobat += k_size;
	}
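	/* Activate the newly programmed BAT entries, if any. */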
	if (k_nobat != k_start)
		update_bats();

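	/*
	 * Allocate backing memory for the part of the shadow that the BATs
	 * could not cover; it is mapped page by page below.
	 */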
	if (k_nobat < k_end) {
		phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0,
						 MEMBLOCK_ALLOC_ANYWHERE);
		if (!phys)
			return -ENOMEM;
	}

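	/* Set up the page tables for the whole shadow range. */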
	ret = kasan_init_shadow_page_tables(k_start, k_end);
	if (ret)
		return ret;

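	/*
	 * The BAT-covered part of the shadow no longer needs the early
	 * shadow mapping, so clear those PTEs.
	 */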
	kasan_update_early_region(k_start, k_nobat, __pte(0));

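	/* Map the remaining shadow one PAGE_SIZE page at a time. */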
	for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_off_k(k_cur);
		pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL);

		__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
	}
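	/* Drop stale translations and start with a fully unpoisoned shadow. */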
	flush_tlb_kernel_range(k_start, k_end);
	memset(kasan_mem_to_shadow(start), 0, k_end - k_start);

	return 0;
}