// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2019 Jason Yan <yanaijie@huawei.com>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_core.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/cacheflush.h>
#include <asm/kdump.h>
#include <mm/mmu_decl.h>
#include <generated/utsrelease.h>

struct regions {
    unsigned long pa_start;
    unsigned long pa_end;
    unsigned long kernel_size;
    unsigned long dtb_start;
    unsigned long dtb_end;
    unsigned long initrd_start;
    unsigned long initrd_end;
    unsigned long crash_start;
    unsigned long crash_end;
    int reserved_mem;
    int reserved_mem_addr_cells;
    int reserved_mem_size_cells;
};

struct regions __initdata regions;

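/*
 * Parse the /chosen node of the flattened device tree so that
 * boot_command_line is populated before it is checked for "nokaslr"
 * and "crashkernel=" below.
 */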
static __init void kaslr_get_cmdline(void *fdt)
{
    early_init_dt_scan_chosen(boot_command_line);
}

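/*
 * Fold a memory area into a running hash, one unsigned long at a time,
 * rotating the hash by an odd number of bits between each word.
 */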
static unsigned long __init rotate_xor(unsigned long hash, const void *area,
                       size_t size)
{
    size_t i;
    const unsigned long *ptr = area;

    for (i = 0; i < size / sizeof(hash); i++) {
        /* Rotate by odd number of bits and XOR. */
        hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
        hash ^= ptr[i];
    }

    return hash;
}

/*
 * Attempt to create some simple starting entropy. This makes the result
 * different for every build, but it is still not enough: stronger entropy
 * should be added so that it also changes on every boot.
 */
static unsigned long __init get_boot_seed(void *fdt)
{
    unsigned long hash = 0;

    /* build-specific string for starting entropy. */
    hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
    hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));

    return hash;
}

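/*
 * Retrieve the "kaslr-seed" property from the /chosen node and wipe it
 * in the FDT so it cannot be read back later. Returns 0 if the property
 * is missing or has an unexpected size.
 */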
static __init u64 get_kaslr_seed(void *fdt)
{
    int node, len;
    fdt64_t *prop;
    u64 ret;

    node = fdt_path_offset(fdt, "/chosen");
    if (node < 0)
        return 0;

    prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
    if (!prop || len != sizeof(u64))
        return 0;

    ret = fdt64_to_cpu(*prop);
    *prop = 0;
    return ret;
}

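/* Inclusive overlap test between [s1, e1] and [s2, e2]. */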
static __init bool regions_overlap(u32 s1, u32 e1, u32 s2, u32 e2)
{
    return e1 >= s2 && e2 >= s1;
}

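/*
 * Check whether [start, end] collides with anything the device tree
 * reserves: /memreserve/ entries or static allocations below the
 * /reserved-memory node.
 */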
static __init bool overlaps_reserved_region(const void *fdt, u32 start,
                        u32 end)
{
    int subnode, len, i;
    u64 base, size;

    /* check for overlap with /memreserve/ entries */
    for (i = 0; i < fdt_num_mem_rsv(fdt); i++) {
        if (fdt_get_mem_rsv(fdt, i, &base, &size) < 0)
            continue;
        if (regions_overlap(start, end, base, base + size))
            return true;
    }

    if (regions.reserved_mem < 0)
        return false;

    /* check for overlap with static reservations in /reserved-memory */
    for (subnode = fdt_first_subnode(fdt, regions.reserved_mem);
         subnode >= 0;
         subnode = fdt_next_subnode(fdt, subnode)) {
        const fdt32_t *reg;
        u64 rsv_end;

        len = 0;
        reg = fdt_getprop(fdt, subnode, "reg", &len);
        while (len >= (regions.reserved_mem_addr_cells +
                   regions.reserved_mem_size_cells)) {
            base = fdt32_to_cpu(reg[0]);
            if (regions.reserved_mem_addr_cells == 2)
                base = (base << 32) | fdt32_to_cpu(reg[1]);

            reg += regions.reserved_mem_addr_cells;
            len -= 4 * regions.reserved_mem_addr_cells;

            size = fdt32_to_cpu(reg[0]);
            if (regions.reserved_mem_size_cells == 2)
                size = (size << 32) | fdt32_to_cpu(reg[1]);

            reg += regions.reserved_mem_size_cells;
            len -= 4 * regions.reserved_mem_size_cells;

            if (base >= regions.pa_end)
                continue;

            rsv_end = min(base + size, (u64)U32_MAX);

            if (regions_overlap(start, end, base, rsv_end))
                return true;
        }
    }
    return false;
}

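/*
 * Check a candidate physical range against everything that must not be
 * overwritten: the running kernel image, the device tree blob, the
 * initrd, the crashkernel area and the FDT reservations.
 */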
static __init bool overlaps_region(const void *fdt, u32 start,
                   u32 end)
{
    if (regions_overlap(start, end, __pa(_stext), __pa(_end)))
        return true;

    if (regions_overlap(start, end, regions.dtb_start,
                regions.dtb_end))
        return true;

    if (regions_overlap(start, end, regions.initrd_start,
                regions.initrd_end))
        return true;

    if (regions_overlap(start, end, regions.crash_start,
                regions.crash_end))
        return true;

    return overlaps_reserved_region(fdt, start, end);
}

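/*
 * Record the crashkernel region requested on the command line so the
 * randomized kernel is not placed on top of it.
 */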
static void __init get_crash_kernel(void *fdt, unsigned long size)
{
#ifdef CONFIG_CRASH_CORE
    unsigned long long crash_size, crash_base;
    int ret;

    ret = parse_crashkernel(boot_command_line, size, &crash_size,
                &crash_base);
    if (ret != 0 || crash_size == 0)
        return;
    if (crash_base == 0)
        crash_base = KDUMP_KERNELBASE;

    regions.crash_start = (unsigned long)crash_base;
    regions.crash_end = (unsigned long)(crash_base + crash_size);

    pr_debug("crash_base=0x%llx crash_size=0x%llx\n", crash_base, crash_size);
#endif
}

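/*
 * Record the initrd range advertised in /chosen via the
 * "linux,initrd-start" and "linux,initrd-end" properties.
 */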
static void __init get_initrd_range(void *fdt)
{
    u64 start, end;
    int node, len;
    const __be32 *prop;

    node = fdt_path_offset(fdt, "/chosen");
    if (node < 0)
        return;

    prop = fdt_getprop(fdt, node, "linux,initrd-start", &len);
    if (!prop)
        return;
    start = of_read_number(prop, len / 4);

    prop = fdt_getprop(fdt, node, "linux,initrd-end", &len);
    if (!prop)
        return;
    end = of_read_number(prop, len / 4);

    regions.initrd_start = (unsigned long)start;
    regions.initrd_end = (unsigned long)end;

    pr_debug("initrd_start=0x%llx  initrd_end=0x%llx\n", start, end);
}

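/*
 * Walk downwards from 'offset' towards 'start' in SZ_16K steps and
 * return the first physical address where the kernel image fits without
 * overlapping a protected region, or 0 if no such address is found.
 */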
static __init unsigned long get_usable_address(const void *fdt,
                           unsigned long start,
                           unsigned long offset)
{
    unsigned long pa;
    unsigned long pa_end;

    for (pa = offset; (long)pa > (long)start; pa -= SZ_16K) {
        pa_end = pa + regions.kernel_size;
        if (overlaps_region(fdt, pa, pa_end))
            continue;

        return pa;
    }
    return 0;
}

static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
                  int *size_cells)
{
    const int *prop;
    int len;

    /*
     * Retrieve the #address-cells and #size-cells properties
     * from the 'node', or use the default if not provided.
     */
    *addr_cells = *size_cells = 1;

    prop = fdt_getprop(fdt, node, "#address-cells", &len);
    if (len == 4)
        *addr_cells = fdt32_to_cpu(*prop);
    prop = fdt_getprop(fdt, node, "#size-cells", &len);
    if (len == 4)
        *size_cells = fdt32_to_cpu(*prop);
}

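/*
 * Try the chosen 64M slot first, then each lower slot, until
 * get_usable_address() finds room for the kernel. The result is
 * converted into an offset relative to memstart_addr.
 */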
static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index,
                           unsigned long offset)
{
    unsigned long koffset = 0;
    unsigned long start;

    while ((long)index >= 0) {
        offset = memstart_addr + index * SZ_64M + offset;
        start = memstart_addr + index * SZ_64M;
        koffset = get_usable_address(dt_ptr, start, offset);
        if (koffset)
            break;
        index--;
    }

    if (koffset != 0)
        koffset -= memstart_addr;

    return koffset;
}

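/* "nokaslr" on the command line disables randomization entirely. */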
static inline __init bool kaslr_disabled(void)
{
    return strstr(boot_command_line, "nokaslr") != NULL;
}

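/*
 * Gather entropy, record the regions that must be avoided and pick a
 * random, 16K-aligned offset within the linear mapping for the kernel.
 */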
static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
                          unsigned long kernel_sz)
{
    unsigned long offset, random;
    unsigned long ram, linear_sz;
    u64 seed;
    unsigned long index;

    kaslr_get_cmdline(dt_ptr);
    if (kaslr_disabled())
        return 0;

    random = get_boot_seed(dt_ptr);

    seed = get_tb() << 32;
    seed ^= get_tb();
    random = rotate_xor(random, &seed, sizeof(seed));

    /*
     * Retrieve (and wipe) the seed from the FDT
     */
    seed = get_kaslr_seed(dt_ptr);
    if (seed)
        random = rotate_xor(random, &seed, sizeof(seed));
    else
        pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");

    ram = min_t(phys_addr_t, __max_low_memory, size);
    ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true);
    linear_sz = min_t(unsigned long, ram, SZ_512M);

    /* If the linear size is smaller than 64M, do not randomize */
    if (linear_sz < SZ_64M)
        return 0;

    /* check for a reserved-memory node and record its cell sizes */
    regions.reserved_mem = fdt_path_offset(dt_ptr, "/reserved-memory");
    if (regions.reserved_mem >= 0)
        get_cell_sizes(dt_ptr, regions.reserved_mem,
                   &regions.reserved_mem_addr_cells,
                   &regions.reserved_mem_size_cells);

    regions.pa_start = memstart_addr;
    regions.pa_end = memstart_addr + linear_sz;
    regions.dtb_start = __pa(dt_ptr);
    regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
    regions.kernel_size = kernel_sz;

    get_initrd_range(dt_ptr);
    get_crash_kernel(dt_ptr, ram);

    /*
     * Decide which 64M slot we want to start in.
     * Only use the low 8 bits of the random seed.
     */
    index = random & 0xFF;
    index %= linear_sz / SZ_64M;

    /* Decide the offset inside the 64M slot */
    offset = random % (SZ_64M - kernel_sz);
    offset = round_down(offset, SZ_16K);

    return kaslr_legal_offset(dt_ptr, index, offset);
}

/*
 * Decide whether the kernel needs to be relocated to a random offset and,
 * if so, copy it there and jump to the new location.
 * void *dt_ptr - address of the device tree
 * phys_addr_t size - size of the first memory block
 */
notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
{
    unsigned long tlb_virt;
    phys_addr_t tlb_phys;
    unsigned long offset;
    unsigned long kernel_sz;

    kernel_sz = (unsigned long)_end - (unsigned long)_stext;

    offset = kaslr_choose_location(dt_ptr, size, kernel_sz);
    if (offset == 0)
        return;

    kernstart_virt_addr += offset;
    kernstart_addr += offset;

    is_second_reloc = 1;

    if (offset >= SZ_64M) {
        tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
        tlb_phys = round_down(kernstart_addr, SZ_64M);

        /* Create a kernel mapping to relocate into */
        create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
    }

    /* Copy the kernel to its new location and run from there */
    memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
    flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);

    reloc_kernel_entry(dt_ptr, kernstart_virt_addr);
}

void __init kaslr_late_init(void)
{
    /* If randomized, clear the original kernel */
    if (kernstart_virt_addr != KERNELBASE) {
        unsigned long kernel_sz;

        kernel_sz = (unsigned long)_end - kernstart_virt_addr;
        memzero_explicit((void *)KERNELBASE, kernel_sz);
    }
}