// SPDX-License-Identifier: GPL-2.0
/*
 * kexec_file for arm64
 *
 * Copyright (C) 2018 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * Most code is derived from arm64 port of kexec-tools
 */

#define pr_fmt(fmt) "kexec_file: " fmt

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

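/*
 * Arch-specific list of image loaders probed by kexec_file_load();
 * arm64 supports only the raw Image format (kexec_image_ops).
 */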
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_image_ops,
	NULL
};

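/*
 * Undo the allocations made while loading: free the arch-private copy of
 * the device tree and, for crash kernels, the ELF core headers, then let
 * the generic code release the remaining image state.
 */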
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kvfree(image->arch.dtb);
	image->arch.dtb = NULL;

	vfree(image->elf_headers);
	image->elf_headers = NULL;
	image->elf_headers_sz = 0;

	return kexec_image_post_load_cleanup_default(image);
}

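/*
 * Build ELF64 core headers describing the current kernel's memory for the
 * crash-capture kernel: collect all memblock memory ranges, carve out the
 * crashkernel reservation(s), and let the generic helper emit the headers
 * that will back /proc/vmcore after a crash.
 */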
static int prepare_elf_headers(void **addr, unsigned long *sz)
{
	struct crash_mem *cmem;
	unsigned int nr_ranges;
	int ret;
	u64 i;
	phys_addr_t start, end;

	nr_ranges = 2; /* for exclusion of crashkernel region */
	for_each_mem_range(i, &start, &end)
		nr_ranges++;

	cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
	if (!cmem)
		return -ENOMEM;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;
	for_each_mem_range(i, &start, &end) {
		cmem->ranges[cmem->nr_ranges].start = start;
		cmem->ranges[cmem->nr_ranges].end = end - 1;
		cmem->nr_ranges++;
	}

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		goto out;

	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);
		if (ret)
			goto out;
	}

	ret = crash_prepare_elf64_headers(cmem, true, addr, sz);

out:
	kfree(cmem);
	return ret;
}

/*
 * Tries to add the initrd and DTB to the image. If it is not possible to
 * find valid locations, this function will undo its changes to the image
 * and return non-zero.
 */
int load_other_segments(struct kimage *image,
			unsigned long kernel_load_addr,
			unsigned long kernel_size,
			char *initrd, unsigned long initrd_len,
			char *cmdline)
{
	struct kexec_buf kbuf;
	void *headers, *dtb = NULL;
	unsigned long headers_sz, initrd_load_addr = 0, dtb_len,
		      orig_segments = image->nr_segments;
	int ret = 0;

	kbuf.image = image;
	/* don't allocate anything below the kernel */
	kbuf.buf_min = kernel_load_addr + kernel_size;

	/* load elf core header */
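	/*
	 * For a crash (kdump) image, generate the ELF core headers now and
	 * stage them above the kernel; of_kexec_alloc_and_setup_fdt() below
	 * advertises their location to the capture kernel via the DT
	 * (/chosen/linux,elfcorehdr).
	 */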
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = prepare_elf_headers(&headers, &headers_sz);
		if (ret) {
			pr_err("Preparing elf core header failed\n");
			goto out_err;
		}

		kbuf.buffer = headers;
		kbuf.bufsz = headers_sz;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = headers_sz;
		kbuf.buf_align = SZ_64K; /* largest supported page size */
		kbuf.buf_max = ULONG_MAX;
		kbuf.top_down = true;

		ret = kexec_add_buffer(&kbuf);
		if (ret) {
			vfree(headers);
			goto out_err;
		}
		image->elf_headers = headers;
		image->elf_load_addr = kbuf.mem;
		image->elf_headers_sz = headers_sz;

		pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			 image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
	}

	/* load initrd */
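	/*
	 * Place the initrd above the kernel, bottom-up, but keep it within a
	 * 1 GiB-aligned window of up to 32 GiB that also covers the kernel
	 * image, per the initrd placement guidance in the arm64 boot
	 * documentation (Documentation/arm64/booting.rst).
	 */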
	if (initrd) {
		kbuf.buffer = initrd;
		kbuf.bufsz = initrd_len;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = initrd_len;
		kbuf.buf_align = 0;
		/* within 1GB-aligned window of up to 32GB in size */
		kbuf.buf_max = round_down(kernel_load_addr, SZ_1G)
				+ (unsigned long)SZ_1G * 32;
		kbuf.top_down = false;

		ret = kexec_add_buffer(&kbuf);
		if (ret)
			goto out_err;
		initrd_load_addr = kbuf.mem;

		pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			 initrd_load_addr, kbuf.bufsz, kbuf.memsz);
	}

	/* load dtb */
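	/*
	 * of_kexec_alloc_and_setup_fdt() duplicates the running kernel's
	 * device tree and rewrites /chosen for the new kernel: command line,
	 * initrd location, and (for crash kernels) the elfcorehdr and usable
	 * memory range properties.
	 */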
	dtb = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
					   initrd_len, cmdline, 0);
	if (!dtb) {
		pr_err("Preparing for new dtb failed\n");
		ret = -EINVAL;
		goto out_err;
	}

	/* trim it */
	fdt_pack(dtb);
	dtb_len = fdt_totalsize(dtb);
	kbuf.buffer = dtb;
	kbuf.bufsz = dtb_len;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf.memsz = dtb_len;
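	/*
	 * The arm64 boot protocol limits the DTB to 2 MiB, so aligning the
	 * buffer to 2 MiB guarantees the blob never straddles a 2 MiB
	 * boundary.
	 */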
	/* not across 2MB boundary */
	kbuf.buf_align = SZ_2M;
	kbuf.buf_max = ULONG_MAX;
	kbuf.top_down = true;

	ret = kexec_add_buffer(&kbuf);
	if (ret)
		goto out_err;
	image->arch.dtb = dtb;
	image->arch.dtb_mem = kbuf.mem;

	pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 kbuf.mem, kbuf.bufsz, kbuf.memsz);

	return 0;

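	/*
	 * Roll back on failure: drop any segments added above by restoring
	 * the original segment count, and free the DTB copy (kvfree(NULL)
	 * is a no-op if it was never allocated).
	 */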
out_err:
	image->nr_segments = orig_segments;
	kvfree(dtb);
	return ret;
}