// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/objtool.h>
#include <linux/kmsg_dump.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include "kexec_internal.h"

DEFINE_MUTEX(kexec_mutex);

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;


/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
    .name  = "Crash kernel",
    .start = 0,
    .end   = 0,
    .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
    .desc  = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
    .name  = "Crash kernel",
    .start = 0,
    .end   = 0,
    .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
    .desc  = IORES_DESC_CRASH_KERNEL
};

int kexec_should_crash(struct task_struct *p)
{
    /*
     * If crash_kexec_post_notifiers is enabled, don't run
     * crash_kexec() here yet, which must be run after panic
     * notifiers in panic().
     */
    if (crash_kexec_post_notifiers)
        return 0;
    /*
     * There are 4 panic() calls in the make_task_dead() path, each of
     * which corresponds to one of these 4 conditions.
     */
    if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
        return 1;
    return 0;
}

int kexec_crash_loaded(void)
{
    return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to set up.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
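
/*
 * For illustration, the entry list built by kimage_add_entry() below
 * takes roughly this form (the low bits of each entry carry the type
 * flag, the rest is a page-aligned physical address):
 *
 *   dest | IND_DESTINATION     set the running destination address
 *   src0 | IND_SOURCE          copy src0 to dest, dest += PAGE_SIZE
 *   src1 | IND_SOURCE          copy src1 to dest, dest += PAGE_SIZE
 *   next | IND_INDIRECTION     continue reading entries at 'next'
 *   IND_DONE                   end of list
 *
 * kimage_dst_used() walks the list with the same running-destination
 * rule to find which entry targets a given physical page.
 */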

/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
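/*
 * For example, with 4 KiB pages PAGE_COUNT(0x2001) is 3: two full
 * pages plus one extra byte still occupy a third page.
 */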

static struct page *kimage_alloc_page(struct kimage *image,
                       gfp_t gfp_mask,
                       unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
    int i;
    unsigned long nr_segments = image->nr_segments;
    unsigned long total_pages = 0;
    unsigned long nr_pages = totalram_pages();

    /*
     * Verify we have good destination addresses.  The caller is
     * responsible for making certain we don't attempt to load
     * the new image into invalid or reserved areas of RAM.  This
     * just verifies it is an address we can use.
     *
     * Since the kernel does everything in page size chunks ensure
     * the destination addresses are page aligned.  Too many
     * special cases crop up when we don't do this.  The most
     * insidious is getting overlapping destination addresses
     * simply because addresses are changed to page size
     * granularity.
     */
    for (i = 0; i < nr_segments; i++) {
        unsigned long mstart, mend;

        mstart = image->segment[i].mem;
        mend   = mstart + image->segment[i].memsz;
        if (mstart > mend)
            return -EADDRNOTAVAIL;
        if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
            return -EADDRNOTAVAIL;
        if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
            return -EADDRNOTAVAIL;
    }

    /* Verify our destination addresses do not overlap.
     * If we allowed overlapping destination addresses
     * through, very weird things can happen with no
     * easy explanation as one segment stomps on another.
     */
    for (i = 0; i < nr_segments; i++) {
        unsigned long mstart, mend;
        unsigned long j;

        mstart = image->segment[i].mem;
        mend   = mstart + image->segment[i].memsz;
        for (j = 0; j < i; j++) {
            unsigned long pstart, pend;

            pstart = image->segment[j].mem;
            pend   = pstart + image->segment[j].memsz;
            /* Do the segments overlap ? */
            if ((mend > pstart) && (mstart < pend))
                return -EINVAL;
        }
    }
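    /*
     * Note that the overlap test above treats segments as half-open
     * intervals [mstart, mend), so two segments that merely touch
     * (mend == pstart) are not rejected.
     */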

    /* Ensure our buffer sizes do not exceed
     * our memory sizes.  This should always be the case,
     * and it is easier to check up front than to be surprised
     * later on.
     */
    for (i = 0; i < nr_segments; i++) {
        if (image->segment[i].bufsz > image->segment[i].memsz)
            return -EINVAL;
    }

    /*
     * Verify that no more than half of memory will be consumed. If the
     * request from userspace is too large, a large amount of time will be
     * wasted allocating pages, which can cause a soft lockup.
     */
    for (i = 0; i < nr_segments; i++) {
        if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
            return -EINVAL;

        total_pages += PAGE_COUNT(image->segment[i].memsz);
    }

    if (total_pages > nr_pages / 2)
        return -EINVAL;

    /*
     * Verify we have good destination addresses.  Normally
     * the caller is responsible for making certain we don't
     * attempt to load the new image into invalid or reserved
     * areas of RAM.  But crash kernels are preloaded into a
     * reserved area of RAM.  We must ensure the addresses
     * are in the reserved area otherwise preloading the
     * kernel could corrupt things.
     */

    if (image->type == KEXEC_TYPE_CRASH) {
        for (i = 0; i < nr_segments; i++) {
            unsigned long mstart, mend;

            mstart = image->segment[i].mem;
            mend = mstart + image->segment[i].memsz - 1;
            /* Ensure we are within the crash kernel limits */
            if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
                (mend > phys_to_boot_phys(crashk_res.end)))
                return -EADDRNOTAVAIL;
        }
    }

    return 0;
}

struct kimage *do_kimage_alloc_init(void)
{
    struct kimage *image;

    /* Allocate a controlling structure */
    image = kzalloc(sizeof(*image), GFP_KERNEL);
    if (!image)
        return NULL;

    image->head = 0;
    image->entry = &image->head;
    image->last_entry = &image->head;
    image->control_page = ~0; /* By default this does not apply */
    image->type = KEXEC_TYPE_DEFAULT;

    /* Initialize the list of control pages */
    INIT_LIST_HEAD(&image->control_pages);

    /* Initialize the list of destination pages */
    INIT_LIST_HEAD(&image->dest_pages);

    /* Initialize the list of unusable pages */
    INIT_LIST_HEAD(&image->unusable_pages);

    return image;
}

int kimage_is_destination_range(struct kimage *image,
                    unsigned long start,
                    unsigned long end)
{
    unsigned long i;

    for (i = 0; i < image->nr_segments; i++) {
        unsigned long mstart, mend;

        mstart = image->segment[i].mem;
        mend = mstart + image->segment[i].memsz;
        if ((end > mstart) && (start < mend))
            return 1;
    }

    return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
    struct page *pages;

    if (fatal_signal_pending(current))
        return NULL;
    pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
    if (pages) {
        unsigned int count, i;

        pages->mapping = NULL;
        set_page_private(pages, order);
        count = 1 << order;
        for (i = 0; i < count; i++)
            SetPageReserved(pages + i);

        arch_kexec_post_alloc_pages(page_address(pages), count,
                        gfp_mask);

        if (gfp_mask & __GFP_ZERO)
            for (i = 0; i < count; i++)
                clear_highpage(pages + i);
    }

    return pages;
}

static void kimage_free_pages(struct page *page)
{
    unsigned int order, count, i;

    order = page_private(page);
    count = 1 << order;

    arch_kexec_pre_free_pages(page_address(page), count);

    for (i = 0; i < count; i++)
        ClearPageReserved(page + i);
    __free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
    struct page *page, *next;

    list_for_each_entry_safe(page, next, list, lru) {
        list_del(&page->lru);
        kimage_free_pages(page);
    }
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                            unsigned int order)
{
    /* Control pages are special, they are the intermediaries
     * that are needed while we copy the rest of the pages
     * to their final resting place.  As such they must
     * not conflict with either the destination addresses
     * or memory the kernel is already using.
     *
     * The only case where we really need more than one of
     * these is for architectures where we cannot disable
     * the MMU and must instead generate an identity mapped
     * page table for all of the memory.
     *
     * At worst this runs in O(N) of the image size.
     */
    struct list_head extra_pages;
    struct page *pages;
    unsigned int count;

    count = 1 << order;
    INIT_LIST_HEAD(&extra_pages);

    /* Loop while I can allocate a page and the page allocated
     * is a destination page.
     */
    do {
        unsigned long pfn, epfn, addr, eaddr;

        pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
        if (!pages)
            break;
        pfn   = page_to_boot_pfn(pages);
        epfn  = pfn + count;
        addr  = pfn << PAGE_SHIFT;
        eaddr = epfn << PAGE_SHIFT;
        if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                  kimage_is_destination_range(image, addr, eaddr)) {
            list_add(&pages->lru, &extra_pages);
            pages = NULL;
        }
    } while (!pages);

    if (pages) {
        /* Remember the allocated page... */
        list_add(&pages->lru, &image->control_pages);

        /* Because the page is already in its destination
         * location we will never allocate another page at
         * that address.  Therefore kimage_alloc_pages
         * will not return it (again) and we don't need
         * to give it an entry in image->segment[].
         */
    }
    /* Deal with the destination pages I have inadvertently allocated.
     *
     * Ideally I would convert multi-page allocations into single
     * page allocations, and add everything to image->dest_pages.
     *
     * For now it is simpler to just free the pages.
     */
    kimage_free_page_list(&extra_pages);

    return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                              unsigned int order)
{
    /* Control pages are special, they are the intermediaries
     * that are needed while we copy the rest of the pages
     * to their final resting place.  As such they must
     * not conflict with either the destination addresses
     * or memory the kernel is already using.
     *
     * Control pages are also the only pages we must allocate
     * when loading a crash kernel.  All of the other pages
     * are specified by the segments and we just memcpy
     * into them directly.
     *
     * The only case where we really need more than one of
     * these is for architectures where we cannot disable
     * the MMU and must instead generate an identity mapped
     * page table for all of the memory.
     *
     * Given the low demand this implements a very simple
     * allocator that finds the first hole of the appropriate
     * size in the reserved memory region, and allocates all
     * of the memory up to and including the hole.
     */
    unsigned long hole_start, hole_end, size;
    struct page *pages;

    pages = NULL;
    size = (1 << order) << PAGE_SHIFT;
    hole_start = (image->control_page + (size - 1)) & ~(size - 1);
    hole_end   = hole_start + size - 1;
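    /*
     * E.g. for order 1 with 4 KiB pages, size is 8 KiB and hole_start
     * is image->control_page rounded up to the next 8 KiB boundary,
     * so candidate holes are always naturally aligned to their size.
     */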
    while (hole_end <= crashk_res.end) {
        unsigned long i;

        cond_resched();

        if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
            break;
        /* See if I overlap any of the segments */
        for (i = 0; i < image->nr_segments; i++) {
            unsigned long mstart, mend;

            mstart = image->segment[i].mem;
            mend   = mstart + image->segment[i].memsz - 1;
            if ((hole_end >= mstart) && (hole_start <= mend)) {
                /* Advance the hole to the end of the segment */
                hole_start = (mend + (size - 1)) & ~(size - 1);
                hole_end   = hole_start + size - 1;
                break;
            }
        }
        /* If I don't overlap any segments I have found my hole! */
        if (i == image->nr_segments) {
            pages = pfn_to_page(hole_start >> PAGE_SHIFT);
            image->control_page = hole_end;
            break;
        }
    }

    /* Ensure that these pages are decrypted if SME is enabled. */
    if (pages)
        arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

    return pages;
}


struct page *kimage_alloc_control_pages(struct kimage *image,
                     unsigned int order)
{
    struct page *pages = NULL;

    switch (image->type) {
    case KEXEC_TYPE_DEFAULT:
        pages = kimage_alloc_normal_control_pages(image, order);
        break;
    case KEXEC_TYPE_CRASH:
        pages = kimage_alloc_crash_control_pages(image, order);
        break;
    }

    return pages;
}

int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
    struct page *vmcoreinfo_page;
    void *safecopy;

    if (image->type != KEXEC_TYPE_CRASH)
        return 0;

    /*
     * For kdump, allocate one vmcoreinfo safe copy from the
     * crash memory.  As we have arch_kexec_protect_crashkres()
     * after the kexec syscall, it is naturally protected from
     * write (and even read) access under the kernel direct
     * mapping.  On the other hand, we still need to access it
     * when a crash happens, to generate the vmcoreinfo note,
     * so we rely on vmap for this purpose.
     */
    vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
    if (!vmcoreinfo_page) {
        pr_warn("Could not allocate vmcoreinfo buffer\n");
        return -ENOMEM;
    }
    safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
    if (!safecopy) {
        pr_warn("Could not vmap vmcoreinfo buffer\n");
        return -ENOMEM;
    }

    image->vmcoreinfo_data_copy = safecopy;
    crash_update_vmcoreinfo_safecopy(safecopy);

    return 0;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
    if (*image->entry != 0)
        image->entry++;

    if (image->entry == image->last_entry) {
        kimage_entry_t *ind_page;
        struct page *page;

        page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
        if (!page)
            return -ENOMEM;

        ind_page = page_address(page);
        *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
        image->entry = ind_page;
        image->last_entry = ind_page +
                      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
    }
    *image->entry = entry;
    image->entry++;
    *image->entry = 0;

    return 0;
}
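
/*
 * Note that kimage_add_entry() always stores a 0 terminator after the
 * entry it just added, so the list stays well formed even if loading
 * aborts before kimage_terminate() replaces that 0 with IND_DONE.
 */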

static int kimage_set_destination(struct kimage *image,
                   unsigned long destination)
{
    int result;

    destination &= PAGE_MASK;
    result = kimage_add_entry(image, destination | IND_DESTINATION);

    return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
    int result;

    page &= PAGE_MASK;
    result = kimage_add_entry(image, page | IND_SOURCE);

    return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
    /* Walk through and free any extra destination pages I may have */
    kimage_free_page_list(&image->dest_pages);

    /* Walk through and free any unusable pages I have cached */
    kimage_free_page_list(&image->unusable_pages);

}

void kimage_terminate(struct kimage *image)
{
    if (*image->entry != 0)
        image->entry++;

    *image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
    for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
        ptr = (entry & IND_INDIRECTION) ? \
            boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
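
/*
 * The walk follows IND_INDIRECTION links instead of advancing the
 * pointer past the end of a page, and stops at the first IND_DONE
 * (or zero) entry, mirroring the format built by kimage_add_entry().
 */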

static void kimage_free_entry(kimage_entry_t entry)
{
    struct page *page;

    page = boot_pfn_to_page(entry >> PAGE_SHIFT);
    kimage_free_pages(page);
}

void kimage_free(struct kimage *image)
{
    kimage_entry_t *ptr, entry;
    kimage_entry_t ind = 0;

    if (!image)
        return;

    if (image->vmcoreinfo_data_copy) {
        crash_update_vmcoreinfo_safecopy(NULL);
        vunmap(image->vmcoreinfo_data_copy);
    }

    kimage_free_extra_pages(image);
    for_each_kimage_entry(image, ptr, entry) {
        if (entry & IND_INDIRECTION) {
            /* Free the previous indirection page */
            if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);
            /* Save this indirection page until we are
             * done with it.
             */
            ind = entry;
        } else if (entry & IND_SOURCE)
            kimage_free_entry(entry);
    }
    /* Free the final indirection page */
    if (ind & IND_INDIRECTION)
        kimage_free_entry(ind);

    /* Handle any machine specific cleanup */
    machine_kexec_cleanup(image);

    /* Free the kexec control pages... */
    kimage_free_page_list(&image->control_pages);

    /*
     * Free up any temporary buffers allocated.  This can happen
     * if an error occurred long after the buffers were allocated.
     */
    if (image->file_mode)
        kimage_file_post_load_cleanup(image);

    kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
                    unsigned long page)
{
    kimage_entry_t *ptr, entry;
    unsigned long destination = 0;

    for_each_kimage_entry(image, ptr, entry) {
        if (entry & IND_DESTINATION)
            destination = entry & PAGE_MASK;
        else if (entry & IND_SOURCE) {
            if (page == destination)
                return ptr;
            destination += PAGE_SIZE;
        }
    }

    return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
                    gfp_t gfp_mask,
                    unsigned long destination)
{
    /*
     * Here we implement safeguards to ensure that a source page
     * is not copied to its destination page before the data on
     * the destination page is no longer useful.
     *
     * To do this we maintain the invariant that a source page is
     * either its own destination page, or it is not a
     * destination page at all.
     *
     * That is slightly stronger than required, but the proof
     * that no problems will occur is trivial, and the
     * implementation is simple to verify.
     *
     * When allocating all pages normally this algorithm will run
     * in O(N) time, but in the worst case it will run in O(N^2)
     * time.  If the runtime is a problem the data structures can
     * be fixed.
     */
    struct page *page;
    unsigned long addr;

    /*
     * Walk through the list of destination pages, and see if I
     * have a match.
     */
    list_for_each_entry(page, &image->dest_pages, lru) {
        addr = page_to_boot_pfn(page) << PAGE_SHIFT;
        if (addr == destination) {
            list_del(&page->lru);
            return page;
        }
    }
    page = NULL;
    while (1) {
        kimage_entry_t *old;

        /* Allocate a page, if we run out of memory give up */
        page = kimage_alloc_pages(gfp_mask, 0);
        if (!page)
            return NULL;
        /* If the page cannot be used file it away */
        if (page_to_boot_pfn(page) >
                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
            list_add(&page->lru, &image->unusable_pages);
            continue;
        }
        addr = page_to_boot_pfn(page) << PAGE_SHIFT;

        /* If it is the destination page we want, use it */
        if (addr == destination)
            break;

        /* If the page is not a destination page, use it */
        if (!kimage_is_destination_range(image, addr,
                          addr + PAGE_SIZE))
            break;

        /*
         * I know that the page is someone's destination page.
         * See if there is already a source page for this
         * destination page.  And if so swap the source pages.
         */
        old = kimage_dst_used(image, addr);
        if (old) {
            /* If so move it */
            unsigned long old_addr;
            struct page *old_page;

            old_addr = *old & PAGE_MASK;
            old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
            copy_highpage(page, old_page);
            *old = addr | (*old & ~PAGE_MASK);

            /* The old page I have found cannot be a
             * destination page, so return it if its
             * gfp_flags honor the ones passed in.
             */
            if (!(gfp_mask & __GFP_HIGHMEM) &&
                PageHighMem(old_page)) {
                kimage_free_pages(old_page);
                continue;
            }
            page = old_page;
            break;
        }
        /* Place the page on the destination list, to be used later */
        list_add(&page->lru, &image->dest_pages);
    }

    return page;
}
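
/*
 * To illustrate the swap case above: if the freshly allocated page
 * happens to sit at a destination address D for which a source page S
 * is already queued, the contents of S are copied into the new page,
 * the entry for D is repointed at the new page (which is now its own
 * destination), and S, no longer constrained, is handed back to the
 * caller.  This preserves the invariant described at the top of
 * kimage_alloc_page().
 */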

static int kimage_load_normal_segment(struct kimage *image,
                     struct kexec_segment *segment)
{
    unsigned long maddr;
    size_t ubytes, mbytes;
    int result;
    unsigned char __user *buf = NULL;
    unsigned char *kbuf = NULL;

    if (image->file_mode)
        kbuf = segment->kbuf;
    else
        buf = segment->buf;
    ubytes = segment->bufsz;
    mbytes = segment->memsz;
    maddr = segment->mem;

    result = kimage_set_destination(image, maddr);
    if (result < 0)
        goto out;

    while (mbytes) {
        struct page *page;
        char *ptr;
        size_t uchunk, mchunk;

        page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
        if (!page) {
            result  = -ENOMEM;
            goto out;
        }
        result = kimage_add_page(image, page_to_boot_pfn(page)
                                << PAGE_SHIFT);
        if (result < 0)
            goto out;

        ptr = kmap(page);
        /* Start with a clear page */
        clear_page(ptr);
        ptr += maddr & ~PAGE_MASK;
        mchunk = min_t(size_t, mbytes,
                PAGE_SIZE - (maddr & ~PAGE_MASK));
        uchunk = min(ubytes, mchunk);

        /* For file based kexec, source pages are in kernel memory */
        if (image->file_mode)
            memcpy(ptr, kbuf, uchunk);
        else
            result = copy_from_user(ptr, buf, uchunk);
        kunmap(page);
        if (result) {
            result = -EFAULT;
            goto out;
        }
        ubytes -= uchunk;
        maddr  += mchunk;
        if (image->file_mode)
            kbuf += mchunk;
        else
            buf += mchunk;
        mbytes -= mchunk;

        cond_resched();
    }
out:
    return result;
}
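
/*
 * Note that because each destination page above is cleared before
 * uchunk bytes are copied into it, the memsz - bufsz tail of a normal
 * segment ends up zero-filled, matching the explicit memset in the
 * crash-segment path below.
 */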

static int kimage_load_crash_segment(struct kimage *image,
                    struct kexec_segment *segment)
{
    /* For crash dump kernels we simply copy the data from
     * user space to its destination.
     * We do things a page at a time for the sake of kmap.
     */
    unsigned long maddr;
    size_t ubytes, mbytes;
    int result;
    unsigned char __user *buf = NULL;
    unsigned char *kbuf = NULL;

    result = 0;
    if (image->file_mode)
        kbuf = segment->kbuf;
    else
        buf = segment->buf;
    ubytes = segment->bufsz;
    mbytes = segment->memsz;
    maddr = segment->mem;
    while (mbytes) {
        struct page *page;
        char *ptr;
        size_t uchunk, mchunk;

        page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
        if (!page) {
            result  = -ENOMEM;
            goto out;
        }
        arch_kexec_post_alloc_pages(page_address(page), 1, 0);
        ptr = kmap(page);
        ptr += maddr & ~PAGE_MASK;
        mchunk = min_t(size_t, mbytes,
                PAGE_SIZE - (maddr & ~PAGE_MASK));
        uchunk = min(ubytes, mchunk);
        if (mchunk > uchunk) {
            /* Zero the trailing part of the page */
            memset(ptr + uchunk, 0, mchunk - uchunk);
        }

        /* For file based kexec, source pages are in kernel memory */
        if (image->file_mode)
            memcpy(ptr, kbuf, uchunk);
        else
            result = copy_from_user(ptr, buf, uchunk);
        kexec_flush_icache_page(page);
        kunmap(page);
        arch_kexec_pre_free_pages(page_address(page), 1);
        if (result) {
            result = -EFAULT;
            goto out;
        }
        ubytes -= uchunk;
        maddr  += mchunk;
        if (image->file_mode)
            kbuf += mchunk;
        else
            buf += mchunk;
        mbytes -= mchunk;

        cond_resched();
    }
out:
    return result;
}

int kimage_load_segment(struct kimage *image,
                struct kexec_segment *segment)
{
    int result = -ENOMEM;

    switch (image->type) {
    case KEXEC_TYPE_DEFAULT:
        result = kimage_load_normal_segment(image, segment);
        break;
    case KEXEC_TYPE_CRASH:
        result = kimage_load_crash_segment(image, segment);
        break;
    }

    return result;
}

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;
#ifdef CONFIG_SYSCTL
static struct ctl_table kexec_core_sysctls[] = {
    {
        .procname   = "kexec_load_disabled",
        .data       = &kexec_load_disabled,
        .maxlen     = sizeof(int),
        .mode       = 0644,
        /* only handle a transition from default "0" to "1" */
        .proc_handler   = proc_dointvec_minmax,
        .extra1     = SYSCTL_ONE,
        .extra2     = SYSCTL_ONE,
    },
    { }
};

static int __init kexec_core_sysctl_init(void)
{
    register_sysctl_init("kernel", kexec_core_sysctls);
    return 0;
}
late_initcall(kexec_core_sysctl_init);
#endif
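
/*
 * Since both extra1 and extra2 are SYSCTL_ONE, 1 is the only value the
 * handler accepts: e.g. "echo 1 > /proc/sys/kernel/kexec_load_disabled"
 * disables further kexec loads, while writing 0 back is rejected.
 */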

/*
 * No panic_cpu check version of crash_kexec().  This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
    /* Take the kexec_mutex here to prevent sys_kexec_load
     * running on one cpu from replacing the crash kernel
     * we are using after a panic on a different cpu.
     *
     * If the crash kernel was not located in a fixed area
     * of memory the xchg(&kexec_crash_image) would be
     * sufficient.  But since I reuse the memory...
     */
    if (mutex_trylock(&kexec_mutex)) {
        if (kexec_crash_image) {
            struct pt_regs fixed_regs;

            crash_setup_regs(&fixed_regs, regs);
            crash_save_vmcoreinfo();
            machine_crash_shutdown(&fixed_regs);
            machine_kexec(kexec_crash_image);
        }
        mutex_unlock(&kexec_mutex);
    }
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

void crash_kexec(struct pt_regs *regs)
{
    int old_cpu, this_cpu;

    /*
     * Only one CPU is allowed to execute the crash_kexec() code as with
     * panic().  Otherwise parallel calls of panic() and crash_kexec()
     * may stop each other.  To exclude them, we use panic_cpu here too.
     */
    this_cpu = raw_smp_processor_id();
    old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
    if (old_cpu == PANIC_CPU_INVALID) {
        /* This is the 1st CPU which comes here, so go ahead. */
        __crash_kexec(regs);

        /*
         * Reset panic_cpu to allow another panic()/crash_kexec()
         * call.
         */
        atomic_set(&panic_cpu, PANIC_CPU_INVALID);
    }
}

size_t crash_get_memory_size(void)
{
    size_t size = 0;

    mutex_lock(&kexec_mutex);
    if (crashk_res.end != crashk_res.start)
        size = resource_size(&crashk_res);
    mutex_unlock(&kexec_mutex);
    return size;
}

int crash_shrink_memory(unsigned long new_size)
{
    int ret = 0;
    unsigned long start, end;
    unsigned long old_size;
    struct resource *ram_res;

    mutex_lock(&kexec_mutex);

    if (kexec_crash_image) {
        ret = -ENOENT;
        goto unlock;
    }
    start = crashk_res.start;
    end = crashk_res.end;
    old_size = (end == 0) ? 0 : end - start + 1;
    if (new_size >= old_size) {
        ret = (new_size == old_size) ? 0 : -EINVAL;
        goto unlock;
    }

    ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
    if (!ram_res) {
        ret = -ENOMEM;
        goto unlock;
    }

    start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
    end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

    crash_free_reserved_phys_range(end, crashk_res.end);

    if ((start == end) && (crashk_res.parent != NULL))
        release_resource(&crashk_res);

    ram_res->start = end;
    ram_res->end = crashk_res.end;
    ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
    ram_res->name = "System RAM";

    crashk_res.end = end - 1;

    insert_resource(&iomem_resource, ram_res);

unlock:
    mutex_unlock(&kexec_mutex);
    return ret;
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
    struct elf_prstatus prstatus;
    u32 *buf;

    if ((cpu < 0) || (cpu >= nr_cpu_ids))
        return;

    /* Using ELF notes here is opportunistic.
     * I need a well defined structure format
     * for the data I pass, and I need tags
     * on the data to indicate what information I have
     * squirrelled away.  ELF notes happen to provide
     * all of that, so there is no need to invent something new.
     */
    buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
    if (!buf)
        return;
    memset(&prstatus, 0, sizeof(prstatus));
    prstatus.common.pr_pid = current->pid;
    elf_core_copy_regs(&prstatus.pr_reg, regs);
    buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                  &prstatus, sizeof(prstatus));
    final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
    /* Allocate memory for saving cpu registers. */
    size_t size, align;

    /*
     * crash_notes could be allocated across 2 vmalloc pages when percpu
     * is vmalloc based.  vmalloc doesn't guarantee that 2 contiguous
     * vmalloc pages are also on 2 contiguous physical pages.  In that
     * case the 2nd part of crash_notes in the 2nd page could be lost,
     * since only the starting address and size of crash_notes are
     * exported through sysfs.  Here round up the size of crash_notes
     * to the nearest power of two and pass it to __alloc_percpu as
     * the align value.  This makes sure crash_notes is allocated
     * inside one physical page.
     */
    size = sizeof(note_buf_t);
    align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
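    /*
     * For example, if sizeof(note_buf_t) were 428 bytes the alignment
     * would round up to 512.  Together with the BUILD_BUG_ON below this
     * gives size <= align <= PAGE_SIZE, so the allocation can never
     * straddle a physical page boundary.
     */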

    /*
     * Break the compile if size is bigger than PAGE_SIZE, since
     * crash_notes would then definitely span 2 pages.
     */
    BUILD_BUG_ON(size > PAGE_SIZE);

    crash_notes = __alloc_percpu(size, align);
    if (!crash_notes) {
        pr_warn("Memory allocation for saving cpu register states failed\n");
        return -ENOMEM;
    }
    return 0;
}
subsys_initcall(crash_notes_memory_init);


/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
    int error = 0;

    if (!mutex_trylock(&kexec_mutex))
        return -EBUSY;
    if (!kexec_image) {
        error = -EINVAL;
        goto Unlock;
    }

#ifdef CONFIG_KEXEC_JUMP
    if (kexec_image->preserve_context) {
        pm_prepare_console();
        error = freeze_processes();
        if (error) {
            error = -EBUSY;
            goto Restore_console;
        }
        suspend_console();
        error = dpm_suspend_start(PMSG_FREEZE);
        if (error)
            goto Resume_console;
        /* At this point, dpm_suspend_start() has been called,
         * but *not* dpm_suspend_end().  We *must* call
         * dpm_suspend_end() now.  Otherwise, drivers for
         * some devices (e.g. interrupt controllers) become
         * desynchronized with the actual state of the
         * hardware at resume time, and evil weirdness ensues.
         */
        error = dpm_suspend_end(PMSG_FREEZE);
        if (error)
            goto Resume_devices;
        error = suspend_disable_secondary_cpus();
        if (error)
            goto Enable_cpus;
        local_irq_disable();
        error = syscore_suspend();
        if (error)
            goto Enable_irqs;
    } else
#endif
    {
        kexec_in_progress = true;
        kernel_restart_prepare("kexec reboot");
        migrate_to_reboot_cpu();

        /*
         * migrate_to_reboot_cpu() disables CPU hotplug assuming that
         * no further code needs to use CPU hotplug (which is true in
         * the reboot case). However, the kexec path depends on using
         * CPU hotplug again; so re-enable it here.
         */
        cpu_hotplug_enable();
        pr_notice("Starting new kernel\n");
        machine_shutdown();
    }

    kmsg_dump(KMSG_DUMP_SHUTDOWN);
    machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
    if (kexec_image->preserve_context) {
        syscore_resume();
 Enable_irqs:
        local_irq_enable();
 Enable_cpus:
        suspend_enable_secondary_cpus();
        dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
        dpm_resume_end(PMSG_RESTORE);
 Resume_console:
        resume_console();
        thaw_processes();
 Restore_console:
        pm_restore_console();
    }
#endif

 Unlock:
    mutex_unlock(&kexec_mutex);
    return error;
}