0001 /*
0002  * kexec.c - kexec system call core code.
0003  * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
0004  *
0005  * This source code is licensed under the GNU General Public License,
0006  * Version 2.  See the file COPYING for more details.
0007  */
0008 
0009 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0010 
0011 #include <linux/capability.h>
0012 #include <linux/mm.h>
0013 #include <linux/file.h>
0014 #include <linux/slab.h>
0015 #include <linux/fs.h>
0016 #include <linux/kexec.h>
0017 #include <linux/mutex.h>
0018 #include <linux/list.h>
0019 #include <linux/highmem.h>
0020 #include <linux/syscalls.h>
0021 #include <linux/reboot.h>
0022 #include <linux/ioport.h>
0023 #include <linux/hardirq.h>
0024 #include <linux/elf.h>
0025 #include <linux/elfcore.h>
0026 #include <linux/utsname.h>
0027 #include <linux/numa.h>
0028 #include <linux/suspend.h>
0029 #include <linux/device.h>
0030 #include <linux/freezer.h>
0031 #include <linux/pm.h>
0032 #include <linux/cpu.h>
0033 #include <linux/uaccess.h>
0034 #include <linux/io.h>
0035 #include <linux/console.h>
0036 #include <linux/vmalloc.h>
0037 #include <linux/swap.h>
0038 #include <linux/syscore_ops.h>
0039 #include <linux/compiler.h>
0040 #include <linux/hugetlb.h>
0041 
0042 #include <asm/page.h>
0043 #include <asm/sections.h>
0044 
0045 #include <crypto/hash.h>
0046 #include <crypto/sha.h>
0047 #include "kexec_internal.h"
0048 
0049 DEFINE_MUTEX(kexec_mutex);
0050 
0051 /* Per cpu memory for storing cpu states in case of system crash. */
0052 note_buf_t __percpu *crash_notes;
0053 
0054 /* vmcoreinfo stuff */
0055 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
0056 u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
0057 size_t vmcoreinfo_size;
0058 size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
0059 
0060 /* Flag to indicate we are going to kexec a new kernel */
0061 bool kexec_in_progress = false;
0062 
0063 
0064 /* Location of the reserved area for the crash kernel */
0065 struct resource crashk_res = {
0066     .name  = "Crash kernel",
0067     .start = 0,
0068     .end   = 0,
0069     .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
0070     .desc  = IORES_DESC_CRASH_KERNEL
0071 };
0072 struct resource crashk_low_res = {
0073     .name  = "Crash kernel",
0074     .start = 0,
0075     .end   = 0,
0076     .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
0077     .desc  = IORES_DESC_CRASH_KERNEL
0078 };
0079 
0080 int kexec_should_crash(struct task_struct *p)
0081 {
0082     /*
0083      * If crash_kexec_post_notifiers is enabled, don't run
0084      * crash_kexec() here yet, which must be run after panic
0085      * notifiers in panic().
0086      */
0087     if (crash_kexec_post_notifiers)
0088         return 0;
0089     /*
0090      * There are 4 panic() calls in the do_exit() path, each of which
0091      * corresponds to one of these 4 conditions.
0092      */
0093     if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
0094         return 1;
0095     return 0;
0096 }
0097 
0098 int kexec_crash_loaded(void)
0099 {
0100     return !!kexec_crash_image;
0101 }
0102 EXPORT_SYMBOL_GPL(kexec_crash_loaded);
0103 
0104 /*
0105  * When kexec transitions to the new kernel there is a one-to-one
0106  * mapping between physical and virtual addresses.  On processors
0107  * where you can disable the MMU this is trivial and easy.  For
0108  * others it is still a simple predictable page table to setup.
0109  *
0110  * In that environment kexec copies the new kernel to its final
0111  * resting place.  This means I can only support memory whose
0112  * physical address can fit in an unsigned long.  In particular
0113  * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
0114  * If the assembly stub has more restrictive requirements
0115  * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
0116  * defined more restrictively in <asm/kexec.h>.
0117  *
0118  * The code for the transition from the current kernel to the
0119  * new kernel is placed in the control_code_buffer, whose size
0120  * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
0121  * page of memory is necessary, but some architectures require more.
0122  * Because this memory must be identity mapped in the transition from
0123  * virtual to physical addresses it must live in the range
0124  * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
0125  * modifiable.
0126  *
0127  * The assembly stub in the control code buffer is passed a linked list
0128  * of descriptor pages detailing the source pages of the new kernel,
0129  * and the destination addresses of those source pages.  As this data
0130  * structure is not used in the context of the current OS, it must
0131  * be self-contained.
0132  *
0133  * The code has been made to work with highmem pages and will use a
0134  * destination page in its final resting place (if it happens
0135  * to allocate it).  The end product of this is that most of the
0136  * physical address space, and most of RAM can be used.
0137  *
0138  * Future directions include:
0139  *  - allocating a page table with the control code buffer identity
0140  *    mapped, to simplify machine_kexec and make kexec_on_panic more
0141  *    reliable.
0142  */
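/*
 * Illustrative sketch (not part of the original comment): the page list
 * handed to the assembly stub is a flat sequence of kimage_entry_t
 * values, each a page address tagged with its role:
 *
 *   dest | IND_DESTINATION   set the current destination page
 *   src  | IND_SOURCE        copy this source page to the current
 *                            destination, then advance the destination
 *                            by PAGE_SIZE
 *   next | IND_INDIRECTION   continue reading entries from the page
 *                            at 'next'
 *   IND_DONE                 end of the list
 *
 * kimage_set_destination(), kimage_add_page() and kimage_terminate()
 * below emit exactly these entries, and for_each_kimage_entry() walks
 * them back.
 */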
0143 
0144 /*
0145  * KIMAGE_NO_DEST is an impossible destination address, used for
0146  * allocating pages whose destination address we do not care about.
0147  */
0148 #define KIMAGE_NO_DEST (-1UL)
0149 #define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
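/*
 * For example, assuming 4 KiB pages (PAGE_SHIFT == 12),
 * PAGE_COUNT(0x3001) rounds the byte count up to whole pages and
 * yields 4.
 */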
0150 
0151 static struct page *kimage_alloc_page(struct kimage *image,
0152                        gfp_t gfp_mask,
0153                        unsigned long dest);
0154 
0155 int sanity_check_segment_list(struct kimage *image)
0156 {
0157     int i;
0158     unsigned long nr_segments = image->nr_segments;
0159     unsigned long total_pages = 0;
0160 
0161     /*
0162      * Verify we have good destination addresses.  The caller is
0163      * responsible for making certain we don't attempt to load
0164      * the new image into invalid or reserved areas of RAM.  This
0165      * just verifies it is an address we can use.
0166      *
0167      * Since the kernel does everything in page size chunks ensure
0168      * the destination addresses are page aligned.  Too many
0169      * special cases crop up when we don't do this.  The most
0170      * insidious is getting overlapping destination addresses
0171      * simply because addresses are changed to page size
0172      * granularity.
0173      */
0174     for (i = 0; i < nr_segments; i++) {
0175         unsigned long mstart, mend;
0176 
0177         mstart = image->segment[i].mem;
0178         mend   = mstart + image->segment[i].memsz;
0179         if (mstart > mend)
0180             return -EADDRNOTAVAIL;
0181         if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
0182             return -EADDRNOTAVAIL;
0183         if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
0184             return -EADDRNOTAVAIL;
0185     }
0186 
0187     /* Verify our destination addresses do not overlap.
0188      * If we allowed overlapping destination addresses
0189      * through, very weird things can happen with no
0190      * easy explanation as one segment stomps on another.
0191      */
0192     for (i = 0; i < nr_segments; i++) {
0193         unsigned long mstart, mend;
0194         unsigned long j;
0195 
0196         mstart = image->segment[i].mem;
0197         mend   = mstart + image->segment[i].memsz;
0198         for (j = 0; j < i; j++) {
0199             unsigned long pstart, pend;
0200 
0201             pstart = image->segment[j].mem;
0202             pend   = pstart + image->segment[j].memsz;
0203             /* Do the segments overlap ? */
0204             if ((mend > pstart) && (mstart < pend))
0205                 return -EINVAL;
0206         }
0207     }
0208 
0209     /* Ensure our buffer sizes do not exceed
0210      * our memory sizes.  This should always be the case,
0211      * and it is easier to check up front than to be surprised
0212      * later on.
0213      */
0214     for (i = 0; i < nr_segments; i++) {
0215         if (image->segment[i].bufsz > image->segment[i].memsz)
0216             return -EINVAL;
0217     }
0218 
0219     /*
0220      * Verify that no more than half of memory will be consumed. If the
0221      * request from userspace is too large, a large amount of time will be
0222      * wasted allocating pages, which can cause a soft lockup.
0223      */
0224     for (i = 0; i < nr_segments; i++) {
0225         if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
0226             return -EINVAL;
0227 
0228         total_pages += PAGE_COUNT(image->segment[i].memsz);
0229     }
0230 
0231     if (total_pages > totalram_pages / 2)
0232         return -EINVAL;
0233 
0234     /*
0235      * Verify we have good destination addresses.  Normally
0236      * the caller is responsible for making certain we don't
0237      * attempt to load the new image into invalid or reserved
0238      * areas of RAM.  But crash kernels are preloaded into a
0239      * reserved area of ram.  We must ensure the addresses
0240      * are in the reserved area otherwise preloading the
0241      * kernel could corrupt things.
0242      */
0243 
0244     if (image->type == KEXEC_TYPE_CRASH) {
0245         for (i = 0; i < nr_segments; i++) {
0246             unsigned long mstart, mend;
0247 
0248             mstart = image->segment[i].mem;
0249             mend = mstart + image->segment[i].memsz - 1;
0250             /* Ensure we are within the crash kernel limits */
0251             if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
0252                 (mend > phys_to_boot_phys(crashk_res.end)))
0253                 return -EADDRNOTAVAIL;
0254         }
0255     }
0256 
0257     return 0;
0258 }
0259 
0260 struct kimage *do_kimage_alloc_init(void)
0261 {
0262     struct kimage *image;
0263 
0264     /* Allocate a controlling structure */
0265     image = kzalloc(sizeof(*image), GFP_KERNEL);
0266     if (!image)
0267         return NULL;
0268 
0269     image->head = 0;
0270     image->entry = &image->head;
0271     image->last_entry = &image->head;
0272     image->control_page = ~0; /* By default this does not apply */
0273     image->type = KEXEC_TYPE_DEFAULT;
0274 
0275     /* Initialize the list of control pages */
0276     INIT_LIST_HEAD(&image->control_pages);
0277 
0278     /* Initialize the list of destination pages */
0279     INIT_LIST_HEAD(&image->dest_pages);
0280 
0281     /* Initialize the list of unusable pages */
0282     INIT_LIST_HEAD(&image->unusable_pages);
0283 
0284     return image;
0285 }
0286 
0287 int kimage_is_destination_range(struct kimage *image,
0288                     unsigned long start,
0289                     unsigned long end)
0290 {
0291     unsigned long i;
0292 
0293     for (i = 0; i < image->nr_segments; i++) {
0294         unsigned long mstart, mend;
0295 
0296         mstart = image->segment[i].mem;
0297         mend = mstart + image->segment[i].memsz;
0298         if ((end > mstart) && (start < mend))
0299             return 1;
0300     }
0301 
0302     return 0;
0303 }
0304 
0305 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
0306 {
0307     struct page *pages;
0308 
0309     pages = alloc_pages(gfp_mask, order);
0310     if (pages) {
0311         unsigned int count, i;
0312 
0313         pages->mapping = NULL;
0314         set_page_private(pages, order);
0315         count = 1 << order;
0316         for (i = 0; i < count; i++)
0317             SetPageReserved(pages + i);
0318     }
0319 
0320     return pages;
0321 }
0322 
0323 static void kimage_free_pages(struct page *page)
0324 {
0325     unsigned int order, count, i;
0326 
0327     order = page_private(page);
0328     count = 1 << order;
0329     for (i = 0; i < count; i++)
0330         ClearPageReserved(page + i);
0331     __free_pages(page, order);
0332 }
0333 
0334 void kimage_free_page_list(struct list_head *list)
0335 {
0336     struct page *page, *next;
0337 
0338     list_for_each_entry_safe(page, next, list, lru) {
0339         list_del(&page->lru);
0340         kimage_free_pages(page);
0341     }
0342 }
0343 
0344 static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
0345                             unsigned int order)
0346 {
0347     /* Control pages are special, they are the intermediaries
0348      * that are needed while we copy the rest of the pages
0349      * to their final resting place.  As such they must
0350      * not conflict with either the destination addresses
0351      * or memory the kernel is already using.
0352      *
0353      * The only case where we really need more than one of
0354      * these is for architectures where we cannot disable
0355      * the MMU and must instead generate an identity mapped
0356      * page table for all of the memory.
0357      *
0358      * At worst this runs in O(N) of the image size.
0359      */
0360     struct list_head extra_pages;
0361     struct page *pages;
0362     unsigned int count;
0363 
0364     count = 1 << order;
0365     INIT_LIST_HEAD(&extra_pages);
0366 
0367     /* Loop while I can allocate a page and the page allocated
0368      * is a destination page.
0369      */
0370     do {
0371         unsigned long pfn, epfn, addr, eaddr;
0372 
0373         pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
0374         if (!pages)
0375             break;
0376         pfn   = page_to_boot_pfn(pages);
0377         epfn  = pfn + count;
0378         addr  = pfn << PAGE_SHIFT;
0379         eaddr = epfn << PAGE_SHIFT;
0380         if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
0381                   kimage_is_destination_range(image, addr, eaddr)) {
0382             list_add(&pages->lru, &extra_pages);
0383             pages = NULL;
0384         }
0385     } while (!pages);
0386 
0387     if (pages) {
0388         /* Remember the allocated page... */
0389         list_add(&pages->lru, &image->control_pages);
0390 
0391         /* Because the page is already in its destination
0392          * location we will never allocate another page at
0393          * that address.  Therefore kimage_alloc_pages
0394          * will not return it (again) and we don't need
0395          * to give it an entry in image->segment[].
0396          */
0397     }
0398     /* Deal with the destination pages I have inadvertently allocated.
0399      *
0400      * Ideally I would convert multi-page allocations into single
0401      * page allocations, and add everything to image->dest_pages.
0402      *
0403      * For now it is simpler to just free the pages.
0404      */
0405     kimage_free_page_list(&extra_pages);
0406 
0407     return pages;
0408 }
0409 
0410 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
0411                               unsigned int order)
0412 {
0413     /* Control pages are special, they are the intermediaries
0414      * that are needed while we copy the rest of the pages
0415      * to their final resting place.  As such they must
0416      * not conflict with either the destination addresses
0417      * or memory the kernel is already using.
0418      *
0419      * Control pages are also the only pages we must allocate
0420      * when loading a crash kernel.  All of the other pages
0421      * are specified by the segments and we just memcpy
0422      * into them directly.
0423      *
0424      * The only case where we really need more than one of
0425      * these is for architectures where we cannot disable
0426      * the MMU and must instead generate an identity mapped
0427      * page table for all of the memory.
0428      *
0429      * Given the low demand this implements a very simple
0430      * allocator that finds the first hole of the appropriate
0431      * size in the reserved memory region, and allocates all
0432      * of the memory up to and including the hole.
0433      */
0434     unsigned long hole_start, hole_end, size;
0435     struct page *pages;
0436 
0437     pages = NULL;
0438     size = (1 << order) << PAGE_SHIFT;
0439     hole_start = (image->control_page + (size - 1)) & ~(size - 1);
0440     hole_end   = hole_start + size - 1;
0441     while (hole_end <= crashk_res.end) {
0442         unsigned long i;
0443 
0444         cond_resched();
0445 
0446         if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
0447             break;
0448         /* See if I overlap any of the segments */
0449         for (i = 0; i < image->nr_segments; i++) {
0450             unsigned long mstart, mend;
0451 
0452             mstart = image->segment[i].mem;
0453             mend   = mstart + image->segment[i].memsz - 1;
0454             if ((hole_end >= mstart) && (hole_start <= mend)) {
0455                 /* Advance the hole to the end of the segment */
0456                 hole_start = (mend + (size - 1)) & ~(size - 1);
0457                 hole_end   = hole_start + size - 1;
0458                 break;
0459             }
0460         }
0461         /* If I don't overlap any segments I have found my hole! */
0462         if (i == image->nr_segments) {
0463             pages = pfn_to_page(hole_start >> PAGE_SHIFT);
0464             image->control_page = hole_end;
0465             break;
0466         }
0467     }
0468 
0469     return pages;
0470 }
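/*
 * A worked example of the hole arithmetic above, assuming 4 KiB pages
 * and illustrative addresses: for order == 1, size == 0x2000.  Starting
 * from image->control_page == 0x10001000, the candidate hole is
 * hole_start == (0x10001000 + 0x1fff) & ~0x1fff == 0x10002000 and
 * hole_end == 0x10003fff, i.e. a naturally aligned two-page block that
 * is then tested against every loaded segment.
 */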
0471 
0472 
0473 struct page *kimage_alloc_control_pages(struct kimage *image,
0474                      unsigned int order)
0475 {
0476     struct page *pages = NULL;
0477 
0478     switch (image->type) {
0479     case KEXEC_TYPE_DEFAULT:
0480         pages = kimage_alloc_normal_control_pages(image, order);
0481         break;
0482     case KEXEC_TYPE_CRASH:
0483         pages = kimage_alloc_crash_control_pages(image, order);
0484         break;
0485     }
0486 
0487     return pages;
0488 }
0489 
0490 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
0491 {
0492     if (*image->entry != 0)
0493         image->entry++;
0494 
0495     if (image->entry == image->last_entry) {
0496         kimage_entry_t *ind_page;
0497         struct page *page;
0498 
0499         page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
0500         if (!page)
0501             return -ENOMEM;
0502 
0503         ind_page = page_address(page);
0504         *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
0505         image->entry = ind_page;
0506         image->last_entry = ind_page +
0507                       ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
0508     }
0509     *image->entry = entry;
0510     image->entry++;
0511     *image->entry = 0;
0512 
0513     return 0;
0514 }
0515 
0516 static int kimage_set_destination(struct kimage *image,
0517                    unsigned long destination)
0518 {
0519     int result;
0520 
0521     destination &= PAGE_MASK;
0522     result = kimage_add_entry(image, destination | IND_DESTINATION);
0523 
0524     return result;
0525 }
0526 
0527 
0528 static int kimage_add_page(struct kimage *image, unsigned long page)
0529 {
0530     int result;
0531 
0532     page &= PAGE_MASK;
0533     result = kimage_add_entry(image, page | IND_SOURCE);
0534 
0535     return result;
0536 }
0537 
0538 
0539 static void kimage_free_extra_pages(struct kimage *image)
0540 {
0541     /* Walk through and free any extra destination pages I may have */
0542     kimage_free_page_list(&image->dest_pages);
0543 
0544     /* Walk through and free any unusable pages I have cached */
0545     kimage_free_page_list(&image->unusable_pages);
0546 
0547 }
0548 void kimage_terminate(struct kimage *image)
0549 {
0550     if (*image->entry != 0)
0551         image->entry++;
0552 
0553     *image->entry = IND_DONE;
0554 }
0555 
0556 #define for_each_kimage_entry(image, ptr, entry) \
0557     for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
0558         ptr = (entry & IND_INDIRECTION) ? \
0559             boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
0560 
0561 static void kimage_free_entry(kimage_entry_t entry)
0562 {
0563     struct page *page;
0564 
0565     page = boot_pfn_to_page(entry >> PAGE_SHIFT);
0566     kimage_free_pages(page);
0567 }
0568 
0569 void kimage_free(struct kimage *image)
0570 {
0571     kimage_entry_t *ptr, entry;
0572     kimage_entry_t ind = 0;
0573 
0574     if (!image)
0575         return;
0576 
0577     kimage_free_extra_pages(image);
0578     for_each_kimage_entry(image, ptr, entry) {
0579         if (entry & IND_INDIRECTION) {
0580             /* Free the previous indirection page */
0581             if (ind & IND_INDIRECTION)
0582                 kimage_free_entry(ind);
0583             /* Save this indirection page until we are
0584              * done with it.
0585              */
0586             ind = entry;
0587         } else if (entry & IND_SOURCE)
0588             kimage_free_entry(entry);
0589     }
0590     /* Free the final indirection page */
0591     if (ind & IND_INDIRECTION)
0592         kimage_free_entry(ind);
0593 
0594     /* Handle any machine specific cleanup */
0595     machine_kexec_cleanup(image);
0596 
0597     /* Free the kexec control pages... */
0598     kimage_free_page_list(&image->control_pages);
0599 
0600     /*
0601      * Free up any temporary buffers allocated. This might be hit if an
0602      * error occurred much later, after buffer allocation.
0603      */
0604     if (image->file_mode)
0605         kimage_file_post_load_cleanup(image);
0606 
0607     kfree(image);
0608 }
0609 
0610 static kimage_entry_t *kimage_dst_used(struct kimage *image,
0611                     unsigned long page)
0612 {
0613     kimage_entry_t *ptr, entry;
0614     unsigned long destination = 0;
0615 
0616     for_each_kimage_entry(image, ptr, entry) {
0617         if (entry & IND_DESTINATION)
0618             destination = entry & PAGE_MASK;
0619         else if (entry & IND_SOURCE) {
0620             if (page == destination)
0621                 return ptr;
0622             destination += PAGE_SIZE;
0623         }
0624     }
0625 
0626     return NULL;
0627 }
0628 
0629 static struct page *kimage_alloc_page(struct kimage *image,
0630                     gfp_t gfp_mask,
0631                     unsigned long destination)
0632 {
0633     /*
0634      * Here we implement safeguards to ensure that a source page
0635      * is not copied to its destination page before the data on
0636      * the destination page is no longer useful.
0637      *
0638      * To do this we maintain the invariant that a source page is
0639      * either its own destination page, or it is not a
0640      * destination page at all.
0641      *
0642      * That is slightly stronger than required, but the proof
0643      * that no problems will occur is trivial, and the
0644      * implementation is simple to verify.
0645      *
0646      * When allocating all pages normally this algorithm will run
0647      * in O(N) time, but in the worst case it will run in O(N^2)
0648      * time.   If the runtime is a problem the data structures can
0649      * be fixed.
0650      */
0651     struct page *page;
0652     unsigned long addr;
0653 
0654     /*
0655      * Walk through the list of destination pages, and see if I
0656      * have a match.
0657      */
0658     list_for_each_entry(page, &image->dest_pages, lru) {
0659         addr = page_to_boot_pfn(page) << PAGE_SHIFT;
0660         if (addr == destination) {
0661             list_del(&page->lru);
0662             return page;
0663         }
0664     }
0665     page = NULL;
0666     while (1) {
0667         kimage_entry_t *old;
0668 
0669         /* Allocate a page, if we run out of memory give up */
0670         page = kimage_alloc_pages(gfp_mask, 0);
0671         if (!page)
0672             return NULL;
0673         /* If the page cannot be used file it away */
0674         if (page_to_boot_pfn(page) >
0675                 (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
0676             list_add(&page->lru, &image->unusable_pages);
0677             continue;
0678         }
0679         addr = page_to_boot_pfn(page) << PAGE_SHIFT;
0680 
0681         /* If it is the destination page we want, use it */
0682         if (addr == destination)
0683             break;
0684 
0685         /* If the page is not a destination page use it */
0686         if (!kimage_is_destination_range(image, addr,
0687                           addr + PAGE_SIZE))
0688             break;
0689 
0690         /*
0691          * I know that the page is someone's destination page.
0692          * See if there is already a source page for this
0693          * destination page.  And if so swap the source pages.
0694          */
0695         old = kimage_dst_used(image, addr);
0696         if (old) {
0697             /* If so move it */
0698             unsigned long old_addr;
0699             struct page *old_page;
0700 
0701             old_addr = *old & PAGE_MASK;
0702             old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
0703             copy_highpage(page, old_page);
0704             *old = addr | (*old & ~PAGE_MASK);
0705 
0706             /* The old page I have found cannot be a
0707          * destination page, so return it if its
0708              * gfp_flags honor the ones passed in.
0709              */
0710             if (!(gfp_mask & __GFP_HIGHMEM) &&
0711                 PageHighMem(old_page)) {
0712                 kimage_free_pages(old_page);
0713                 continue;
0714             }
0715             addr = old_addr;
0716             page = old_page;
0717             break;
0718         }
0719         /* Place the page on the destination list, to be used later */
0720         list_add(&page->lru, &image->dest_pages);
0721     }
0722 
0723     return page;
0724 }
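/*
 * Sketch of the swap case above: if the freshly allocated page turns
 * out to be some other segment's destination and that destination
 * already has a source page, kimage_dst_used() finds the old entry;
 * the old source's contents are copied into the new page, the entry is
 * re-pointed at the new page, and (gfp permitting) the old page, which
 * is now neither a source nor a pending destination, is returned
 * instead.
 */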
0725 
0726 static int kimage_load_normal_segment(struct kimage *image,
0727                      struct kexec_segment *segment)
0728 {
0729     unsigned long maddr;
0730     size_t ubytes, mbytes;
0731     int result;
0732     unsigned char __user *buf = NULL;
0733     unsigned char *kbuf = NULL;
0734 
0735     result = 0;
0736     if (image->file_mode)
0737         kbuf = segment->kbuf;
0738     else
0739         buf = segment->buf;
0740     ubytes = segment->bufsz;
0741     mbytes = segment->memsz;
0742     maddr = segment->mem;
0743 
0744     result = kimage_set_destination(image, maddr);
0745     if (result < 0)
0746         goto out;
0747 
0748     while (mbytes) {
0749         struct page *page;
0750         char *ptr;
0751         size_t uchunk, mchunk;
0752 
0753         page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
0754         if (!page) {
0755             result  = -ENOMEM;
0756             goto out;
0757         }
0758         result = kimage_add_page(image, page_to_boot_pfn(page)
0759                                 << PAGE_SHIFT);
0760         if (result < 0)
0761             goto out;
0762 
0763         ptr = kmap(page);
0764         /* Start with a clear page */
0765         clear_page(ptr);
0766         ptr += maddr & ~PAGE_MASK;
0767         mchunk = min_t(size_t, mbytes,
0768                 PAGE_SIZE - (maddr & ~PAGE_MASK));
0769         uchunk = min(ubytes, mchunk);
0770 
0771         /* For file based kexec, source pages are in kernel memory */
0772         if (image->file_mode)
0773             memcpy(ptr, kbuf, uchunk);
0774         else
0775             result = copy_from_user(ptr, buf, uchunk);
0776         kunmap(page);
0777         if (result) {
0778             result = -EFAULT;
0779             goto out;
0780         }
0781         ubytes -= uchunk;
0782         maddr  += mchunk;
0783         if (image->file_mode)
0784             kbuf += mchunk;
0785         else
0786             buf += mchunk;
0787         mbytes -= mchunk;
0788     }
0789 out:
0790     return result;
0791 }
0792 
0793 static int kimage_load_crash_segment(struct kimage *image,
0794                     struct kexec_segment *segment)
0795 {
0796     /* For crash dump kernels we simply copy the data from
0797      * user space to its destination.
0798      * We do things a page at a time for the sake of kmap.
0799      */
0800     unsigned long maddr;
0801     size_t ubytes, mbytes;
0802     int result;
0803     unsigned char __user *buf = NULL;
0804     unsigned char *kbuf = NULL;
0805 
0806     result = 0;
0807     if (image->file_mode)
0808         kbuf = segment->kbuf;
0809     else
0810         buf = segment->buf;
0811     ubytes = segment->bufsz;
0812     mbytes = segment->memsz;
0813     maddr = segment->mem;
0814     while (mbytes) {
0815         struct page *page;
0816         char *ptr;
0817         size_t uchunk, mchunk;
0818 
0819         page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
0820         if (!page) {
0821             result  = -ENOMEM;
0822             goto out;
0823         }
0824         ptr = kmap(page);
0825         ptr += maddr & ~PAGE_MASK;
0826         mchunk = min_t(size_t, mbytes,
0827                 PAGE_SIZE - (maddr & ~PAGE_MASK));
0828         uchunk = min(ubytes, mchunk);
0829         if (mchunk > uchunk) {
0830             /* Zero the trailing part of the page */
0831             memset(ptr + uchunk, 0, mchunk - uchunk);
0832         }
0833 
0834         /* For file based kexec, source pages are in kernel memory */
0835         if (image->file_mode)
0836             memcpy(ptr, kbuf, uchunk);
0837         else
0838             result = copy_from_user(ptr, buf, uchunk);
0839         kexec_flush_icache_page(page);
0840         kunmap(page);
0841         if (result) {
0842             result = -EFAULT;
0843             goto out;
0844         }
0845         ubytes -= uchunk;
0846         maddr  += mchunk;
0847         if (image->file_mode)
0848             kbuf += mchunk;
0849         else
0850             buf += mchunk;
0851         mbytes -= mchunk;
0852     }
0853 out:
0854     return result;
0855 }
0856 
0857 int kimage_load_segment(struct kimage *image,
0858                 struct kexec_segment *segment)
0859 {
0860     int result = -ENOMEM;
0861 
0862     switch (image->type) {
0863     case KEXEC_TYPE_DEFAULT:
0864         result = kimage_load_normal_segment(image, segment);
0865         break;
0866     case KEXEC_TYPE_CRASH:
0867         result = kimage_load_crash_segment(image, segment);
0868         break;
0869     }
0870 
0871     return result;
0872 }
0873 
0874 struct kimage *kexec_image;
0875 struct kimage *kexec_crash_image;
0876 int kexec_load_disabled;
0877 
0878 /*
0879  * No panic_cpu check version of crash_kexec().  This function is called
0880  * only when panic_cpu holds the current CPU number; this is the only CPU
0881  * which processes crash_kexec routines.
0882  */
0883 void __crash_kexec(struct pt_regs *regs)
0884 {
0885     /* Take the kexec_mutex here to prevent sys_kexec_load
0886      * running on one cpu from replacing the crash kernel
0887      * we are using after a panic on a different cpu.
0888      *
0889      * If the crash kernel was not located in a fixed area
0890      * of memory the xchg(&kexec_crash_image) would be
0891      * sufficient.  But since I reuse the memory...
0892      */
0893     if (mutex_trylock(&kexec_mutex)) {
0894         if (kexec_crash_image) {
0895             struct pt_regs fixed_regs;
0896 
0897             crash_setup_regs(&fixed_regs, regs);
0898             crash_save_vmcoreinfo();
0899             machine_crash_shutdown(&fixed_regs);
0900             machine_kexec(kexec_crash_image);
0901         }
0902         mutex_unlock(&kexec_mutex);
0903     }
0904 }
0905 
0906 void crash_kexec(struct pt_regs *regs)
0907 {
0908     int old_cpu, this_cpu;
0909 
0910     /*
0911      * Only one CPU is allowed to execute the crash_kexec() code as with
0912      * panic().  Otherwise parallel calls of panic() and crash_kexec()
0913      * may stop each other.  To exclude them, we use panic_cpu here too.
0914      */
0915     this_cpu = raw_smp_processor_id();
0916     old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
0917     if (old_cpu == PANIC_CPU_INVALID) {
0918         /* This is the 1st CPU which comes here, so go ahead. */
0919         printk_nmi_flush_on_panic();
0920         __crash_kexec(regs);
0921 
0922         /*
0923          * Reset panic_cpu to allow another panic()/crash_kexec()
0924          * call.
0925          */
0926         atomic_set(&panic_cpu, PANIC_CPU_INVALID);
0927     }
0928 }
0929 
0930 size_t crash_get_memory_size(void)
0931 {
0932     size_t size = 0;
0933 
0934     mutex_lock(&kexec_mutex);
0935     if (crashk_res.end != crashk_res.start)
0936         size = resource_size(&crashk_res);
0937     mutex_unlock(&kexec_mutex);
0938     return size;
0939 }
0940 
0941 void __weak crash_free_reserved_phys_range(unsigned long begin,
0942                        unsigned long end)
0943 {
0944     unsigned long addr;
0945 
0946     for (addr = begin; addr < end; addr += PAGE_SIZE)
0947         free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
0948 }
0949 
0950 int crash_shrink_memory(unsigned long new_size)
0951 {
0952     int ret = 0;
0953     unsigned long start, end;
0954     unsigned long old_size;
0955     struct resource *ram_res;
0956 
0957     mutex_lock(&kexec_mutex);
0958 
0959     if (kexec_crash_image) {
0960         ret = -ENOENT;
0961         goto unlock;
0962     }
0963     start = crashk_res.start;
0964     end = crashk_res.end;
0965     old_size = (end == 0) ? 0 : end - start + 1;
0966     if (new_size >= old_size) {
0967         ret = (new_size == old_size) ? 0 : -EINVAL;
0968         goto unlock;
0969     }
0970 
0971     ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
0972     if (!ram_res) {
0973         ret = -ENOMEM;
0974         goto unlock;
0975     }
0976 
0977     start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
0978     end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
0979 
0980     crash_free_reserved_phys_range(end, crashk_res.end);
0981 
0982     if ((start == end) && (crashk_res.parent != NULL))
0983         release_resource(&crashk_res);
0984 
0985     ram_res->start = end;
0986     ram_res->end = crashk_res.end;
0987     ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
0988     ram_res->name = "System RAM";
0989 
0990     crashk_res.end = end - 1;
0991 
0992     insert_resource(&iomem_resource, ram_res);
0993 
0994 unlock:
0995     mutex_unlock(&kexec_mutex);
0996     return ret;
0997 }
0998 
0999 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1000                 size_t data_len)
1001 {
1002     struct elf_note note;
1003 
1004     note.n_namesz = strlen(name) + 1;
1005     note.n_descsz = data_len;
1006     note.n_type   = type;
1007     memcpy(buf, &note, sizeof(note));
1008     buf += (sizeof(note) + 3)/4;
1009     memcpy(buf, name, note.n_namesz);
1010     buf += (note.n_namesz + 3)/4;
1011     memcpy(buf, data, note.n_descsz);
1012     buf += (note.n_descsz + 3)/4;
1013 
1014     return buf;
1015 }
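/*
 * Layout sketch, assuming the usual 4-byte ELF note alignment: for the
 * name "CORE" and an 8-byte payload, the words written above are
 *
 *   3 words  header  (n_namesz = 5, n_descsz = 8, n_type)
 *   2 words  "CORE\0" padded to (n_namesz + 3) / 4 words
 *   2 words  payload padded to (n_descsz + 3) / 4 words
 *
 * and buf is returned pointing just past the payload, ready for the
 * next note or for final_note().
 */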
1016 
1017 static void final_note(u32 *buf)
1018 {
1019     struct elf_note note;
1020 
1021     note.n_namesz = 0;
1022     note.n_descsz = 0;
1023     note.n_type   = 0;
1024     memcpy(buf, &note, sizeof(note));
1025 }
1026 
1027 void crash_save_cpu(struct pt_regs *regs, int cpu)
1028 {
1029     struct elf_prstatus prstatus;
1030     u32 *buf;
1031 
1032     if ((cpu < 0) || (cpu >= nr_cpu_ids))
1033         return;
1034 
1035     /* Using ELF notes here is opportunistic.
1036      * I need a well defined structure format
1037      * for the data I pass, and I need tags
1038      * on the data to indicate what information I have
1039      * squirrelled away.  ELF notes happen to provide
1040      * all of that, so there is no need to invent something new.
1041      */
1042     buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
1043     if (!buf)
1044         return;
1045     memset(&prstatus, 0, sizeof(prstatus));
1046     prstatus.pr_pid = current->pid;
1047     elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1048     buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1049                   &prstatus, sizeof(prstatus));
1050     final_note(buf);
1051 }
1052 
1053 static int __init crash_notes_memory_init(void)
1054 {
1055     /* Allocate memory for saving cpu registers. */
1056     size_t size, align;
1057 
1058     /*
1059      * crash_notes could be allocated across 2 vmalloc pages when percpu
1060      * is vmalloc based . vmalloc doesn't guarantee 2 continuous vmalloc
1061      * pages are also on 2 continuous physical pages. In this case the
1062      * 2nd part of crash_notes in 2nd page could be lost since only the
1063      * starting address and size of crash_notes are exported through sysfs.
1064      * Here round up the size of crash_notes to the nearest power of two
1065      * and pass it to __alloc_percpu as align value. This can make sure
1066      * crash_notes is allocated inside one physical page.
1067      */
1068     size = sizeof(note_buf_t);
1069     align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
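    /*
     * For example, if note_buf_t were 424 bytes, roundup_pow_of_two()
     * would give an alignment of 512, so each CPU's buffer would fit
     * entirely inside one physical page regardless of where the percpu
     * chunk starts.
     */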
1070 
1071     /*
1072      * Break compile if size is bigger than PAGE_SIZE since crash_notes
1073      * definitely will be in 2 pages with that.
1074      */
1075     BUILD_BUG_ON(size > PAGE_SIZE);
1076 
1077     crash_notes = __alloc_percpu(size, align);
1078     if (!crash_notes) {
1079         pr_warn("Memory allocation for saving cpu register states failed\n");
1080         return -ENOMEM;
1081     }
1082     return 0;
1083 }
1084 subsys_initcall(crash_notes_memory_init);
1085 
1086 
1087 /*
1088  * parsing the "crashkernel" commandline
1089  *
1090  * this code is intended to be called from architecture specific code
1091  */
1092 
1093 
1094 /*
1095  * This function parses command lines in the format
1096  *
1097  *   crashkernel=ramsize-range:size[,...][@offset]
1098  *
1099  * The function returns 0 on success and -EINVAL on failure.
1100  */
1101 static int __init parse_crashkernel_mem(char *cmdline,
1102                     unsigned long long system_ram,
1103                     unsigned long long *crash_size,
1104                     unsigned long long *crash_base)
1105 {
1106     char *cur = cmdline, *tmp;
1107 
1108     /* for each entry of the comma-separated list */
1109     do {
1110         unsigned long long start, end = ULLONG_MAX, size;
1111 
1112         /* get the start of the range */
1113         start = memparse(cur, &tmp);
1114         if (cur == tmp) {
1115             pr_warn("crashkernel: Memory value expected\n");
1116             return -EINVAL;
1117         }
1118         cur = tmp;
1119         if (*cur != '-') {
1120             pr_warn("crashkernel: '-' expected\n");
1121             return -EINVAL;
1122         }
1123         cur++;
1124 
1125         /* if no ':' is here, then we read the end */
1126         if (*cur != ':') {
1127             end = memparse(cur, &tmp);
1128             if (cur == tmp) {
1129                 pr_warn("crashkernel: Memory value expected\n");
1130                 return -EINVAL;
1131             }
1132             cur = tmp;
1133             if (end <= start) {
1134                 pr_warn("crashkernel: end <= start\n");
1135                 return -EINVAL;
1136             }
1137         }
1138 
1139         if (*cur != ':') {
1140             pr_warn("crashkernel: ':' expected\n");
1141             return -EINVAL;
1142         }
1143         cur++;
1144 
1145         size = memparse(cur, &tmp);
1146         if (cur == tmp) {
1147             pr_warn("Memory value expected\n");
1148             return -EINVAL;
1149         }
1150         cur = tmp;
1151         if (size >= system_ram) {
1152             pr_warn("crashkernel: invalid size\n");
1153             return -EINVAL;
1154         }
1155 
1156         /* match ? */
1157         if (system_ram >= start && system_ram < end) {
1158             *crash_size = size;
1159             break;
1160         }
1161     } while (*cur++ == ',');
1162 
1163     if (*crash_size > 0) {
1164         while (*cur && *cur != ' ' && *cur != '@')
1165             cur++;
1166         if (*cur == '@') {
1167             cur++;
1168             *crash_base = memparse(cur, &tmp);
1169             if (cur == tmp) {
1170                 pr_warn("Memory value expected after '@'\n");
1171                 return -EINVAL;
1172             }
1173         }
1174     }
1175 
1176     return 0;
1177 }
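/*
 * Example with illustrative values: on a machine with 4G of RAM,
 *
 *   crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * skips the 512M-2G range (4G is not below 2G), matches the open-ended
 * "2G-" range, and so sets *crash_size to 128M; the trailing "@16M"
 * then sets *crash_base to 16M.
 */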
1178 
1179 /*
1180  * This function parses "simple" (old) crashkernel command lines like
1181  *
1182  *  crashkernel=size[@offset]
1183  *
1184  * It returns 0 on success and -EINVAL on failure.
1185  */
1186 static int __init parse_crashkernel_simple(char *cmdline,
1187                        unsigned long long *crash_size,
1188                        unsigned long long *crash_base)
1189 {
1190     char *cur = cmdline;
1191 
1192     *crash_size = memparse(cmdline, &cur);
1193     if (cmdline == cur) {
1194         pr_warn("crashkernel: memory value expected\n");
1195         return -EINVAL;
1196     }
1197 
1198     if (*cur == '@')
1199         *crash_base = memparse(cur+1, &cur);
1200     else if (*cur != ' ' && *cur != '\0') {
1201         pr_warn("crashkernel: unrecognized char: %c\n", *cur);
1202         return -EINVAL;
1203     }
1204 
1205     return 0;
1206 }
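/*
 * Example: with "crashkernel=128M@16M" this parser sees "128M@16M" and
 * sets *crash_size to 128M and *crash_base to 16M; a bare
 * "crashkernel=128M" leaves *crash_base at 0, which the arch code
 * typically treats as "find a suitable base".
 */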
1207 
1208 #define SUFFIX_HIGH 0
1209 #define SUFFIX_LOW  1
1210 #define SUFFIX_NULL 2
1211 static __initdata char *suffix_tbl[] = {
1212     [SUFFIX_HIGH] = ",high",
1213     [SUFFIX_LOW]  = ",low",
1214     [SUFFIX_NULL] = NULL,
1215 };
1216 
1217 /*
1218  * This function parses "suffix" crashkernel command lines like
1219  *
1220  *  crashkernel=size,[high|low]
1221  *
1222  * It returns 0 on success and -EINVAL on failure.
1223  */
1224 static int __init parse_crashkernel_suffix(char *cmdline,
1225                        unsigned long long   *crash_size,
1226                        const char *suffix)
1227 {
1228     char *cur = cmdline;
1229 
1230     *crash_size = memparse(cmdline, &cur);
1231     if (cmdline == cur) {
1232         pr_warn("crashkernel: memory value expected\n");
1233         return -EINVAL;
1234     }
1235 
1236     /* check with suffix */
1237     if (strncmp(cur, suffix, strlen(suffix))) {
1238         pr_warn("crashkernel: unrecognized char: %c\n", *cur);
1239         return -EINVAL;
1240     }
1241     cur += strlen(suffix);
1242     if (*cur != ' ' && *cur != '\0') {
1243         pr_warn("crashkernel: unrecognized char: %c\n", *cur);
1244         return -EINVAL;
1245     }
1246 
1247     return 0;
1248 }
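/*
 * Example: for "crashkernel=256M,high" this is called with suffix
 * ",high"; memparse() consumes "256M", the suffix check passes, and
 * *crash_size becomes 256M.  Anything other than the expected suffix
 * (or trailing garbage after it) fails with -EINVAL.
 */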
1249 
1250 static __init char *get_last_crashkernel(char *cmdline,
1251                  const char *name,
1252                  const char *suffix)
1253 {
1254     char *p = cmdline, *ck_cmdline = NULL;
1255 
1256     /* find crashkernel and use the last one if there are more */
1257     p = strstr(p, name);
1258     while (p) {
1259         char *end_p = strchr(p, ' ');
1260         char *q;
1261 
1262         if (!end_p)
1263             end_p = p + strlen(p);
1264 
1265         if (!suffix) {
1266             int i;
1267 
1268             /* skip the one with any known suffix */
1269             for (i = 0; suffix_tbl[i]; i++) {
1270                 q = end_p - strlen(suffix_tbl[i]);
1271                 if (!strncmp(q, suffix_tbl[i],
1272                          strlen(suffix_tbl[i])))
1273                     goto next;
1274             }
1275             ck_cmdline = p;
1276         } else {
1277             q = end_p - strlen(suffix);
1278             if (!strncmp(q, suffix, strlen(suffix)))
1279                 ck_cmdline = p;
1280         }
1281 next:
1282         p = strstr(p+1, name);
1283     }
1284 
1285     if (!ck_cmdline)
1286         return NULL;
1287 
1288     return ck_cmdline;
1289 }
1290 
1291 static int __init __parse_crashkernel(char *cmdline,
1292                  unsigned long long system_ram,
1293                  unsigned long long *crash_size,
1294                  unsigned long long *crash_base,
1295                  const char *name,
1296                  const char *suffix)
1297 {
1298     char    *first_colon, *first_space;
1299     char    *ck_cmdline;
1300 
1301     BUG_ON(!crash_size || !crash_base);
1302     *crash_size = 0;
1303     *crash_base = 0;
1304 
1305     ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
1306 
1307     if (!ck_cmdline)
1308         return -EINVAL;
1309 
1310     ck_cmdline += strlen(name);
1311 
1312     if (suffix)
1313         return parse_crashkernel_suffix(ck_cmdline, crash_size,
1314                 suffix);
1315     /*
1316      * if the commandline contains a ':', then that's the extended
1317      * syntax -- if not, it must be the classic syntax
1318      */
1319     first_colon = strchr(ck_cmdline, ':');
1320     first_space = strchr(ck_cmdline, ' ');
1321     if (first_colon && (!first_space || first_colon < first_space))
1322         return parse_crashkernel_mem(ck_cmdline, system_ram,
1323                 crash_size, crash_base);
1324 
1325     return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
1326 }
1327 
1328 /*
1329  * This function is the entry point for command line parsing and should be
1330  * called from the arch-specific code.
1331  */
1332 int __init parse_crashkernel(char *cmdline,
1333                  unsigned long long system_ram,
1334                  unsigned long long *crash_size,
1335                  unsigned long long *crash_base)
1336 {
1337     return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1338                     "crashkernel=", NULL);
1339 }
1340 
1341 int __init parse_crashkernel_high(char *cmdline,
1342                  unsigned long long system_ram,
1343                  unsigned long long *crash_size,
1344                  unsigned long long *crash_base)
1345 {
1346     return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1347                 "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
1348 }
1349 
1350 int __init parse_crashkernel_low(char *cmdline,
1351                  unsigned long long system_ram,
1352                  unsigned long long *crash_size,
1353                  unsigned long long *crash_base)
1354 {
1355     return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1356                 "crashkernel=", suffix_tbl[SUFFIX_LOW]);
1357 }
1358 
1359 static void update_vmcoreinfo_note(void)
1360 {
1361     u32 *buf = vmcoreinfo_note;
1362 
1363     if (!vmcoreinfo_size)
1364         return;
1365     buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1366                   vmcoreinfo_size);
1367     final_note(buf);
1368 }
1369 
1370 void crash_save_vmcoreinfo(void)
1371 {
1372     vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
1373     update_vmcoreinfo_note();
1374 }
1375 
1376 void vmcoreinfo_append_str(const char *fmt, ...)
1377 {
1378     va_list args;
1379     char buf[0x50];
1380     size_t r;
1381 
1382     va_start(args, fmt);
1383     r = vscnprintf(buf, sizeof(buf), fmt, args);
1384     va_end(args);
1385 
1386     r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
1387 
1388     memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1389 
1390     vmcoreinfo_size += r;
1391 }
1392 
1393 /*
1394  * provide an empty default implementation here -- architecture
1395  * code may override this
1396  */
1397 void __weak arch_crash_save_vmcoreinfo(void)
1398 {}
1399 
1400 phys_addr_t __weak paddr_vmcoreinfo_note(void)
1401 {
1402     return __pa((unsigned long)(char *)&vmcoreinfo_note);
1403 }
1404 
1405 static int __init crash_save_vmcoreinfo_init(void)
1406 {
1407     VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1408     VMCOREINFO_PAGESIZE(PAGE_SIZE);
1409 
1410     VMCOREINFO_SYMBOL(init_uts_ns);
1411     VMCOREINFO_SYMBOL(node_online_map);
1412 #ifdef CONFIG_MMU
1413     VMCOREINFO_SYMBOL(swapper_pg_dir);
1414 #endif
1415     VMCOREINFO_SYMBOL(_stext);
1416     VMCOREINFO_SYMBOL(vmap_area_list);
1417 
1418 #ifndef CONFIG_NEED_MULTIPLE_NODES
1419     VMCOREINFO_SYMBOL(mem_map);
1420     VMCOREINFO_SYMBOL(contig_page_data);
1421 #endif
1422 #ifdef CONFIG_SPARSEMEM
1423     VMCOREINFO_SYMBOL(mem_section);
1424     VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1425     VMCOREINFO_STRUCT_SIZE(mem_section);
1426     VMCOREINFO_OFFSET(mem_section, section_mem_map);
1427 #endif
1428     VMCOREINFO_STRUCT_SIZE(page);
1429     VMCOREINFO_STRUCT_SIZE(pglist_data);
1430     VMCOREINFO_STRUCT_SIZE(zone);
1431     VMCOREINFO_STRUCT_SIZE(free_area);
1432     VMCOREINFO_STRUCT_SIZE(list_head);
1433     VMCOREINFO_SIZE(nodemask_t);
1434     VMCOREINFO_OFFSET(page, flags);
1435     VMCOREINFO_OFFSET(page, _refcount);
1436     VMCOREINFO_OFFSET(page, mapping);
1437     VMCOREINFO_OFFSET(page, lru);
1438     VMCOREINFO_OFFSET(page, _mapcount);
1439     VMCOREINFO_OFFSET(page, private);
1440     VMCOREINFO_OFFSET(page, compound_dtor);
1441     VMCOREINFO_OFFSET(page, compound_order);
1442     VMCOREINFO_OFFSET(page, compound_head);
1443     VMCOREINFO_OFFSET(pglist_data, node_zones);
1444     VMCOREINFO_OFFSET(pglist_data, nr_zones);
1445 #ifdef CONFIG_FLAT_NODE_MEM_MAP
1446     VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1447 #endif
1448     VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1449     VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1450     VMCOREINFO_OFFSET(pglist_data, node_id);
1451     VMCOREINFO_OFFSET(zone, free_area);
1452     VMCOREINFO_OFFSET(zone, vm_stat);
1453     VMCOREINFO_OFFSET(zone, spanned_pages);
1454     VMCOREINFO_OFFSET(free_area, free_list);
1455     VMCOREINFO_OFFSET(list_head, next);
1456     VMCOREINFO_OFFSET(list_head, prev);
1457     VMCOREINFO_OFFSET(vmap_area, va_start);
1458     VMCOREINFO_OFFSET(vmap_area, list);
1459     VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1460     log_buf_kexec_setup();
1461     VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1462     VMCOREINFO_NUMBER(NR_FREE_PAGES);
1463     VMCOREINFO_NUMBER(PG_lru);
1464     VMCOREINFO_NUMBER(PG_private);
1465     VMCOREINFO_NUMBER(PG_swapcache);
1466     VMCOREINFO_NUMBER(PG_slab);
1467 #ifdef CONFIG_MEMORY_FAILURE
1468     VMCOREINFO_NUMBER(PG_hwpoison);
1469 #endif
1470     VMCOREINFO_NUMBER(PG_head_mask);
1471     VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
1472 #ifdef CONFIG_HUGETLB_PAGE
1473     VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
1474 #endif
1475 
1476     arch_crash_save_vmcoreinfo();
1477     update_vmcoreinfo_note();
1478 
1479     return 0;
1480 }
1481 
1482 subsys_initcall(crash_save_vmcoreinfo_init);
1483 
1484 /*
1485  * Move into place and start executing a preloaded standalone
1486  * executable.  If nothing was preloaded return an error.
1487  */
1488 int kernel_kexec(void)
1489 {
1490     int error = 0;
1491 
1492     if (!mutex_trylock(&kexec_mutex))
1493         return -EBUSY;
1494     if (!kexec_image) {
1495         error = -EINVAL;
1496         goto Unlock;
1497     }
1498 
1499 #ifdef CONFIG_KEXEC_JUMP
1500     if (kexec_image->preserve_context) {
1501         lock_system_sleep();
1502         pm_prepare_console();
1503         error = freeze_processes();
1504         if (error) {
1505             error = -EBUSY;
1506             goto Restore_console;
1507         }
1508         suspend_console();
1509         error = dpm_suspend_start(PMSG_FREEZE);
1510         if (error)
1511             goto Resume_console;
1512         /* At this point, dpm_suspend_start() has been called,
1513          * but *not* dpm_suspend_end(). We *must* call
1514          * dpm_suspend_end() now.  Otherwise, drivers for
1515          * some devices (e.g. interrupt controllers) become
1516          * desynchronized with the actual state of the
1517          * hardware at resume time, and evil weirdness ensues.
1518          */
1519         error = dpm_suspend_end(PMSG_FREEZE);
1520         if (error)
1521             goto Resume_devices;
1522         error = disable_nonboot_cpus();
1523         if (error)
1524             goto Enable_cpus;
1525         local_irq_disable();
1526         error = syscore_suspend();
1527         if (error)
1528             goto Enable_irqs;
1529     } else
1530 #endif
1531     {
1532         kexec_in_progress = true;
1533         kernel_restart_prepare(NULL);
1534         migrate_to_reboot_cpu();
1535 
1536         /*
1537          * migrate_to_reboot_cpu() disables CPU hotplug assuming that
1538          * no further code needs to use CPU hotplug (which is true in
1539          * the reboot case). However, the kexec path depends on using
1540          * CPU hotplug again; so re-enable it here.
1541          */
1542         cpu_hotplug_enable();
1543         pr_emerg("Starting new kernel\n");
1544         machine_shutdown();
1545     }
1546 
1547     machine_kexec(kexec_image);
1548 
1549 #ifdef CONFIG_KEXEC_JUMP
1550     if (kexec_image->preserve_context) {
1551         syscore_resume();
1552  Enable_irqs:
1553         local_irq_enable();
1554  Enable_cpus:
1555         enable_nonboot_cpus();
1556         dpm_resume_start(PMSG_RESTORE);
1557  Resume_devices:
1558         dpm_resume_end(PMSG_RESTORE);
1559  Resume_console:
1560         resume_console();
1561         thaw_processes();
1562  Restore_console:
1563         pm_restore_console();
1564         unlock_system_sleep();
1565     }
1566 #endif
1567 
1568  Unlock:
1569     mutex_unlock(&kexec_mutex);
1570     return error;
1571 }
1572 
1573 /*
1574  * Protection mechanism for crashkernel reserved memory after
1575  * the kdump kernel is loaded.
1576  *
1577  * Provide an empty default implementation here -- architecture
1578  * code may override this
1579  */
1580 void __weak arch_kexec_protect_crashkres(void)
1581 {}
1582 
1583 void __weak arch_kexec_unprotect_crashkres(void)
1584 {}