Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  *  linux/kernel/panic.c
0004  *
0005  *  Copyright (C) 1991, 1992  Linus Torvalds
0006  */
0007 
/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
0012 #include <linux/debug_locks.h>
0013 #include <linux/sched/debug.h>
0014 #include <linux/interrupt.h>
0015 #include <linux/kgdb.h>
0016 #include <linux/kmsg_dump.h>
0017 #include <linux/kallsyms.h>
0018 #include <linux/notifier.h>
0019 #include <linux/vt_kern.h>
0020 #include <linux/module.h>
0021 #include <linux/random.h>
0022 #include <linux/ftrace.h>
0023 #include <linux/reboot.h>
0024 #include <linux/delay.h>
0025 #include <linux/kexec.h>
0026 #include <linux/panic_notifier.h>
0027 #include <linux/sched.h>
0028 #include <linux/sysrq.h>
0029 #include <linux/init.h>
0030 #include <linux/nmi.h>
0031 #include <linux/console.h>
0032 #include <linux/bug.h>
0033 #include <linux/ratelimit.h>
0034 #include <linux/debugfs.h>
0035 #include <trace/events/error_report.h>
0036 #include <asm/sections.h>
0037 
/* Granularity (ms) of the busy-wait delay loops in panic(). */
#define PANIC_TIMER_STEP 100
/* Divisor scheduling panic_blink() calls: next toggle after 3600/SPD ms. */
#define PANIC_BLINK_SPD 18

#ifdef CONFIG_SMP
/*
 * Should we dump all CPUs backtraces in an oops event?
 * Defaults to 0, can be changed via sysctl.
 */
static unsigned int __read_mostly sysctl_oops_all_cpu_backtrace;
#else
#define sysctl_oops_all_cpu_backtrace 0
#endif /* CONFIG_SMP */

/* If set, an oops escalates straight to panic(). */
int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
/* Bitmask of TAINT_* flags; RANDSTRUCT kernels boot already tainted. */
static unsigned long tainted_mask =
	IS_ENABLED(CONFIG_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
static int pause_on_oops;	/* seconds to stall between oopses (boot param) */
static int pause_on_oops_flag;	/* nonzero while an oops is being printed */
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;	/* run crash kexec after panic notifiers? */
int panic_on_warn __read_mostly;	/* if set, WARN() escalates to panic() */
unsigned long panic_on_taint;		/* taint bits that trigger an immediate panic */
bool panic_on_taint_nousertaint = false;

/* >0: wait that many seconds then reboot; <0: reboot at once; 0: hang forever. */
int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);

/* Bits of the "panic_print" parameter: extra state to dump while panicking. */
#define PANIC_PRINT_TASK_INFO		0x00000001
#define PANIC_PRINT_MEM_INFO		0x00000002
#define PANIC_PRINT_TIMER_INFO		0x00000004
#define PANIC_PRINT_LOCK_INFO		0x00000008
#define PANIC_PRINT_FTRACE_INFO		0x00000010
#define PANIC_PRINT_ALL_PRINTK_MSG	0x00000020
#define PANIC_PRINT_ALL_CPU_BT		0x00000040
unsigned long panic_print;

/* Notifier chain invoked (atomic context) early in panic(). */
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);
#if defined(CONFIG_SMP) && defined(CONFIG_SYSCTL)
/* sysctl table exposing kernel.oops_all_cpu_backtrace (boolean, 0 or 1). */
static struct ctl_table kern_panic_table[] = {
	{
		.procname       = "oops_all_cpu_backtrace",
		.data           = &sysctl_oops_all_cpu_backtrace,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec_minmax,
		.extra1         = SYSCTL_ZERO,
		.extra2         = SYSCTL_ONE,
	},
	{ }	/* sentinel */
};

/* Register the panic-related sysctls once sysctl infrastructure is up. */
static __init int kernel_panic_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_panic_table);
	return 0;
}
late_initcall(kernel_panic_sysctls_init);
#endif
0099 
/* Default "blink" callback: blinks nothing and reports a zero-ms wait. */
static long no_blink(int state)
{
	(void)state;	/* the default implementation ignores the LED state */

	return 0;
}
0104 
/*
 * Optional LED/beeper "blink" hook driven by the delay loops in panic().
 * Returns how long it waited in ms.
 */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);
0108 
0109 /*
0110  * Stop ourself in panic -- architecture code may override this
0111  */
0112 void __weak panic_smp_self_stop(void)
0113 {
0114     while (1)
0115         cpu_relax();
0116 }
0117 
/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak nmi_panic_self_stop(struct pt_regs *regs)
{
	/* Default: ignore @regs and park exactly like the non-NMI path. */
	panic_smp_self_stop();
}
0126 
/*
 * Stop other CPUs in panic.  Architecture dependent code may override this
 * with more suitable version.  For example, if the architecture supports
 * crash dump, it should save registers of each stopped CPU and disable
 * per-CPU features such as virtualization extensions.
 */
void __weak crash_smp_send_stop(void)
{
	/*
	 * NOTE(review): no locking on cpus_stopped — this appears to rely on
	 * being reached only by the single panicking CPU; confirm callers.
	 */
	static int cpus_stopped;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();
	cpus_stopped = 1;
}
0152 
/* CPU that first entered panic(); PANIC_CPU_INVALID while no panic is in flight. */
atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);
0154 
0155 /*
0156  * A variant of panic() called from NMI context. We return if we've already
0157  * panicked on this CPU. If another CPU already panicked, loop in
0158  * nmi_panic_self_stop() which can provide architecture dependent code such
0159  * as saving register state for crash dump.
0160  */
0161 void nmi_panic(struct pt_regs *regs, const char *msg)
0162 {
0163     int old_cpu, cpu;
0164 
0165     cpu = raw_smp_processor_id();
0166     old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);
0167 
0168     if (old_cpu == PANIC_CPU_INVALID)
0169         panic("%s", msg);
0170     else if (old_cpu != cpu)
0171         nmi_panic_self_stop(regs);
0172 }
0173 EXPORT_SYMBOL(nmi_panic);
0174 
/*
 * Dump optional system state selected by the "panic_print" bitmask.
 *
 * Called twice from panic(): first with @console_flush == false to print
 * the selected reports, then with @console_flush == true (after consoles
 * are flushed) to optionally replay the entire printk ring buffer.
 */
static void panic_print_sys_info(bool console_flush)
{
	if (console_flush) {
		if (panic_print & PANIC_PRINT_ALL_PRINTK_MSG)
			console_flush_on_panic(CONSOLE_REPLAY_ALL);
		return;
	}

	/*
	 * NOTE(review): by this point panic() has already stopped the other
	 * CPUs, so the all-CPU backtrace request may be a no-op — upstream
	 * later moved this earlier in the panic path; confirm.
	 */
	if (panic_print & PANIC_PRINT_ALL_CPU_BT)
		trigger_all_cpu_backtrace();

	if (panic_print & PANIC_PRINT_TASK_INFO)
		show_state();

	if (panic_print & PANIC_PRINT_MEM_INFO)
		show_mem(0, NULL);

	if (panic_print & PANIC_PRINT_TIMER_INFO)
		sysrq_timer_list_show();

	if (panic_print & PANIC_PRINT_LOCK_INFO)
		debug_show_all_locks();

	if (panic_print & PANIC_PRINT_FTRACE_INFO)
		ftrace_dump(DUMP_ALL);
}
0201 
/**
 *	panic - halt the system
 *	@fmt: The text string to print
 *
 *	Display a message, then perform cleanups.
 *
 *	This function never returns.
 */
void panic(const char *fmt, ...)
{
	/* static buffer: only one CPU ever runs this code (see panic_cpu) */
	static char buf[1024];
	va_list args;
	long i, i_next = 0, len;
	int state = 0;
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

	if (panic_on_warn) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread.  Other CPUs are held off by the
		 * panic_cpu check below.
		 */
		panic_on_warn = 0;
	}

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
	local_irq_disable();
	preempt_disable_notrace();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themself or will wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
	 * comes here, so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU.  In this case, this is also the 1st CPU.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu  = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	/* Strip a trailing newline so the banner below stays on one line. */
	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If kgdb is enabled, give it a chance to run before we stop all
	 * the other CPUs or else we won't be able to debug processes left
	 * running on them.
	 */
	kgdb_panic(buf);

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers) {
		__crash_kexec(NULL);

		/*
		 * Note smp_send_stop is the usual smp shutdown function, which
		 * unfortunately means it may not be hardened to work in a
		 * panic situation.
		 */
		smp_send_stop();
	} else {
		/*
		 * If we want to do crash dump after notifier calls and
		 * kmsg_dump, we will need architecture dependent extra
		 * works in addition to stopping other CPUs.
		 */
		crash_smp_send_stop();
	}

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	panic_print_sys_info(false);

	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * If you doubt kdump always works fine in any situation,
	 * "crash_kexec_post_notifiers" offers you a chance to run
	 * panic_notifiers and dumping kmsg before kdump.
	 * Note: since some panic_notifiers can make crashed kernel
	 * more unstable, it can increase risks of the kdump failure too.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

#ifdef CONFIG_VT
	unblank_screen();
#endif
	console_unblank();

	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer.  Try to acquire the lock then release it regardless of the
	 * result.  The release will also print the buffers out.  Locks debug
	 * should be disabled to avoid reporting bad unlock balance when
	 * panic() is not being called from OOPS.
	 */
	debug_locks_off();
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);

	panic_print_sys_info(true);

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				/* panic_blink() returns how long it waited, in ms */
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
	/* Negative panic_timeout: skip the delay and reboot immediately. */
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down.  But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		if (panic_reboot_mode != REBOOT_UNDEFINED)
			reboot_mode = panic_reboot_mode;
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
			 "twice on console to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	disabled_wait();
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);

	/* Do not scroll important messages printed above */
	suppress_printk = 1;
	local_irq_enable();
	/* panic_timeout == 0: spin here forever, blinking if a handler is set. */
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}

EXPORT_SYMBOL(panic);
0407 
/*
 * TAINT_FORCED_RMMOD could be a per-module flag but the module
 * is being removed anyway.
 */
/*
 * One entry per TAINT_* bit: { char when set, char when clear, ... }.
 * print_tainted() emits the first character if the bit is set, otherwise
 * the second.  The third field presumably marks taints that also apply
 * to modules — confirm against the struct taint_flag definition.
 */
const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
	[ TAINT_PROPRIETARY_MODULE ]	= { 'P', 'G', true },
	[ TAINT_FORCED_MODULE ]		= { 'F', ' ', true },
	[ TAINT_CPU_OUT_OF_SPEC ]	= { 'S', ' ', false },
	[ TAINT_FORCED_RMMOD ]		= { 'R', ' ', false },
	[ TAINT_MACHINE_CHECK ]		= { 'M', ' ', false },
	[ TAINT_BAD_PAGE ]		= { 'B', ' ', false },
	[ TAINT_USER ]			= { 'U', ' ', false },
	[ TAINT_DIE ]			= { 'D', ' ', false },
	[ TAINT_OVERRIDDEN_ACPI_TABLE ]	= { 'A', ' ', false },
	[ TAINT_WARN ]			= { 'W', ' ', false },
	[ TAINT_CRAP ]			= { 'C', ' ', true },
	[ TAINT_FIRMWARE_WORKAROUND ]	= { 'I', ' ', false },
	[ TAINT_OOT_MODULE ]		= { 'O', ' ', true },
	[ TAINT_UNSIGNED_MODULE ]	= { 'E', ' ', true },
	[ TAINT_SOFTLOCKUP ]		= { 'L', ' ', false },
	[ TAINT_LIVEPATCH ]		= { 'K', ' ', true },
	[ TAINT_AUX ]			= { 'X', ' ', true },
	[ TAINT_RANDSTRUCT ]		= { 'T', ' ', true },
	[ TAINT_TEST ]			= { 'N', ' ', true },
};
0433 
0434 /**
0435  * print_tainted - return a string to represent the kernel taint state.
0436  *
0437  * For individual taint flag meanings, see Documentation/admin-guide/sysctl/kernel.rst
0438  *
0439  * The string is overwritten by the next call to print_tainted(),
0440  * but is always NULL terminated.
0441  */
0442 const char *print_tainted(void)
0443 {
0444     static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")];
0445 
0446     BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);
0447 
0448     if (tainted_mask) {
0449         char *s;
0450         int i;
0451 
0452         s = buf + sprintf(buf, "Tainted: ");
0453         for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
0454             const struct taint_flag *t = &taint_flags[i];
0455             *s++ = test_bit(i, &tainted_mask) ?
0456                     t->c_true : t->c_false;
0457         }
0458         *s = 0;
0459     } else
0460         snprintf(buf, sizeof(buf), "Not tainted");
0461 
0462     return buf;
0463 }
0464 
/* Return nonzero if taint bit @flag is currently set. */
int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);
0470 
/* Return the full taint bitmask. */
unsigned long get_taint(void)
{
	return tainted_mask;
}
0475 
/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok = false, but for
 * some noteworthy-but-not-corrupting cases, it can be set to true.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
	if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
		pr_warn("Disabling lock debugging due to kernel taint\n");

	set_bit(flag, &tainted_mask);

	/* Honour the panic_on_taint= boot parameter, but only panic once. */
	if (tainted_mask & panic_on_taint) {
		panic_on_taint = 0;
		panic("panic_on_taint set ...");
	}
}
EXPORT_SYMBOL(add_taint);
0497 
/* Busy-wait for @msecs milliseconds, poking the NMI watchdog each ms. */
static void spin_msec(int msecs)
{
	int remaining;

	for (remaining = msecs; remaining > 0; remaining--) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}
0507 
/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;	/* seconds left to stall; shared across CPUs */

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				/* Drop the lock while delaying so other CPUs can progress. */
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}
0546 
0547 /*
0548  * Return true if the calling CPU is allowed to print oops-related info.
0549  * This is a bit racy..
0550  */
0551 bool oops_may_print(void)
0552 {
0553     return pause_on_oops_flag == 0;
0554 }
0555 
/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();

	/* Optionally dump every CPU's backtrace (kernel.oops_all_cpu_backtrace). */
	if (sysctl_oops_all_cpu_backtrace)
		trigger_all_cpu_backtrace();
}
0580 
/* Print the closing "end trace" banner after an oops or warning. */
static void print_oops_end_marker(void)
{
	/* NOTE(review): the trace id is hardcoded to 0 here — confirm intended. */
	pr_warn("---[ end trace %016llx ]---\n", 0ULL);
}
0585 
/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	/* Give registered kmsg dumpers a chance to record the oops output. */
	kmsg_dump(KMSG_DUMP_OOPS);
}
0596 
/* Printf-style message bundle handed from warn_slowpath_fmt() to __warn(). */
struct warn_args {
	const char *fmt;	/* format string, or unused when no message */
	va_list args;		/* matching varargs */
};
0601 
/*
 * Common WARN() back end: print the warning banner, the optional message,
 * loaded modules and register/stack state, then taint the kernel.
 * @file may be NULL (only @caller is reported then); @regs may be NULL
 * (a stack dump is printed instead of registers); @args may be NULL
 * (no extra message).
 */
void __warn(const char *file, int line, void *caller, unsigned taint,
	    struct pt_regs *regs, struct warn_args *args)
{
	disable_trace_on_warning();

	if (file)
		pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
			raw_smp_processor_id(), current->pid, file, line,
			caller);
	else
		pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
			raw_smp_processor_id(), current->pid, caller);

	if (args)
		vprintk(args->fmt, args->args);

	print_modules();

	if (regs)
		show_regs(regs);

	/* Escalate if requested; panic() itself clears the flag to avoid recursion. */
	if (panic_on_warn)
		panic("panic_on_warn set ...\n");

	if (!regs)
		dump_stack();

	print_irqtrace_events(current);

	print_oops_end_marker();
	trace_error_report_end(ERROR_DETECTOR_WARN, (unsigned long)caller);

	/* Just a warning, don't kill lockdep. */
	add_taint(taint, LOCKDEP_STILL_OK);
}
0637 
#ifndef __WARN_FLAGS
/*
 * Out-of-line WARN() slow path used when the architecture does not provide
 * __WARN_FLAGS: print the cut-here marker, then hand off to __warn() with
 * the optional printf-style message.
 */
void warn_slowpath_fmt(const char *file, int line, unsigned taint,
		       const char *fmt, ...)
{
	struct warn_args args;

	pr_warn(CUT_HERE);

	/* No message supplied: report location/caller only. */
	if (!fmt) {
		__warn(file, line, __builtin_return_address(0), taint,
		       NULL, NULL);
		return;
	}

	args.fmt = fmt;
	va_start(args.args, fmt);
	__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);
#else
/*
 * __WARN_FLAGS variant: print the cut-here marker followed by the
 * formatted warning message.
 */
void __warn_printk(const char *fmt, ...)
{
	va_list args;

	pr_warn(CUT_HERE);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(__warn_printk);
#endif
0671 
#ifdef CONFIG_BUG

/* Support resetting WARN*_ONCE state */

/* debugfs write handler: re-arm all WARN_ONCE/once-style checks. */
static int clear_warn_once_set(void *data, u64 val)
{
	generic_bug_clear_once();
	/* Zero the __start_once..__end_once section holding the "once" flags. */
	memset(__start_once, 0, __end_once - __start_once);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(clear_warn_once_fops, NULL, clear_warn_once_set,
			 "%lld\n");

/* Create the write-only debugfs file "clear_warn_once". */
static __init int register_warn_debugfs(void)
{
	/* Don't care about failure */
	debugfs_create_file_unsafe("clear_warn_once", 0200, NULL, NULL,
				   &clear_warn_once_fops);
	return 0;
}

device_initcall(register_warn_debugfs);
#endif
0696 
#ifdef CONFIG_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible noinstr void __stack_chk_fail(void)
{
	instrumentation_begin();
	panic("stack-protector: Kernel stack is corrupted in: %pB",
		__builtin_return_address(0));
	/* Formally pairs with instrumentation_begin(); panic() never returns. */
	instrumentation_end();
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif
0713 
/* Boot/module parameters mirroring the panic knobs defined above. */
core_param(panic, panic_timeout, int, 0644);
core_param(panic_print, panic_print, ulong, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);
0719 
0720 static int __init oops_setup(char *s)
0721 {
0722     if (!s)
0723         return -EINVAL;
0724     if (!strcmp(s, "panic"))
0725         panic_on_oops = 1;
0726     return 0;
0727 }
0728 early_param("oops", oops_setup);
0729 
/*
 * Parse "panic_on_taint=<hex mask>[,nousertaint]".  The mask selects the
 * TAINT_* bits that make add_taint() panic immediately.
 */
static int __init panic_on_taint_setup(char *s)
{
	char *taint_str;

	if (!s)
		return -EINVAL;

	taint_str = strsep(&s, ",");
	if (kstrtoul(taint_str, 16, &panic_on_taint))
		return -EINVAL;

	/* make sure panic_on_taint doesn't hold out-of-range TAINT flags */
	panic_on_taint &= TAINT_FLAGS_MAX;

	/* An empty mask would make the option a no-op: reject it. */
	if (!panic_on_taint)
		return -EINVAL;

	if (s && !strcmp(s, "nousertaint"))
		panic_on_taint_nousertaint = true;

	pr_info("panic_on_taint: bitmask=0x%lx nousertaint_mode=%sabled\n",
		panic_on_taint, panic_on_taint_nousertaint ? "en" : "dis");

	return 0;
}
early_param("panic_on_taint", panic_on_taint_setup);