Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Detect hard and soft lockups on a system
0004  *
0005  * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
0006  *
0007  * Note: Most of this code is borrowed heavily from the original softlockup
0008  * detector, so thanks to Ingo for the initial implementation.
0009  * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
0010  * to those contributors as well.
0011  */
0012 
0013 #define pr_fmt(fmt) "watchdog: " fmt
0014 
0015 #include <linux/mm.h>
0016 #include <linux/cpu.h>
0017 #include <linux/nmi.h>
0018 #include <linux/init.h>
0019 #include <linux/module.h>
0020 #include <linux/sysctl.h>
0021 #include <linux/tick.h>
0022 #include <linux/sched/clock.h>
0023 #include <linux/sched/debug.h>
0024 #include <linux/sched/isolation.h>
0025 #include <linux/stop_machine.h>
0026 
0027 #include <asm/irq_regs.h>
0028 #include <linux/kvm_para.h>
0029 
0030 static DEFINE_MUTEX(watchdog_mutex);
0031 
0032 #if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
0033 # define WATCHDOG_DEFAULT   (SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
0034 # define NMI_WATCHDOG_DEFAULT   1
0035 #else
0036 # define WATCHDOG_DEFAULT   (SOFT_WATCHDOG_ENABLED)
0037 # define NMI_WATCHDOG_DEFAULT   0
0038 #endif
0039 
0040 unsigned long __read_mostly watchdog_enabled;
0041 int __read_mostly watchdog_user_enabled = 1;
0042 int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
0043 int __read_mostly soft_watchdog_user_enabled = 1;
0044 int __read_mostly watchdog_thresh = 10;
0045 static int __read_mostly nmi_watchdog_available;
0046 
0047 struct cpumask watchdog_cpumask __read_mostly;
0048 unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
0049 
0050 #ifdef CONFIG_HARDLOCKUP_DETECTOR
0051 
0052 # ifdef CONFIG_SMP
0053 int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
0054 # endif /* CONFIG_SMP */
0055 
0056 /*
0057  * Should we panic when a soft-lockup or hard-lockup occurs:
0058  */
0059 unsigned int __read_mostly hardlockup_panic =
0060             IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
0061 /*
0062  * We may not want to enable hard lockup detection by default in all cases,
0063  * for example when running the kernel as a guest on a hypervisor. In these
0064  * cases this function can be called to disable hard lockup detection. This
0065  * function should only be executed once by the boot processor before the
0066  * kernel command line parameters are parsed, because otherwise it is not
0067  * possible to override this in hardlockup_panic_setup().
0068  */
/* Switch the hard lockup detector off (see the comment above for when). */
void __init hardlockup_detector_disable(void)
{
    nmi_watchdog_user_enabled = 0;
}
0073 
/*
 * Parse the "nmi_watchdog=" boot parameter:
 *   "panic"/"nopanic" select whether a detected hard lockup panics,
 *   "0"/"1" disable/enable the NMI watchdog.
 */
static int __init hardlockup_panic_setup(char *str)
{
    if (!strncmp(str, "panic", 5))
        hardlockup_panic = 1;
    else if (!strncmp(str, "nopanic", 7))
        hardlockup_panic = 0;
    else if (!strncmp(str, "0", 1))
        nmi_watchdog_user_enabled = 0;
    else if (!strncmp(str, "1", 1))
        nmi_watchdog_user_enabled = 1;
    return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
0087 
0088 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
0089 
/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */

/* Default: start the perf-based hard lockup detector on this CPU. */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
    hardlockup_detector_perf_enable();
    return 0;
}

/* Default: stop the perf-based hard lockup detector on this CPU. */
void __weak watchdog_nmi_disable(unsigned int cpu)
{
    hardlockup_detector_perf_disable();
}

/* Return 0, if a NMI watchdog is available. Error code otherwise */
int __weak __init watchdog_nmi_probe(void)
{
    return hardlockup_detector_perf_init();
}
0114 
/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }

/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }
0137 
0138 /**
0139  * lockup_detector_update_enable - Update the sysctl enable bit
0140  *
0141  * Caller needs to make sure that the NMI/perf watchdogs are off, so this
0142  * can't race with watchdog_nmi_disable().
0143  */
0144 static void lockup_detector_update_enable(void)
0145 {
0146     watchdog_enabled = 0;
0147     if (!watchdog_user_enabled)
0148         return;
0149     if (nmi_watchdog_available && nmi_watchdog_user_enabled)
0150         watchdog_enabled |= NMI_WATCHDOG_ENABLED;
0151     if (soft_watchdog_user_enabled)
0152         watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
0153 }
0154 
0155 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
0156 
/*
 * Delay the softlockup report when running known slow code.
 * It does _not_ affect the timestamp of the last successful reschedule.
 */
0161 #define SOFTLOCKUP_DELAY_REPORT ULONG_MAX
0162 
0163 #ifdef CONFIG_SMP
0164 int __read_mostly sysctl_softlockup_all_cpu_backtrace;
0165 #endif
0166 
0167 static struct cpumask watchdog_allowed_mask __read_mostly;
0168 
0169 /* Global variables, exported for sysctl */
0170 unsigned int __read_mostly softlockup_panic =
0171             IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);
0172 
0173 static bool softlockup_initialized __read_mostly;
0174 static u64 __read_mostly sample_period;
0175 
0176 /* Timestamp taken after the last successful reschedule. */
0177 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
0178 /* Timestamp of the last softlockup report. */
0179 static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
0180 static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
0181 static DEFINE_PER_CPU(bool, softlockup_touch_sync);
0182 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
0183 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
0184 static unsigned long soft_lockup_nmi_warn;
0185 
/* "nowatchdog" boot parameter: disable both lockup detectors. */
static int __init nowatchdog_setup(char *str)
{
    watchdog_user_enabled = 0;
    return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* "nosoftlockup" boot parameter: disable only the softlockup detector. */
static int __init nosoftlockup_setup(char *str)
{
    soft_watchdog_user_enabled = 0;
    return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

/* "watchdog_thresh=" boot parameter: hard lockup threshold in seconds. */
static int __init watchdog_thresh_setup(char *str)
{
    get_option(&str, &watchdog_thresh);
    return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);
0206 
0207 static void __lockup_detector_cleanup(void);
0208 
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
    return watchdog_thresh * 2;
}
0220 
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
    return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
0230 
/* Recompute the hrtimer sample period from the current watchdog_thresh. */
static void set_sample_period(void)
{
    /*
     * convert watchdog_thresh from seconds to ns
     * the divide by 5 is to give hrtimer several chances (two
     * or three with the current relation between the soft
     * and hard thresholds) to increment before the
     * hardlockup detector generates a warning
     */
    sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
    watchdog_update_hrtimer_threshold(sample_period);
}
0243 
/* Restart the softlockup report period on this CPU. */
static void update_report_ts(void)
{
    __this_cpu_write(watchdog_report_ts, get_timestamp());
}

/* Commands for resetting the watchdog */
static void update_touch_ts(void)
{
    __this_cpu_write(watchdog_touch_ts, get_timestamp());
    update_report_ts();
}
0255 
/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
    /*
     * Preemption can be enabled.  It doesn't matter which CPU's watchdog
     * report period gets restarted here, so use the raw_ operation.
     */
    raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}

/* Touch the softlockup watchdog and the workqueue watchdog of this CPU. */
notrace void touch_softlockup_watchdog(void)
{
    touch_softlockup_watchdog_sched();
    wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
0279 
/* Delay the next softlockup report on every CPU the watchdog runs on. */
void touch_all_softlockup_watchdogs(void)
{
    int cpu;

    /*
     * watchdog_mutex cannot be taken here, as this might be called
     * from (soft)interrupt context, so the access to
     * watchdog_allowed_cpumask might race with a concurrent update.
     *
     * The watchdog time stamp can race against a concurrent real
     * update as well, the only side effect might be a cycle delay for
     * the softlockup check.
     */
    for_each_cpu(cpu, &watchdog_allowed_mask) {
        per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
        wq_watchdog_touch(cpu);
    }
}
0298 
/*
 * Touch the watchdog and additionally request a sched_clock_tick() in
 * the next timer interrupt (handled via softlockup_touch_sync in
 * watchdog_timer_fn()).
 */
void touch_softlockup_watchdog_sync(void)
{
    __this_cpu_write(softlockup_touch_sync, true);
    __this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}
0304 
0305 static int is_softlockup(unsigned long touch_ts,
0306              unsigned long period_ts,
0307              unsigned long now)
0308 {
0309     if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh){
0310         /* Warn about unreasonable delays. */
0311         if (time_after(now, period_ts + get_softlockup_thresh()))
0312             return now - touch_ts;
0313     }
0314     return 0;
0315 }
0316 
0317 /* watchdog detector functions */
0318 bool is_hardlockup(void)
0319 {
0320     unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
0321 
0322     if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
0323         return true;
0324 
0325     __this_cpu_write(hrtimer_interrupts_saved, hrint);
0326     return false;
0327 }
0328 
/* Called from the watchdog hrtimer; proves the timer interrupt is alive. */
static void watchdog_interrupt_count(void)
{
    __this_cpu_inc(hrtimer_interrupts);
}
0333 
/* Per-cpu state for feeding the watchdog via the stop-machine workqueue. */
static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);

/*
 * The watchdog feed function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
    update_touch_ts();
    complete(this_cpu_ptr(&softlockup_completion));

    return 0;
}
0352 
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
    unsigned long touch_ts, period_ts, now;
    struct pt_regs *regs = get_irq_regs();
    int duration;
    int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

    /* Watchdog switched off in the meantime: don't rearm the timer. */
    if (!watchdog_enabled)
        return HRTIMER_NORESTART;

    /* kick the hardlockup detector */
    watchdog_interrupt_count();

    /* kick the softlockup detector */
    if (completion_done(this_cpu_ptr(&softlockup_completion))) {
        reinit_completion(this_cpu_ptr(&softlockup_completion));
        stop_one_cpu_nowait(smp_processor_id(),
                softlockup_fn, NULL,
                this_cpu_ptr(&softlockup_stop_work));
    }

    /* .. and repeat */
    hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

    /*
     * Read the current timestamp first. It might become invalid anytime
     * when a virtual machine is stopped by the host or when the watchdog
     * is touched from NMI.
     */
    now = get_timestamp();
    /*
     * If a virtual machine is stopped by the host it can look to
     * the watchdog like a soft lockup. This function touches the watchdog.
     */
    kvm_check_and_clear_guest_paused();
    /*
     * The stored timestamp is comparable with @now only when not touched.
     * It might get touched anytime from NMI. Make sure that is_softlockup()
     * uses the same (valid) value.
     */
    period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));

    /* Reset the interval when touched by known problematic code. */
    if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
        if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
            /*
             * If the time stamp was touched atomically
             * make sure the scheduler tick is up to date.
             */
            __this_cpu_write(softlockup_touch_sync, false);
            sched_clock_tick();
        }

        update_report_ts();
        return HRTIMER_RESTART;
    }

    /* Check for a softlockup. */
    touch_ts = __this_cpu_read(watchdog_touch_ts);
    duration = is_softlockup(touch_ts, period_ts, now);
    if (unlikely(duration)) {
        /*
         * Prevent multiple soft-lockup reports if one cpu is already
         * engaged in dumping all cpu back traces.
         */
        if (softlockup_all_cpu_backtrace) {
            if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
                return HRTIMER_RESTART;
        }

        /* Start period for the next softlockup warning. */
        update_report_ts();

        pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
            smp_processor_id(), duration,
            current->comm, task_pid_nr(current));
        print_modules();
        print_irqtrace_events(current);
        if (regs)
            show_regs(regs);
        else
            dump_stack();

        /* Dump the other CPUs too, then let the next reporter through. */
        if (softlockup_all_cpu_backtrace) {
            trigger_allbutself_cpu_backtrace();
            clear_bit_unlock(0, &soft_lockup_nmi_warn);
        }

        add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
        if (softlockup_panic)
            panic("softlockup: hung tasks");
    }

    return HRTIMER_RESTART;
}
0449 
/* Start the softlockup hrtimer and the NMI watchdog on the current CPU. */
static void watchdog_enable(unsigned int cpu)
{
    struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
    struct completion *done = this_cpu_ptr(&softlockup_completion);

    /* Must run on the CPU it is enabling (per-cpu state, pinned timer). */
    WARN_ON_ONCE(cpu != smp_processor_id());

    /* Mark the completion done so the first timer tick queues the feeder. */
    init_completion(done);
    complete(done);

    /*
     * Start the timer first to prevent the NMI watchdog triggering
     * before the timer has a chance to fire.
     */
    hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
    hrtimer->function = watchdog_timer_fn;
    hrtimer_start(hrtimer, ns_to_ktime(sample_period),
              HRTIMER_MODE_REL_PINNED_HARD);

    /* Initialize timestamp */
    update_touch_ts();
    /* Enable the perf event */
    if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
        watchdog_nmi_enable(cpu);
}
0475 
/* Stop the NMI watchdog and the softlockup hrtimer on the current CPU. */
static void watchdog_disable(unsigned int cpu)
{
    struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

    /* Must run on the CPU it is disabling (per-cpu state, pinned timer). */
    WARN_ON_ONCE(cpu != smp_processor_id());

    /*
     * Disable the perf event first. That prevents that a large delay
     * between disabling the timer and disabling the perf event causes
     * the perf NMI to detect a false positive.
     */
    watchdog_nmi_disable(cpu);
    hrtimer_cancel(hrtimer);
    /* Wait for an in-flight softlockup_fn() feeder to finish. */
    wait_for_completion(this_cpu_ptr(&softlockup_completion));
}
0491 
/* smp_call_on_cpu() callback: disable the watchdog on the local CPU. */
static int softlockup_stop_fn(void *data)
{
    watchdog_disable(smp_processor_id());
    return 0;
}
0497 
/* Disable the watchdog on every CPU it is currently allowed to run on. */
static void softlockup_stop_all(void)
{
    int cpu;

    /* Nothing running before lockup_detector_setup() has completed. */
    if (!softlockup_initialized)
        return;

    for_each_cpu(cpu, &watchdog_allowed_mask)
        smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

    cpumask_clear(&watchdog_allowed_mask);
}
0510 
/* smp_call_on_cpu() callback: enable the watchdog on the local CPU. */
static int softlockup_start_fn(void *data)
{
    watchdog_enable(smp_processor_id());
    return 0;
}
0516 
/* Enable the watchdog on every CPU in the configured watchdog_cpumask. */
static void softlockup_start_all(void)
{
    int cpu;

    cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
    for_each_cpu(cpu, &watchdog_allowed_mask)
        smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}
0525 
/* CPU hotplug online callback: start the watchdog if @cpu is allowed. */
int lockup_detector_online_cpu(unsigned int cpu)
{
    if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
        watchdog_enable(cpu);
    return 0;
}

/* CPU hotplug offline callback: stop the watchdog if it ran on @cpu. */
int lockup_detector_offline_cpu(unsigned int cpu)
{
    if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
        watchdog_disable(cpu);
    return 0;
}
0539 
/*
 * Stop all watchdogs, pick up the updated configuration variables and
 * restart them.  Caller holds watchdog_mutex.
 */
static void __lockup_detector_reconfigure(void)
{
    cpus_read_lock();
    watchdog_nmi_stop();

    softlockup_stop_all();
    set_sample_period();
    lockup_detector_update_enable();
    if (watchdog_enabled && watchdog_thresh)
        softlockup_start_all();

    watchdog_nmi_start();
    cpus_read_unlock();
    /*
     * Must be called outside the cpus locked section to prevent
     * recursive locking in the perf code.
     */
    __lockup_detector_cleanup();
}
0559 
/* Serialized wrapper around __lockup_detector_reconfigure(). */
void lockup_detector_reconfigure(void)
{
    mutex_lock(&watchdog_mutex);
    __lockup_detector_reconfigure();
    mutex_unlock(&watchdog_mutex);
}
0566 
/*
 * Create the watchdog infrastructure and configure the detector(s).
 */
static __init void lockup_detector_setup(void)
{
    /*
     * If sysctl is off and watchdog got disabled on the command line,
     * nothing to do here.
     */
    lockup_detector_update_enable();

    if (!IS_ENABLED(CONFIG_SYSCTL) &&
        !(watchdog_enabled && watchdog_thresh))
        return;

    mutex_lock(&watchdog_mutex);
    __lockup_detector_reconfigure();
    /* From now on softlockup_stop_all() has per-cpu state to tear down. */
    softlockup_initialized = true;
    mutex_unlock(&watchdog_mutex);
}
0587 
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
/* Without the softlockup detector only the NMI watchdog is reconfigured. */
static void __lockup_detector_reconfigure(void)
{
    cpus_read_lock();
    watchdog_nmi_stop();
    lockup_detector_update_enable();
    watchdog_nmi_start();
    cpus_read_unlock();
}
void lockup_detector_reconfigure(void)
{
    __lockup_detector_reconfigure();
}
static inline void lockup_detector_setup(void)
{
    __lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
0606 
/* Release perf resources of dead CPUs.  Caller holds watchdog_mutex. */
static void __lockup_detector_cleanup(void)
{
    lockdep_assert_held(&watchdog_mutex);
    hardlockup_detector_perf_cleanup();
}
0612 
/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
    mutex_lock(&watchdog_mutex);
    __lockup_detector_cleanup();
    mutex_unlock(&watchdog_mutex);
}
0624 
/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
    watchdog_enabled = 0;
}
0635 
0636 #ifdef CONFIG_SYSCTL
0637 
/* Propagate any changes to the watchdog infrastructure */
static void proc_watchdog_update(void)
{
    /* Remove impossible cpus to keep sysctl output clean. */
    cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
    __lockup_detector_reconfigure();
}
0645 
0646 /*
0647  * common function for watchdog, nmi_watchdog and soft_watchdog parameter
0648  *
0649  * caller             | table->data points to      | 'which'
0650  * -------------------|----------------------------|--------------------------
0651  * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
0652  *                    |                            | SOFT_WATCHDOG_ENABLED
0653  * -------------------|----------------------------|--------------------------
0654  * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
0655  * -------------------|----------------------------|--------------------------
0656  * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
0657  */
0658 static int proc_watchdog_common(int which, struct ctl_table *table, int write,
0659                 void *buffer, size_t *lenp, loff_t *ppos)
0660 {
0661     int err, old, *param = table->data;
0662 
0663     mutex_lock(&watchdog_mutex);
0664 
0665     if (!write) {
0666         /*
0667          * On read synchronize the userspace interface. This is a
0668          * racy snapshot.
0669          */
0670         *param = (watchdog_enabled & which) != 0;
0671         err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
0672     } else {
0673         old = READ_ONCE(*param);
0674         err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
0675         if (!err && old != READ_ONCE(*param))
0676             proc_watchdog_update();
0677     }
0678     mutex_unlock(&watchdog_mutex);
0679     return err;
0680 }
0681 
/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
          void *buffer, size_t *lenp, loff_t *ppos)
{
    return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
                    table, write, buffer, lenp, ppos);
}
0691 
/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
              void *buffer, size_t *lenp, loff_t *ppos)
{
    /* Writes are rejected when no NMI watchdog was probed at boot. */
    if (!nmi_watchdog_available && write)
        return -ENOTSUPP;
    return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
                    table, write, buffer, lenp, ppos);
}
0703 
/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
            void *buffer, size_t *lenp, loff_t *ppos)
{
    return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
                    table, write, buffer, lenp, ppos);
}
0713 
/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
             void *buffer, size_t *lenp, loff_t *ppos)
{
    int err, old;

    mutex_lock(&watchdog_mutex);

    old = READ_ONCE(watchdog_thresh);
    err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

    /* Only reconfigure when a write actually changed the threshold. */
    if (!err && write && old != READ_ONCE(watchdog_thresh))
        proc_watchdog_update();

    mutex_unlock(&watchdog_mutex);
    return err;
}
0733 
/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
              void *buffer, size_t *lenp, loff_t *ppos)
{
    int err;

    mutex_lock(&watchdog_mutex);

    err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
    if (!err && write)
        proc_watchdog_update();

    mutex_unlock(&watchdog_mutex);
    return err;
}
0754 
/* Upper bound for kernel.watchdog_thresh (seconds). */
static const int sixty = 60;

/* kernel.* sysctl knobs for the lockup detectors. */
static struct ctl_table watchdog_sysctls[] = {
    {
        .procname       = "watchdog",
        .data       = &watchdog_user_enabled,
        .maxlen     = sizeof(int),
        .mode       = 0644,
        .proc_handler   = proc_watchdog,
        .extra1     = SYSCTL_ZERO,
        .extra2     = SYSCTL_ONE,
    },
    {
        .procname   = "watchdog_thresh",
        .data       = &watchdog_thresh,
        .maxlen     = sizeof(int),
        .mode       = 0644,
        .proc_handler   = proc_watchdog_thresh,
        .extra1     = SYSCTL_ZERO,
        .extra2     = (void *)&sixty,
    },
    {
        .procname       = "nmi_watchdog",
        .data       = &nmi_watchdog_user_enabled,
        .maxlen     = sizeof(int),
        .mode       = NMI_WATCHDOG_SYSCTL_PERM,
        .proc_handler   = proc_nmi_watchdog,
        .extra1     = SYSCTL_ZERO,
        .extra2     = SYSCTL_ONE,
    },
    {
        .procname   = "watchdog_cpumask",
        .data       = &watchdog_cpumask_bits,
        .maxlen     = NR_CPUS,
        .mode       = 0644,
        .proc_handler   = proc_watchdog_cpumask,
    },
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
    {
        .procname       = "soft_watchdog",
        .data       = &soft_watchdog_user_enabled,
        .maxlen     = sizeof(int),
        .mode       = 0644,
        .proc_handler   = proc_soft_watchdog,
        .extra1     = SYSCTL_ZERO,
        .extra2     = SYSCTL_ONE,
    },
    {
        .procname   = "softlockup_panic",
        .data       = &softlockup_panic,
        .maxlen     = sizeof(int),
        .mode       = 0644,
        .proc_handler   = proc_dointvec_minmax,
        .extra1     = SYSCTL_ZERO,
        .extra2     = SYSCTL_ONE,
    },
#ifdef CONFIG_SMP
    {
        .procname   = "softlockup_all_cpu_backtrace",
        .data       = &sysctl_softlockup_all_cpu_backtrace,
        .maxlen     = sizeof(int),
        .mode       = 0644,
        .proc_handler   = proc_dointvec_minmax,
        .extra1     = SYSCTL_ZERO,
        .extra2     = SYSCTL_ONE,
    },
#endif /* CONFIG_SMP */
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR
    {
        .procname   = "hardlockup_panic",
        .data       = &hardlockup_panic,
        .maxlen     = sizeof(int),
        .mode       = 0644,
        .proc_handler   = proc_dointvec_minmax,
        .extra1     = SYSCTL_ZERO,
        .extra2     = SYSCTL_ONE,
    },
#ifdef CONFIG_SMP
    {
        .procname   = "hardlockup_all_cpu_backtrace",
        .data       = &sysctl_hardlockup_all_cpu_backtrace,
        .maxlen     = sizeof(int),
        .mode       = 0644,
        .proc_handler   = proc_dointvec_minmax,
        .extra1     = SYSCTL_ZERO,
        .extra2     = SYSCTL_ONE,
    },
#endif /* CONFIG_SMP */
#endif
    {}  /* sentinel */
};
0847 
/* Register the watchdog sysctl table under /proc/sys/kernel. */
static void __init watchdog_sysctl_init(void)
{
    register_sysctl_init("kernel", watchdog_sysctls);
}
#else
#define watchdog_sysctl_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
0855 
/* Boot-time initialization: pick the allowed CPUs, probe and start. */
void __init lockup_detector_init(void)
{
    if (tick_nohz_full_enabled())
        pr_info("Disabling watchdog on nohz_full cores by default\n");

    /* Default the watchdog to the housekeeping (timer) CPUs only. */
    cpumask_copy(&watchdog_cpumask,
             housekeeping_cpumask(HK_TYPE_TIMER));

    /* watchdog_nmi_probe() returns 0 when an NMI watchdog exists. */
    if (!watchdog_nmi_probe())
        nmi_watchdog_available = true;
    lockup_detector_setup();
    watchdog_sysctl_init();
}