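// SPDX-License-Identifier: GPL-2.0-only
/*
 * Detect hard and soft lockups on a system
 *
 * Soft lockups are detected with a per-CPU hrtimer that checks whether the
 * watchdog work (softlockup_fn) has run recently; hard lockups are detected
 * by an NMI-based detector that checks whether the hrtimer interrupts
 * themselves keep firing.
 */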
#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly nmi_watchdog_available;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif
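/*
 * Should we panic when a hard lockup occurs:
 */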
unsigned int __read_mostly hardlockup_panic =
                        IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
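/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */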
void __init hardlockup_detector_disable(void)
{
        nmi_watchdog_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                nmi_watchdog_user_enabled = 0;
        else if (!strncmp(str, "1", 1))
                nmi_watchdog_user_enabled = 1;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

#endif
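/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * the softlockup watchdog starts and stops on a CPU.
 */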
int __weak watchdog_nmi_enable(unsigned int cpu)
{
        hardlockup_detector_perf_enable();
        return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
        hardlockup_detector_perf_disable();
}
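/* Return 0 if an NMI watchdog is available, an error code otherwise. */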
int __weak __init watchdog_nmi_probe(void)
{
        return hardlockup_detector_perf_init();
}
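/*
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * Called from __lockup_detector_reconfigure() before the watchdog state
 * is changed; watchdog_nmi_start() is called once the update is done.
 */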
void __weak watchdog_nmi_stop(void) { }
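/*
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop(). Called from
 * __lockup_detector_reconfigure() after the new configuration
 * (watchdog_enabled, watchdog_thresh, watchdog_cpumask) has been applied.
 */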
void __weak watchdog_nmi_start(void) { }
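/*
 * lockup_detector_update_enable - Recompute watchdog_enabled from the
 * user-controlled *_user_enabled knobs.
 *
 * The caller must make sure the detectors are stopped, so this cannot
 * race with watchdog_nmi_disable().
 */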
static void lockup_detector_update_enable(void)
{
        watchdog_enabled = 0;
        if (!watchdog_user_enabled)
                return;
        if (nmi_watchdog_available && nmi_watchdog_user_enabled)
                watchdog_enabled |= NMI_WATCHDOG_ENABLED;
        if (soft_watchdog_user_enabled)
                watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
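/*
 * Writing SOFTLOCKUP_DELAY_REPORT into the per-CPU watchdog_report_ts
 * delays the next softlockup report, e.g. when a known slow path is
 * running. It does not affect watchdog_touch_ts, the timestamp of the
 * last successful reschedule.
 */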
#define SOFTLOCKUP_DELAY_REPORT	ULONG_MAX

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif

static struct cpumask watchdog_allowed_mask __read_mostly;

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
                        IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

/* Timestamp taken after the last successful reschedule. */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
/* Timestamp of the last softlockup report. */
static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

static int __init nowatchdog_setup(char *str)
{
        watchdog_user_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
        soft_watchdog_user_enabled = 0;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{
        get_option(&str, &watchdog_thresh);
        return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);

static void __lockup_detector_cleanup(void);
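/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. The two
 * are coupled by a factor of two: the soft threshold is twice the hard
 * threshold (watchdog_thresh).
 */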
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}
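/*
 * Returns seconds, approximately. We don't need nanosecond resolution,
 * and we don't need to waste time with a big divide when 2^30ns == 1.074s.
 */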
static unsigned long get_timestamp(void)
{
        return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
        /*
         * Convert watchdog_thresh from seconds to ns.
         * The divide by 5 is to give the hrtimer several chances (two
         * or three with the current relation between the soft and hard
         * thresholds) to increment before the hardlockup detector
         * generates a warning.
         */
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
        watchdog_update_hrtimer_threshold(sample_period);
}

static void update_report_ts(void)
{
        __this_cpu_write(watchdog_report_ts, get_timestamp());
}
static void update_touch_ts(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
        update_report_ts();
}
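/*
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */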
notrace void touch_softlockup_watchdog_sched(void)
{
        /*
         * Preemption can be enabled. It doesn't matter which CPU's watchdog
         * report period gets restarted here, so use the raw_ operation.
         */
        raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}

notrace void touch_softlockup_watchdog(void)
{
        touch_softlockup_watchdog_sched();
        wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * watchdog_mutex cannot be taken here, as this might be called
         * from (soft)interrupt context, so the access to
         * watchdog_allowed_mask might race with a concurrent update.
         *
         * The watchdog time stamp can race against a concurrent real
         * update as well; the only side effect might be a cycle of delay
         * for the softlockup check.
         */
        for_each_cpu(cpu, &watchdog_allowed_mask) {
                per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
                wq_watchdog_touch(cpu);
        }
}

void touch_softlockup_watchdog_sync(void)
{
        __this_cpu_write(softlockup_touch_sync, true);
        __this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}

static int is_softlockup(unsigned long touch_ts,
                         unsigned long period_ts,
                         unsigned long now)
{
        if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
                /* Warn about unreasonable delays. */
                if (time_after(now, period_ts + get_softlockup_thresh()))
                        return now - touch_ts;
        }
        return 0;
}

bool is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return true;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return false;
}

static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}

static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
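/*
 * The watchdog feed function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by default)
 * to reset the softlockup timestamp. If this gets delayed for more than
 * 2*watchdog_thresh seconds then the debug-printout triggers in
 * watchdog_timer_fn().
 */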
static int softlockup_fn(void *data)
{
        update_touch_ts();
        complete(this_cpu_ptr(&softlockup_completion));

        return 0;
}

/* watchdog kicker function */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts, period_ts, now;
        struct pt_regs *regs = get_irq_regs();
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

        if (!watchdog_enabled)
                return HRTIMER_NORESTART;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        if (completion_done(this_cpu_ptr(&softlockup_completion))) {
                reinit_completion(this_cpu_ptr(&softlockup_completion));
                stop_one_cpu_nowait(smp_processor_id(),
                                softlockup_fn, NULL,
                                this_cpu_ptr(&softlockup_stop_work));
        }

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        /*
         * Read the current timestamp first. It might become invalid anytime
         * when a virtual machine is stopped by the host or when the watchdog
         * is touched from NMI.
         */
        now = get_timestamp();
        /*
         * If a virtual machine is stopped by the host it can look to
         * the watchdog like a soft lockup. This function touches the watchdog.
         */
        kvm_check_and_clear_guest_paused();

        /*
         * The stored timestamp is comparable with @now only when not touched.
         * It might get touched anytime from NMI. Make sure that is_softlockup()
         * consistently checks the timestamp in a real check.
         */
        period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));

        /* Reset the interval when touched by known problematic code. */
        if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                update_report_ts();
                return HRTIMER_RESTART;
        }

        /* Check for a softlockup. */
        touch_ts = __this_cpu_read(watchdog_touch_ts);
        duration = is_softlockup(touch_ts, period_ts, now);
        if (unlikely(duration)) {
                /*
                 * Prevent multiple soft-lockup reports if one cpu is already
                 * engaged in dumping all cpu back traces.
                 */
                if (softlockup_all_cpu_backtrace) {
                        if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
                                return HRTIMER_RESTART;
                }

                /* Start period for the next softlockup warning. */
                update_report_ts();

                pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_all_cpu_backtrace) {
                        trigger_allbutself_cpu_backtrace();
                        clear_bit_unlock(0, &soft_lockup_nmi_warn);
                }

                add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
        }

        return HRTIMER_RESTART;
}

static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
        struct completion *done = this_cpu_ptr(&softlockup_completion);

        WARN_ON_ONCE(cpu != smp_processor_id());

        init_completion(done);
        complete(done);

        /*
         * Start the timer first to prevent the hardlockup detector from
         * triggering before the timer has a chance to fire.
         */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        hrtimer->function = watchdog_timer_fn;
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED_HARD);

        /* Initialize timestamp */
        update_touch_ts();
        /* Enable the hardlockup detector */
        if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
                watchdog_nmi_enable(cpu);
}

static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

        WARN_ON_ONCE(cpu != smp_processor_id());

        /*
         * Disable the hardlockup detector first. That prevents a large
         * delay between disabling the timer and disabling the hardlockup
         * detector from causing a false positive.
         */
        watchdog_nmi_disable(cpu);
        hrtimer_cancel(hrtimer);
        wait_for_completion(this_cpu_ptr(&softlockup_completion));
}

static int softlockup_stop_fn(void *data)
{
        watchdog_disable(smp_processor_id());
        return 0;
}

static void softlockup_stop_all(void)
{
        int cpu;

        if (!softlockup_initialized)
                return;

        for_each_cpu(cpu, &watchdog_allowed_mask)
                smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

        cpumask_clear(&watchdog_allowed_mask);
}

static int softlockup_start_fn(void *data)
{
        watchdog_enable(smp_processor_id());
        return 0;
}

static void softlockup_start_all(void)
{
        int cpu;

        cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
        for_each_cpu(cpu, &watchdog_allowed_mask)
                smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}

int lockup_detector_online_cpu(unsigned int cpu)
{
        if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
                watchdog_enable(cpu);
        return 0;
}

int lockup_detector_offline_cpu(unsigned int cpu)
{
        if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
                watchdog_disable(cpu);
        return 0;
}

static void __lockup_detector_reconfigure(void)
{
        cpus_read_lock();
        watchdog_nmi_stop();

        softlockup_stop_all();
        set_sample_period();
        lockup_detector_update_enable();
        if (watchdog_enabled && watchdog_thresh)
                softlockup_start_all();

        watchdog_nmi_start();
        cpus_read_unlock();
        /*
         * Must be called outside the cpus locked section to prevent
         * recursive locking in the perf code.
         */
        __lockup_detector_cleanup();
}

void lockup_detector_reconfigure(void)
{
        mutex_lock(&watchdog_mutex);
        __lockup_detector_reconfigure();
        mutex_unlock(&watchdog_mutex);
}
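/*
 * Create the watchdog infrastructure and configure the detector(s).
 */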
static __init void lockup_detector_setup(void)
{
        /*
         * If sysctl is off and the watchdog got disabled on the command
         * line, nothing to do here.
         */
        lockup_detector_update_enable();

        if (!IS_ENABLED(CONFIG_SYSCTL) &&
            !(watchdog_enabled && watchdog_thresh))
                return;

        mutex_lock(&watchdog_mutex);
        __lockup_detector_reconfigure();
        softlockup_initialized = true;
        mutex_unlock(&watchdog_mutex);
}

#else
static void __lockup_detector_reconfigure(void)
{
        cpus_read_lock();
        watchdog_nmi_stop();
        lockup_detector_update_enable();
        watchdog_nmi_start();
        cpus_read_unlock();
}
void lockup_detector_reconfigure(void)
{
        __lockup_detector_reconfigure();
}
static inline void lockup_detector_setup(void)
{
        __lockup_detector_reconfigure();
}
#endif

static void __lockup_detector_cleanup(void)
{
        lockdep_assert_held(&watchdog_mutex);
        hardlockup_detector_perf_cleanup();
}
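/*
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */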
void lockup_detector_cleanup(void)
{
        mutex_lock(&watchdog_mutex);
        __lockup_detector_cleanup();
        mutex_unlock(&watchdog_mutex);
}
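/*
 * lockup_detector_soft_poweroff - Stop the lockup detector(s)
 *
 * Prevents lockup detector warnings from power-off paths that
 * deliberately busy-loop forever.
 */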
void lockup_detector_soft_poweroff(void)
{
        watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate the new sysctl settings to the watchdog infrastructure. */
static void proc_watchdog_update(void)
{
        /* Remove impossible cpus to keep sysctl output clean. */
        cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
        __lockup_detector_reconfigure();
}
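/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameters
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */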
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
                                void *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old, *param = table->data;

        mutex_lock(&watchdog_mutex);

        if (!write) {
                /*
                 * On read synchronize the userspace interface. This is a
                 * racy snapshot.
                 */
                *param = (watchdog_enabled & which) != 0;
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        } else {
                old = READ_ONCE(*param);
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
                if (!err && old != READ_ONCE(*param))
                        proc_watchdog_update();
        }
        mutex_unlock(&watchdog_mutex);
        return err;
}
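/*
 * /proc/sys/kernel/watchdog
 */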
int proc_watchdog(struct ctl_table *table, int write,
                  void *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}
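/*
 * /proc/sys/kernel/nmi_watchdog
 */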
int proc_nmi_watchdog(struct ctl_table *table, int write,
                      void *buffer, size_t *lenp, loff_t *ppos)
{
        if (!nmi_watchdog_available && write)
                return -ENOTSUPP;
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}
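/*
 * /proc/sys/kernel/soft_watchdog
 */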
int proc_soft_watchdog(struct ctl_table *table, int write,
                       void *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}
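/*
 * /proc/sys/kernel/watchdog_thresh
 */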
int proc_watchdog_thresh(struct ctl_table *table, int write,
                         void *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old;

        mutex_lock(&watchdog_mutex);

        old = READ_ONCE(watchdog_thresh);
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

        if (!err && write && old != READ_ONCE(watchdog_thresh))
                proc_watchdog_update();

        mutex_unlock(&watchdog_mutex);
        return err;
}
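/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */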
int proc_watchdog_cpumask(struct ctl_table *table, int write,
                          void *buffer, size_t *lenp, loff_t *ppos)
{
        int err;

        mutex_lock(&watchdog_mutex);

        err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
        if (!err && write)
                proc_watchdog_update();

        mutex_unlock(&watchdog_mutex);
        return err;
}

static const int sixty = 60;
static struct ctl_table watchdog_sysctls[] = {
        {
                .procname       = "watchdog",
                .data           = &watchdog_user_enabled,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_watchdog,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
        {
                .procname       = "watchdog_thresh",
                .data           = &watchdog_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_watchdog_thresh,
                .extra1         = SYSCTL_ZERO,
                .extra2         = (void *)&sixty,
        },
        {
                .procname       = "nmi_watchdog",
                .data           = &nmi_watchdog_user_enabled,
                .maxlen         = sizeof(int),
                .mode           = NMI_WATCHDOG_SYSCTL_PERM,
                .proc_handler   = proc_nmi_watchdog,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
        {
                .procname       = "watchdog_cpumask",
                .data           = &watchdog_cpumask_bits,
                .maxlen         = NR_CPUS,
                .mode           = 0644,
                .proc_handler   = proc_watchdog_cpumask,
        },
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
        {
                .procname       = "soft_watchdog",
                .data           = &soft_watchdog_user_enabled,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_soft_watchdog,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
        {
                .procname       = "softlockup_panic",
                .data           = &softlockup_panic,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
#ifdef CONFIG_SMP
        {
                .procname       = "softlockup_all_cpu_backtrace",
                .data           = &sysctl_softlockup_all_cpu_backtrace,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
#endif
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR
        {
                .procname       = "hardlockup_panic",
                .data           = &hardlockup_panic,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
#ifdef CONFIG_SMP
        {
                .procname       = "hardlockup_all_cpu_backtrace",
                .data           = &sysctl_hardlockup_all_cpu_backtrace,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
#endif
#endif
        {}
};

static void __init watchdog_sysctl_init(void)
{
        register_sysctl_init("kernel", watchdog_sysctls);
}
#else
#define watchdog_sysctl_init() do { } while (0)
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
        if (tick_nohz_full_enabled())
                pr_info("Disabling watchdog on nohz_full cores by default\n");

        cpumask_copy(&watchdog_cpumask,
                     housekeeping_cpumask(HK_TYPE_TIMER));

        if (!watchdog_nmi_probe())
                nmi_watchdog_available = true;
        lockup_detector_setup();
        watchdog_sysctl_init();
}