0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/export.h>
0010 #include <linux/string.h>
0011 #include <linux/sched.h>
0012 #include <linux/init.h>
0013 #include <linux/kernel.h>
0014 #include <linux/reboot.h>
0015 #include <linux/delay.h>
0016 #include <linux/initrd.h>
0017 #include <linux/seq_file.h>
0018 #include <linux/ioport.h>
0019 #include <linux/console.h>
0020 #include <linux/utsname.h>
0021 #include <linux/tty.h>
0022 #include <linux/root_dev.h>
0023 #include <linux/notifier.h>
0024 #include <linux/cpu.h>
0025 #include <linux/unistd.h>
0026 #include <linux/serial.h>
0027 #include <linux/serial_8250.h>
0028 #include <linux/memblock.h>
0029 #include <linux/pci.h>
0030 #include <linux/lockdep.h>
0031 #include <linux/memory.h>
0032 #include <linux/nmi.h>
0033 #include <linux/pgtable.h>
0034 #include <linux/of.h>
0035 #include <linux/of_fdt.h>
0036
0037 #include <asm/kvm_guest.h>
0038 #include <asm/io.h>
0039 #include <asm/kdump.h>
0040 #include <asm/processor.h>
0041 #include <asm/smp.h>
0042 #include <asm/elf.h>
0043 #include <asm/machdep.h>
0044 #include <asm/paca.h>
0045 #include <asm/time.h>
0046 #include <asm/cputable.h>
0047 #include <asm/dt_cpu_ftrs.h>
0048 #include <asm/sections.h>
0049 #include <asm/btext.h>
0050 #include <asm/nvram.h>
0051 #include <asm/setup.h>
0052 #include <asm/rtas.h>
0053 #include <asm/iommu.h>
0054 #include <asm/serial.h>
0055 #include <asm/cache.h>
0056 #include <asm/page.h>
0057 #include <asm/mmu.h>
0058 #include <asm/firmware.h>
0059 #include <asm/xmon.h>
0060 #include <asm/udbg.h>
0061 #include <asm/kexec.h>
0062 #include <asm/code-patching.h>
0063 #include <asm/ftrace.h>
0064 #include <asm/opal.h>
0065 #include <asm/cputhreads.h>
0066 #include <asm/hw_irq.h>
0067 #include <asm/feature-fixups.h>
0068 #include <asm/kup.h>
0069 #include <asm/early_ioremap.h>
0070 #include <asm/pgalloc.h>
0071
0072 #include "setup.h"
0073
/* Number of secondary CPUs still spinning in the early hold loop */
int spinning_secondaries;
/* Hash page table size; presumably set from firmware/device tree elsewhere */
u64 ppc64_pft_size;

/*
 * Conservative L1 cache defaults (64-byte blocks) used until the real
 * geometry is read from the device tree in initialize_cache_info().
 */
struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);
0088
#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
/*
 * Point every CPU's tcd_ptr at the TLB core data owned by the first
 * thread of its core, so all threads on a core share one structure.
 */
void __init setup_tlb_core_data(void)
{
	int cpu;

	/* Low-level TLB code expects the lock at offset 0 of the tcd */
	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If the boot CPU is a non-first thread of this core
		 * (e.g. when entered via kdump on a secondary thread),
		 * use the boot CPU's tcd for the whole core, since the
		 * first thread's paca may not be usable.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif
0121
0122 #ifdef CONFIG_SMP
0123
/* Raw "smt-enabled=" command line value, consumed by check_smt_enabled() */
static char *smt_enabled_cmdline;
0125
0126
/* Determine smt_enabled_at_boot from cmdline or the ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			/* Numeric value: cap at the hardware thread count */
			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}
0167
0168
/* Record the "smt-enabled=" command line option for later parsing */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);
0175
0176 #endif
0177
0178
/* Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu things to be initialized */
	get_paca()->data_offset = 0;
	/* Mark interrupts soft-disabled in the PACA */
	irq_soft_mask_set(IRQS_DISABLED);
}
0188
/*
 * Configure exception handling for the platform: kdump trampolines,
 * SCV/AIL setup under a PAPR hypervisor, or OPAL core configuration
 * on bare metal.
 */
static void __init configure_exceptions(void)
{
	/*
	 * Setup the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/*
		 * - PR KVM does not support AIL mode interrupts in the host
		 *   while a PR guest is running.
		 *
		 * - SCV system call interrupt vectors are only implemented for
		 *   AIL mode interrupts.
		 *
		 * - On pseries, AIL mode can only be enabled and disabled
		 *   system-wide so when a PR VM is created on a pseries host,
		 *   all CPUs of the host are set to AIL=0 mode.
		 *
		 * - Therefore host CPUs must not execute scv while a PR VM
		 *   exists.
		 *
		 * - SCV support can not be disabled dynamically because the
		 *   feature is advertised to host userspace. So SCV support is
		 *   blanket disabled if PR KVM could possibly run (that is,
		 *   PR support compiled in and booting in HPT mode).
		 */
		if (IS_ENABLED(CONFIG_KVM_BOOK3S_PR_POSSIBLE) && !radix_enabled()) {
			init_task.thread.fscr &= ~FSCR_SCV;
			cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
		}

		/* If AIL cannot be enabled, SCV must be disabled too */
		if (!pseries_enable_reloc_on_exc()) {
			init_task.thread.fscr &= ~FSCR_SCV;
			cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
		}

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL call */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}
0252
/*
 * Prepare this CPU to take interrupts in virtual mode: enable AIL/HAIL
 * in the LPCR when in hypervisor mode, fix up HFSCR[TM], and set the
 * kernel MSR in the PACA. Called once per CPU.
 */
static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. If we
	 * are not in hypervisor mode this is done for the whole partition
	 * in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		unsigned long new_lpcr = lpcr;

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			/* P10 DD1 does not have HAIL; fall back to AIL=3 */
			if (pvr_version_is(PVR_POWER10) &&
					(mfspr(SPRN_PVR) & 0xf00) == 0x100)
				new_lpcr |= LPCR_AIL_3;
			else
				new_lpcr |= LPCR_HAIL;
		} else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
			new_lpcr |= LPCR_AIL_3;
		}

		if (new_lpcr != lpcr)
			mtspr(SPRN_LPCR, new_lpcr);
	}

	/*
	 * Set HFSCR:TM based on the CPU features: make the hypervisor
	 * facility state match what the kernel has decided about TM
	 * availability (CPU_FTR_TM_COMP), rather than trusting whatever
	 * firmware left in HFSCR.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cpu_has_feature(CPU_FTR_TM_COMP))
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
		else
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
	}

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}
0299
/* Boot-time DSCR value, used as the default for new threads */
unsigned long spr_default_dscr = 0;

/* Snapshot configurable SPR values left by firmware as our defaults */
static void __init record_spr_defaults(void)
{
	if (early_cpu_has_feature(CPU_FTR_DSCR))
		spr_default_dscr = mfspr(SPRN_DSCR);
}
0307
0308
0309
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
0323
0324
0325
0326
/*
 * Early initialization entry point. This is called by head.S with MMU
 * translation disabled. The ordering below is delicate: each step may
 * only rely on what the previous steps have set up (no printk, no
 * percpu, no real paca until noted).
 */
void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/*
	 * Assume we're on cpu 0 for now, and set up a temporary paca so
	 * that get_paca()/mfspr etc. work during the rest of early boot.
	 */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/*
	 * Identify the CPU: prefer device-tree CPU features, falling
	 * back to PVR-based identification.
	 */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Using standard cpu feature detection based on the PVR */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	udbg_printf(" -> %s(), dt_ptr: 0x%lx\n", __func__, dt_ptr);

	/*
	 * Do early initialization using the flattened device tree, such
	 * as retrieving the physical memory map and discovering
	 * boot_cpuid and the possible CPUs.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the real paca. */
	if (boot_cpuid != 0) {
		/* Poison the paca_ptrs[0] pointer slot if we are not cpu 0 */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	setup_paca(paca_ptrs[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers: kdump trampolines, exception
	 * endian mode, AIL/SCV, etc...
	 */
	configure_exceptions();

	/*
	 * Configure Kernel Userspace Protection. This needs to happen
	 * before feature fixups for platforms that implement this using
	 * CPU features.
	 */
	setup_kup();

	/* Apply all the dynamic code patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	early_ioremap_setup();

	/*
	 * After firmware and early platform setup code has set things
	 * up, record the SPR values for configurable registers and use
	 * those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been set up), so adjust the MSR in the PACA to
	 * have IR and DR set, and enable AIL if it exists.
	 */
	cpu_ready_for_interrupts();

	/*
	 * Enable ftrace on this CPU; note this only takes real effect
	 * once ftrace itself is initialized later in boot.
	 */
	this_cpu_enable_ftrace();

	udbg_printf(" <- %s()\n", __func__);

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above udbg_printf()
	 * even) so that the btext user gets properly moved when kexec-ing
	 * a kernel with an early debug console.
	 */
	btext_map();
#endif
}
0439
#ifdef CONFIG_SMP
/* Early per-CPU initialization for secondary CPUs (paca already set up) */
void early_setup_secondary(void)
{
	/* Mark interrupts soft-disabled in the PACA */
	irq_soft_mask_set(IRQS_DISABLED);

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/* Perform any KUP setup that is per-cpu */
	setup_kup();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been set up), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif
0461
/*
 * Park this CPU during a panic: hard-disable interrupts and spin
 * forever at low thread priority.
 */
void panic_smp_self_stop(void)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}
0469
0470 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
0471 static bool use_spinloop(void)
0472 {
0473 if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
0474
0475
0476
0477
0478
0479 if (firmware_has_feature(FW_FEATURE_OPAL))
0480 return false;
0481 return true;
0482 }
0483
0484
0485
0486
0487
0488 return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
0489 }
0490
/*
 * Release secondary CPUs from the common hold spinloop by writing the
 * entry address into __secondary_hold_spinloop (via its physical
 * address), then wait briefly for them to report in.
 */
void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	/*
	 * All secondary cpus are spinning on a common spinloop, release
	 * them all now so they can start to spin on their individual
	 * paca spinloops.
	 *
	 * The spinloop word must be accessed through its physical
	 * address, hence the PHYSICAL_START adjustment.
	 */
	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	pr_debug("spinning_secondaries = %d\n", spinning_secondaries);
}
0519 #endif
0520
0521
0522
0523
0524
0525
0526
0527
0528
0529 static void __init init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
0530 u32 bsize, u32 sets)
0531 {
0532 info->size = size;
0533 info->sets = sets;
0534 info->line_size = lsize;
0535 info->block_size = bsize;
0536 info->log_block_size = __ilog2(bsize);
0537 if (bsize)
0538 info->blocks_per_page = PAGE_SIZE / bsize;
0539 else
0540 info->blocks_per_page = 0;
0541
0542 if (sets == 0)
0543 info->assoc = 0xffff;
0544 else
0545 info->assoc = size / (sets * lsize);
0546 }
0547
/*
 * Parse one cache level's geometry for device node @np into @info.
 * @icache selects the i-cache vs d-cache property names. Returns false
 * when any of the size/block/line properties was missing (defaults from
 * cur_cpu_spec are still filled in).
 */
static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	/* Block and line size default to each other when one is absent */
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep == NULL)
		lsizep = bsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}
0606
/*
 * Populate ppc64_caches from the device tree (or hard-coded POWER8
 * values) and export the L1 block sizes for binfmt_elf / cur_cpu_spec.
 */
void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
		/* args: info, size, lsize, bsize, sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000, 128, 128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000, 128, 128, 64);
		init_cache_info(&ppc64_caches.l2, 0x80000, 128, 0, 512);
		init_cache_info(&ppc64_caches.l3, 0x800000, 128, 0, 8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes...
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			pr_warn("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			pr_warn("Argh, can't find icache properties !\n");

		/*
		 * Walk the cache node chain from the cpu node to pick up
		 * L2 and L3 geometry when present.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;
}
0664
0665
0666
0667
0668
0669
0670
0671
0672
0673
/*
 * Return the size of kernel address space guaranteed to be covered by
 * a bolted translation, i.e. usable for stacks that may be accessed
 * before the MMU is fully set up (or in real-mode-ish contexts).
 */
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS radix, does not take faults on the linear mapping */
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS hash, the first segment is bolted */
	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}
0694
/*
 * Allocate a THREAD_SIZE stack below @limit, preferably on @cpu's NUMA
 * node. Panics on failure — these stacks are required to boot.
 */
static void *__init alloc_stack(unsigned long limit, int cpu)
{
	void *ptr;

	/* Stack frame code assumes 16-byte alignment of the frame size */
	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);

	ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_ALIGN,
				     MEMBLOCK_LOW_LIMIT, limit,
				     early_cpu_to_node(cpu));
	if (!ptr)
		panic("cannot allocate stacks");

	return ptr;
}
0709
/* Allocate per-CPU soft/hard IRQ stacks within the bolted region */
void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be within the bolted mapping since we
	 * cannot afford to take SLB/TLB misses on them.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_stack(limit, i);
		hardirq_ctx[i] = alloc_stack(limit, i);
	}
}
0725
#ifdef CONFIG_PPC_BOOK3E
/*
 * Allocate per-CPU stacks for the BookE critical, debug and machine
 * check exception levels, and record their tops in each paca.
 */
void __init exc_lvl_early_init(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		void *sp;

		sp = alloc_stack(ULONG_MAX, i);
		critirq_ctx[i] = sp;
		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		dbgirq_ctx[i] = sp;
		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		mcheckirq_ctx[i] = sp;
		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
	}

	/* Route the debug exception to the debug-level handler if present */
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif
0751
0752
0753
0754
0755
0756
/*
 * Allocate the per-CPU emergency stacks used for system reset, machine
 * check and other contexts where the normal kernel stack cannot be
 * trusted.
 */
void __init emergency_stack_init(void)
{
	u64 limit, mce_limit;
	unsigned int i;

	/*
	 * Emergency stacks are used for a range of things, from
	 * asynchronous NMIs (system reset, machine check) to synchronous
	 * process context. Since we use these as temporary stacks during
	 * secondary CPU bringup and for real-mode-entered exceptions,
	 * they must be within both the bolted region and the RMA.
	 */
	limit = mce_limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Machine check on pseries calls rtas, but can't use the static
	 * rtas_args due to a machine check hitting while the lock is held.
	 * rtas args have to be under 4GB, so the machine check stack is
	 * limited to 4GB so args can be put on stack.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) && mce_limit > SZ_4G)
		mce_limit = SZ_4G;

	for_each_possible_cpu(i) {
		paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		paca_ptrs[i]->mc_emergency_sp = alloc_stack(mce_limit, i) + THREAD_SIZE;
#endif
	}
}
0799
0800 #ifdef CONFIG_SMP
0801 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
0802 {
0803 if (early_cpu_to_node(from) == early_cpu_to_node(to))
0804 return LOCAL_DISTANCE;
0805 else
0806 return REMOTE_DISTANCE;
0807 }
0808
/* Map a CPU to its NUMA node for the percpu first-chunk allocator */
static __init int pcpu_cpu_to_node(int cpu)
{
	return early_cpu_to_node(cpu);
}
0813
/* Per-CPU area offsets, indexed by CPU; consumed by the per_cpu accessors */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
0816
0817 void __init setup_per_cpu_areas(void)
0818 {
0819 const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
0820 size_t atom_size;
0821 unsigned long delta;
0822 unsigned int cpu;
0823 int rc = -EINVAL;
0824
0825
0826
0827
0828 if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
0829 atom_size = SZ_1M;
0830 } else if (radix_enabled()) {
0831 atom_size = PAGE_SIZE;
0832 } else if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU)) {
0833
0834
0835
0836
0837
0838 if (mmu_linear_psize == MMU_PAGE_4K)
0839 atom_size = PAGE_SIZE;
0840 else
0841 atom_size = SZ_1M;
0842 }
0843
0844 if (pcpu_chosen_fc != PCPU_FC_PAGE) {
0845 rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
0846 pcpu_cpu_to_node);
0847 if (rc)
0848 pr_warn("PERCPU: %s allocator failed (%d), "
0849 "falling back to page size\n",
0850 pcpu_fc_names[pcpu_chosen_fc], rc);
0851 }
0852
0853 if (rc < 0)
0854 rc = pcpu_page_first_chunk(0, pcpu_cpu_to_node);
0855 if (rc < 0)
0856 panic("cannot initialize percpu area (err=%d)", rc);
0857
0858 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
0859 for_each_possible_cpu(cpu) {
0860 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
0861 paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
0862 }
0863 }
0864 #endif
0865
#ifdef CONFIG_MEMORY_HOTPLUG
/* Platform-provided memory block size, or the architecture minimum */
unsigned long memory_block_size_bytes(void)
{
	return ppc_md.memory_block_size ? ppc_md.memory_block_size()
					: MIN_MEMORY_BLOCK_SIZE;
}
#endif
0875
#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
/* Indirect PIO/MMIO accessor table; entries filled in by platform code */
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
0880
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
/* perf hardlockup detector period: watchdog_thresh seconds worth of cycles */
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
#endif
0887
0888
0889
0890
0891
0892
0893
0894
0895
0896
0897
/*
 * Disable the generic hardlockup detector by default: when the perf
 * based detector is built it is always disabled here (presumably the
 * powerpc watchdog is preferred — confirm against Kconfig help).
 * Otherwise, under LPAR firmware it is disabled only when running as a
 * KVM guest, where it is prone to false positives.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
	hardlockup_detector_disable();
#else
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		if (is_kvm_guest())
			hardlockup_detector_disable();
	}
#endif

	return 0;
}
early_initcall(disable_hardlockup_detector);