0001
0002
0003
0004
0005
0006
0007
0008
0009 #undef DEBUG
0010 #define pr_fmt(fmt) "mce: " fmt
0011
0012 #include <linux/hardirq.h>
0013 #include <linux/types.h>
0014 #include <linux/ptrace.h>
0015 #include <linux/percpu.h>
0016 #include <linux/export.h>
0017 #include <linux/irq_work.h>
0018 #include <linux/extable.h>
0019 #include <linux/ftrace.h>
0020 #include <linux/memblock.h>
0021 #include <linux/of.h>
0022
0023 #include <asm/interrupt.h>
0024 #include <asm/machdep.h>
0025 #include <asm/mce.h>
0026 #include <asm/nmi.h>
0027
0028 #include "setup.h"
0029
0030 static void machine_check_ue_event(struct machine_check_event *evt);
0031 static void machine_process_ue_event(struct work_struct *work);
0032
0033 static DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
0034
0035 static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);
0036
/*
 * Register a notifier to be called for each UE event drained from the
 * per-CPU UE queue (see machine_process_ue_event()).
 */
int mce_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_register_notifier);
0042
/*
 * Remove a notifier previously added with mce_register_notifier().
 */
int mce_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_notifier);
0048
/*
 * Copy the per-error-type subtype from the decoded mce_error_info into
 * the matching member of the event's error union.  The union member
 * written is selected by error_type, which is also recorded in the event.
 */
static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		/* No subtype to record for unknown error types */
		break;
	}
}
0080
/*
 * Arrange for queued MCE events to be processed in interrupt-replay
 * context (see mce_run_irq_context_handlers(), which tests the
 * mce_pending_irq_work flag set here).
 */
void mce_irq_work_queue(void)
{
	/* Raise an irq_work interrupt so the events get processed soon */
	arch_irq_work_raise();
	set_mce_pending_irq_work();
}
0087
0088
0089
0090
0091
0092 void save_mce_event(struct pt_regs *regs, long handled,
0093 struct mce_error_info *mce_err,
0094 uint64_t nip, uint64_t addr, uint64_t phys_addr)
0095 {
0096 int index = local_paca->mce_info->mce_nest_count++;
0097 struct machine_check_event *mce;
0098
0099 mce = &local_paca->mce_info->mce_event[index];
0100
0101
0102
0103
0104
0105 if (index >= MAX_MC_EVT)
0106 return;
0107
0108
0109 mce->version = MCE_V1;
0110 mce->srr0 = nip;
0111 mce->srr1 = regs->msr;
0112 mce->gpr3 = regs->gpr[3];
0113 mce->in_use = 1;
0114 mce->cpu = get_paca()->paca_index;
0115
0116
0117 if (handled && (regs->msr & MSR_RI))
0118 mce->disposition = MCE_DISPOSITION_RECOVERED;
0119 else
0120 mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
0121
0122 mce->initiator = mce_err->initiator;
0123 mce->severity = mce_err->severity;
0124 mce->sync_error = mce_err->sync_error;
0125 mce->error_class = mce_err->error_class;
0126
0127
0128
0129
0130 mce_set_error_info(mce, mce_err);
0131 if (mce->error_type == MCE_ERROR_TYPE_UE)
0132 mce->u.ue_error.ignore_event = mce_err->ignore_event;
0133
0134 if (!addr)
0135 return;
0136
0137 if (mce->error_type == MCE_ERROR_TYPE_TLB) {
0138 mce->u.tlb_error.effective_address_provided = true;
0139 mce->u.tlb_error.effective_address = addr;
0140 } else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
0141 mce->u.slb_error.effective_address_provided = true;
0142 mce->u.slb_error.effective_address = addr;
0143 } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
0144 mce->u.erat_error.effective_address_provided = true;
0145 mce->u.erat_error.effective_address = addr;
0146 } else if (mce->error_type == MCE_ERROR_TYPE_USER) {
0147 mce->u.user_error.effective_address_provided = true;
0148 mce->u.user_error.effective_address = addr;
0149 } else if (mce->error_type == MCE_ERROR_TYPE_RA) {
0150 mce->u.ra_error.effective_address_provided = true;
0151 mce->u.ra_error.effective_address = addr;
0152 } else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
0153 mce->u.link_error.effective_address_provided = true;
0154 mce->u.link_error.effective_address = addr;
0155 } else if (mce->error_type == MCE_ERROR_TYPE_UE) {
0156 mce->u.ue_error.effective_address_provided = true;
0157 mce->u.ue_error.effective_address = addr;
0158 if (phys_addr != ULONG_MAX) {
0159 mce->u.ue_error.physical_address_provided = true;
0160 mce->u.ue_error.physical_address = phys_addr;
0161 machine_check_ue_event(mce);
0162 }
0163 }
0164 return;
0165 }
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184 int get_mce_event(struct machine_check_event *mce, bool release)
0185 {
0186 int index = local_paca->mce_info->mce_nest_count - 1;
0187 struct machine_check_event *mc_evt;
0188 int ret = 0;
0189
0190
0191 if (index < 0)
0192 return ret;
0193
0194
0195 if (index < MAX_MC_EVT) {
0196 mc_evt = &local_paca->mce_info->mce_event[index];
0197
0198 if (mce)
0199 *mce = *mc_evt;
0200 if (release)
0201 mc_evt->in_use = 0;
0202 ret = 1;
0203 }
0204
0205 if (release)
0206 local_paca->mce_info->mce_nest_count--;
0207
0208 return ret;
0209 }
0210
/*
 * Discard the most recent per-CPU MCE event without reading it.
 */
void release_mce_event(void)
{
	get_mce_event(NULL, true);
}
0215
/*
 * Kick the workqueue handler that drains the per-CPU UE event queue
 * (machine_process_ue_event()).
 */
static void machine_check_ue_work(void)
{
	schedule_work(&mce_ue_event_work);
}
0220
0221
0222
0223
0224 static void machine_check_ue_event(struct machine_check_event *evt)
0225 {
0226 int index;
0227
0228 index = local_paca->mce_info->mce_ue_count++;
0229
0230 if (index >= MAX_MC_EVT) {
0231 local_paca->mce_info->mce_ue_count--;
0232 return;
0233 }
0234 memcpy(&local_paca->mce_info->mce_ue_event_queue[index],
0235 evt, sizeof(*evt));
0236
0237
0238 mce_irq_work_queue();
0239 }
0240
0241
0242
0243
0244 void machine_check_queue_event(void)
0245 {
0246 int index;
0247 struct machine_check_event evt;
0248
0249 if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
0250 return;
0251
0252 index = local_paca->mce_info->mce_queue_count++;
0253
0254 if (index >= MAX_MC_EVT) {
0255 local_paca->mce_info->mce_queue_count--;
0256 return;
0257 }
0258 memcpy(&local_paca->mce_info->mce_event_queue[index],
0259 &evt, sizeof(evt));
0260
0261 mce_irq_work_queue();
0262 }
0263
/*
 * Common UE handling: if the faulting NIP has a kernel exception-table
 * fixup, redirect execution to the fixup and flag the event so the
 * later reporting/memory-failure paths know it was recovered in-place.
 */
void mce_common_process_ue(struct pt_regs *regs,
			   struct mce_error_info *mce_err)
{
	const struct exception_table_entry *entry;

	entry = search_kernel_exception_table(regs->nip);
	if (entry) {
		mce_err->ignore_event = true;
		regs_set_return_ip(regs, extable_fixup(entry));
	}
}
0275
0276
0277
0278
0279
/*
 * Workqueue handler: drain the per-CPU UE event queue.  Each event is
 * passed to the registered notifiers and, when CONFIG_MEMORY_FAILURE
 * is enabled, the affected page is handed to memory_failure() for
 * poisoning/offlining.
 *
 * NOTE(review): this walks local_paca's queue from process context;
 * presumably the work item runs on the CPU that queued it - confirm
 * against the schedule_work() call site.
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (local_paca->mce_info->mce_ue_count > 0) {
		index = local_paca->mce_info->mce_ue_count - 1;
		evt = &local_paca->mce_info->mce_ue_event_queue[index];
		blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * Attempt to isolate the page that took the UE.  Events
		 * already fixed up via the exception table (ignore_event)
		 * are simply discarded.
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.ignore_event) {
				local_paca->mce_info->mce_ue_count--;
				continue;
			}

			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				/* mode 0: not a pre-emptive (soft) offline */
				memory_failure(pfn, 0);
			} else
				pr_warn("Failed to identify bad address from "
					"where the uncorrectable error (UE) "
					"was generated\n");
		}
#endif
		local_paca->mce_info->mce_ue_count--;
	}
}
0319
0320
0321
0322
/*
 * Drain the per-CPU MCE logging queue, printing each event.  Events
 * that were recovered via exception-table fixup (UE + ignore_event)
 * are dropped silently.  Runs from interrupt-replay context.
 */
static void machine_check_process_queued_event(void)
{
	int index;
	struct machine_check_event *evt;

	/* Any machine check taints the kernel */
	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	while (local_paca->mce_info->mce_queue_count > 0) {
		index = local_paca->mce_info->mce_queue_count - 1;
		evt = &local_paca->mce_info->mce_event_queue[index];

		if (evt->error_type == MCE_ERROR_TYPE_UE &&
		    evt->u.ue_error.ignore_event) {
			local_paca->mce_info->mce_queue_count--;
			continue;
		}
		machine_check_print_event_info(evt, false, false);
		local_paca->mce_info->mce_queue_count--;
	}
}
0347
/* Flag that this CPU has MCE work to run from irq-replay context */
void set_mce_pending_irq_work(void)
{
	local_paca->mce_pending_irq_work = 1;
}
0352
/* Clear the per-CPU pending-MCE-work flag once the work has run */
void clear_mce_pending_irq_work(void)
{
	local_paca->mce_pending_irq_work = 0;
}
0357
/*
 * Run the deferred MCE handlers if any are pending on this CPU:
 * platform error logging, printing of queued events, and scheduling
 * of UE memory-failure work.  The pending flag is cleared last so the
 * work is not lost if we are re-entered.
 */
void mce_run_irq_context_handlers(void)
{
	if (unlikely(local_paca->mce_pending_irq_work)) {
		if (ppc_md.machine_check_log_err)
			ppc_md.machine_check_log_err();
		machine_check_process_queued_event();
		machine_check_ue_work();
		clear_mce_pending_irq_work();
	}
}
0368
0369 void machine_check_print_event_info(struct machine_check_event *evt,
0370 bool user_mode, bool in_guest)
0371 {
0372 const char *level, *sevstr, *subtype, *err_type, *initiator;
0373 uint64_t ea = 0, pa = 0;
0374 int n = 0;
0375 char dar_str[50];
0376 char pa_str[50];
0377 static const char *mc_ue_types[] = {
0378 "Indeterminate",
0379 "Instruction fetch",
0380 "Page table walk ifetch",
0381 "Load/Store",
0382 "Page table walk Load/Store",
0383 };
0384 static const char *mc_slb_types[] = {
0385 "Indeterminate",
0386 "Parity",
0387 "Multihit",
0388 };
0389 static const char *mc_erat_types[] = {
0390 "Indeterminate",
0391 "Parity",
0392 "Multihit",
0393 };
0394 static const char *mc_tlb_types[] = {
0395 "Indeterminate",
0396 "Parity",
0397 "Multihit",
0398 };
0399 static const char *mc_user_types[] = {
0400 "Indeterminate",
0401 "tlbie(l) invalid",
0402 "scv invalid",
0403 };
0404 static const char *mc_ra_types[] = {
0405 "Indeterminate",
0406 "Instruction fetch (bad)",
0407 "Instruction fetch (foreign/control memory)",
0408 "Page table walk ifetch (bad)",
0409 "Page table walk ifetch (foreign/control memory)",
0410 "Load (bad)",
0411 "Store (bad)",
0412 "Page table walk Load/Store (bad)",
0413 "Page table walk Load/Store (foreign/control memory)",
0414 "Load/Store (foreign/control memory)",
0415 };
0416 static const char *mc_link_types[] = {
0417 "Indeterminate",
0418 "Instruction fetch (timeout)",
0419 "Page table walk ifetch (timeout)",
0420 "Load (timeout)",
0421 "Store (timeout)",
0422 "Page table walk Load/Store (timeout)",
0423 };
0424 static const char *mc_error_class[] = {
0425 "Unknown",
0426 "Hardware error",
0427 "Probable Hardware error (some chance of software cause)",
0428 "Software error",
0429 "Probable Software error (some chance of hardware cause)",
0430 };
0431
0432
0433 if (evt->version != MCE_V1) {
0434 pr_err("Machine Check Exception, Unknown event version %d !\n",
0435 evt->version);
0436 return;
0437 }
0438 switch (evt->severity) {
0439 case MCE_SEV_NO_ERROR:
0440 level = KERN_INFO;
0441 sevstr = "Harmless";
0442 break;
0443 case MCE_SEV_WARNING:
0444 level = KERN_WARNING;
0445 sevstr = "Warning";
0446 break;
0447 case MCE_SEV_SEVERE:
0448 level = KERN_ERR;
0449 sevstr = "Severe";
0450 break;
0451 case MCE_SEV_FATAL:
0452 default:
0453 level = KERN_ERR;
0454 sevstr = "Fatal";
0455 break;
0456 }
0457
0458 switch(evt->initiator) {
0459 case MCE_INITIATOR_CPU:
0460 initiator = "CPU";
0461 break;
0462 case MCE_INITIATOR_PCI:
0463 initiator = "PCI";
0464 break;
0465 case MCE_INITIATOR_ISA:
0466 initiator = "ISA";
0467 break;
0468 case MCE_INITIATOR_MEMORY:
0469 initiator = "Memory";
0470 break;
0471 case MCE_INITIATOR_POWERMGM:
0472 initiator = "Power Management";
0473 break;
0474 case MCE_INITIATOR_UNKNOWN:
0475 default:
0476 initiator = "Unknown";
0477 break;
0478 }
0479
0480 switch (evt->error_type) {
0481 case MCE_ERROR_TYPE_UE:
0482 err_type = "UE";
0483 subtype = evt->u.ue_error.ue_error_type <
0484 ARRAY_SIZE(mc_ue_types) ?
0485 mc_ue_types[evt->u.ue_error.ue_error_type]
0486 : "Unknown";
0487 if (evt->u.ue_error.effective_address_provided)
0488 ea = evt->u.ue_error.effective_address;
0489 if (evt->u.ue_error.physical_address_provided)
0490 pa = evt->u.ue_error.physical_address;
0491 break;
0492 case MCE_ERROR_TYPE_SLB:
0493 err_type = "SLB";
0494 subtype = evt->u.slb_error.slb_error_type <
0495 ARRAY_SIZE(mc_slb_types) ?
0496 mc_slb_types[evt->u.slb_error.slb_error_type]
0497 : "Unknown";
0498 if (evt->u.slb_error.effective_address_provided)
0499 ea = evt->u.slb_error.effective_address;
0500 break;
0501 case MCE_ERROR_TYPE_ERAT:
0502 err_type = "ERAT";
0503 subtype = evt->u.erat_error.erat_error_type <
0504 ARRAY_SIZE(mc_erat_types) ?
0505 mc_erat_types[evt->u.erat_error.erat_error_type]
0506 : "Unknown";
0507 if (evt->u.erat_error.effective_address_provided)
0508 ea = evt->u.erat_error.effective_address;
0509 break;
0510 case MCE_ERROR_TYPE_TLB:
0511 err_type = "TLB";
0512 subtype = evt->u.tlb_error.tlb_error_type <
0513 ARRAY_SIZE(mc_tlb_types) ?
0514 mc_tlb_types[evt->u.tlb_error.tlb_error_type]
0515 : "Unknown";
0516 if (evt->u.tlb_error.effective_address_provided)
0517 ea = evt->u.tlb_error.effective_address;
0518 break;
0519 case MCE_ERROR_TYPE_USER:
0520 err_type = "User";
0521 subtype = evt->u.user_error.user_error_type <
0522 ARRAY_SIZE(mc_user_types) ?
0523 mc_user_types[evt->u.user_error.user_error_type]
0524 : "Unknown";
0525 if (evt->u.user_error.effective_address_provided)
0526 ea = evt->u.user_error.effective_address;
0527 break;
0528 case MCE_ERROR_TYPE_RA:
0529 err_type = "Real address";
0530 subtype = evt->u.ra_error.ra_error_type <
0531 ARRAY_SIZE(mc_ra_types) ?
0532 mc_ra_types[evt->u.ra_error.ra_error_type]
0533 : "Unknown";
0534 if (evt->u.ra_error.effective_address_provided)
0535 ea = evt->u.ra_error.effective_address;
0536 break;
0537 case MCE_ERROR_TYPE_LINK:
0538 err_type = "Link";
0539 subtype = evt->u.link_error.link_error_type <
0540 ARRAY_SIZE(mc_link_types) ?
0541 mc_link_types[evt->u.link_error.link_error_type]
0542 : "Unknown";
0543 if (evt->u.link_error.effective_address_provided)
0544 ea = evt->u.link_error.effective_address;
0545 break;
0546 case MCE_ERROR_TYPE_DCACHE:
0547 err_type = "D-Cache";
0548 subtype = "Unknown";
0549 break;
0550 case MCE_ERROR_TYPE_ICACHE:
0551 err_type = "I-Cache";
0552 subtype = "Unknown";
0553 break;
0554 default:
0555 case MCE_ERROR_TYPE_UNKNOWN:
0556 err_type = "Unknown";
0557 subtype = "";
0558 break;
0559 }
0560
0561 dar_str[0] = pa_str[0] = '\0';
0562 if (ea && evt->srr0 != ea) {
0563
0564 n = sprintf(dar_str, "DAR: %016llx ", ea);
0565 if (pa)
0566 sprintf(dar_str + n, "paddr: %016llx ", pa);
0567 } else if (pa) {
0568 sprintf(pa_str, " paddr: %016llx", pa);
0569 }
0570
0571 printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
0572 level, evt->cpu, sevstr, in_guest ? "Guest" : "",
0573 err_type, subtype, dar_str,
0574 evt->disposition == MCE_DISPOSITION_RECOVERED ?
0575 "Recovered" : "Not recovered");
0576
0577 if (in_guest || user_mode) {
0578 printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
0579 level, evt->cpu, current->pid, current->comm,
0580 in_guest ? "Guest " : "", evt->srr0, pa_str);
0581 } else {
0582 printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
0583 level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
0584 }
0585
0586 printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);
0587
0588 subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
0589 mc_error_class[evt->error_class] : "Unknown";
0590 printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);
0591
0592 #ifdef CONFIG_PPC_64S_HASH_MMU
0593
0594 if (evt->error_type == MCE_ERROR_TYPE_SLB && !in_guest)
0595 slb_dump_contents(local_paca->mce_faulty_slbs);
0596 #endif
0597 }
0598 EXPORT_SYMBOL_GPL(machine_check_print_event_info);
0599
0600
0601
0602
0603
0604
/*
 * First-stage machine check handler, run in NMI-like context before
 * the normal machine check processing.  Delegates recovery to the
 * platform's machine_check_early() hook when one is registered.
 * Returns non-zero if the platform handled (recovered) the error.
 */
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
{
	long handled = 0;

	/* Check for a state we cannot safely return from */
	hv_nmi_check_nonrecoverable(regs);

	/* See if the platform knows how to handle (and recover) this */
	if (ppc_md.machine_check_early)
		handled = ppc_md.machine_check_early(regs);

	return handled;
}
0619
0620
/*
 * What the HMI debug trigger is used for on this CPU, as determined
 * once at boot by init_debug_trig_function().
 */
static enum {
	DTRIG_UNKNOWN,		/* not determined / not applicable */
	DTRIG_VECTOR_CI,	/* trigger flags a vector CI load to emulate */
	DTRIG_SUSPEND_ESCAPE,	/* trigger flags a TM suspend escape */
} hmer_debug_trig_function;
0626
/*
 * Determine what the HMI debug trigger is used for on this CPU.
 * Prefer an explicit "ibm,hmi-special-triggers" device-tree property
 * on the boot CPU's node; fall back to a PVR-based choice for POWER9
 * parts with the 0xe000 PVR bits clear.
 */
static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree (pin the CPU while we do) */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/*
	 * If the property existed, trust it and skip the PVR fallback.
	 * NOTE(review): relies on prop being left non-NULL by the
	 * iteration macro iff the property was found - confirm against
	 * of_property_for_each_string().
	 */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* POWER9 with 0xe000 PVR bits clear only */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* Revision 2.02 and up: TM suspend escape */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* Revisions 2.00 and 2.01: vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}
__initcall(init_debug_trig_function);
0678
0679
0680
0681
0682
0683
0684
0685
/*
 * Handle an HMI caused by the debug trigger bit of HMER.
 *
 * Returns:
 *   >= 0: the interrupt was (only) a debug trigger and has been dealt
 *         with here; the value is what the HMI handler should return
 *         (1 means emulation work was flagged for the exit path).
 *   < 0:  not a debug trigger, or other enabled HMI causes are also
 *         pending: the caller must run the full HMI handling.
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* Bail unless the trigger fired and we know what it is used for */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/*
	 * Acknowledge the trigger in HMER.  NOTE(review): this writes
	 * all-ones except the trigger bit - presumably HMER writes act
	 * as a clearing mask (0 bits clear); confirm against the ISA.
	 */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * Only flag the vector CI load emulation when coming
		 * from host user space; the actual emulation happens
		 * on the interrupt exit path (hmi_p9_special_emu).
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;

		break;

	default:
		break;
	}

	/*
	 * If any other enabled HMI causes remain pending, have the
	 * caller run the normal HMI handling as well.
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}
0724
0725
0726
0727
/*
 * Real-mode Hypervisor Maintenance Interrupt handler.  Tries the
 * debug-trigger fast path first; otherwise synchronizes with any
 * sibling guest exit, calls the platform hook and waits for the
 * timebase to resync.
 */
DEFINE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode)
{
	int ret;

	/* Per-CPU HMI statistics */
	local_paca->hmi_irqs++;

	/* Fast path: debug-trigger HMIs are fully handled here */
	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}
0747
0748 void __init mce_init(void)
0749 {
0750 struct mce_info *mce_info;
0751 u64 limit;
0752 int i;
0753
0754 limit = min(ppc64_bolted_size(), ppc64_rma_size);
0755 for_each_possible_cpu(i) {
0756 mce_info = memblock_alloc_try_nid(sizeof(*mce_info),
0757 __alignof__(*mce_info),
0758 MEMBLOCK_LOW_LIMIT,
0759 limit, early_cpu_to_node(i));
0760 if (!mce_info)
0761 goto err;
0762 paca_ptrs[i]->mce_info = mce_info;
0763 }
0764 return;
0765 err:
0766 panic("Failed to allocate memory for MCE event data\n");
0767 }