// SPDX-License-Identifier: GPL-2.0
/*
 * Common codes for both the skx_edac driver and the Intel 10nm server
 * EDAC driver. Originally split out from the skx_edac driver.
 *
 * This file is linked into both drivers; shared helpers for bus
 * enumeration, DIMM discovery and machine-check decoding live here.
 */
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/adxl.h>
#include <acpi/nfit.h>
#include <asm/mce.h>
#include "edac_module.h"
#include "skx_common.h"

static const char * const component_names[] = {
        [INDEX_SOCKET]     = "ProcessorSocketId",
        [INDEX_MEMCTRL]    = "MemoryControllerId",
        [INDEX_CHANNEL]    = "ChannelId",
        [INDEX_DIMM]       = "DimmSlotId",
        [INDEX_NM_MEMCTRL] = "NmMemoryControllerId",
        [INDEX_NM_CHANNEL] = "NmChannelId",
        [INDEX_NM_DIMM]    = "NmDimmSlotId",
};

static int component_indices[ARRAY_SIZE(component_names)];
static int adxl_component_count;
static const char * const *adxl_component_names;
static u64 *adxl_values;
static char *adxl_msg;
static unsigned long adxl_nm_bitmap;

static char skx_msg[MSG_SIZE];
static skx_decode_f skx_decode;
static skx_show_retry_log_f skx_show_retry_rd_err_log;
static u64 skx_tolm, skx_tohm;
static LIST_HEAD(dev_edac_list);
static bool skx_mem_cfg_2lm;

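/*
 * Look up the ADXL component names exported by the ACPI DSM, record the
 * index of each component this driver decodes, and allocate the buffers
 * used to hold decoded values and the decoded-address message.
 */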
int __init skx_adxl_get(void)
{
        const char * const *names;
        int i, j;

        names = adxl_get_component_names();
        if (!names) {
                skx_printk(KERN_NOTICE, "No firmware support for address translation.\n");
                return -ENODEV;
        }

        for (i = 0; i < INDEX_MAX; i++) {
                for (j = 0; names[j]; j++) {
                        if (!strcmp(component_names[i], names[j])) {
                                component_indices[i] = j;

                                if (i >= INDEX_NM_FIRST)
                                        adxl_nm_bitmap |= 1 << i;

                                break;
                        }
                }

                if (!names[j] && i < INDEX_NM_FIRST)
                        goto err;
        }

        if (skx_mem_cfg_2lm) {
                if (!adxl_nm_bitmap)
                        skx_printk(KERN_NOTICE, "Not enough ADXL components for 2-level memory.\n");
                else
                        edac_dbg(2, "adxl_nm_bitmap: 0x%lx\n", adxl_nm_bitmap);
        }

        adxl_component_names = names;
        while (*names++)
                adxl_component_count++;

        adxl_values = kcalloc(adxl_component_count, sizeof(*adxl_values),
                              GFP_KERNEL);
        if (!adxl_values) {
                adxl_component_count = 0;
                return -ENOMEM;
        }

        adxl_msg = kzalloc(MSG_SIZE, GFP_KERNEL);
        if (!adxl_msg) {
                adxl_component_count = 0;
                kfree(adxl_values);
                return -ENOMEM;
        }

        return 0;
err:
        skx_printk(KERN_ERR, "'%s' is not matched from DSM parameters: ",
                   component_names[i]);
        for (j = 0; names[j]; j++)
                skx_printk(KERN_CONT, "%s ", names[j]);
        skx_printk(KERN_CONT, "\n");

        return -ENODEV;
}

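/* Release the buffers allocated by skx_adxl_get(). */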
void __exit skx_adxl_put(void)
{
        kfree(adxl_values);
        kfree(adxl_msg);
}

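/*
 * Translate a system address into socket/iMC/channel/DIMM coordinates
 * via the firmware ADXL DSM. For an error in the first (near) memory
 * level of a 2-level configuration, use the "Nm*" components instead,
 * falling back to -1 for any component the firmware doesn't provide.
 */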
static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_mem)
{
        struct skx_dev *d;
        int i, len = 0;

        if (res->addr >= skx_tohm || (res->addr >= skx_tolm &&
                                      res->addr < BIT_ULL(32))) {
                edac_dbg(0, "Address 0x%llx out of range\n", res->addr);
                return false;
        }

        if (adxl_decode(res->addr, adxl_values)) {
                edac_dbg(0, "Failed to decode 0x%llx\n", res->addr);
                return false;
        }

        res->socket = (int)adxl_values[component_indices[INDEX_SOCKET]];
        if (error_in_1st_level_mem) {
                res->imc = (adxl_nm_bitmap & BIT_NM_MEMCTRL) ?
                           (int)adxl_values[component_indices[INDEX_NM_MEMCTRL]] : -1;
                res->channel = (adxl_nm_bitmap & BIT_NM_CHANNEL) ?
                               (int)adxl_values[component_indices[INDEX_NM_CHANNEL]] : -1;
                res->dimm = (adxl_nm_bitmap & BIT_NM_DIMM) ?
                            (int)adxl_values[component_indices[INDEX_NM_DIMM]] : -1;
        } else {
                res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]];
                res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]];
                res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]];
        }

        if (res->imc > NUM_IMC - 1 || res->imc < 0) {
                skx_printk(KERN_ERR, "Bad imc %d\n", res->imc);
                return false;
        }

        list_for_each_entry(d, &dev_edac_list, list) {
                if (d->imc[0].src_id == res->socket) {
                        res->dev = d;
                        break;
                }
        }

        if (!res->dev) {
                skx_printk(KERN_ERR, "No device for src_id %d imc %d\n",
                           res->socket, res->imc);
                return false;
        }

        for (i = 0; i < adxl_component_count; i++) {
                if (adxl_values[i] == ~0x0ull)
                        continue;

                len += snprintf(adxl_msg + len, MSG_SIZE - len, " %s:0x%llx",
                                adxl_component_names[i], adxl_values[i]);
                if (MSG_SIZE - len <= 0)
                        break;
        }

        return true;
}

void skx_set_mem_cfg(bool mem_cfg_2lm)
{
        skx_mem_cfg_2lm = mem_cfg_2lm;
}

void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
{
        skx_decode = decode;
        skx_show_retry_rd_err_log = show_retry_log;
}

int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
{
        u32 reg;

        if (pci_read_config_dword(d->util_all, off, &reg)) {
                skx_printk(KERN_ERR, "Failed to read src id\n");
                return -ENODEV;
        }

        *id = GET_BITFIELD(reg, 12, 14);
        return 0;
}

int skx_get_node_id(struct skx_dev *d, u8 *id)
{
        u32 reg;

        if (pci_read_config_dword(d->util_all, 0xf4, &reg)) {
                skx_printk(KERN_ERR, "Failed to read node id\n");
                return -ENODEV;
        }

        *id = GET_BITFIELD(reg, 0, 2);
        return 0;
}

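/* Map the two-bit device-width field of the MTR register to an EDAC device type. */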
static int get_width(u32 mtr)
{
        switch (GET_BITFIELD(mtr, 8, 9)) {
        case 0:
                return DEV_X4;
        case 1:
                return DEV_X8;
        case 2:
                return DEV_X16;
        }
        return DEV_UNKNOWN;
}

/*
 * We use the per-socket device to count how many sockets are present,
 * and to determine which PCI buses are associated with each socket.
 * Allocate and build the full list of all the skx_dev structures that
 * we need here.
 */
int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
{
        struct pci_dev *pdev, *prev;
        struct skx_dev *d;
        u32 reg;
        int ndev = 0;

        prev = NULL;
        for (;;) {
                pdev = pci_get_device(PCI_VENDOR_ID_INTEL, cfg->decs_did, prev);
                if (!pdev)
                        break;
                ndev++;
                d = kzalloc(sizeof(*d), GFP_KERNEL);
                if (!d) {
                        pci_dev_put(pdev);
                        return -ENOMEM;
                }

                if (pci_read_config_dword(pdev, cfg->busno_cfg_offset, &reg)) {
                        kfree(d);
                        pci_dev_put(pdev);
                        skx_printk(KERN_ERR, "Failed to read bus idx\n");
                        return -ENODEV;
                }

                d->bus[0] = GET_BITFIELD(reg, 0, 7);
                d->bus[1] = GET_BITFIELD(reg, 8, 15);
                if (cfg->type == SKX) {
                        d->seg = pci_domain_nr(pdev->bus);
                        d->bus[2] = GET_BITFIELD(reg, 16, 23);
                        d->bus[3] = GET_BITFIELD(reg, 24, 31);
                } else {
                        d->seg = GET_BITFIELD(reg, 16, 23);
                }

                edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x\n",
                         d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
                list_add_tail(&d->list, &dev_edac_list);
                prev = pdev;
        }

        if (list)
                *list = &dev_edac_list;
        return ndev;
}

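/*
 * Read the "top of low memory" (TOLM) and "top of high memory" (TOHM)
 * registers, so address decoding can reject addresses that fall outside
 * the DRAM ranges (including the MMIO hole below 4GB).
 */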
int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
{
        struct pci_dev *pdev;
        u32 reg;

        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL);
        if (!pdev) {
                edac_dbg(2, "Can't get tolm/tohm\n");
                return -ENODEV;
        }

        if (pci_read_config_dword(pdev, off[0], &reg)) {
                skx_printk(KERN_ERR, "Failed to read tolm\n");
                goto fail;
        }
        skx_tolm = reg;

        if (pci_read_config_dword(pdev, off[1], &reg)) {
                skx_printk(KERN_ERR, "Failed to read lower tohm\n");
                goto fail;
        }
        skx_tohm = reg;

        if (pci_read_config_dword(pdev, off[2], &reg)) {
                skx_printk(KERN_ERR, "Failed to read upper tohm\n");
                goto fail;
        }
        skx_tohm |= (u64)reg << 32;

        pci_dev_put(pdev);
        *tolm = skx_tolm;
        *tohm = skx_tohm;
        edac_dbg(2, "tolm = 0x%llx tohm = 0x%llx\n", skx_tolm, skx_tohm);
        return 0;
fail:
        pci_dev_put(pdev);
        return -ENODEV;
}

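/*
 * Extract bits [lobit:hibit] of @reg, validate the raw value against
 * [minval, maxval], and return it biased by @add. The bias converts the
 * encoded field into a bit count, e.g. the row field encodes the number
 * of row address bits minus 12.
 */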
static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
                             int minval, int maxval, const char *name)
{
        u32 val = GET_BITFIELD(reg, lobit, hibit);

        if (val < minval || val > maxval) {
                edac_dbg(2, "bad %s = %d (raw=0x%x)\n", name, val, reg);
                return -EINVAL;
        }
        return val + add;
}

#define numrank(reg)    skx_get_dimm_attr(reg, 12, 13, 0, 0, 2, "ranks")
#define numrow(reg)     skx_get_dimm_attr(reg, 2, 4, 12, 1, 6, "rows")
#define numcol(reg)     skx_get_dimm_attr(reg, 0, 1, 10, 0, 2, "cols")

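/*
 * Fill in the EDAC dimm_info for one DDR4/DDR5/HBM2 DIMM from the MTR,
 * MCMTR and AMAP registers: geometry (rank/row/column bit counts and
 * banks), the resulting size in pages, and the EDAC label. For example,
 * rows = 16, cols = 10, ranks = 1 and banks = 16 gives
 * (1 << (16 + 10 + 1)) * 16 * 8 bytes = 16 GiB.
 */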
int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
                      struct skx_imc *imc, int chan, int dimmno,
                      struct res_config *cfg)
{
        int banks, ranks, rows, cols, npages;
        enum mem_type mtype;
        u64 size;

        ranks = numrank(mtr);
        rows = numrow(mtr);
        cols = imc->hbm_mc ? 6 : numcol(mtr);

        if (imc->hbm_mc) {
                banks = 32;
                mtype = MEM_HBM2;
        } else if (cfg->support_ddr5 && (amap & 0x8)) {
                banks = 32;
                mtype = MEM_DDR5;
        } else {
                banks = 16;
                mtype = MEM_DDR4;
        }

        /*
         * Compute size in 8-byte (2^3) words, then shift to MiB (2^20)
         */
        size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
        npages = MiB_TO_PAGES(size);

        edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: 0x%x, col: 0x%x\n",
                 imc->mc, chan, dimmno, size, npages,
                 banks, 1 << ranks, rows, cols);

        imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mcmtr, 0, 0);
        imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mcmtr, 9, 9);
        imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
        imc->chan[chan].dimms[dimmno].rowbits = rows;
        imc->chan[chan].dimms[dimmno].colbits = cols;

        dimm->nr_pages = npages;
        dimm->grain = 32;
        dimm->dtype = get_width(mtr);
        dimm->mtype = mtype;
        dimm->edac_mode = EDAC_SECDED;

        if (imc->hbm_mc)
                snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_HBMC#%u_Chan#%u",
                         imc->src_id, imc->lmc, chan);
        else
                snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
                         imc->src_id, imc->lmc, chan, dimmno);

        return 1;
}

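/*
 * NVDIMM sizes are not described by the memory controller registers, so
 * look the DIMM up through its NFIT device handle and read the size
 * from the matching SMBIOS memory device record.
 */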
int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
                        int chan, int dimmno, const char *mod_str)
{
        int smbios_handle;
        u32 dev_handle;
        u16 flags;
        u64 size = 0;

        dev_handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(dimmno, chan, imc->lmc,
                                                   imc->src_id, 0);

        smbios_handle = nfit_get_smbios_id(dev_handle, &flags);
        if (smbios_handle == -EOPNOTSUPP) {
                pr_warn_once("%s: Can't find size of NVDIMM. Try enabling CONFIG_ACPI_NFIT\n", mod_str);
                goto unknown_size;
        }

        if (smbios_handle < 0) {
                skx_printk(KERN_ERR, "Can't find handle for NVDIMM ADR=0x%x\n", dev_handle);
                goto unknown_size;
        }

        if (flags & ACPI_NFIT_MEM_MAP_FAILED) {
                skx_printk(KERN_ERR, "NVDIMM ADR=0x%x is not mapped\n", dev_handle);
                goto unknown_size;
        }

        size = dmi_memdev_size(smbios_handle);
        if (size == ~0ull)
                skx_printk(KERN_ERR, "Can't find size for NVDIMM ADR=0x%x/SMBIOS=0x%x\n",
                           dev_handle, smbios_handle);

unknown_size:
        dimm->nr_pages = size >> PAGE_SHIFT;
        dimm->grain = 32;
        dimm->dtype = DEV_UNKNOWN;
        dimm->mtype = MEM_NVDIMM;
        dimm->edac_mode = EDAC_SECDED;

        edac_dbg(0, "mc#%d: channel %d, dimm %d, %llu MiB (%u pages)\n",
                 imc->mc, chan, dimmno, size >> 20, dimm->nr_pages);

        snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
                 imc->src_id, imc->lmc, chan, dimmno);

        return (size == 0 || size == ~0ull) ? 0 : 1;
}

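/*
 * Allocate and register one EDAC memory controller for an iMC. The
 * caller supplies get_dimm_config() to enumerate the DIMMs behind it.
 * On failure, everything allocated here is unwound before returning.
 */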
int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
                     const char *ctl_name, const char *mod_str,
                     get_dimm_config_f get_dimm_config,
                     struct res_config *cfg)
{
        struct mem_ctl_info *mci;
        struct edac_mc_layer layers[2];
        struct skx_pvt *pvt;
        int rc;

        /* Allocate a new MC control structure */
        layers[0].type = EDAC_MC_LAYER_CHANNEL;
        layers[0].size = NUM_CHANNELS;
        layers[0].is_virt_csrow = false;
        layers[1].type = EDAC_MC_LAYER_SLOT;
        layers[1].size = NUM_DIMMS;
        layers[1].is_virt_csrow = true;
        mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
                            sizeof(struct skx_pvt));

        if (unlikely(!mci))
                return -ENOMEM;

        edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci);

        /* Associate skx_dev and mci for future usage */
        imc->mci = mci;
        pvt = mci->pvt_info;
        pvt->imc = imc;

        mci->ctl_name = kasprintf(GFP_KERNEL, "%s#%d IMC#%d", ctl_name,
                                  imc->node_id, imc->lmc);
        if (!mci->ctl_name) {
                rc = -ENOMEM;
                goto fail0;
        }

        mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_NVDIMM;
        if (cfg->support_ddr5)
                mci->mtype_cap |= MEM_FLAG_DDR5;
        mci->edac_ctl_cap = EDAC_FLAG_NONE;
        mci->edac_cap = EDAC_FLAG_NONE;
        mci->mod_name = mod_str;
        mci->dev_name = pci_name(pdev);
        mci->ctl_page_to_phys = NULL;

        rc = get_dimm_config(mci, cfg);
        if (rc < 0)
                goto fail;

        /* Record ptr to the generic device */
        mci->pdev = &pdev->dev;

        /* Add this new MC control structure to EDAC's list of MCs */
        if (unlikely(edac_mc_add_mc(mci))) {
                edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
                rc = -EINVAL;
                goto fail;
        }

        return 0;

fail:
        kfree(mci->ctl_name);
fail0:
        edac_mc_free(mci);
        imc->mci = NULL;
        return rc;
}

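/* Tear down one registered memory controller and free its resources. */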
static void skx_unregister_mci(struct skx_imc *imc)
{
        struct mem_ctl_info *mci = imc->mci;

        if (!mci)
                return;

        edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci);

        /* Remove MC statistics in debugfs */
        edac_mc_del_mc(mci->pdev);

        edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
        kfree(mci->ctl_name);
        edac_mc_free(mci);
}

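/*
 * Format the decoded machine-check information and report it through
 * edac_mc_handle_error(). The event is classified as corrected,
 * uncorrected or fatal from the MCi_STATUS/MCG_STATUS bits.
 */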
static void skx_mce_output_error(struct mem_ctl_info *mci,
                                 const struct mce *m,
                                 struct decoded_addr *res)
{
        enum hw_event_mc_err_type tp_event;
        char *optype;
        bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
        bool overflow = GET_BITFIELD(m->status, 62, 62);
        bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
        bool scrub_err = false;
        bool recoverable;
        int len;
        u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
        u32 mscod = GET_BITFIELD(m->status, 16, 31);
        u32 errcode = GET_BITFIELD(m->status, 0, 15);
        u32 optypenum = GET_BITFIELD(m->status, 4, 6);

        recoverable = GET_BITFIELD(m->status, 56, 56);

        if (uncorrected_error) {
                core_err_cnt = 1;
                if (ripv) {
                        tp_event = HW_EVENT_ERR_UNCORRECTED;
                } else {
                        tp_event = HW_EVENT_ERR_FATAL;
                }
        } else {
                tp_event = HW_EVENT_ERR_CORRECTED;
        }

        /*
         * According to Intel Architecture spec vol 3B,
         * Table 15-10 "IA32_MCi_Status [15:0] Compound Error Code Encoding"
         * memory errors should fit one of these masks:
         *      000f 0000 1mmm cccc (binary)
         *      000f 0010 1mmm cccc (binary)    [RAM used as cache]
         * where:
         *      f = Correction Report Filtering Bit. If 1, subsequent errors
         *          won't be shown
         *      mmm = error type
         *      cccc = channel
         * If the mask doesn't match, report an error to the parsing logic.
         */
        if (!((errcode & 0xef80) == 0x80 || (errcode & 0xef80) == 0x280)) {
                optype = "Can't parse: it is not a mem";
        } else {
                switch (optypenum) {
                case 0:
                        optype = "generic undef request error";
                        break;
                case 1:
                        optype = "memory read error";
                        break;
                case 2:
                        optype = "memory write error";
                        break;
                case 3:
                        optype = "addr/cmd error";
                        break;
                case 4:
                        optype = "memory scrubbing error";
                        scrub_err = true;
                        break;
                default:
                        optype = "reserved";
                        break;
                }
        }
        if (adxl_component_count) {
                len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
                               overflow ? " OVERFLOW" : "",
                               (uncorrected_error && recoverable) ? " recoverable" : "",
                               mscod, errcode, adxl_msg);
        } else {
                len = snprintf(skx_msg, MSG_SIZE,
                               "%s%s err_code:0x%04x:0x%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:0x%x col:0x%x",
                               overflow ? " OVERFLOW" : "",
                               (uncorrected_error && recoverable) ? " recoverable" : "",
                               mscod, errcode,
                               res->socket, res->imc, res->rank,
                               res->bank_group, res->bank_address, res->row, res->column);
        }

        if (skx_show_retry_rd_err_log)
                skx_show_retry_rd_err_log(res, skx_msg + len, MSG_SIZE - len, scrub_err);

        edac_dbg(0, "%s\n", skx_msg);

        /* Call the helper to output message */
        edac_mc_handle_error(tp_event, mci, core_err_cnt,
                             m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
                             res->channel, res->dimm, -1,
                             optype, skx_msg);
}

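/*
 * In a 2-level memory configuration, an error in the first (near)
 * memory level is reported with the compound error code
 * 0000 0010 1mmm cccc, i.e. (errcode & 0xef80) == 0x280.
 */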
static bool skx_error_in_1st_level_mem(const struct mce *m)
{
        u32 errcode;

        if (!skx_mem_cfg_2lm)
                return false;

        errcode = GET_BITFIELD(m->status, 0, 15);

        if ((errcode & 0xef80) != 0x280)
                return false;

        return true;
}

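/*
 * MCE notifier callback: decode a memory-related machine check to a
 * specific DIMM (via the ADXL firmware translation when available,
 * otherwise the driver's own decoder) and hand it to the EDAC core.
 */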
int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
                        void *data)
{
        struct mce *mce = (struct mce *)data;
        struct decoded_addr res;
        struct mem_ctl_info *mci;
        char *type;

        if (mce->kflags & MCE_HANDLED_CEC)
                return NOTIFY_DONE;

        /* ignore unless this is memory related with an address */
        if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
                return NOTIFY_DONE;

        memset(&res, 0, sizeof(res));
        res.addr = mce->addr;

        if (adxl_component_count) {
                if (!skx_adxl_decode(&res, skx_error_in_1st_level_mem(mce)))
                        return NOTIFY_DONE;
        } else if (!skx_decode || !skx_decode(&res)) {
                return NOTIFY_DONE;
        }

        mci = res.dev->imc[res.imc].mci;

        if (!mci)
                return NOTIFY_DONE;

        if (mce->mcgstatus & MCG_STATUS_MCIP)
                type = "Exception";
        else
                type = "Event";

        skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");

        skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: 0x%llx "
                      "Bank %d: 0x%llx\n", mce->extcpu, type,
                      mce->mcgstatus, mce->bank, mce->status);
        skx_mc_printk(mci, KERN_DEBUG, "TSC 0x%llx ", mce->tsc);
        skx_mc_printk(mci, KERN_DEBUG, "ADDR 0x%llx ", mce->addr);
        skx_mc_printk(mci, KERN_DEBUG, "MISC 0x%llx ", mce->misc);

        skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:0x%x TIME %llu SOCKET "
                      "%u APIC 0x%x\n", mce->cpuvendor, mce->cpuid,
                      mce->time, mce->socketid, mce->apicid);

        skx_mce_output_error(mci, mce, &res);

        mce->kflags |= MCE_HANDLED_EDAC;
        return NOTIFY_DONE;
}

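/*
 * Module-exit helper: unregister every memory controller and drop all
 * the PCI device references and mappings taken during probe.
 */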
void skx_remove(void)
{
        int i, j;
        struct skx_dev *d, *tmp;

        edac_dbg(0, "\n");

        list_for_each_entry_safe(d, tmp, &dev_edac_list, list) {
                list_del(&d->list);
                for (i = 0; i < NUM_IMC; i++) {
                        if (d->imc[i].mci)
                                skx_unregister_mci(&d->imc[i]);

                        if (d->imc[i].mdev)
                                pci_dev_put(d->imc[i].mdev);

                        if (d->imc[i].mbase)
                                iounmap(d->imc[i].mbase);

                        for (j = 0; j < NUM_CHANNELS; j++) {
                                if (d->imc[i].chan[j].cdev)
                                        pci_dev_put(d->imc[i].chan[j].cdev);
                        }
                }
                if (d->util_all)
                        pci_dev_put(d->util_all);
                if (d->pcu_cr3)
                        pci_dev_put(d->pcu_cr3);
                if (d->sad_all)
                        pci_dev_put(d->sad_all);
                if (d->uracu)
                        pci_dev_put(d->uracu);

                kfree(d);
        }
}