/*
 * Driver for the Pondicherry2 memory controller, as found on Intel
 * Apollo Lake (APL) and Denverton (DNV) SoCs.
 *
 * Error-address translation is a two stage process: a system physical
 * address is first mapped to a channel + "PMI" address (sys2pmi()),
 * then the PMI address is decoded to DIMM/rank/bank/row/column by the
 * SoC-specific pmi2mem operation.
 *
 * NOTE(review): the original file header (license/copyright block) was
 * lost in extraction — restore it from the upstream kernel source.
 */
0019 #include <linux/module.h>
0020 #include <linux/init.h>
0021 #include <linux/pci.h>
0022 #include <linux/pci_ids.h>
0023 #include <linux/slab.h>
0024 #include <linux/delay.h>
0025 #include <linux/edac.h>
0026 #include <linux/mmzone.h>
0027 #include <linux/smp.h>
0028 #include <linux/bitmap.h>
0029 #include <linux/math64.h>
0030 #include <linux/mod_devicetable.h>
0031 #include <linux/platform_data/x86/p2sb.h>
0032
0033 #include <asm/cpu_device_id.h>
0034 #include <asm/intel-family.h>
0035 #include <asm/processor.h>
0036 #include <asm/mce.h>
0037
0038 #include "edac_mc.h"
0039 #include "edac_module.h"
0040 #include "pnd2_edac.h"
0041
#define EDAC_MOD_STR "pnd2_edac"

/* Per-SoC channel/DIMM limits */
#define APL_NUM_CHANNELS	4	/* Apollo Lake: four PMI channels */
#define DNV_NUM_CHANNELS	2	/* Denverton: two PMI channels */
#define DNV_MAX_DIMMS		2	/* Denverton: max DIMMs per channel */

/* Which flavor of Pondicherry2 memory controller is being driven. */
enum type {
	APL,
	DNV,
};

/* Fully decoded DRAM coordinates for one error address. */
struct dram_addr {
	int chan;
	int dimm;
	int rank;
	int bank;
	int row;
	int col;
};

/* Driver-private data hung off the mem_ctl_info. */
struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];	/* per-channel index into dimms[] (APL decode) */
	u64 tolm, tohm;				/* top of low/high memory boundaries */
};
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
/*
 * Address-space regions carved out by the memory controller: the MOT
 * region (built from the b_cr_mot_out registers) and the three
 * asymmetric memory regions as0/as1/as2.
 */
static struct region {
	u64 base;
	u64 limit;
	u8 enabled;
} mot, as0, as1, as2;

/*
 * SoC-specific operations and parameters; "ops" is pointed at the
 * implementation matching the detected CPU (set elsewhere in this file).
 */
static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;	/* right-shift applied to the PMI address */
	int pmiidx_shift;	/* right-shift applied to the PMI index (channel) */
	int channels;
	int dimms_per_channel;
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg);
} *ops;

static struct mem_ctl_info *pnd2_mci;

/* Size of the buffers used for decoded-error message strings. */
#define PND2_MSG_SIZE	256
0101
0102
/* Debug/log macros */
#define pnd2_printk(level, fmt, arg...) \
	edac_printk(level, "pnd2", fmt, ##arg)

#define pnd2_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)

/* Channel-interleave select bits used inside the MOT region */
#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12	/* one slice, two channels */
#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13	/* two slices, two channels */
#define SELECTOR_DISABLED (-1)		/* "no interleave bit" marker */
#define _4GB (1ul << 32)

#define PMI_ADDRESS_WIDTH 31		/* bits in a PMI address / dimms[].bits[] */
#define PND_MAX_PHYS_BIT 39		/* highest valid physical address bit */

#define APL_ASYMSHIFT 28		/* granularity of APL asym region base/limit */
#define DNV_ASYMSHIFT 31		/* granularity of DNV asym region base/limit */
#define CH_HASH_MASK_LSB 6		/* LSB position of the channel hash mask */
#define SLICE_HASH_MASK_LSB 6		/* LSB position of the slice hash mask */
#define MOT_SLC_INTLV_BIT 12		/* slice interleave bit inside MOT region */
#define LOG2_PMI_ADDR_GRANULARITY 5
#define MOT_SHIFT 24			/* granularity of the MOT base/mask registers */

#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s) ((u64)(val) << (s))
0127
0128
0129
0130
0131
0132
/*
 * Apollo Lake sideband access goes through the P2SB bridge at PCI
 * device 0d.0. The registers below live in its config space; HIDE is
 * read/written to temporarily expose the (firmware-hidden) device.
 */
static struct pci_bus *p2sb_bus;
#define P2SB_DEVFN PCI_DEVFN(0xd, 0)
#define P2SB_ADDR_OFF 0xd0	/* request: (port << 24) | offset */
#define P2SB_DATA_OFF 0xd4	/* data in/out */
#define P2SB_STAT_OFF 0xd8	/* status: (opcode << 8) | busy bit */
#define P2SB_ROUT_OFF 0xda	/* routing (written as zero here) */
#define P2SB_EADD_OFF 0xdc	/* extended address (written as zero here) */
#define P2SB_HIDE_OFF 0xe1	/* non-zero hides the P2SB device */

#define P2SB_BUSY 1

#define P2SB_READ(size, off, ptr) \
	pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
#define P2SB_WRITE(size, off, val) \
	pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
0148
0149 static bool p2sb_is_busy(u16 *status)
0150 {
0151 P2SB_READ(word, P2SB_STAT_OFF, status);
0152
0153 return !!(*status & P2SB_BUSY);
0154 }
0155
/*
 * Perform one 32-bit sideband register read on Apollo Lake.
 *
 * The P2SB device may be hidden by firmware: unhide it if needed, post
 * the request (port/offset/opcode), poll the busy bit, collect the data
 * and finally restore the original hidden state.
 *
 * Returns -EAGAIN if the interface is busy on entry, -EBUSY on poll
 * timeout, otherwise the two response-status bits from the status
 * register (0 == success).
 */
static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
	int retries = 0xff, ret;
	u16 status;
	u8 hidden;

	/* Unhide the P2SB device, remembering the original state. */
	P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);

	if (p2sb_is_busy(&status)) {
		ret = -EAGAIN;
		goto out;
	}

	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

	while (p2sb_is_busy(&status)) {
		if (retries-- == 0) {
			ret = -EBUSY;
			goto out;
		}
	}

	P2SB_READ(dword, P2SB_DATA_OFF, data);
	ret = (status >> 1) & 0x3;
out:
	/* Restore the hide state if we changed it above. */
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);

	return ret;
}
0194
/*
 * Read a 4- or 8-byte sideband register by issuing one or two 32-bit
 * sideband reads (high dword first for 64-bit registers, then falling
 * through to read the low dword).
 */
static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
		fallthrough;	/* low dword is read for both sizes */
	case 4:
		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
			    sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}
0213
0214 static u64 get_mem_ctrl_hub_base_addr(void)
0215 {
0216 struct b_cr_mchbar_lo_pci lo;
0217 struct b_cr_mchbar_hi_pci hi;
0218 struct pci_dev *pdev;
0219
0220 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
0221 if (pdev) {
0222 pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
0223 pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
0224 pci_dev_put(pdev);
0225 } else {
0226 return 0;
0227 }
0228
0229 if (!lo.enable) {
0230 edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
0231 return 0;
0232 }
0233
0234 return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
0235 }
0236
#define DNV_MCHBAR_SIZE		0x8000
#define DNV_SB_PORT_SIZE	0x10000

/*
 * Denverton register read. op == 4 reads from PCI config space of
 * device 0x1980; everything else is MMIO: either behind the memory
 * controller hub base (op == 0, port == 0x4c) or inside a 64 KiB
 * per-port window in the P2SB BAR.
 */
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	void __iomem *base;
	struct resource r;
	int ret;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			memset(&r, 0, sizeof(r));

			r.start = get_mem_ctrl_hub_base_addr();
			if (!r.start)
				return -ENODEV;
			r.end = r.start + DNV_MCHBAR_SIZE - 1;
		} else {
			/* MMIO via sideband register base address */
			ret = p2sb_bar(NULL, 0, &r);
			if (ret)
				return ret;

			r.start += (port << 16);
			r.end = r.start + DNV_SB_PORT_SIZE - 1;
		}

		base = ioremap(r.start, resource_size(&r));
		if (!base)
			return -ENODEV;

		if (sz == 8)
			*(u64 *)data = readq(base + off);
		else
			*(u32 *)data = readl(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
		 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}
0289
/*
 * Read register "regname" from sideband port "port" into *regp. The
 * offset, read opcode (and for RD_REG the port too) come from the
 * per-register definitions in pnd2_edac.h via token pasting.
 */
#define RD_REGP(regp, regname, port)	\
	ops->rd_reg(port,		\
		regname##_offset,	\
		regname##_r_opcode,	\
		regp, sizeof(struct regname), \
		#regname)

#define RD_REG(regp, regname)		\
	ops->rd_reg(regname ## _port,	\
		regname##_offset,	\
		regname##_r_opcode,	\
		regp, sizeof(struct regname), \
		#regname)

/* System address-map parameters derived in get_registers(). */
static u64 top_lm, top_hm;	/* top of low/high memory */
static bool two_slices;
static bool two_channels;	/* two channels per slice */

static u8 sym_chan_mask;	/* channels used by the symmetric region */
static u8 asym_chan_mask;	/* channels used by the asymmetric regions */
static u8 chan_mask;		/* union of the two masks above */

static int slice_selector = -1;	/* address bit selecting the slice (-1: none) */
static int chan_selector = -1;	/* address bit selecting the channel (-1: none) */
static u64 slice_hash_mask;
static u64 chan_hash_mask;
0316
0317 static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
0318 {
0319 rp->enabled = 1;
0320 rp->base = base;
0321 rp->limit = limit;
0322 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
0323 }
0324
/*
 * Build a region from a base + mask pair (used for the MOT region).
 * The mask must be a single contiguous run of bits reaching up to
 * PND_MAX_PHYS_BIT, and the base must be aligned to it; otherwise the
 * values are reported as a firmware bug and the region stays disabled.
 */
static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	/* Limit = base with all bits below the mask set, clamped to phys range. */
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}
0344
0345 static bool in_region(struct region *rp, u64 addr)
0346 {
0347 if (!rp->enabled)
0348 return false;
0349
0350 return rp->base <= addr && addr <= rp->limit;
0351 }
0352
/*
 * Build the mask of channels participating in symmetric interleave:
 * bits 0/1 are slice 0's channels, bits 2/3 slice 1's. When channel 1
 * is disabled (or PMI dual data mode is on), only the even channels
 * (bits 0 and 2) remain.
 */
static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
{
	int mask = 0;

	if (!p->slice_0_mem_disabled)
		mask |= p->sym_slice0_channel_enabled;

	if (!p->slice_1_disabled)
		mask |= p->sym_slice1_channel_enabled << 2;

	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}
0368
/*
 * Build the mask of channels used by the asymmetric regions. 2-way
 * interleave selects one of four fixed channel pairs; the per-slice
 * asymmetric regions then add their selected channel. Disabled slices
 * and channels are masked back out at the end.
 */
static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
			 struct b_cr_asym_mem_region0_mchbar *as0,
			 struct b_cr_asym_mem_region1_mchbar *as1,
			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
	int mask = 0;

	if (as2way->asym_2way_interleave_enable)
		mask = intlv[as2way->asym_2way_intlv_mode];
	if (as0->slice0_asym_enable)
		mask |= (1 << as0->slice0_asym_channel_select);
	if (as1->slice1_asym_enable)
		mask |= (4 << as1->slice1_asym_channel_select);
	if (p->slice_0_mem_disabled)
		mask &= 0xc;	/* keep only slice 1 channels */
	if (p->slice_1_disabled)
		mask &= 0x3;	/* keep only slice 0 channels */
	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;	/* keep only channel 0 of each slice */

	return mask;
}
0392
/* Shadow copies of the address-map registers, filled by get_registers(). */
static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;

/* Apollo Lake: sideband port per channel plus per-channel DRAM parameters. */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton: sideband port per channel plus per-channel decode registers. */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
0423
/*
 * Build an asymmetric region from an Apollo Lake region register;
 * base/limit are in APL_ASYMSHIFT-sized units, limit is inclusive.
 */
static void apl_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region0_mchbar *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
		  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
		  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}
0433
/*
 * Build an asymmetric region from a Denverton region register;
 * base/limit are in DNV_ASYMSHIFT-sized units, limit is inclusive.
 */
static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region_denverton *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
		  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
		  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}
0443
/*
 * Read the Apollo Lake specific registers. The per-channel d_cr_drp0
 * read may fail for unpopulated channels, so succeed as long as at
 * least one channel could be read.
 */
static int apl_get_registers(void)
{
	int ret = -ENODEV;
	int i;

	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
		return -ENODEV;

	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
			ret = 0;

	return ret;
}
0462
/*
 * Read the Denverton specific registers: the scheduler register plus
 * the full set of per-channel decode registers. Unlike Apollo Lake,
 * any failed read is fatal here.
 */
static int dnv_get_registers(void)
{
	int i;

	if (RD_REG(&dsch, d_cr_dsch))
		return -ENODEV;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
		    RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
		    RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
		    RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
		    RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
		    RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
		    RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
		    RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
			return -ENODEV;

	return 0;
}
0483
0484
0485
0486
0487
0488
/*
 * Read all address-map registers and derive the parameters used for
 * system-to-PMI address translation: memory tops, enabled regions,
 * channel masks and the slice/channel interleave selectors and hash
 * masks.
 */
static int get_registers(void)
{
	/* Maps chash.interleave_mode to the address bit used for interleave. */
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
	    RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
	    RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
	    RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
	    RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
	    RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
	    RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
	    RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
			  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
			  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
			       U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
			       U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	two_slices = !chash.slice_1_disabled &&
		     !chash.slice_0_mem_disabled &&
		     (chash.sym_slice0_channel_enabled != 0) &&
		     (chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
		       !chash.enable_pmi_dual_data_mode &&
		       ((chash.sym_slice0_channel_enabled == 3) ||
			(chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	/* Pick the address bits that select slice and/or channel. */
	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}
0586
0587
0588 static u64 remove_mmio_gap(u64 sys)
0589 {
0590 return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
0591 }
0592
0593
0594 static void remove_addr_bit(u64 *addr, int bitidx)
0595 {
0596 u64 mask;
0597
0598 if (bitidx == -1)
0599 return;
0600
0601 mask = (1ull << bitidx) - 1;
0602 *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
0603 }
0604
0605
0606 static int hash_by_mask(u64 addr, u64 mask)
0607 {
0608 u64 result = addr & mask;
0609
0610 result = (result >> 32) ^ result;
0611 result = (result >> 16) ^ result;
0612 result = (result >> 8) ^ result;
0613 result = (result >> 4) ^ result;
0614 result = (result >> 2) ^ result;
0615 result = (result >> 1) ^ result;
0616
0617 return (int)result & 1;
0618 }
0619
0620
0621
0622
0623
/*
 * Translate a system physical address to a PMI channel index (*pmiidx)
 * and PMI address (*pmiaddr). On failure, *msg describes the problem.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
			    MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Is the address inside the MOT region (fixed interleave bits)? */
	bool mot_hit = in_region(&mot, addr);
	/* Number of channels enabled for symmetric interleave. */
	int sym_channels = hweight8(sym_chan_mask);
	/*
	 * The shift applied to asymmetric region bases is determined by
	 * how many symmetric channels the memory below them is spread over.
	 */
	int sym_chan_shift = sym_channels >> 1;

	/* Give up if the address is out of range or inside the MMIO gap. */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	    (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Work on a contiguous address with the MMIO gap removed. */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		/* Asymmetric region of slice 0: fixed channel. */
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		/* Asymmetric region of slice 1: fixed channel. */
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		/* 2-way interleaved asymmetric region. */
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			   hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Symmetric region: derive slice and channel from hashes. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				/* MOT uses a fixed slice interleave bit. */
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
				MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the channel interleave bit, then the slice interleave bit. */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}
0728
0729
/*
 * Encoding for dimm_geometry.bits[]: each PMI address bit maps to a
 * column (C), bank (B) or row (R) bit, or to rank select (RS). The low
 * nibble carries the bit index within that field; 0 means unused.
 */
#define C(n) (0x10 | (n))	/* column bit n */
#define B(n) (0x20 | (n))	/* bank bit n */
#define R(n) (0x40 | (n))	/* row bit n */
#define RS (0x80)		/* rank select */

/* Address decoder (AMAP) page-size settings */
#define AMAP_1KB 0
#define AMAP_2KB 1
#define AMAP_4KB 2
#define AMAP_RSVD 3

/* DRAM device density */
#define DEN_4Gb 0
#define DEN_8Gb 2

/* DRAM device width */
#define X8 0
#define X16 1
0748
/*
 * Geometry table for Apollo Lake PMI address decode: one entry per
 * supported (page size, density, width) combination. bits[i] says what
 * DRAM coordinate bit corresponds to PMI address bit i, using the
 * C()/B()/R()/RS encoding above; trailing zeros are unused bits.
 */
static struct dimm_geometry {
	u8 addrdec;
	u8 dden;
	u8 dwid;
	u8 rowbits, colbits;
	u16 bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	}
};
0877
0878 static int bank_hash(u64 pmiaddr, int idx, int shft)
0879 {
0880 int bhash = 0;
0881
0882 switch (idx) {
0883 case 0:
0884 bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
0885 break;
0886 case 1:
0887 bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
0888 bhash ^= ((pmiaddr >> 22) & 1) << 1;
0889 break;
0890 case 2:
0891 bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
0892 break;
0893 }
0894
0895 return bhash;
0896 }
0897
0898 static int rank_hash(u64 pmiaddr)
0899 {
0900 return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
0901 }
0902
0903
/*
 * Apollo Lake: walk the geometry table for this channel's DIMM and
 * scatter the PMI address bits into column/bank/row/rank. Returns
 * -EINVAL (with *msg filled in) if a set address bit has no mapping.
 */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * If only one of rken0/rken1 is set, the DIMM is single
		 * rank: the rank-select bit is absent from the PMI
		 * address, so skip over the RS table slot from here on.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			/* Unused table slot: the address bit must be zero. */
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}
0970
0971
/* Extract PMI address bit "in" and place it at position "out". */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))

/*
 * Denverton: decode a PMI address using the per-channel dmap* register
 * fields, each of which names the PMI address bit carrying a given
 * rank/bank/row/column bit.
 */
static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	/* Rank select bits 0 and 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Ranks 0/1 and 2/3 live on different DIMMs; dimmflip swaps
	 * which DIMM slot holds the lower rank pair.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	/* Bank (and for DDR4 bank-group) bits, with optional XOR hash. */
	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit channel: hash with CA3 */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit channel: hash with CA4 */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	/* Row bits; 31 (rows 14-17) means "bit not used". */
	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	/* Column bits; ca11 == 0x3f means "bit not used" (DDR3 only). */
	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}
1049
1050 static int check_channel(int ch)
1051 {
1052 if (drp0[ch].dramtype != 0) {
1053 pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
1054 return 1;
1055 } else if (drp0[ch].eccen == 0) {
1056 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1057 return 1;
1058 }
1059 return 0;
1060 }
1061
1062 static int apl_check_ecc_active(void)
1063 {
1064 int i, ret = 0;
1065
1066
1067 for (i = 0; i < APL_NUM_CHANNELS; i++)
1068 if (chan_mask & BIT(i))
1069 ret += check_channel(i);
1070 return ret ? -EINVAL : 0;
1071 }
1072
1073 #define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
1074
1075 static int check_unit(int ch)
1076 {
1077 struct d_cr_drp *d = &drp[ch];
1078
1079 if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
1080 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1081 return 1;
1082 }
1083 return 0;
1084 }
1085
1086 static int dnv_check_ecc_active(void)
1087 {
1088 int i, ret = 0;
1089
1090 for (i = 0; i < DNV_NUM_CHANNELS; i++)
1091 ret += check_unit(i);
1092 return ret ? -EINVAL : 0;
1093 }
1094
/*
 * Translate a system error address to channel + DRAM coordinates:
 * first sys2pmi(), then the SoC-specific PMI-to-DIMM decode.
 */
static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
				 struct dram_addr *daddr, char *msg)
{
	u64 pmiaddr;
	u32 pmiidx;
	int ret;

	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
	if (ret)
		return ret;

	pmiaddr >>= ops->pmiaddr_shift;
	/* Convert PMI channel index to DIMM channel index. */
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (ret)
		return ret;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}
1120
/*
 * Decode one machine-check record into a DRAM address and report it to
 * the EDAC core. Records without a valid address are silently dropped;
 * records whose address cannot be translated are reported with the
 * translation failure message instead.
 */
static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	/* Uncorrected + RIPV clear means we cannot safely return: fatal. */
	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
			    HW_EVENT_ERR_CORRECTED;

	/*
	 * Memory errors use the MCACOD compound error code pattern
	 * 000f 0000 1mmm cccc (binary), where mmm is the transaction
	 * type and cccc the channel; 0xef80 masks everything except f,
	 * mmm and cccc, so only the mandatory bit 7 must remain set.
	 */
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address. */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
			     m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}
1199
/*
 * Populate DIMM information for Apollo Lake: up to four channels with a
 * single DIMM slot each. Geometry is found by matching the channel's
 * d_cr_drp0 decode fields against the "dimms" lookup table.
 */
static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt *pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64 capacity;
	int i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		/* Skip channels that were not detected as active. */
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = edac_get_dimm(mci, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		/* Match address decode, density and device width against known geometries. */
		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		/* Record which geometry matched for this channel. */
		pvt->dimm_geom[i] = g;
		/*
		 * Capacity in 8-byte units: ranks * 8 (presumably banks) *
		 * rows * columns; >> (20 - 3) converts 8-byte units to MiB.
		 */
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
			   (1ul << dimms[g].colbits);
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		/* Channels pair up into slices: channel i is slice i/2, chan i%2. */
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}
1242
/* Map a DNV dimmdwid register field value to an EDAC device width. */
static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};
1246
/*
 * Populate DIMM information for Denverton: two channels with up to two
 * DIMM slots each. Row/column geometry is inferred from the DDR
 * address-map (dmap) registers rather than a lookup table.
 */
static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64 capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		/* DDR3: colbits is determined per channel from dmap1 below. */
		memtype = MEM_DDR3;
		banks = 8;
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		/*
		 * Find the highest row-address bit in use. A field value of
		 * 31 presumably means "not mapped" — TODO confirm against
		 * the dmap4 register definition.
		 */
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		/* DDR3 column width depends on whether CA11 is mapped (0x3f = unmapped). */
		if (memtype == MEM_DDR3) {
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];

		/* Rank-enable bits: rken0/rken1 belong to DIMM 0 ... */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* ... and rken2/rken3 to DIMM 1. */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = edac_get_dimm(mci, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			/* Capacity in 8-byte units; >> (20 - 3) converts to MiB. */
			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			/*
			 * NOTE(review): the index looks swapped — DIMM 0 (j == 0)
			 * reads dimmdwid1 while DIMM 1 reads dimmdwid0; verify
			 * against the DRP register specification before changing.
			 */
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}
1309
1310 static int pnd2_register_mci(struct mem_ctl_info **ppmci)
1311 {
1312 struct edac_mc_layer layers[2];
1313 struct mem_ctl_info *mci;
1314 struct pnd2_pvt *pvt;
1315 int rc;
1316
1317 rc = ops->check_ecc();
1318 if (rc < 0)
1319 return rc;
1320
1321
1322 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1323 layers[0].size = ops->channels;
1324 layers[0].is_virt_csrow = false;
1325 layers[1].type = EDAC_MC_LAYER_SLOT;
1326 layers[1].size = ops->dimms_per_channel;
1327 layers[1].is_virt_csrow = true;
1328 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1329 if (!mci)
1330 return -ENOMEM;
1331
1332 pvt = mci->pvt_info;
1333 memset(pvt, 0, sizeof(*pvt));
1334
1335 mci->mod_name = EDAC_MOD_STR;
1336 mci->dev_name = ops->name;
1337 mci->ctl_name = "Pondicherry2";
1338
1339
1340 ops->get_dimm_config(mci);
1341
1342 if (edac_mc_add_mc(mci)) {
1343 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1344 edac_mc_free(mci);
1345 return -EINVAL;
1346 }
1347
1348 *ppmci = mci;
1349
1350 return 0;
1351 }
1352
/* Unregister and free the EDAC memory controller instance, if any. */
static void pnd2_unregister_mci(struct mem_ctl_info *mci)
{
	if (unlikely(!mci || !mci->pvt_info)) {
		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	/* Remove from the EDAC core first, then release the allocation. */
	edac_mc_del_mc(NULL);
	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	edac_mc_free(mci);
}
1365
1366
1367
1368
1369
/*
 * Machine-check decode-chain callback: pick out memory errors reported
 * via MCE, decode them and hand them to the EDAC core.
 */
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct dram_addr daddr;
	char *type;

	/* Bail if not probed yet, or the CEC already consumed this record. */
	mci = pnd2_mci;
	if (!mci || (mce->kflags & MCE_HANDLED_CEC))
		return NOTIFY_DONE;

	/*
	 * Only decode memory errors: in the MCACOD (low bits of
	 * MCi_STATUS) bit 7 must be set with bits 15:13 and 11:8 clear
	 * (bit 12 is masked out); everything else is left to other
	 * handlers on the chain.
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	/* MCIP set means this arrived as a machine-check exception. */
	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
		       mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
		       mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);

	pnd2_mce_output_error(mci, mce, &daddr);

	/* Mark the record so later consumers know EDAC handled it. */
	mce->kflags |= MCE_HANDLED_EDAC;
	return NOTIFY_OK;
}
1410
/* Hook into the machine-check decode chain at EDAC priority. */
static struct notifier_block pnd2_mce_dec = {
	.notifier_call = pnd2_mce_check_error,
	.priority = MCE_PRIO_EDAC,
};
1415
1416 #ifdef CONFIG_EDAC_DEBUG
1417
1418
1419
1420
/* Fake system address written via debugfs to exercise address decode. */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
/* Text buffer exposed read-only through the "pnd2_debug_results" blob. */
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};
1429
1430 static int debugfs_u64_set(void *data, u64 val)
1431 {
1432 struct dram_addr daddr;
1433 struct mce m;
1434
1435 *(u64 *)data = val;
1436 m.mcgstatus = 0;
1437
1438 m.status = MCI_STATUS_ADDRV + 0x9f;
1439 m.addr = val;
1440 pnd2_mce_output_error(pnd2_mci, &m, &daddr);
1441 snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
1442 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1443 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
1444 pnd2_blob.size = strlen(pnd2_blob.data);
1445
1446 return 0;
1447 }
1448 DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
1449
/* Create the pnd2_test debugfs directory with a write-only address
 * trigger and a read-only results blob. The directory must exist
 * before the files are created under it.
 */
static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
				 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}
1457
/* Remove the pnd2_test directory and everything under it. */
static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
1462 #else
/* No-op stubs when CONFIG_EDAC_DEBUG is disabled. */
static void setup_pnd2_debug(void) {}
static void teardown_pnd2_debug(void) {}
1465 #endif
1466
1467
1468 static int pnd2_probe(void)
1469 {
1470 int rc;
1471
1472 edac_dbg(2, "\n");
1473 rc = get_registers();
1474 if (rc)
1475 return rc;
1476
1477 return pnd2_register_mci(&pnd2_mci);
1478 }
1479
/* Tear down the EDAC MC instance created by pnd2_probe(). */
static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}
1485
/* Apollo Lake dunit: four channels, one DIMM slot per channel. */
static struct dunit_ops apl_ops = {
	.name = "pnd2/apl",
	.type = APL,
	.pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
	.pmiidx_shift = 0,
	.channels = APL_NUM_CHANNELS,
	.dimms_per_channel = 1,
	.rd_reg = apl_rd_reg,
	.get_registers = apl_get_registers,
	.check_ecc = apl_check_ecc_active,
	.mk_region = apl_mk_region,
	.get_dimm_config = apl_get_dimm_config,
	.pmi2mem = apl_pmi2mem,
};
1500
/* Denverton dunit: two channels, up to two DIMM slots per channel. */
static struct dunit_ops dnv_ops = {
	.name = "pnd2/dnv",
	.type = DNV,
	.pmiaddr_shift = 0,
	.pmiidx_shift = 1,
	.channels = DNV_NUM_CHANNELS,
	.dimms_per_channel = 2,
	.rd_reg = dnv_rd_reg,
	.get_registers = dnv_get_registers,
	.check_ecc = dnv_check_ecc_active,
	.mk_region = dnv_mk_region,
	.get_dimm_config = dnv_get_dimm_config,
	.pmi2mem = dnv_pmi2mem,
};
1515
/* Goldmont selects the Apollo Lake ops; Goldmont-D the Denverton ops. */
static const struct x86_cpu_id pnd2_cpuids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &apl_ops),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &dnv_ops),
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
1522
/* Module entry: match the CPU, probe the dunit and hook into the MCE chain. */
static int __init pnd2_init(void)
{
	const struct x86_cpu_id *id;
	const char *owner;
	int rc;

	edac_dbg(2, "\n");

	/* Only one EDAC driver may own the platform at a time. */
	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	/* NOTE(review): presumably the dunit registers are not available
	 * when running under a hypervisor — confirm; we just bail out.
	 */
	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(pnd2_cpuids);
	if (!id)
		return -ENODEV;

	/* Select the Apollo Lake or Denverton access methods. */
	ops = (struct dunit_ops *)id->driver_data;

	if (ops->type == APL) {
		/* Apollo Lake accesses the dunit through the P2SB on bus 0. */
		p2sb_bus = pci_find_bus(0, 0);
		if (!p2sb_bus)
			return -ENODEV;
	}

	opstate_init();

	rc = pnd2_probe();
	if (rc < 0) {
		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
		return rc;
	}

	if (!pnd2_mci)
		return -ENODEV;

	/* Start receiving machine-check records, then expose debugfs knobs. */
	mce_register_decode_chain(&pnd2_mce_dec);
	setup_pnd2_debug();

	return 0;
}
1567
/* Module exit: undo pnd2_init() in reverse order. */
static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}
1575
module_init(pnd2_init);
module_exit(pnd2_exit);

/* Read-only module parameter selecting polling vs. NMI error reporting. */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");