/*
 * Freescale DDR memory controller EDAC driver.
 *
 * Reports single-bit (corrected) and multi-bit (uncorrected) ECC errors
 * raised by the Freescale/NXP DDR memory controller, and for single-bit
 * errors decodes which data or check bit flipped.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/edac.h>
#include <linux/smp.h>
#include <linux/gfp.h>

#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include "edac_module.h"
#include "fsl_ddr_edac.h"

#define EDAC_MOD_STR "fsl_ddr_edac"

static int edac_mc_idx;

static u32 orig_ddr_err_disable;
static u32 orig_ddr_err_sbe;
static bool little_endian;

static inline u32 ddr_in32(void __iomem *addr)
{
        return little_endian ? ioread32(addr) : ioread32be(addr);
}

static inline void ddr_out32(void __iomem *addr, u32 value)
{
        if (little_endian)
                iowrite32(value, addr);
        else
                iowrite32be(value, addr);
}
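
/*
 * All controller register accesses below go through ddr_in32()/ddr_out32()
 * so that a single driver copes with either register endianness: the
 * little_endian flag is set in fsl_mc_err_probe() from the optional
 * "little-endian" device-tree property, and big-endian access is the
 * default when the property is absent.
 */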

#ifdef CONFIG_EDAC_DEBUG

/*
 * Error-injection knobs, exposed as sysfs attributes on the memory
 * controller device when EDAC debugging is enabled.
 */

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
                                          struct device_attribute *mattr,
                                          char *data)
{
        struct mem_ctl_info *mci = to_mci(dev);
        struct fsl_mc_pdata *pdata = mci->pvt_info;

        return sprintf(data, "0x%08x",
                       ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI));
}

static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
                                          struct device_attribute *mattr,
                                          char *data)
{
        struct mem_ctl_info *mci = to_mci(dev);
        struct fsl_mc_pdata *pdata = mci->pvt_info;

        return sprintf(data, "0x%08x",
                       ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO));
}

static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
                                       struct device_attribute *mattr,
                                       char *data)
{
        struct mem_ctl_info *mci = to_mci(dev);
        struct fsl_mc_pdata *pdata = mci->pvt_info;

        return sprintf(data, "0x%08x",
                       ddr_in32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT));
}

static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
                                           struct device_attribute *mattr,
                                           const char *data, size_t count)
{
        struct mem_ctl_info *mci = to_mci(dev);
        struct fsl_mc_pdata *pdata = mci->pvt_info;
        unsigned long val;
        int rc;

        if (isdigit(*data)) {
                rc = kstrtoul(data, 0, &val);
                if (rc)
                        return rc;

                ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI, val);
                return count;
        }
        return 0;
}

static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
                                           struct device_attribute *mattr,
                                           const char *data, size_t count)
{
        struct mem_ctl_info *mci = to_mci(dev);
        struct fsl_mc_pdata *pdata = mci->pvt_info;
        unsigned long val;
        int rc;

        if (isdigit(*data)) {
                rc = kstrtoul(data, 0, &val);
                if (rc)
                        return rc;

                ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO, val);
                return count;
        }
        return 0;
}

static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
                                        struct device_attribute *mattr,
                                        const char *data, size_t count)
{
        struct mem_ctl_info *mci = to_mci(dev);
        struct fsl_mc_pdata *pdata = mci->pvt_info;
        unsigned long val;
        int rc;

        if (isdigit(*data)) {
                rc = kstrtoul(data, 0, &val);
                if (rc)
                        return rc;

                ddr_out32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT, val);
                return count;
        }
        return 0;
}

static DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
                   fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
static DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
                   fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
static DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
                   fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
#endif

static struct attribute *fsl_ddr_dev_attrs[] = {
#ifdef CONFIG_EDAC_DEBUG
        &dev_attr_inject_data_hi.attr,
        &dev_attr_inject_data_lo.attr,
        &dev_attr_inject_ctrl.attr,
#endif
        NULL
};

ATTRIBUTE_GROUPS(fsl_ddr_dev);
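
/*
 * ATTRIBUTE_GROUPS(fsl_ddr_dev) generates the fsl_ddr_dev_groups array that
 * is handed to edac_mc_add_mc_with_groups() in fsl_mc_err_probe(), so the
 * injection attributes above appear on the EDAC MC device only when
 * CONFIG_EDAC_DEBUG is enabled.
 */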

/*
 * Syndrome generation masks for the 64-bit ECC code used by the controller.
 * Each pair of entries is one 64-bit parity mask, split into the mask for
 * the high capture word and the mask for the low capture word; pair i
 * generates ECC check bit i (see calculate_ecc() below).
 */
static unsigned int ecc_table[16] = {
        /* high-word mask, low-word mask */
        0xf00fe11e, 0xc33c0ff7,
        0x00ff00ff, 0x00fff0ff,
        0x0f0f0f0f, 0x0f0fff00,
        0x11113333, 0x7777000f,
        0x22224444, 0x8888222f,
        0x44448888, 0xffff4441,
        0x8888ffff, 0x11118882,
        0xffff1111, 0x22221114,
};

/*
 * Calculate the ECC check bits for the 64-bit data word given as high:low.
 * Check bit i is the XOR (parity) of the data bits selected by mask pair i
 * of ecc_table[].
 */
static u8 calculate_ecc(u32 high, u32 low)
{
        u32 mask_low;
        u32 mask_high;
        int bit_cnt;
        u8 ecc = 0;
        int i;
        int j;

        for (i = 0; i < 8; i++) {
                mask_high = ecc_table[i * 2];
                mask_low = ecc_table[i * 2 + 1];
                bit_cnt = 0;

                for (j = 0; j < 32; j++) {
                        if ((mask_high >> j) & 1)
                                bit_cnt ^= (high >> j) & 1;
                        if ((mask_low >> j) & 1)
                                bit_cnt ^= (low >> j) & 1;
                }

                ecc |= bit_cnt << i;
        }

        return ecc;
}

/*
 * Return the ECC syndrome produced by a single-bit error on data line
 * @bit (0-63, where bits 0-31 live in the low capture word and bits
 * 32-63 in the high capture word).
 */
static u8 syndrome_from_bit(unsigned int bit)
{
        int i;
        u8 syndrome = 0;

        /*
         * Walk the mask pairs, starting at the odd (low-word) entries when
         * the bit is in the low word and at the even (high-word) entries
         * otherwise; pair i contributes syndrome bit i.
         */
        for (i = bit < 32; i < 16; i += 2)
                syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);

        return syndrome;
}
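
/*
 * Note on the decode below: for a word that differs from the original in a
 * single data bit b, calculate_ecc() of the captured data XORed with the
 * captured ECC equals syndrome_from_bit(b), which is why sbe_ecc_decode()
 * can simply search all 64 candidate bits for a matching syndrome.
 */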

/*
 * Decode the captured data and ECC into the single faulty bit: either a
 * data bit (0-63) or an ECC check bit (0-7).  Only single-bit errors can
 * be decoded; both outputs are left at -1 if no faulty bit is identified.
 */
static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
                           int *bad_data_bit, int *bad_ecc_bit)
{
        int i;
        u8 syndrome;

        *bad_data_bit = -1;
        *bad_ecc_bit = -1;

        /*
         * Recompute the ECC over the captured data and XOR it with the
         * captured ECC; the result is the error syndrome.
         */
        syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;

        /* A syndrome matching one of the 64 data lines: that data bit flipped. */
        for (i = 0; i < 64; i++) {
                if (syndrome == syndrome_from_bit(i)) {
                        *bad_data_bit = i;
                        return;
                }
        }

        /* Otherwise a single set syndrome bit points at a flipped ECC bit. */
        for (i = 0; i < 8; i++) {
                if ((syndrome >> i) & 0x1) {
                        *bad_ecc_bit = i;
                        return;
                }
        }
}

/* Combine two 32-bit register values into one 64-bit value (high:low). */
#define make64(high, low) (((u64)(high) << 32) | (low))

static void fsl_mc_check(struct mem_ctl_info *mci)
{
        struct fsl_mc_pdata *pdata = mci->pvt_info;
        struct csrow_info *csrow;
        u32 bus_width;
        u32 err_detect;
        u32 syndrome;
        u64 err_addr;
        u32 pfn;
        int row_index;
        u32 cap_high;
        u32 cap_low;
        int bad_data_bit;
        int bad_ecc_bit;

        err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
        if (!err_detect)
                return;

        fsl_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
                      err_detect);

        /*
         * No further processing unless a single-bit or multi-bit ECC error
         * is flagged; just clear whatever else was latched and return.
         */
        if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
                ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
                return;
        }

        syndrome = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ECC);

        /* Mask off the appropriate bits of the syndrome based on bus width. */
        bus_width = (ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG) &
                     DSC_DBW_MASK) ? 32 : 64;
        if (bus_width == 64)
                syndrome &= 0xff;
        else
                syndrome &= 0xffff;

        err_addr = make64(
                ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_EXT_ADDRESS),
                ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ADDRESS));
        pfn = err_addr >> PAGE_SHIFT;

        /* Find the chip-select row that covers the faulting page. */
        for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
                csrow = mci->csrows[row_index];
                if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
                        break;
        }

        cap_high = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_HI);
        cap_low = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_LO);

        /*
         * For a single-bit error on a 64-bit wide bus, work out which data
         * or ECC bit flipped so both the captured and the expected
         * (corrected) values can be reported.  Only XOR in a correction for
         * a bit that was actually identified: the decode may leave either
         * index at -1, and shifting by a negative count is undefined.
         */
        if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
                u32 exp_high = cap_high;
                u32 exp_low = cap_low;
                u32 exp_ecc = syndrome;

                sbe_ecc_decode(cap_high, cap_low, syndrome,
                               &bad_data_bit, &bad_ecc_bit);

                if (bad_data_bit >= 0) {
                        fsl_mc_printk(mci, KERN_ERR,
                                      "Faulty Data bit: %d\n", bad_data_bit);
                        if (bad_data_bit < 32)
                                exp_low ^= 1u << bad_data_bit;
                        else
                                exp_high ^= 1u << (bad_data_bit - 32);
                }
                if (bad_ecc_bit >= 0) {
                        fsl_mc_printk(mci, KERN_ERR,
                                      "Faulty ECC bit: %d\n", bad_ecc_bit);
                        exp_ecc ^= 1u << bad_ecc_bit;
                }

                fsl_mc_printk(mci, KERN_ERR,
                              "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
                              exp_high, exp_low, exp_ecc);
        }

        fsl_mc_printk(mci, KERN_ERR,
                      "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
                      cap_high, cap_low, syndrome);
        fsl_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
        fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);

        /* The csrow search above ran off the end: the PFN is not in any row. */
        if (row_index == mci->nr_csrows)
                fsl_mc_printk(mci, KERN_ERR, "PFN out of range!\n");

        if (err_detect & DDR_EDE_SBE)
                edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                                     pfn, err_addr & ~PAGE_MASK, syndrome,
                                     row_index, 0, -1,
                                     mci->ctl_name, "");

        if (err_detect & DDR_EDE_MBE)
                edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
                                     pfn, err_addr & ~PAGE_MASK, syndrome,
                                     row_index, 0, -1,
                                     mci->ctl_name, "");

        /* Clear the error bits that have just been handled. */
        ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
}

static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
{
        struct mem_ctl_info *mci = dev_id;
        struct fsl_mc_pdata *pdata = mci->pvt_info;
        u32 err_detect;

        err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
        if (!err_detect)
                return IRQ_NONE;

        fsl_mc_check(mci);

        return IRQ_HANDLED;
}
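
/*
 * The interrupt is requested with IRQF_SHARED in fsl_mc_err_probe(), so the
 * handler above must return IRQ_NONE when the error-detect register shows
 * nothing pending and the interrupt belongs to another device on the line.
 */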

static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
{
        struct fsl_mc_pdata *pdata = mci->pvt_info;
        struct csrow_info *csrow;
        struct dimm_info *dimm;
        u32 sdram_ctl;
        u32 sdtype;
        enum mem_type mtype;
        u32 cs_bnds;
        int index;

        sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);

        sdtype = sdram_ctl & DSC_SDTYPE_MASK;
        if (sdram_ctl & DSC_RD_EN) {
                /* Registered DIMMs */
                switch (sdtype) {
                case 0x02000000:
                        mtype = MEM_RDDR;
                        break;
                case 0x03000000:
                        mtype = MEM_RDDR2;
                        break;
                case 0x07000000:
                        mtype = MEM_RDDR3;
                        break;
                case 0x05000000:
                        mtype = MEM_RDDR4;
                        break;
                default:
                        mtype = MEM_UNKNOWN;
                        break;
                }
        } else {
                /* Unbuffered DIMMs */
                switch (sdtype) {
                case 0x02000000:
                        mtype = MEM_DDR;
                        break;
                case 0x03000000:
                        mtype = MEM_DDR2;
                        break;
                case 0x07000000:
                        mtype = MEM_DDR3;
                        break;
                case 0x05000000:
                        mtype = MEM_DDR4;
                        break;
                default:
                        mtype = MEM_UNKNOWN;
                        break;
                }
        }

        for (index = 0; index < mci->nr_csrows; index++) {
                u32 start;
                u32 end;

                csrow = mci->csrows[index];
                dimm = csrow->channels[0]->dimm;

                cs_bnds = ddr_in32(pdata->mc_vbase + FSL_MC_CS_BNDS_0 +
                                   (index * FSL_MC_CS_BNDS_OFS));

                start = (cs_bnds & 0xffff0000) >> 16;
                end = (cs_bnds & 0x0000ffff);

                /* start == end means this chip select is not populated. */
                if (start == end)
                        continue;

                /* The bounds are in 16 MiB units; convert them to page frames. */
                start <<= (24 - PAGE_SHIFT);
                end <<= (24 - PAGE_SHIFT);
                end |= (1 << (24 - PAGE_SHIFT)) - 1;

                csrow->first_page = start;
                csrow->last_page = end;

                dimm->nr_pages = end + 1 - start;
                dimm->grain = 8;
                dimm->mtype = mtype;
                dimm->dtype = DEV_UNKNOWN;
                if (sdram_ctl & DSC_X32_EN)
                        dimm->dtype = DEV_X32;
                dimm->edac_mode = EDAC_SECDED;
        }
}

int fsl_mc_err_probe(struct platform_device *op)
{
        struct mem_ctl_info *mci;
        struct edac_mc_layer layers[2];
        struct fsl_mc_pdata *pdata;
        struct resource r;
        u32 sdram_ctl;
        int res;

        if (!devres_open_group(&op->dev, fsl_mc_err_probe, GFP_KERNEL))
                return -ENOMEM;

        layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
        layers[0].size = 4;
        layers[0].is_virt_csrow = true;
        layers[1].type = EDAC_MC_LAYER_CHANNEL;
        layers[1].size = 1;
        layers[1].is_virt_csrow = false;
        mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
                            sizeof(*pdata));
        if (!mci) {
                devres_release_group(&op->dev, fsl_mc_err_probe);
                return -ENOMEM;
        }

        pdata = mci->pvt_info;
        pdata->name = "fsl_mc_err";
        mci->pdev = &op->dev;
        pdata->edac_idx = edac_mc_idx++;
        dev_set_drvdata(mci->pdev, mci);
        mci->ctl_name = pdata->name;
        mci->dev_name = pdata->name;

        /*
         * Get the endianness of the DDR controller registers from the
         * device tree; big-endian is the default.
         */
        little_endian = of_property_read_bool(op->dev.of_node, "little-endian");

        res = of_address_to_resource(op->dev.of_node, 0, &r);
        if (res) {
                pr_err("%s: Unable to get resource for MC err regs\n",
                       __func__);
                goto err;
        }

        if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
                                     pdata->name)) {
                pr_err("%s: Error while requesting mem region\n",
                       __func__);
                res = -EBUSY;
                goto err;
        }

        pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
        if (!pdata->mc_vbase) {
                pr_err("%s: Unable to setup MC err regs\n", __func__);
                res = -ENOMEM;
                goto err;
        }

        sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
        if (!(sdram_ctl & DSC_ECC_EN)) {
                /* ECC is not enabled on this controller; nothing to monitor. */
                pr_warn("%s: No ECC DIMMs discovered\n", __func__);
                res = -ENODEV;
                goto err;
        }

        edac_dbg(3, "init mci\n");
        mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
                         MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
                         MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
                         MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
        mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
        mci->edac_cap = EDAC_FLAG_SECDED;
        mci->mod_name = EDAC_MOD_STR;

        if (edac_op_state == EDAC_OPSTATE_POLL)
                mci->edac_check = fsl_mc_check;

        mci->ctl_page_to_phys = NULL;

        mci->scrub_mode = SCRUB_SW_SRC;

        fsl_ddr_init_csrows(mci);

        /* Save the original error-disable mask, then enable all error reporting. */
        orig_ddr_err_disable = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DISABLE);
        ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE, 0);

        /* Clear any error bits latched before we took over. */
        ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, ~0);

        res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
        if (res) {
                edac_dbg(3, "failed edac_mc_add_mc()\n");
                goto err;
        }

        if (edac_op_state == EDAC_OPSTATE_INT) {
                ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN,
                          DDR_EIE_MBEE | DDR_EIE_SBEE);

                /* Save the original single-bit error management threshold. */
                orig_ddr_err_sbe = ddr_in32(pdata->mc_vbase +
                                            FSL_MC_ERR_SBE) & 0xff0000;

                /* Set the threshold to 1 error per interrupt. */
                ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, 0x10000);

                /* Register the interrupt handler. */
                pdata->irq = platform_get_irq(op, 0);
                res = devm_request_irq(&op->dev, pdata->irq,
                                       fsl_mc_isr,
                                       IRQF_SHARED,
                                       "[EDAC] MC err", mci);
                if (res < 0) {
                        pr_err("%s: Unable to request irq %d for FSL DDR DRAM ERR\n",
                               __func__, pdata->irq);
                        res = -ENODEV;
                        goto err2;
                }

                pr_info(EDAC_MOD_STR " acquired irq %d for MC\n",
                        pdata->irq);
        }

        devres_remove_group(&op->dev, fsl_mc_err_probe);
        edac_dbg(3, "success\n");
        pr_info(EDAC_MOD_STR " MC err registered\n");

        return 0;

err2:
        edac_mc_del_mc(&op->dev);
err:
        devres_release_group(&op->dev, fsl_mc_err_probe);
        edac_mc_free(mci);
        return res;
}

int fsl_mc_err_remove(struct platform_device *op)
{
        struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
        struct fsl_mc_pdata *pdata = mci->pvt_info;

        edac_dbg(0, "\n");

        if (edac_op_state == EDAC_OPSTATE_INT)
                ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN, 0);

        ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE,
                  orig_ddr_err_disable);
        ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, orig_ddr_err_sbe);

        edac_mc_del_mc(&op->dev);
        edac_mc_free(mci);
        return 0;
}