0001
0002
0003
0004
0005 #include <linux/types.h>
0006 #include <linux/kernel.h>
0007 #include <linux/ctype.h>
0008 #include <linux/edac.h>
0009 #include <linux/interrupt.h>
0010 #include <linux/platform_device.h>
0011 #include <linux/of_platform.h>
0012 #include <linux/uaccess.h>
0013
0014 #include "edac_module.h"
0015
0016
0017
/* Offset of the ECC error-report register block within the MC region
 * (Highbank vs. Midway/ECX-2000 silicon).
 */
#define HB_DDR_ECC_ERR_BASE 0x128
#define MW_DDR_ECC_ERR_BASE 0x1b4

/* Registers relative to the error-report block base. */
#define HB_DDR_ECC_OPT 0x00
#define HB_DDR_ECC_U_ERR_ADDR 0x08
#define HB_DDR_ECC_U_ERR_STAT 0x0c
#define HB_DDR_ECC_U_ERR_DATAL 0x10
#define HB_DDR_ECC_U_ERR_DATAH 0x14
#define HB_DDR_ECC_C_ERR_ADDR 0x18
#define HB_DDR_ECC_C_ERR_STAT 0x1c
#define HB_DDR_ECC_C_ERR_DATAL 0x20
#define HB_DDR_ECC_C_ERR_DATAH 0x24

/* ECC_OPT fields: low 2 bits select the ECC mode (0 = off, per probe check),
 * FWC forces a write check, and the XOR syndrome used for error injection
 * sits at bit 16.
 */
#define HB_DDR_ECC_OPT_MODE_MASK 0x3
#define HB_DDR_ECC_OPT_FWC 0x100
#define HB_DDR_ECC_OPT_XOR_SHIFT 16

/* Offset of the interrupt status/ack register block within the MC region. */
#define HB_DDR_ECC_INT_BASE 0x180
#define MW_DDR_ECC_INT_BASE 0x218

#define HB_DDR_ECC_INT_STATUS 0x00
#define HB_DDR_ECC_INT_ACK 0x04

/* Interrupt status bits handled by the ISR. */
#define HB_DDR_ECC_INT_STAT_CE 0x8
#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10
#define HB_DDR_ECC_INT_STAT_UE 0x20
#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40
0047
/* Per-instance private data, stored in mci->pvt_info: pre-computed bases of
 * the two register sub-blocks inside the mapped MC region.
 */
struct hb_mc_drvdata {
	void __iomem *mc_err_base;	/* error-report registers */
	void __iomem *mc_int_base;	/* interrupt status/ack registers */
};
0052
0053 static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
0054 {
0055 struct mem_ctl_info *mci = dev_id;
0056 struct hb_mc_drvdata *drvdata = mci->pvt_info;
0057 u32 status, err_addr;
0058
0059
0060 status = readl(drvdata->mc_int_base + HB_DDR_ECC_INT_STATUS);
0061
0062 if (status & HB_DDR_ECC_INT_STAT_UE) {
0063 err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_U_ERR_ADDR);
0064 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
0065 err_addr >> PAGE_SHIFT,
0066 err_addr & ~PAGE_MASK, 0,
0067 0, 0, -1,
0068 mci->ctl_name, "");
0069 }
0070 if (status & HB_DDR_ECC_INT_STAT_CE) {
0071 u32 syndrome = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_STAT);
0072 syndrome = (syndrome >> 8) & 0xff;
0073 err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_ADDR);
0074 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
0075 err_addr >> PAGE_SHIFT,
0076 err_addr & ~PAGE_MASK, syndrome,
0077 0, 0, -1,
0078 mci->ctl_name, "");
0079 }
0080
0081
0082 writel(status, drvdata->mc_int_base + HB_DDR_ECC_INT_ACK);
0083 return IRQ_HANDLED;
0084 }
0085
0086 static void highbank_mc_err_inject(struct mem_ctl_info *mci, u8 synd)
0087 {
0088 struct hb_mc_drvdata *pdata = mci->pvt_info;
0089 u32 reg;
0090
0091 reg = readl(pdata->mc_err_base + HB_DDR_ECC_OPT);
0092 reg &= HB_DDR_ECC_OPT_MODE_MASK;
0093 reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
0094 writel(reg, pdata->mc_err_base + HB_DDR_ECC_OPT);
0095 }
0096
/* Map the struct device embedded in a mem_ctl_info back to the mci itself. */
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
0098
0099 static ssize_t highbank_mc_inject_ctrl(struct device *dev,
0100 struct device_attribute *attr, const char *buf, size_t count)
0101 {
0102 struct mem_ctl_info *mci = to_mci(dev);
0103 u8 synd;
0104
0105 if (kstrtou8(buf, 16, &synd))
0106 return -EINVAL;
0107
0108 highbank_mc_err_inject(mci, synd);
0109
0110 return count;
0111 }
0112
/* Write-only (root) sysfs attribute used to inject test errors. */
static DEVICE_ATTR(inject_ctrl, S_IWUSR, NULL, highbank_mc_inject_ctrl);

static struct attribute *highbank_dev_attrs[] = {
	&dev_attr_inject_ctrl.attr,
	NULL
};

/* Generates highbank_dev_groups for edac_mc_add_mc_with_groups(). */
ATTRIBUTE_GROUPS(highbank_dev);
0121
/* SoC-specific register block offsets, selected via the OF match data. */
struct hb_mc_settings {
	int err_offset;	/* offset of the ECC error-report block */
	int int_offset;	/* offset of the interrupt status/ack block */
};

/* Calxeda Highbank layout. */
static struct hb_mc_settings hb_settings = {
	.err_offset = HB_DDR_ECC_ERR_BASE,
	.int_offset = HB_DDR_ECC_INT_BASE,
};

/* Calxeda Midway (ECX-2000) layout. */
static struct hb_mc_settings mw_settings = {
	.err_offset = MW_DDR_ECC_ERR_BASE,
	.int_offset = MW_DDR_ECC_INT_BASE,
};
0136
/* Device-tree match table; .data carries the per-SoC register offsets. */
static const struct of_device_id hb_ddr_ctrl_of_match[] = {
	{ .compatible = "calxeda,hb-ddr-ctrl", .data = &hb_settings },
	{ .compatible = "calxeda,ecx-2000-ddr-ctrl", .data = &mw_settings },
	{},
};
MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);
0143
0144 static int highbank_mc_probe(struct platform_device *pdev)
0145 {
0146 const struct of_device_id *id;
0147 const struct hb_mc_settings *settings;
0148 struct edac_mc_layer layers[2];
0149 struct mem_ctl_info *mci;
0150 struct hb_mc_drvdata *drvdata;
0151 struct dimm_info *dimm;
0152 struct resource *r;
0153 void __iomem *base;
0154 u32 control;
0155 int irq;
0156 int res = 0;
0157
0158 id = of_match_device(hb_ddr_ctrl_of_match, &pdev->dev);
0159 if (!id)
0160 return -ENODEV;
0161
0162 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
0163 layers[0].size = 1;
0164 layers[0].is_virt_csrow = true;
0165 layers[1].type = EDAC_MC_LAYER_CHANNEL;
0166 layers[1].size = 1;
0167 layers[1].is_virt_csrow = false;
0168 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
0169 sizeof(struct hb_mc_drvdata));
0170 if (!mci)
0171 return -ENOMEM;
0172
0173 mci->pdev = &pdev->dev;
0174 drvdata = mci->pvt_info;
0175 platform_set_drvdata(pdev, mci);
0176
0177 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
0178 return -ENOMEM;
0179
0180 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0181 if (!r) {
0182 dev_err(&pdev->dev, "Unable to get mem resource\n");
0183 res = -ENODEV;
0184 goto err;
0185 }
0186
0187 if (!devm_request_mem_region(&pdev->dev, r->start,
0188 resource_size(r), dev_name(&pdev->dev))) {
0189 dev_err(&pdev->dev, "Error while requesting mem region\n");
0190 res = -EBUSY;
0191 goto err;
0192 }
0193
0194 base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
0195 if (!base) {
0196 dev_err(&pdev->dev, "Unable to map regs\n");
0197 res = -ENOMEM;
0198 goto err;
0199 }
0200
0201 settings = id->data;
0202 drvdata->mc_err_base = base + settings->err_offset;
0203 drvdata->mc_int_base = base + settings->int_offset;
0204
0205 control = readl(drvdata->mc_err_base + HB_DDR_ECC_OPT) & 0x3;
0206 if (!control || (control == 0x2)) {
0207 dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
0208 res = -ENODEV;
0209 goto err;
0210 }
0211
0212 mci->mtype_cap = MEM_FLAG_DDR3;
0213 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
0214 mci->edac_cap = EDAC_FLAG_SECDED;
0215 mci->mod_name = pdev->dev.driver->name;
0216 mci->ctl_name = id->compatible;
0217 mci->dev_name = dev_name(&pdev->dev);
0218 mci->scrub_mode = SCRUB_SW_SRC;
0219
0220
0221 dimm = *mci->dimms;
0222 dimm->nr_pages = (~0UL >> PAGE_SHIFT) + 1;
0223 dimm->grain = 8;
0224 dimm->dtype = DEV_X8;
0225 dimm->mtype = MEM_DDR3;
0226 dimm->edac_mode = EDAC_SECDED;
0227
0228 res = edac_mc_add_mc_with_groups(mci, highbank_dev_groups);
0229 if (res < 0)
0230 goto err;
0231
0232 irq = platform_get_irq(pdev, 0);
0233 res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
0234 0, dev_name(&pdev->dev), mci);
0235 if (res < 0) {
0236 dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
0237 goto err2;
0238 }
0239
0240 devres_close_group(&pdev->dev, NULL);
0241 return 0;
0242 err2:
0243 edac_mc_del_mc(&pdev->dev);
0244 err:
0245 devres_release_group(&pdev->dev, NULL);
0246 edac_mc_free(mci);
0247 return res;
0248 }
0249
0250 static int highbank_mc_remove(struct platform_device *pdev)
0251 {
0252 struct mem_ctl_info *mci = platform_get_drvdata(pdev);
0253
0254 edac_mc_del_mc(&pdev->dev);
0255 edac_mc_free(mci);
0256 return 0;
0257 }
0258
/* Platform driver glue; matching is driven by the OF table above. */
static struct platform_driver highbank_mc_edac_driver = {
	.probe = highbank_mc_probe,
	.remove = highbank_mc_remove,
	.driver = {
		.name = "hb_mc_edac",
		.of_match_table = hb_ddr_ctrl_of_match,
	},
};
0267
0268 module_platform_driver(highbank_mc_edac_driver);
0269
0270 MODULE_LICENSE("GPL v2");
0271 MODULE_AUTHOR("Calxeda, Inc.");
0272 MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank");