0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026 #include <linux/module.h>
0027 #include <linux/init.h>
0028 #include <linux/pci.h>
0029 #include <linux/pci_ids.h>
0030 #include <linux/slab.h>
0031 #include <linux/delay.h>
0032 #include <linux/dmi.h>
0033 #include <linux/edac.h>
0034 #include <linux/mmzone.h>
0035 #include <linux/smp.h>
0036 #include <asm/mce.h>
0037 #include <asm/processor.h>
0038 #include <asm/div64.h>
0039
0040 #include "edac_module.h"
0041
0042
0043 static LIST_HEAD(i7core_edac_list);
0044 static DEFINE_MUTEX(i7core_edac_lock);
0045 static int probed;
0046
0047 static int use_pci_fixup;
0048 module_param(use_pci_fixup, int, 0444);
0049 MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
0050
0051
0052
0053
0054
0055
0056 #define MAX_SOCKET_BUSES 2
0057
0058
0059
0060
0061
0062 #define I7CORE_REVISION " Ver: 1.0.0"
0063 #define EDAC_MOD_STR "i7core_edac"
0064
0065
0066
0067
0068 #define i7core_printk(level, fmt, arg...) \
0069 edac_printk(level, "i7core", fmt, ##arg)
0070
0071 #define i7core_mc_printk(mci, level, fmt, arg...) \
0072 edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
0073
0074
0075
0076
0077
0078
0079
0080 #define MC_CFG_CONTROL 0x90
0081 #define MC_CFG_UNLOCK 0x02
0082 #define MC_CFG_LOCK 0x00
0083
0084
0085
0086 #define MC_CONTROL 0x48
0087 #define MC_STATUS 0x4c
0088 #define MC_MAX_DOD 0x64
0089
0090
0091
0092
0093
0094
0095 #define MC_TEST_ERR_RCV1 0x60
0096 #define DIMM2_COR_ERR(r) ((r) & 0x7fff)
0097
0098 #define MC_TEST_ERR_RCV0 0x64
0099 #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
0100 #define DIMM0_COR_ERR(r) ((r) & 0x7fff)
0101
0102
0103 #define MC_SSRCONTROL 0x48
0104 #define SSR_MODE_DISABLE 0x00
0105 #define SSR_MODE_ENABLE 0x01
0106 #define SSR_MODE_MASK 0x03
0107
0108 #define MC_SCRUB_CONTROL 0x4c
0109 #define STARTSCRUB (1 << 24)
0110 #define SCRUBINTERVAL_MASK 0xffffff
0111
0112 #define MC_COR_ECC_CNT_0 0x80
0113 #define MC_COR_ECC_CNT_1 0x84
0114 #define MC_COR_ECC_CNT_2 0x88
0115 #define MC_COR_ECC_CNT_3 0x8c
0116 #define MC_COR_ECC_CNT_4 0x90
0117 #define MC_COR_ECC_CNT_5 0x94
0118
0119 #define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff)
0120 #define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff)
0121
0122
0123
0124
0125 #define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
0126 #define THREE_DIMMS_PRESENT (1 << 24)
0127 #define SINGLE_QUAD_RANK_PRESENT (1 << 23)
0128 #define QUAD_RANK_PRESENT (1 << 22)
0129 #define REGISTERED_DIMM (1 << 15)
0130
0131 #define MC_CHANNEL_MAPPER 0x60
0132 #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1)
0133 #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1)
0134
0135 #define MC_CHANNEL_RANK_PRESENT 0x7c
0136 #define RANK_PRESENT_MASK 0xffff
0137
0138 #define MC_CHANNEL_ADDR_MATCH 0xf0
0139 #define MC_CHANNEL_ERROR_MASK 0xf8
0140 #define MC_CHANNEL_ERROR_INJECT 0xfc
0141 #define INJECT_ADDR_PARITY 0x10
0142 #define INJECT_ECC 0x08
0143 #define MASK_CACHELINE 0x06
0144 #define MASK_FULL_CACHELINE 0x06
0145 #define MASK_MSB32_CACHELINE 0x04
0146 #define MASK_LSB32_CACHELINE 0x02
0147 #define NO_MASK_CACHELINE 0x00
0148 #define REPEAT_EN 0x01
0149
0150
0151
0152 #define MC_DOD_CH_DIMM0 0x48
0153 #define MC_DOD_CH_DIMM1 0x4c
0154 #define MC_DOD_CH_DIMM2 0x50
0155 #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10))
0156 #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10)
0157 #define DIMM_PRESENT_MASK (1 << 9)
0158 #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
0159 #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
0160 #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
0161 #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
0162 #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
0163 #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
0164 #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
0165 #define MC_DOD_NUMCOL_MASK 3
0166 #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
0167
0168 #define MC_RANK_PRESENT 0x7c
0169
0170 #define MC_SAG_CH_0 0x80
0171 #define MC_SAG_CH_1 0x84
0172 #define MC_SAG_CH_2 0x88
0173 #define MC_SAG_CH_3 0x8c
0174 #define MC_SAG_CH_4 0x90
0175 #define MC_SAG_CH_5 0x94
0176 #define MC_SAG_CH_6 0x98
0177 #define MC_SAG_CH_7 0x9c
0178
0179 #define MC_RIR_LIMIT_CH_0 0x40
0180 #define MC_RIR_LIMIT_CH_1 0x44
0181 #define MC_RIR_LIMIT_CH_2 0x48
0182 #define MC_RIR_LIMIT_CH_3 0x4C
0183 #define MC_RIR_LIMIT_CH_4 0x50
0184 #define MC_RIR_LIMIT_CH_5 0x54
0185 #define MC_RIR_LIMIT_CH_6 0x58
0186 #define MC_RIR_LIMIT_CH_7 0x5C
0187 #define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
0188
0189 #define MC_RIR_WAY_CH 0x80
0190 #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
0191 #define MC_RIR_WAY_RANK_MASK 0x7
0192
0193
0194
0195
0196
0197 #define NUM_CHANS 3
0198 #define MAX_DIMMS 3
0199 #define MAX_MCR_FUNC 4
0200 #define MAX_CHAN_FUNC 3
0201
/* Snapshot of the global memory-controller registers, read once at probe. */
struct i7core_info {
	u32	mc_control;	/* MC_CONTROL: channel-active bits, ECC x8 flag */
	u32	mc_status;	/* MC_STATUS: ECC enable, channel-disabled bits */
	u32	max_dod;	/* MC_MAX_DOD: max DIMM organization limits */
	u32	ch_map;		/* MC_CHANNEL_MAPPER: logical<->physical channel map */
};

/* State of the sysfs-driven error-injection interface. */
struct i7core_inject {
	int	enable;		/* non-zero while injection is armed */

	u32	section;	/* cacheline section selector (0-3) */
	u32	type;		/* injection type bits (0-7) */
	u32	eccmask;	/* ECC bit mask written to MC_CHANNEL_ERROR_MASK */

	/* Address-match criteria; negative value means "match any" */
	int channel, dimm, rank, bank, page, col;
};

/* Per-channel topology, decoded from MC_CHANNEL_DIMM_INIT_PARAMS. */
struct i7core_channel {
	bool		is_3dimms_present;
	bool		is_single_4rank;
	bool		has_4rank;
	u32		dimms;
};

/* One PCI function the driver needs to locate for a socket. */
struct pci_id_descr {
	int			dev;		/* PCI device number */
	int			func;		/* PCI function number */
	int			dev_id;		/* PCI device ID */
	int			optional;	/* probe may proceed without it */
};

/* Per-CPU-generation table of pci_id_descr entries. */
struct pci_id_table {
	const struct pci_id_descr	*descr;
	int				n_devs;		/* entries in descr[] */
};

/* One entry per socket, linked on i7core_edac_list. */
struct i7core_dev {
	struct list_head	list;
	u8			socket;
	struct pci_dev		**pdev;		/* n_devs device pointers */
	int			n_devs;
	struct mem_ctl_info	*mci;
};
0247
/* Driver-private state, hung off mem_ctl_info->pvt_info. */
struct i7core_pvt {
	/* sysfs companion devices for injection address-match and counters */
	struct device *addrmatch_dev, *chancounts_dev;

	struct pci_dev	*pci_noncore;				/* "non-core" device (MC_CFG_CONTROL) */
	struct pci_dev	*pci_mcr[MAX_MCR_FUNC + 1];		/* MCR functions, indexed by func */
	struct pci_dev	*pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];	/* per-channel functions */

	struct i7core_dev	*i7core_dev;

	struct i7core_info	info;
	struct i7core_inject	inject;
	struct i7core_channel	channel[NUM_CHANS];

	int		ce_count_available;	/* corrected-error counters valid */

	/* corrected-error counts per DIMM (unregistered DIMM layout) */
	unsigned long	udimm_ce_count[MAX_DIMMS];
	int		udimm_last_ce_count[MAX_DIMMS];
	/* corrected-error counts per channel/DIMM (registered DIMM layout) */
	unsigned long	rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
	int		rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];

	bool		is_registered, enable_scrub;

	/* DCLK frequency, used when computing the scrub rate */
	int		dclk_freq;

	/* EDAC PCI control structure for parity-error polling */
	struct edac_pci_ctl_info *i7core_pci;
};
0278
0279 #define PCI_DESCR(device, function, device_id) \
0280 .dev = (device), \
0281 .func = (function), \
0282 .dev_id = (device_id)
0283
/* Memory-controller PCI functions for Nehalem (i7 core) processors. */
static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
	/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
	/* RAS function exists only with RDIMMs */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },

	/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },

	/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },

	/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },

	/* "Non-core" device; must be last, as it is used to detect the socket */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
};

/* Memory-controller PCI functions for Lynnfield processors (two channels). */
static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
	{ PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
	{ PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
	{ PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },

	{ PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
	{ PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
	{ PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
	{ PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) },

	{ PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
	{ PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
	{ PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
	{ PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },

	/* "Non-core" device; must be last, as it is used to detect the socket */
	{ PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
};

/* Memory-controller PCI functions for Westmere (rev2) processors. */
static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
	/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
	/* RAS function exists only with RDIMMs */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },

	/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) },

	/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) },

	/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },

	/* "Non-core" device; must be last, as it is used to detect the socket */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },
};

#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
/* Probe tries each table in order until one matches the hardware. */
static const struct pci_id_table pci_dev_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
	{0,}			/* 0 terminates list */
};

/*
 * PCI IDs that trigger module autoload; the actual per-function devices
 * are discovered manually in the probe path.
 */
static const struct pci_device_id i7core_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
	{0,}			/* 0 terminates list */
};
0390
0391
0392
0393
0394
0395
0396 #define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch)))
0397 #define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1))
0398
0399
0400 #define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4))
0401 #define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch))
0402
0403
/* Decode the DOD max-DIMM field: the low two bits encode (count - 1). */
static inline int numdimms(u32 dimms)
{
	u32 encoded = dimms & 0x3;

	return encoded + 1;
}
0408
/* Decode a 2-bit rank-count field; value 3 is reserved (-EINVAL). */
static inline int numrank(u32 rank)
{
	switch (rank & 0x3) {
	case 0:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	default:
		return -EINVAL;
	}
}
0415
/* Decode a 2-bit bank-count field; value 3 is reserved (-EINVAL). */
static inline int numbank(u32 bank)
{
	switch (bank & 0x3) {
	case 0:
		return 4;
	case 1:
		return 8;
	case 2:
		return 16;
	default:
		return -EINVAL;
	}
}
0422
/*
 * Decode a 3-bit row-count field: values 0..4 map to 2^(12+value) rows;
 * values 5..7 are reserved (-EINVAL).
 */
static inline int numrow(u32 row)
{
	u32 sel = row & 0x7;

	if (sel > 4)
		return -EINVAL;

	return 1 << (12 + sel);
}
0432
/*
 * Decode a 2-bit column-count field: values 0..2 map to 2^(10+value)
 * columns; value 3 is reserved (-EINVAL).
 */
static inline int numcol(u32 col)
{
	u32 sel = col & 0x3;

	if (sel == 3)
		return -EINVAL;

	return 1 << (10 + sel);
}
0440
/*
 * get_i7core_dev() - find the i7core_dev for @socket.
 *
 * Walks the global i7core_edac_list. Returns NULL if the socket has not
 * been registered. Caller is expected to hold i7core_edac_lock.
 */
static struct i7core_dev *get_i7core_dev(u8 socket)
{
	struct i7core_dev *i7core_dev;

	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		if (i7core_dev->socket == socket)
			return i7core_dev;
	}

	return NULL;
}
0452
/*
 * alloc_i7core_dev() - allocate a per-socket device tracker.
 *
 * Allocates the i7core_dev plus a zeroed pdev[] array sized for every
 * function in @table, then links it on the global list. Returns NULL on
 * allocation failure (nothing is left on the list in that case).
 */
static struct i7core_dev *alloc_i7core_dev(u8 socket,
					   const struct pci_id_table *table)
{
	struct i7core_dev *i7core_dev;

	i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
	if (!i7core_dev)
		return NULL;

	i7core_dev->pdev = kcalloc(table->n_devs, sizeof(*i7core_dev->pdev),
				   GFP_KERNEL);
	if (!i7core_dev->pdev) {
		kfree(i7core_dev);
		return NULL;
	}

	i7core_dev->socket = socket;
	i7core_dev->n_devs = table->n_devs;
	list_add_tail(&i7core_dev->list, &i7core_edac_list);

	return i7core_dev;
}
0475
/*
 * free_i7core_dev() - unlink and free a per-socket tracker.
 *
 * Does not drop the pci_dev references held in pdev[]; callers release
 * those separately (see i7core_put_devices()).
 */
static void free_i7core_dev(struct i7core_dev *i7core_dev)
{
	list_del(&i7core_dev->list);
	kfree(i7core_dev->pdev);
	kfree(i7core_dev);
}
0482
0483
0484
0485
0486
0487 static int get_dimm_config(struct mem_ctl_info *mci)
0488 {
0489 struct i7core_pvt *pvt = mci->pvt_info;
0490 struct pci_dev *pdev;
0491 int i, j;
0492 enum edac_type mode;
0493 enum mem_type mtype;
0494 struct dimm_info *dimm;
0495
0496
0497 pdev = pvt->pci_mcr[0];
0498 if (!pdev)
0499 return -ENODEV;
0500
0501
0502 pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
0503 pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
0504 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
0505 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
0506
0507 edac_dbg(0, "QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
0508 pvt->i7core_dev->socket, pvt->info.mc_control,
0509 pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map);
0510
0511 if (ECC_ENABLED(pvt)) {
0512 edac_dbg(0, "ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
0513 if (ECCx8(pvt))
0514 mode = EDAC_S8ECD8ED;
0515 else
0516 mode = EDAC_S4ECD4ED;
0517 } else {
0518 edac_dbg(0, "ECC disabled\n");
0519 mode = EDAC_NONE;
0520 }
0521
0522
0523 edac_dbg(0, "DOD Max limits: DIMMS: %d, %d-ranked, %d-banked x%x x 0x%x\n",
0524 numdimms(pvt->info.max_dod),
0525 numrank(pvt->info.max_dod >> 2),
0526 numbank(pvt->info.max_dod >> 4),
0527 numrow(pvt->info.max_dod >> 6),
0528 numcol(pvt->info.max_dod >> 9));
0529
0530 for (i = 0; i < NUM_CHANS; i++) {
0531 u32 data, dimm_dod[3], value[8];
0532
0533 if (!pvt->pci_ch[i][0])
0534 continue;
0535
0536 if (!CH_ACTIVE(pvt, i)) {
0537 edac_dbg(0, "Channel %i is not active\n", i);
0538 continue;
0539 }
0540 if (CH_DISABLED(pvt, i)) {
0541 edac_dbg(0, "Channel %i is disabled\n", i);
0542 continue;
0543 }
0544
0545
0546 pci_read_config_dword(pvt->pci_ch[i][0],
0547 MC_CHANNEL_DIMM_INIT_PARAMS, &data);
0548
0549
0550 if (data & THREE_DIMMS_PRESENT)
0551 pvt->channel[i].is_3dimms_present = true;
0552
0553 if (data & SINGLE_QUAD_RANK_PRESENT)
0554 pvt->channel[i].is_single_4rank = true;
0555
0556 if (data & QUAD_RANK_PRESENT)
0557 pvt->channel[i].has_4rank = true;
0558
0559 if (data & REGISTERED_DIMM)
0560 mtype = MEM_RDDR3;
0561 else
0562 mtype = MEM_DDR3;
0563
0564
0565 pci_read_config_dword(pvt->pci_ch[i][1],
0566 MC_DOD_CH_DIMM0, &dimm_dod[0]);
0567 pci_read_config_dword(pvt->pci_ch[i][1],
0568 MC_DOD_CH_DIMM1, &dimm_dod[1]);
0569 pci_read_config_dword(pvt->pci_ch[i][1],
0570 MC_DOD_CH_DIMM2, &dimm_dod[2]);
0571
0572 edac_dbg(0, "Ch%d phy rd%d, wr%d (0x%08x): %s%s%s%cDIMMs\n",
0573 i,
0574 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
0575 data,
0576 pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
0577 pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
0578 pvt->channel[i].has_4rank ? "HAS_4R " : "",
0579 (data & REGISTERED_DIMM) ? 'R' : 'U');
0580
0581 for (j = 0; j < 3; j++) {
0582 u32 banks, ranks, rows, cols;
0583 u32 size, npages;
0584
0585 if (!DIMM_PRESENT(dimm_dod[j]))
0586 continue;
0587
0588 dimm = edac_get_dimm(mci, i, j, 0);
0589 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
0590 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
0591 rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
0592 cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
0593
0594
0595 size = (rows * cols * banks * ranks) >> (20 - 3);
0596
0597 edac_dbg(0, "\tdimm %d %d MiB offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
0598 j, size,
0599 RANKOFFSET(dimm_dod[j]),
0600 banks, ranks, rows, cols);
0601
0602 npages = MiB_TO_PAGES(size);
0603
0604 dimm->nr_pages = npages;
0605
0606 switch (banks) {
0607 case 4:
0608 dimm->dtype = DEV_X4;
0609 break;
0610 case 8:
0611 dimm->dtype = DEV_X8;
0612 break;
0613 case 16:
0614 dimm->dtype = DEV_X16;
0615 break;
0616 default:
0617 dimm->dtype = DEV_UNKNOWN;
0618 }
0619
0620 snprintf(dimm->label, sizeof(dimm->label),
0621 "CPU#%uChannel#%u_DIMM#%u",
0622 pvt->i7core_dev->socket, i, j);
0623 dimm->grain = 8;
0624 dimm->edac_mode = mode;
0625 dimm->mtype = mtype;
0626 }
0627
0628 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
0629 pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
0630 pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
0631 pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
0632 pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
0633 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
0634 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
0635 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
0636 edac_dbg(1, "\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
0637 for (j = 0; j < 8; j++)
0638 edac_dbg(1, "\t\t%#x\t%#x\t%#x\n",
0639 (value[j] >> 27) & 0x1,
0640 (value[j] >> 24) & 0x7,
0641 (value[j] & ((1 << 24) - 1)));
0642 }
0643
0644 return 0;
0645 }
0646
0647
0648
0649
0650
0651 #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
0652
0653
0654
0655
0656
0657
0658
0659
/*
 * disable_inject() - disarm error injection for the configured channel.
 *
 * Clears the software enable flag and, if the channel's control device is
 * present, zeroes MC_CHANNEL_ERROR_INJECT in hardware. Returns 0 on
 * success, -ENODEV if the channel device is missing.
 */
static int disable_inject(const struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;

	pvt->inject.enable = 0;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return -ENODEV;

	pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
				MC_CHANNEL_ERROR_INJECT, 0);

	return 0;
}
0674
0675
0676
0677
0678
0679
0680
0681
/*
 * sysfs store for inject_section: accepts a decimal value 0-3 selecting
 * which part of the cacheline is injected. Any pending injection is
 * disabled first. Returns @count, or -EIO on bad input.
 */
static ssize_t i7core_inject_section_store(struct device *dev,
					   struct device_attribute *mattr,
					   const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	rc = kstrtoul(data, 10, &value);
	if ((rc < 0) || (value > 3))
		return -EIO;

	pvt->inject.section = (u32) value;
	return count;
}

/* sysfs show for inject_section: prints the cached value in hex. */
static ssize_t i7core_inject_section_show(struct device *dev,
					  struct device_attribute *mattr,
					  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.section);
}
0710
0711
0712
0713
0714
0715
0716
0717
0718
/*
 * sysfs store for inject_type: accepts a decimal value 0-7; bit 0 and
 * bits 1-2 are later split into the hardware injectmask (see
 * i7core_inject_enable_store()). Any pending injection is disabled
 * first. Returns @count, or -EIO on bad input.
 */
static ssize_t i7core_inject_type_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	rc = kstrtoul(data, 10, &value);
	if ((rc < 0) || (value > 7))
		return -EIO;

	pvt->inject.type = (u32) value;
	return count;
}

/* sysfs show for inject_type: prints the cached value in hex. */
static ssize_t i7core_inject_type_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;

	return sprintf(data, "0x%08x\n", pvt->inject.type);
}
0748
0749
0750
0751
0752
0753
0754
0755
0756
0757
0758
/*
 * sysfs store for inject_eccmask: the ECC bit mask later written to
 * MC_CHANNEL_ERROR_MASK. Parsed as decimal; any pending injection is
 * disabled first. Returns @count, or -EIO on parse failure.
 */
static ssize_t i7core_inject_eccmask_store(struct device *dev,
					   struct device_attribute *mattr,
					   const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	rc = kstrtoul(data, 10, &value);
	if (rc < 0)
		return -EIO;

	pvt->inject.eccmask = (u32) value;
	return count;
}

/* sysfs show for inject_eccmask: prints the cached value in hex. */
static ssize_t i7core_inject_eccmask_show(struct device *dev,
					  struct device_attribute *mattr,
					  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;

	return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
}
0788
0789
0790
0791
0792
0793
0794
0795
0796
0797
0798
0799
/*
 * DECLARE_ADDR_MATCH() - generate the sysfs store/show pair for one
 * address-match field of the injection unit (channel, dimm, rank, bank,
 * page, col). The store accepts a decimal value below @limit, or the
 * literal string "any" which is stored as -1 (wildcard). Any armed
 * injection is disabled before the field is changed.
 *
 * NOTE(review): @value is declared long but passed to kstrtoul(), which
 * expects unsigned long * — the (value >= limit) range check relies on
 * this signed/unsigned mix. Looks pre-existing upstream; confirm before
 * changing.
 */
#define DECLARE_ADDR_MATCH(param, limit)			\
static ssize_t i7core_inject_store_##param(			\
	struct device *dev,					\
	struct device_attribute *mattr,				\
	const char *data, size_t count)				\
{								\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
	struct i7core_pvt *pvt;					\
	long value;						\
	int rc;							\
								\
	edac_dbg(1, "\n");					\
	pvt = mci->pvt_info;					\
								\
	if (pvt->inject.enable)					\
		disable_inject(mci);				\
								\
	if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
		value = -1;					\
	else {							\
		rc = kstrtoul(data, 10, &value);		\
		if ((rc < 0) || (value >= limit))		\
			return -EIO;				\
	}							\
								\
	pvt->inject.param = value;				\
								\
	return count;						\
}								\
								\
static ssize_t i7core_inject_show_##param(			\
	struct device *dev,					\
	struct device_attribute *mattr,				\
	char *data)						\
{								\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
	struct i7core_pvt *pvt;					\
								\
	pvt = mci->pvt_info;					\
	edac_dbg(1, "pvt=%p\n", pvt);				\
	if (pvt->inject.param < 0)				\
		return sprintf(data, "any\n");			\
	else							\
		return sprintf(data, "%d\n", pvt->inject.param);\
}

/* Bind the generated pair to a DEVICE_ATTR named after the field. */
#define ATTR_ADDR_MATCH(param)				\
	static DEVICE_ATTR(param, S_IRUGO | S_IWUSR,	\
		    i7core_inject_show_##param,		\
		    i7core_inject_store_##param)

/* Instantiate one store/show pair per address-match field. */
DECLARE_ADDR_MATCH(channel, 3);
DECLARE_ADDR_MATCH(dimm, 3);
DECLARE_ADDR_MATCH(rank, 4);
DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);

ATTR_ADDR_MATCH(channel);
ATTR_ADDR_MATCH(dimm);
ATTR_ADDR_MATCH(rank);
ATTR_ADDR_MATCH(bank);
ATTR_ADDR_MATCH(page);
ATTR_ADDR_MATCH(col);
0864
0865 static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
0866 {
0867 u32 read;
0868 int count;
0869
0870 edac_dbg(0, "setting pci %02x:%02x.%x reg=%02x value=%08x\n",
0871 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
0872 where, val);
0873
0874 for (count = 0; count < 10; count++) {
0875 if (count)
0876 msleep(100);
0877 pci_write_config_dword(dev, where, val);
0878 pci_read_config_dword(dev, where, &read);
0879
0880 if (read == val)
0881 return 0;
0882 }
0883
0884 i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
0885 "write=%08x. Read=%08x\n",
0886 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
0887 where, val, read);
0888
0889 return -EINVAL;
0890 }
0891
0892
0893
0894
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904
0905
0906
0907
0908
0909
/*
 * sysfs store for inject_enable: arms (non-zero) or disarms (zero) error
 * injection using the previously-configured address-match fields.
 *
 * Builds the 42-bit address-match mask: for each field, either the
 * "match any" bit (bits 37-41) is set, or the field value is placed at
 * its hardware position. The MC config space is unlocked (MC_CFG_CONTROL
 * = 0x2) around the register writes and re-locked afterwards.
 *
 * NOTE(review): `enable` is declared long but kstrtoul() takes an
 * unsigned long * — pre-existing signedness mismatch; confirm.
 * NOTE(review): the bank mask 0x15LL (0b10101) drops bits 1 and 3 of a
 * field whose sysfs limit is 32 (5 bits); 0x1f looks intended — TODO
 * confirm against the chipset datasheet before changing.
 */
static ssize_t i7core_inject_enable_store(struct device *dev,
					  struct device_attribute *mattr,
					  const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;
	u64 mask = 0;
	int  rc;
	long enable;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	rc = kstrtoul(data, 10, &enable);
	if ((rc < 0))
		return 0;

	if (enable) {
		pvt->inject.enable = 1;
	} else {
		disable_inject(mci);
		return count;
	}

	/* Sets pvt->inject.dimm mask */
	if (pvt->inject.dimm < 0)
		mask |= 1LL << 41;	/* match any DIMM */
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.dimm & 0x3LL) << 35;
		else
			mask |= (pvt->inject.dimm & 0x1LL) << 36;
	}

	/* Sets pvt->inject.rank mask */
	if (pvt->inject.rank < 0)
		mask |= 1LL << 40;	/* match any rank */
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.rank & 0x1LL) << 34;
		else
			mask |= (pvt->inject.rank & 0x3LL) << 34;
	}

	/* Sets pvt->inject.bank mask */
	if (pvt->inject.bank < 0)
		mask |= 1LL << 39;	/* match any bank */
	else
		mask |= (pvt->inject.bank & 0x15LL) << 30;

	/* Sets pvt->inject.page mask */
	if (pvt->inject.page < 0)
		mask |= 1LL << 38;	/* match any page */
	else
		mask |= (pvt->inject.page & 0xffff) << 14;

	/* Sets pvt->inject.column mask */
	if (pvt->inject.col < 0)
		mask |= 1LL << 37;	/* match any column */
	else
		mask |= (pvt->inject.col & 0x3fff);

	/*
	 * Assemble the injection control word: bit 0 = type bit 0,
	 * bits 1-2 = section, bits 3-4 = type bits 1-2.
	 */
	injectmask = (pvt->inject.type & 1) |
		     (pvt->inject.section & 0x3) << 1 |
		     (pvt->inject.type & 0x6) << (3 - 1);

	/* Unlock writes to registers - this register is write only */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 0x2);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH, mask);
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, injectmask);

	/* Re-lock the config registers */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 8);

	edac_dbg(0, "Error inject addr match 0x%016llx, ecc 0x%08x, inject 0x%08x\n",
		 mask, pvt->inject.eccmask, injectmask);

	return count;
}
1013
/*
 * sysfs show for inject_enable: reads MC_CHANNEL_ERROR_INJECT back from
 * hardware and re-syncs the cached enable flag if injection bits (0x0c)
 * are set, then prints the flag. Returns 0 if the channel device is
 * missing.
 */
static ssize_t i7core_inject_enable_show(struct device *dev,
					 struct device_attribute *mattr,
					 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, &injectmask);

	edac_dbg(0, "Inject error read: 0x%018x\n", injectmask);

	if (injectmask & 0x0c)
		pvt->inject.enable = 1;

	return sprintf(data, "%d\n", pvt->inject.enable);
}
1035
/*
 * DECLARE_COUNTER() - generate a read-only sysfs show routine for the
 * corrected-error counter of unregistered DIMM @param. Reports "data
 * unavailable" until counts are valid, or when the socket uses
 * registered DIMMs (which are counted per-channel instead).
 */
#define DECLARE_COUNTER(param)				\
static ssize_t i7core_show_counter_##param(		\
	struct device *dev,				\
	struct device_attribute *mattr,			\
	char *data)					\
{							\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);\
	struct i7core_pvt *pvt = mci->pvt_info;		\
							\
	edac_dbg(1, "\n");				\
	if (!pvt->ce_count_available || (pvt->is_registered)) \
		return sprintf(data, "data unavailable\n");   \
	return sprintf(data, "%lu\n",			\
			pvt->udimm_ce_count[param]);	\
}

/* Bind the generated show routine to a read-only udimmN attribute. */
#define ATTR_COUNTER(param)				\
	static DEVICE_ATTR(udimm##param, S_IRUGO | S_IWUSR, \
		    i7core_show_counter_##param,	\
		    NULL)

/* One counter attribute per possible unregistered DIMM. */
DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);

ATTR_COUNTER(0);
ATTR_COUNTER(1);
ATTR_COUNTER(2);
1064
1065
1066
1067
1068
/* Attributes of the "inject_addrmatch" sysfs sub-device. */
static struct attribute *i7core_addrmatch_attrs[] = {
	&dev_attr_channel.attr,
	&dev_attr_dimm.attr,
	&dev_attr_rank.attr,
	&dev_attr_bank.attr,
	&dev_attr_page.attr,
	&dev_attr_col.attr,
	NULL
};

static const struct attribute_group addrmatch_grp = {
	.attrs	= i7core_addrmatch_attrs,
};

static const struct attribute_group *addrmatch_groups[] = {
	&addrmatch_grp,
	NULL
};

/* Frees the kzalloc'd device once its last reference is dropped. */
static void addrmatch_release(struct device *device)
{
	edac_dbg(1, "Releasing device %s\n", dev_name(device));
	kfree(device);
}

static const struct device_type addrmatch_type = {
	.groups		= addrmatch_groups,
	.release	= addrmatch_release,
};
1098
1099
1100
1101
1102
/* Attributes of the "all_channel_counts" sysfs sub-device (UDIMM only). */
static struct attribute *i7core_udimm_counters_attrs[] = {
	&dev_attr_udimm0.attr,
	&dev_attr_udimm1.attr,
	&dev_attr_udimm2.attr,
	NULL
};

static const struct attribute_group all_channel_counts_grp = {
	.attrs	= i7core_udimm_counters_attrs,
};

static const struct attribute_group *all_channel_counts_groups[] = {
	&all_channel_counts_grp,
	NULL
};

/* Frees the kzalloc'd device once its last reference is dropped. */
static void all_channel_counts_release(struct device *device)
{
	edac_dbg(1, "Releasing device %s\n", dev_name(device));
	kfree(device);
}

static const struct device_type all_channel_counts_type = {
	.groups		= all_channel_counts_groups,
	.release	= all_channel_counts_release,
};
1129
1130
1131
1132
1133
/* Injection control attributes attached directly to the MC device. */
static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
		   i7core_inject_section_show, i7core_inject_section_store);

static DEVICE_ATTR(inject_type, S_IRUGO | S_IWUSR,
		   i7core_inject_type_show, i7core_inject_type_store);

static DEVICE_ATTR(inject_eccmask, S_IRUGO | S_IWUSR,
		   i7core_inject_eccmask_show, i7core_inject_eccmask_store);

static DEVICE_ATTR(inject_enable, S_IRUGO | S_IWUSR,
		   i7core_inject_enable_show, i7core_inject_enable_store);

static struct attribute *i7core_dev_attrs[] = {
	&dev_attr_inject_section.attr,
	&dev_attr_inject_type.attr,
	&dev_attr_inject_eccmask.attr,
	&dev_attr_inject_enable.attr,
	NULL
};

ATTRIBUTE_GROUPS(i7core_dev);
1156
/*
 * i7core_create_sysfs_devices() - register the injection sub-devices.
 *
 * Creates the "inject_addrmatch" device under the MC device, and — only
 * for unregistered-DIMM sockets — the "all_channel_counts" device.
 * Both devices are heap-allocated and freed by their type's release
 * callback. On failure the partially-created devices are torn down via
 * the goto ladder. Returns 0 or a negative errno.
 */
static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int rc;

	pvt->addrmatch_dev = kzalloc(sizeof(*pvt->addrmatch_dev), GFP_KERNEL);
	if (!pvt->addrmatch_dev)
		return -ENOMEM;

	pvt->addrmatch_dev->type = &addrmatch_type;
	pvt->addrmatch_dev->bus = mci->dev.bus;
	device_initialize(pvt->addrmatch_dev);
	pvt->addrmatch_dev->parent = &mci->dev;
	dev_set_name(pvt->addrmatch_dev, "inject_addrmatch");
	dev_set_drvdata(pvt->addrmatch_dev, mci);

	edac_dbg(1, "creating %s\n", dev_name(pvt->addrmatch_dev));

	rc = device_add(pvt->addrmatch_dev);
	if (rc < 0)
		goto err_put_addrmatch;

	if (!pvt->is_registered) {
		pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
					      GFP_KERNEL);
		if (!pvt->chancounts_dev) {
			rc = -ENOMEM;
			goto err_del_addrmatch;
		}

		pvt->chancounts_dev->type = &all_channel_counts_type;
		pvt->chancounts_dev->bus = mci->dev.bus;
		device_initialize(pvt->chancounts_dev);
		pvt->chancounts_dev->parent = &mci->dev;
		dev_set_name(pvt->chancounts_dev, "all_channel_counts");
		dev_set_drvdata(pvt->chancounts_dev, mci);

		edac_dbg(1, "creating %s\n", dev_name(pvt->chancounts_dev));

		rc = device_add(pvt->chancounts_dev);
		if (rc < 0)
			goto err_put_chancounts;
	}
	return 0;

err_put_chancounts:
	put_device(pvt->chancounts_dev);
err_del_addrmatch:
	device_del(pvt->addrmatch_dev);
err_put_addrmatch:
	put_device(pvt->addrmatch_dev);

	return rc;
}
1211
/*
 * i7core_delete_sysfs_devices() - tear down the injection sub-devices.
 *
 * Mirror of i7core_create_sysfs_devices(): the chancounts device only
 * exists for unregistered-DIMM sockets. put_device() triggers the
 * release callbacks, which free the allocations.
 */
static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;

	edac_dbg(1, "\n");

	if (!pvt->is_registered) {
		device_del(pvt->chancounts_dev);
		put_device(pvt->chancounts_dev);
	}
	device_del(pvt->addrmatch_dev);
	put_device(pvt->addrmatch_dev);
}
1225
1226
1227
1228
1229
1230
1231
1232
1233
/*
 * i7core_put_devices() - drop the pci_dev references for one socket.
 *
 * Releases every non-NULL entry in pdev[]; the array itself is freed by
 * free_i7core_dev().
 */
static void i7core_put_devices(struct i7core_dev *i7core_dev)
{
	int i;

	edac_dbg(0, "\n");
	for (i = 0; i < i7core_dev->n_devs; i++) {
		struct pci_dev *pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;
		edac_dbg(0, "Removing dev %02x:%02x.%d\n",
			 pdev->bus->number,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
		pci_dev_put(pdev);
	}
}
1249
/*
 * i7core_put_all_devices() - release every socket tracker.
 *
 * Uses the _safe iterator because free_i7core_dev() unlinks entries
 * while the list is being walked.
 */
static void i7core_put_all_devices(void)
{
	struct i7core_dev *i7core_dev, *tmp;

	list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
		i7core_put_devices(i7core_dev);
		free_i7core_dev(i7core_dev);
	}
}
1259
/*
 * i7core_xeon_pci_fixup() - force a rescan of hidden Xeon MC buses.
 *
 * On some Xeon 55xx systems the BIOS hides the memory-controller
 * functions; if the first device of a table cannot be found, rescan the
 * top PCI buses (255, 254, ...) where they live, so a later probe can
 * see them. Only called when the use_pci_fixup module parameter is set.
 */
static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
{
	struct pci_dev *pdev = NULL;
	int i;

	while (table && table->descr) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
		if (unlikely(!pdev)) {
			for (i = 0; i < MAX_SOCKET_BUSES; i++)
				pcibios_scan_specific_bus(255-i);
		}
		pci_dev_put(pdev);
		table++;
	}
}
1280
1281 static unsigned i7core_pci_lastbus(void)
1282 {
1283 int last_bus = 0, bus;
1284 struct pci_bus *b = NULL;
1285
1286 while ((b = pci_find_next_bus(b)) != NULL) {
1287 bus = b->number;
1288 edac_dbg(0, "Found bus %d\n", bus);
1289 if (bus > last_bus)
1290 last_bus = bus;
1291 }
1292
1293 edac_dbg(0, "Last bus %d\n", last_bus);
1294
1295 return last_bus;
1296 }
1297
1298
1299
1300
1301
1302
1303
/*
 * i7core_get_onedevice() - look up one PCI function of a socket's MC
 * @prev:	list-scan cursor: last device found for this descriptor; on
 *		return it holds the device just taken (reference held) or
 *		NULL when the scan for this descriptor is exhausted
 * @table:	descriptor table for the whole per-socket device set
 * @devno:	index into @table of the descriptor to search for
 * @last_bus:	highest PCI bus number in the system; the socket number is
 *		derived as last_bus - device bus
 *
 * Returns 0 on success (including "optional device absent" and "end of
 * scan"), or a negative errno. On success the pci_dev reference is stored
 * in the per-socket i7core_dev->pdev[] array.
 */
static int i7core_get_onedevice(struct pci_dev **prev,
				const struct pci_id_table *table,
				const unsigned devno,
				const unsigned last_bus)
{
	struct i7core_dev *i7core_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];

	struct pci_dev *pdev = NULL;
	u8 bus = 0;
	u8 socket = 0;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	/*
	 * The non-core device may appear under an alternate PCI ID; retry
	 * with it when the primary ID was not found. pci_get_device()
	 * consumes the reference on its @from argument, and the first call
	 * above already did so — take an extra reference on *prev so the
	 * retry can consume it again.
	 */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) {
		pci_dev_get(*prev);
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
	}

	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE &&
	    !pdev) {
		pci_dev_get(*prev);
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
				      *prev);
	}

	if (!pdev) {
		/* A non-NULL cursor means a previous pass succeeded: this
		 * is the normal end of a multi-socket scan. */
		if (*prev) {
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		/* The first descriptor failing just means this table does
		 * not match the hardware — the caller tries the next one. */
		if (devno == 0)
			return -ENODEV;

		i7core_printk(KERN_INFO,
			"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
			dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		return -ENODEV;
	}
	bus = pdev->bus->number;

	/* Sockets occupy the highest bus numbers, in reverse order */
	socket = last_bus - bus;

	i7core_dev = get_i7core_dev(socket);
	if (!i7core_dev) {
		i7core_dev = alloc_i7core_dev(socket, table);
		if (!i7core_dev) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
	}

	if (i7core_dev->pdev[devno]) {
		i7core_printk(KERN_ERR,
			"Duplicated device for "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	i7core_dev->pdev[devno] = pdev;

	/* Sanity check: found device must sit at the expected slot.func.
	 * NOTE: on this path the reference stored above is released later
	 * by i7core_put_devices() when the caller unwinds. */
	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
		     PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		i7core_printk(KERN_ERR,
			"Device PCI ID %04x:%04x "
			"has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			bus, dev_descr->dev, dev_descr->func);
		return -ENODEV;
	}

	/* Make the device usable before its config space is relied upon */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		i7core_printk(KERN_ERR,
			"Couldn't enable "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	edac_dbg(0, "Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
		 socket, bus, dev_descr->dev,
		 dev_descr->func,
		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	/*
	 * Take an extra reference: one stays with i7core_dev->pdev[devno]
	 * (dropped by i7core_put_devices()), the other travels in *prev and
	 * is consumed by the next pci_get_device() call of this scan.
	 */
	pci_dev_get(pdev);

	*prev = pdev;

	return 0;
}
1421
/*
 * i7core_get_all_devices() - discover every MC PCI device for all sockets
 *
 * Walks each descriptor table and, for each descriptor, keeps scanning
 * until no further matching device exists (multi-socket systems expose one
 * instance per socket). Returns 0 on success, -ENODEV on failure; on
 * failure all references taken so far are released.
 */
static int i7core_get_all_devices(void)
{
	int i, rc, last_bus;
	struct pci_dev *pdev = NULL;
	const struct pci_id_table *table = pci_dev_table;

	last_bus = i7core_pci_lastbus();

	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			pdev = NULL;
			do {
				rc = i7core_get_onedevice(&pdev, table, i,
							  last_bus);
				if (rc < 0) {
					if (i == 0) {
						/*
						 * Anchor descriptor missing:
						 * this table does not match
						 * the hardware — skip to the
						 * next table instead of
						 * failing outright.
						 */
						i = table->n_devs;
						break;
					}
					i7core_put_all_devices();
					return -ENODEV;
				}
			/* NULL cursor ends the per-descriptor socket scan */
			} while (pdev);
		}
		table++;
	}

	return 0;
}
1451
/*
 * mci_bind_devs() - map the discovered PCI functions into pvt's layout
 * @mci:	memory controller being set up
 * @i7core_dev:	per-socket device bundle filled by i7core_get_all_devices()
 *
 * Slot 3 functions are the MC registers (pvt->pci_mcr[]), slots
 * 4..4+NUM_CHANS-1 are the per-channel functions (pvt->pci_ch[][]), and
 * device 0.0 is the non-core device used to identify the processor family
 * and whether scrub-rate control is supported. Returns 0 on success, or
 * -EINVAL when a device falls outside that layout.
 */
static int mci_bind_devs(struct mem_ctl_info *mci,
			 struct i7core_dev *i7core_dev)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int i, func, slot;
	char *family;

	pvt->is_registered = false;
	pvt->enable_scrub = false;
	for (i = 0; i < i7core_dev->n_devs; i++) {
		pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;

		func = PCI_FUNC(pdev->devfn);
		slot = PCI_SLOT(pdev->devfn);
		if (slot == 3) {
			if (unlikely(func > MAX_MCR_FUNC))
				goto error;
			pvt->pci_mcr[func] = pdev;
		} else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
			if (unlikely(func > MAX_CHAN_FUNC))
				goto error;
			pvt->pci_ch[slot - 4][func] = pdev;
		} else if (!slot && !func) {
			pvt->pci_noncore = pdev;

			/* Identify the CPU family from the non-core PCI id;
			 * only some families allow scrub-rate programming. */
			switch (pdev->device) {
			case PCI_DEVICE_ID_INTEL_I7_NONCORE:
				family = "Xeon 35xx/ i7core";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT:
				family = "i7-800/i5-700";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE:
				family = "Xeon 34xx";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT:
				family = "Xeon 55xx";
				pvt->enable_scrub = true;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2:
				family = "Xeon 56xx / i7-900";
				pvt->enable_scrub = true;
				break;
			default:
				family = "unknown";
				pvt->enable_scrub = false;
			}
			edac_dbg(0, "Detected a processor type %s\n", family);
		} else
			goto error;

		edac_dbg(0, "Associated fn %d.%d, dev = %p, socket %d\n",
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			 pdev, i7core_dev->socket);

		/* Presence of function 3.2 selects the registered-DIMM
		 * (RDIMM) error-counting path — see i7core_check_error() */
		if (PCI_SLOT(pdev->devfn) == 3 &&
		    PCI_FUNC(pdev->devfn) == 2)
			pvt->is_registered = true;
	}

	return 0;

error:
	i7core_printk(KERN_ERR, "Device %d, function %d "
		      "is out of the expected range\n",
		      slot, func);
	return -EINVAL;
}
1527
1528
1529
1530
1531
1532 static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
1533 const int chan,
1534 const int new0,
1535 const int new1,
1536 const int new2)
1537 {
1538 struct i7core_pvt *pvt = mci->pvt_info;
1539 int add0 = 0, add1 = 0, add2 = 0;
1540
1541 if (pvt->ce_count_available) {
1542
1543
1544 add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
1545 add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
1546 add0 = new0 - pvt->rdimm_last_ce_count[chan][0];
1547
1548 if (add2 < 0)
1549 add2 += 0x7fff;
1550 pvt->rdimm_ce_count[chan][2] += add2;
1551
1552 if (add1 < 0)
1553 add1 += 0x7fff;
1554 pvt->rdimm_ce_count[chan][1] += add1;
1555
1556 if (add0 < 0)
1557 add0 += 0x7fff;
1558 pvt->rdimm_ce_count[chan][0] += add0;
1559 } else
1560 pvt->ce_count_available = 1;
1561
1562
1563 pvt->rdimm_last_ce_count[chan][2] = new2;
1564 pvt->rdimm_last_ce_count[chan][1] = new1;
1565 pvt->rdimm_last_ce_count[chan][0] = new0;
1566
1567
1568 if (add0 != 0)
1569 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add0,
1570 0, 0, 0,
1571 chan, 0, -1, "error", "");
1572 if (add1 != 0)
1573 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add1,
1574 0, 0, 0,
1575 chan, 1, -1, "error", "");
1576 if (add2 != 0)
1577 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add2,
1578 0, 0, 0,
1579 chan, 2, -1, "error", "");
1580 }
1581
1582 static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
1583 {
1584 struct i7core_pvt *pvt = mci->pvt_info;
1585 u32 rcv[3][2];
1586 int i, new0, new1, new2;
1587
1588
1589 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
1590 &rcv[0][0]);
1591 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
1592 &rcv[0][1]);
1593 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
1594 &rcv[1][0]);
1595 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
1596 &rcv[1][1]);
1597 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
1598 &rcv[2][0]);
1599 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
1600 &rcv[2][1]);
1601 for (i = 0 ; i < 3; i++) {
1602 edac_dbg(3, "MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
1603 (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
1604
1605 if (pvt->channel[i].dimms > 2) {
1606 new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
1607 new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
1608 new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
1609 } else {
1610 new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
1611 DIMM_BOT_COR_ERR(rcv[i][0]);
1612 new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
1613 DIMM_BOT_COR_ERR(rcv[i][1]);
1614 new2 = 0;
1615 }
1616
1617 i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
1618 }
1619 }
1620
1621
1622
1623
1624
1625
1626
1627 static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
1628 {
1629 struct i7core_pvt *pvt = mci->pvt_info;
1630 u32 rcv1, rcv0;
1631 int new0, new1, new2;
1632
1633 if (!pvt->pci_mcr[4]) {
1634 edac_dbg(0, "MCR registers not found\n");
1635 return;
1636 }
1637
1638
1639 pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
1640 pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
1641
1642
1643 new2 = DIMM2_COR_ERR(rcv1);
1644 new1 = DIMM1_COR_ERR(rcv0);
1645 new0 = DIMM0_COR_ERR(rcv0);
1646
1647
1648 if (pvt->ce_count_available) {
1649
1650 int add0, add1, add2;
1651
1652 add2 = new2 - pvt->udimm_last_ce_count[2];
1653 add1 = new1 - pvt->udimm_last_ce_count[1];
1654 add0 = new0 - pvt->udimm_last_ce_count[0];
1655
1656 if (add2 < 0)
1657 add2 += 0x7fff;
1658 pvt->udimm_ce_count[2] += add2;
1659
1660 if (add1 < 0)
1661 add1 += 0x7fff;
1662 pvt->udimm_ce_count[1] += add1;
1663
1664 if (add0 < 0)
1665 add0 += 0x7fff;
1666 pvt->udimm_ce_count[0] += add0;
1667
1668 if (add0 | add1 | add2)
1669 i7core_printk(KERN_ERR, "New Corrected error(s): "
1670 "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
1671 add0, add1, add2);
1672 } else
1673 pvt->ce_count_available = 1;
1674
1675
1676 pvt->udimm_last_ce_count[2] = new2;
1677 pvt->udimm_last_ce_count[1] = new1;
1678 pvt->udimm_last_ce_count[0] = new0;
1679 }
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694 static void i7core_mce_output_error(struct mem_ctl_info *mci,
1695 const struct mce *m)
1696 {
1697 struct i7core_pvt *pvt = mci->pvt_info;
1698 char *optype, *err;
1699 enum hw_event_mc_err_type tp_event;
1700 unsigned long error = m->status & 0x1ff0000l;
1701 bool uncorrected_error = m->mcgstatus & 1ll << 61;
1702 bool ripv = m->mcgstatus & 1;
1703 u32 optypenum = (m->status >> 4) & 0x07;
1704 u32 core_err_cnt = (m->status >> 38) & 0x7fff;
1705 u32 dimm = (m->misc >> 16) & 0x3;
1706 u32 channel = (m->misc >> 18) & 0x3;
1707 u32 syndrome = m->misc >> 32;
1708 u32 errnum = find_first_bit(&error, 32);
1709
1710 if (uncorrected_error) {
1711 core_err_cnt = 1;
1712 if (ripv)
1713 tp_event = HW_EVENT_ERR_UNCORRECTED;
1714 else
1715 tp_event = HW_EVENT_ERR_FATAL;
1716 } else {
1717 tp_event = HW_EVENT_ERR_CORRECTED;
1718 }
1719
1720 switch (optypenum) {
1721 case 0:
1722 optype = "generic undef request";
1723 break;
1724 case 1:
1725 optype = "read error";
1726 break;
1727 case 2:
1728 optype = "write error";
1729 break;
1730 case 3:
1731 optype = "addr/cmd error";
1732 break;
1733 case 4:
1734 optype = "scrubbing error";
1735 break;
1736 default:
1737 optype = "reserved";
1738 break;
1739 }
1740
1741 switch (errnum) {
1742 case 16:
1743 err = "read ECC error";
1744 break;
1745 case 17:
1746 err = "RAS ECC error";
1747 break;
1748 case 18:
1749 err = "write parity error";
1750 break;
1751 case 19:
1752 err = "redundancy loss";
1753 break;
1754 case 20:
1755 err = "reserved";
1756 break;
1757 case 21:
1758 err = "memory range error";
1759 break;
1760 case 22:
1761 err = "RTID out of range";
1762 break;
1763 case 23:
1764 err = "address parity error";
1765 break;
1766 case 24:
1767 err = "byte enable parity error";
1768 break;
1769 default:
1770 err = "unknown";
1771 }
1772
1773
1774
1775
1776
1777
1778 if (uncorrected_error || !pvt->is_registered)
1779 edac_mc_handle_error(tp_event, mci, core_err_cnt,
1780 m->addr >> PAGE_SHIFT,
1781 m->addr & ~PAGE_MASK,
1782 syndrome,
1783 channel, dimm, -1,
1784 err, optype);
1785 }
1786
1787
1788
1789
1790
1791 static void i7core_check_error(struct mem_ctl_info *mci, struct mce *m)
1792 {
1793 struct i7core_pvt *pvt = mci->pvt_info;
1794
1795 i7core_mce_output_error(mci, m);
1796
1797
1798
1799
1800 if (!pvt->is_registered)
1801 i7core_udimm_check_mc_ecc_err(mci);
1802 else
1803 i7core_rdimm_check_mc_ecc_err(mci);
1804 }
1805
1806
1807
1808
1809
1810 static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1811 void *data)
1812 {
1813 struct mce *mce = (struct mce *)data;
1814 struct i7core_dev *i7_dev;
1815 struct mem_ctl_info *mci;
1816
1817 i7_dev = get_i7core_dev(mce->socketid);
1818 if (!i7_dev || (mce->kflags & MCE_HANDLED_CEC))
1819 return NOTIFY_DONE;
1820
1821 mci = i7_dev->mci;
1822
1823
1824
1825
1826
1827 if (((mce->status & 0xffff) >> 7) != 1)
1828 return NOTIFY_DONE;
1829
1830
1831 if (mce->bank != 8)
1832 return NOTIFY_DONE;
1833
1834 i7core_check_error(mci, mce);
1835
1836
1837 mce->kflags |= MCE_HANDLED_EDAC;
1838 return NOTIFY_OK;
1839 }
1840
/* Hook into the x86 MCE decode chain at EDAC priority */
static struct notifier_block i7_mce_dec = {
	.notifier_call	= i7core_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};
1845
/*
 * In-memory view of a DMI "Memory Device" record as consumed by
 * decode_dclk(). NOTE(review): field layout assumed to match the SMBIOS
 * type 17 (Memory Device) structure — confirm against the SMBIOS spec.
 */
struct memdev_dmi_entry {
	u8 type;
	u8 length;		/* record length; gates the optional fields */
	u16 handle;
	u16 phys_mem_array_handle;
	u16 mem_err_info_handle;
	u16 total_width;
	u16 data_width;
	u16 size;		/* decode_dclk() skips records with size == 0 */
	u8 form;
	u8 device_set;
	u8 device_locator;
	u8 bank_locator;
	u8 memory_type;
	u16 type_detail;
	u16 speed;		/* fallback clock when conf_mem_clk_speed absent */
	u8 manufacturer;
	u8 serial_number;
	u8 asset_tag;
	u8 part_number;
	u8 attributes;
	u32 extended_size;
	u16 conf_mem_clk_speed;	/* preferred clock source in decode_dclk() */
} __attribute__((__packed__));
1870
1871
1872
1873
1874
1875
1876
1877 static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
1878 {
1879 int *dclk_freq = _dclk_freq;
1880 u16 dmi_mem_clk_speed;
1881
1882 if (*dclk_freq == -1)
1883 return;
1884
1885 if (dh->type == DMI_ENTRY_MEM_DEVICE) {
1886 struct memdev_dmi_entry *memdev_dmi_entry =
1887 (struct memdev_dmi_entry *)dh;
1888 unsigned long conf_mem_clk_speed_offset =
1889 (unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
1890 (unsigned long)&memdev_dmi_entry->type;
1891 unsigned long speed_offset =
1892 (unsigned long)&memdev_dmi_entry->speed -
1893 (unsigned long)&memdev_dmi_entry->type;
1894
1895
1896 if (memdev_dmi_entry->size == 0)
1897 return;
1898
1899
1900
1901
1902
1903 if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
1904 dmi_mem_clk_speed =
1905 memdev_dmi_entry->conf_mem_clk_speed;
1906 } else if (memdev_dmi_entry->length > speed_offset) {
1907 dmi_mem_clk_speed = memdev_dmi_entry->speed;
1908 } else {
1909 *dclk_freq = -1;
1910 return;
1911 }
1912
1913 if (*dclk_freq == 0) {
1914
1915 if (dmi_mem_clk_speed > 0) {
1916
1917 *dclk_freq = dmi_mem_clk_speed;
1918 } else {
1919
1920 *dclk_freq = -1;
1921 }
1922 } else if (*dclk_freq > 0 &&
1923 *dclk_freq != dmi_mem_clk_speed) {
1924
1925
1926
1927
1928 *dclk_freq = -1;
1929 }
1930 }
1931 }
1932
1933
1934
1935
1936
1937
1938 #define DEFAULT_DCLK_FREQ 800
1939
1940 static int get_dclk_freq(void)
1941 {
1942 int dclk_freq = 0;
1943
1944 dmi_walk(decode_dclk, (void *)&dclk_freq);
1945
1946 if (dclk_freq < 1)
1947 return DEFAULT_DCLK_FREQ;
1948
1949 return dclk_freq;
1950 }
1951
1952
1953
1954
1955
1956
/*
 * set_sdram_scrub_rate() - program the DRAM scrub bandwidth
 * @mci:	memory controller
 * @new_bw:	requested scrub bandwidth; 0 disables scrubbing
 *
 * Returns @new_bw on success, -ENODEV when the MC register function is
 * missing, or -EINVAL when the computed interval does not fit the
 * SCRUBINTERVAL field.
 */
static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u32 dw_scrub;
	u32 dw_ssr;

	/* Scrub control lives in MC register function 2 */
	pdev = pvt->pci_mcr[2];
	if (!pdev)
		return -ENODEV;

	pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);

	if (new_bw == 0) {
		/* Stop scrubbing and clear the interval field */
		dw_scrub &= ~STARTSCRUB;

		/*
		 * write_and_test() is a helper defined elsewhere in this
		 * file. NOTE(review): assumed to verify/retry the config
		 * write (unlike the plain write in the enable path below)
		 * — confirm against its definition.
		 */
		write_and_test(pdev, MC_SCRUB_CONTROL,
			       dw_scrub & ~SCRUBINTERVAL_MASK);

		pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
		dw_ssr &= ~SSR_MODE_MASK;
		dw_ssr |= SSR_MODE_DISABLE;
	} else {
		const int cache_line_size = 64;
		const u32 freq_dclk_mhz = pvt->dclk_freq;
		unsigned long long scrub_interval;

		/*
		 * Convert the requested bandwidth into a scrub interval:
		 * interval = dclk(MHz) * line_size * 1e6 / bandwidth
		 */
		scrub_interval = (unsigned long long)freq_dclk_mhz *
				 cache_line_size * 1000000;
		do_div(scrub_interval, new_bw);

		if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
			return -EINVAL;

		dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;

		/* Start scrubbing with the new interval */
		pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
				       STARTSCRUB | dw_scrub);

		pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
		dw_ssr &= ~SSR_MODE_MASK;
		dw_ssr |= SSR_MODE_ENABLE;
	}

	pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);

	return new_bw;
}
2013
2014
2015
2016
2017
2018
2019 static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
2020 {
2021 struct i7core_pvt *pvt = mci->pvt_info;
2022 struct pci_dev *pdev;
2023 const u32 cache_line_size = 64;
2024 const u32 freq_dclk_mhz = pvt->dclk_freq;
2025 unsigned long long scrub_rate;
2026 u32 scrubval;
2027
2028
2029 pdev = pvt->pci_mcr[2];
2030 if (!pdev)
2031 return -ENODEV;
2032
2033
2034 pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);
2035
2036
2037 scrubval &= SCRUBINTERVAL_MASK;
2038 if (!scrubval)
2039 return 0;
2040
2041
2042 scrub_rate = (unsigned long long)freq_dclk_mhz *
2043 1000000 * cache_line_size;
2044 do_div(scrub_rate, scrubval);
2045 return (int)scrub_rate;
2046 }
2047
2048 static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
2049 {
2050 struct i7core_pvt *pvt = mci->pvt_info;
2051 u32 pci_lock;
2052
2053
2054 pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
2055 pci_lock &= ~0x3;
2056 pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
2057 pci_lock | MC_CFG_UNLOCK);
2058
2059 mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
2060 mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
2061 }
2062
2063 static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
2064 {
2065 struct i7core_pvt *pvt = mci->pvt_info;
2066 u32 pci_lock;
2067
2068
2069 pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
2070 pci_lock &= ~0x3;
2071 pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
2072 pci_lock | MC_CFG_LOCK);
2073 }
2074
2075 static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
2076 {
2077 pvt->i7core_pci = edac_pci_create_generic_ctl(
2078 &pvt->i7core_dev->pdev[0]->dev,
2079 EDAC_MOD_STR);
2080 if (unlikely(!pvt->i7core_pci))
2081 i7core_printk(KERN_WARNING,
2082 "Unable to setup PCI error report via EDAC\n");
2083 }
2084
2085 static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
2086 {
2087 if (likely(pvt->i7core_pci))
2088 edac_pci_release_generic_ctl(pvt->i7core_pci);
2089 else
2090 i7core_printk(KERN_ERR,
2091 "Couldn't find mem_ctl_info for socket %d\n",
2092 pvt->i7core_dev->socket);
2093 pvt->i7core_pci = NULL;
2094 }
2095
/*
 * i7core_unregister_mci() - tear down one socket's EDAC MC instance
 *
 * Reverses i7core_register_mci(): re-locks the scrub registers, releases
 * the EDAC PCI control, removes the sysfs nodes, deregisters the MC and
 * frees it. Teardown order matters: sysfs and EDAC-core removal must
 * happen before the mci structure is freed.
 */
static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
{
	struct mem_ctl_info *mci = i7core_dev->mci;
	struct i7core_pvt *pvt;

	if (unlikely(!mci || !mci->pvt_info)) {
		edac_dbg(0, "MC: dev = %p\n", &i7core_dev->pdev[0]->dev);

		i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	pvt = mci->pvt_info;

	edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);

	/* Only re-lock the scrub registers if we unlocked them at setup */
	if (pvt->enable_scrub)
		disable_sdram_scrub_setting(mci);

	i7core_pci_ctl_release(pvt);

	i7core_delete_sysfs_devices(mci);
	edac_mc_del_mc(mci->pdev);

	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	i7core_dev->mci = NULL;
}
2128
/*
 * i7core_register_mci() - allocate and register the EDAC MC for one socket
 * @i7core_dev:	per-socket PCI device bundle from the discovery phase
 *
 * Allocates the mem_ctl_info (channel x slot layout), binds the PCI
 * functions, probes the DIMM configuration, registers with the EDAC core
 * and creates the driver-specific sysfs nodes. Returns 0 or a negative
 * errno; on failure everything allocated here is released again.
 */
static int i7core_register_mci(struct i7core_dev *i7core_dev)
{
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;
	int rc;
	struct edac_mc_layer layers[2];

	/* Layout: NUM_CHANS channels, each with up to MAX_DIMMS slots */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = NUM_CHANS;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = MAX_DIMMS;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(i7core_dev->socket, ARRAY_SIZE(layers), layers,
			    sizeof(*pvt));
	if (unlikely(!mci))
		return -ENOMEM;

	edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	/* Cross-link the mci and the per-socket device bundle */
	pvt->i7core_dev = i7core_dev;
	i7core_dev->mci = mci;

	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7core_edac.c";

	mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d", i7core_dev->socket);
	if (!mci->ctl_name) {
		rc = -ENOMEM;
		goto fail1;
	}

	mci->dev_name = pci_name(i7core_dev->pdev[0]);
	mci->ctl_page_to_phys = NULL;

	/* Map the PCI functions into pvt and detect the CPU family */
	rc = mci_bind_devs(mci, i7core_dev);
	if (unlikely(rc < 0))
		goto fail0;

	/* Probe the DIMM population (return value intentionally ignored) */
	get_dimm_config(mci);

	mci->pdev = &i7core_dev->pdev[0]->dev;

	/* Unlock scrub-rate control only on families that support it */
	if (pvt->enable_scrub)
		enable_sdram_scrub_setting(mci);

	if (unlikely(edac_mc_add_mc_with_groups(mci, i7core_dev_groups))) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		rc = -EINVAL;
		goto fail0;
	}
	if (i7core_create_sysfs_devices(mci)) {
		edac_dbg(0, "MC: failed to create sysfs nodes\n");
		/* Undo the edac_mc_add_mc_with_groups() just above */
		edac_mc_del_mc(mci->pdev);
		rc = -EINVAL;
		goto fail0;
	}

	/* Default error-injection settings: channel 0, rest "any" (-1) */
	pvt->inject.channel = 0;
	pvt->inject.dimm = -1;
	pvt->inject.rank = -1;
	pvt->inject.bank = -1;
	pvt->inject.page = -1;
	pvt->inject.col = -1;

	/* Best-effort: a missing PCI control only logs a warning */
	i7core_pci_ctl_create(pvt);

	/* DCLK frequency (from DMI) is needed for scrub-rate conversion */
	pvt->dclk_freq = get_dclk_freq();

	return 0;

fail0:
	kfree(mci->ctl_name);

fail1:
	edac_mc_free(mci);
	i7core_dev->mci = NULL;
	return rc;
}
2233
2234
2235
2236
2237
2238
2239
2240
2241
/*
 * i7core_probe() - PCI probe; discovers and registers all sockets at once
 * @pdev:	PCI device that triggered the probe (not used directly:
 *		the full MC device set is enumerated here)
 * @id:		matched PCI id (unused)
 *
 * The first successful probe handles every socket and sets the global
 * "probed" flag; subsequent probe calls bail out with -ENODEV.
 */
static int i7core_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc, count = 0;
	struct i7core_dev *i7core_dev;

	mutex_lock(&i7core_edac_lock);

	/* Single-pass driver: everything is done on the first probe */
	if (unlikely(probed >= 1)) {
		mutex_unlock(&i7core_edac_lock);
		return -ENODEV;
	}
	probed++;

	rc = i7core_get_all_devices();
	if (unlikely(rc < 0))
		goto fail0;

	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		count++;
		rc = i7core_register_mci(i7core_dev);
		if (unlikely(rc < 0))
			goto fail1;
	}

	if (!count) {
		rc = -ENODEV;
		goto fail1;
	}

	i7core_printk(KERN_INFO,
		"Driver loaded, %d memory controller(s) found.\n",
		count);

	mutex_unlock(&i7core_edac_lock);
	return 0;

fail1:
	/* Unregister whatever was registered before the failure */
	list_for_each_entry(i7core_dev, &i7core_edac_list, list)
		i7core_unregister_mci(i7core_dev);

	i7core_put_all_devices();
fail0:
	mutex_unlock(&i7core_edac_lock);
	return rc;
}
2299
2300
2301
2302
2303
/*
 * i7core_remove() - PCI remove; tears down every registered socket
 *
 * Mirrors i7core_probe(): unregisters all MC instances and drops every
 * PCI device reference. The "probed" counter, under the same mutex as the
 * probe path, guards against running the teardown more than once.
 */
static void i7core_remove(struct pci_dev *pdev)
{
	struct i7core_dev *i7core_dev;

	edac_dbg(0, "\n");

	mutex_lock(&i7core_edac_lock);

	if (unlikely(!probed)) {
		mutex_unlock(&i7core_edac_lock);
		return;
	}

	list_for_each_entry(i7core_dev, &i7core_edac_list, list)
		i7core_unregister_mci(i7core_dev);

	/* Release all PCI device references taken during the probe */
	i7core_put_all_devices();

	probed--;

	mutex_unlock(&i7core_edac_lock);
}
2335
2336 MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
2337
2338
2339
2340
2341
/*
 * PCI driver glue. probe/remove operate on the whole socket set at once;
 * i7core_pci_tbl (defined elsewhere in this file) lists the anchor ids.
 */
static struct pci_driver i7core_driver = {
	.name = "i7core_edac",
	.probe = i7core_probe,
	.remove = i7core_remove,
	.id_table = i7core_pci_tbl,
};
2348
2349
2350
2351
2352
2353 static int __init i7core_init(void)
2354 {
2355 int pci_rc;
2356
2357 edac_dbg(2, "\n");
2358
2359
2360 opstate_init();
2361
2362 if (use_pci_fixup)
2363 i7core_xeon_pci_fixup(pci_dev_table);
2364
2365 pci_rc = pci_register_driver(&i7core_driver);
2366
2367 if (pci_rc >= 0) {
2368 mce_register_decode_chain(&i7_mce_dec);
2369 return 0;
2370 }
2371
2372 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2373 pci_rc);
2374
2375 return pci_rc;
2376 }
2377
2378
2379
2380
2381
/*
 * Module exit point: unregister the PCI driver (tearing down all MCs via
 * i7core_remove()), then detach from the MCE decode chain.
 */
static void __exit i7core_exit(void)
{
	edac_dbg(2, "\n");
	pci_unregister_driver(&i7core_driver);
	mce_unregister_decode_chain(&i7_mce_dec);
}
2388
2389 module_init(i7core_init);
2390 module_exit(i7core_exit);
2391
2392 MODULE_LICENSE("GPL");
2393 MODULE_AUTHOR("Mauro Carvalho Chehab");
2394 MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
2395 MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
2396 I7CORE_REVISION);
2397
2398 module_param(edac_op_state, int, 0444);
2399 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");