0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023 #include <linux/module.h>
0024 #include <linux/init.h>
0025 #include <linux/pci.h>
0026 #include <linux/pci_ids.h>
0027 #include <linux/edac.h>
0028 #include <linux/delay.h>
0029 #include <linux/mmzone.h>
0030 #include <linux/debugfs.h>
0031
0032 #include "edac_module.h"
0033
0034
0035
0036
/* Register addresses (PCI config-space offsets).
 * Register names below follow the Intel 5100 MCH datasheet naming —
 * TODO confirm descriptions against the datasheet. */

/* device 16, function 1 -- memory controller registers */
#define I5100_MC 0x40				/* Memory Control Register */
#define I5100_MC_SCRBEN_MASK (1 << 7)		/* scrub enable */
#define I5100_MC_SCRBDONE_MASK (1 << 4)		/* scrub pass complete */
#define I5100_MS 0x44				/* Memory Status Register */
#define I5100_SPDDATA 0x48			/* Serial Presence Detect Status */
#define I5100_SPDCMD 0x4c			/* Serial Presence Detect Command */
#define I5100_TOLM 0x6c				/* Top of Low Memory */
#define I5100_MIR0 0x80				/* Memory Interleave Range 0 */
#define I5100_MIR1 0x84				/* Memory Interleave Range 1 */
#define I5100_AMIR_0 0x8c			/* Adjusted Mem Interleave Range 0 */
#define I5100_AMIR_1 0x90			/* Adjusted Mem Interleave Range 1 */
#define I5100_FERR_NF_MEM 0xa0			/* MC First Non-Fatal Errors */
#define I5100_FERR_NF_MEM_M16ERR_MASK (1 << 16)
#define I5100_FERR_NF_MEM_M15ERR_MASK (1 << 15)
#define I5100_FERR_NF_MEM_M14ERR_MASK (1 << 14)
#define I5100_FERR_NF_MEM_M12ERR_MASK (1 << 12)
#define I5100_FERR_NF_MEM_M11ERR_MASK (1 << 11)
#define I5100_FERR_NF_MEM_M10ERR_MASK (1 << 10)
#define I5100_FERR_NF_MEM_M6ERR_MASK (1 << 6)
#define I5100_FERR_NF_MEM_M5ERR_MASK (1 << 5)
#define I5100_FERR_NF_MEM_M4ERR_MASK (1 << 4)
#define I5100_FERR_NF_MEM_M1ERR_MASK (1 << 1)
/* every non-fatal memory error condition this driver handles */
#define I5100_FERR_NF_MEM_ANY_MASK \
(I5100_FERR_NF_MEM_M16ERR_MASK | \
I5100_FERR_NF_MEM_M15ERR_MASK | \
I5100_FERR_NF_MEM_M14ERR_MASK | \
I5100_FERR_NF_MEM_M12ERR_MASK | \
I5100_FERR_NF_MEM_M11ERR_MASK | \
I5100_FERR_NF_MEM_M10ERR_MASK | \
I5100_FERR_NF_MEM_M6ERR_MASK | \
I5100_FERR_NF_MEM_M5ERR_MASK | \
I5100_FERR_NF_MEM_M4ERR_MASK | \
I5100_FERR_NF_MEM_M1ERR_MASK)
#define I5100_NERR_NF_MEM 0xa4			/* MC Next Non-Fatal Errors */
#define I5100_EMASK_MEM 0xa8			/* MC Error Mask Register */
#define I5100_MEM0EINJMSK0 0x200		/* Injection Mask0, channel 0 */
#define I5100_MEM1EINJMSK0 0x208		/* Injection Mask0, channel 1 */
#define I5100_MEMXEINJMSK0_EINJEN (1 << 27)	/* error-injection enable */
#define I5100_MEM0EINJMSK1 0x204		/* Injection Mask1, channel 0 */
#define I5100_MEM1EINJMSK1 0x206		/* Injection Mask1, channel 1 */

/* device 19, function 0 -- error-injection device (priv->einj) */
#define I5100_DINJ0 0x9a			/* injection trigger, see i5100_do_inject() */

/* devices 21 and 22, function 0 -- per-channel memory mapper (ch0mm/ch1mm) */
#define I5100_MTR_0 0x154			/* Memory Technology Registers 0-3 */
#define I5100_DMIR 0x15c			/* DIMM Interleave Range */
#define I5100_VALIDLOG 0x18c			/* Valid Log Markers */
#define I5100_NRECMEMA 0x190			/* Non-Recoverable Mem Error Log A */
#define I5100_NRECMEMB 0x194			/* Non-Recoverable Mem Error Log B */
#define I5100_REDMEMA 0x198			/* Recoverable Mem Data Error Log A */
#define I5100_REDMEMB 0x19c			/* Recoverable Mem Data Error Log B */
#define I5100_RECMEMA 0x1a0			/* Recoverable Mem Error Log A */
#define I5100_RECMEMB 0x1a4			/* Recoverable Mem Error Log B */
#define I5100_MTR_4 0x1b0			/* Memory Technology Registers 4,5 */
0092
0093
0094
/* Bit-field accessors for the MC (Memory Control) register. */

/* SCRBEN (bit 7): hardware scrubbing currently enabled. */
static inline u32 i5100_mc_scrben(u32 mc)
{
	return (mc & (1 << 7)) ? 1 : 0;
}

/* ERRDETEN (bit 5): ECC error detection enabled. */
static inline u32 i5100_mc_errdeten(u32 mc)
{
	return (mc & (1 << 5)) ? 1 : 0;
}

/* SCRBDONE (bit 4): a full scrub pass has completed. */
static inline u32 i5100_mc_scrbdone(u32 mc)
{
	return (mc & (1 << 4)) ? 1 : 0;
}

/* SPD status: read-data-valid flag (bit 15). */
static inline u16 i5100_spddata_rdo(u16 a)
{
	return (a & (1 << 15)) ? 1 : 0;
}

/* SPD status: single-bit-error flag (bit 13). */
static inline u16 i5100_spddata_sbe(u16 a)
{
	return (a & (1 << 13)) ? 1 : 0;
}

/* SPD status: interface-busy flag (bit 12). */
static inline u16 i5100_spddata_busy(u16 a)
{
	return (a & (1 << 12)) ? 1 : 0;
}

/* SPD status: data byte returned by the last read (bits 7:0). */
static inline u16 i5100_spddata_data(u16 a)
{
	return a & 0xff;
}
0129
/*
 * Pack an SPD command word: device-type-id (31:28), clock override (27),
 * socket address (26:24), byte address (23:16), write data (15:8) and
 * command bit (0).
 */
static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba,
				      u32 data, u32 cmd)
{
	u32 v;

	v  = (dti & 0xf) << 28;
	v |= (ckovrd & 0x1) << 27;
	v |= (sa & 0x7) << 24;
	v |= (ba & 0xff) << 16;
	v |= (data & 0xff) << 8;
	v |= cmd & 0x1;

	return v;
}

/* TOLM register: top-of-low-memory field (bits 15:12). */
static inline u16 i5100_tolm_tolm(u16 a)
{
	return (a >> 12) & 0xf;
}

/* MIR register: interleave-range limit field (bits 15:4). */
static inline u16 i5100_mir_limit(u16 a)
{
	return (a >> 4) & 0xfff;
}

/* MIR register: way-1 select (bit 1). */
static inline u16 i5100_mir_way1(u16 a)
{
	return (a & 0x2) ? 1 : 0;
}

/* MIR register: way-0 select (bit 0). */
static inline u16 i5100_mir_way0(u16 a)
{
	return a & 0x1;
}
0160
0161 static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a)
0162 {
0163 return a >> 28 & 1;
0164 }
0165
0166 static inline u32 i5100_ferr_nf_mem_any(u32 a)
0167 {
0168 return a & I5100_FERR_NF_MEM_ANY_MASK;
0169 }
0170
0171 static inline u32 i5100_nerr_nf_mem_any(u32 a)
0172 {
0173 return i5100_ferr_nf_mem_any(a);
0174 }
0175
/* DMIR register: interleave-range limit (bits 26:16). */
static inline u32 i5100_dmir_limit(u32 a)
{
	return (a >> 16) & 0x7ff;
}

/* DMIR register: rank select i (2 bits each, packed every 4 bits). */
static inline u32 i5100_dmir_rank(u32 a, u32 i)
{
	return (a >> (4 * i)) & 0x3;
}

/* MTR: rank-populated flag (bit 10). */
static inline u16 i5100_mtr_present(u16 a)
{
	return (a >> 10) & 1;
}

/* MTR: extended-throttle flag (bit 9). */
static inline u16 i5100_mtr_ethrottle(u16 a)
{
	return (a >> 9) & 1;
}

/* MTR: device-width select (bit 8); decoded as 4 or 8 by the caller. */
static inline u16 i5100_mtr_width(u16 a)
{
	return (a >> 8) & 1;
}

/* MTR: bank-address-bits select (bit 6); caller adds a bias of 2. */
static inline u16 i5100_mtr_numbank(u16 a)
{
	return (a >> 6) & 1;
}

/* MTR: row-address-bits field (bits 3:2); caller adds a bias of 13. */
static inline u16 i5100_mtr_numrow(u16 a)
{
	return (a >> 2) & 0x3;
}

/* MTR: column-address-bits field (bits 1:0); caller adds a bias of 10. */
static inline u16 i5100_mtr_numcol(u16 a)
{
	return a & 0x3;
}
0215
0216
/* VALIDLOG: recoverable-data-error log valid (bit 2). */
static inline u32 i5100_validlog_redmemvalid(u32 a)
{
	return (a >> 2) & 1;
}

/* VALIDLOG: recoverable-error log valid (bit 1). */
static inline u32 i5100_validlog_recmemvalid(u32 a)
{
	return (a >> 1) & 1;
}

/* VALIDLOG: non-recoverable-error log valid (bit 0). */
static inline u32 i5100_validlog_nrecmemvalid(u32 a)
{
	return a & 1;
}

/* NRECMEMA: memory-error code (bits 19:15). */
static inline u32 i5100_nrecmema_merr(u32 a)
{
	return (a >> 15) & 0x1f;
}

/* NRECMEMA: bank (bits 14:12). */
static inline u32 i5100_nrecmema_bank(u32 a)
{
	return (a >> 12) & 0x7;
}

/* NRECMEMA: rank (bits 10:8). */
static inline u32 i5100_nrecmema_rank(u32 a)
{
	return (a >> 8) & 0x7;
}

/* NRECMEMB: column address (bits 28:16). */
static inline u32 i5100_nrecmemb_cas(u32 a)
{
	return (a >> 16) & 0x1fff;
}

/* NRECMEMB: row address (bits 15:0). */
static inline u32 i5100_nrecmemb_ras(u32 a)
{
	return a & 0xffff;
}

/* The RECMEM registers share the NRECMEM field layout. */
static inline u32 i5100_recmema_merr(u32 a)
{
	return i5100_nrecmema_merr(a);
}

static inline u32 i5100_recmema_bank(u32 a)
{
	return i5100_nrecmema_bank(a);
}

static inline u32 i5100_recmema_rank(u32 a)
{
	return i5100_nrecmema_rank(a);
}

static inline u32 i5100_recmemb_cas(u32 a)
{
	return i5100_nrecmemb_cas(a);
}

static inline u32 i5100_recmemb_ras(u32 a)
{
	return i5100_nrecmemb_ras(a);
}
0281
0282
/* Controller geometry limits (two memory channels behind one MCH). */
#define I5100_MAX_RANKS_PER_CHAN 6
#define I5100_CHANNELS 2
#define I5100_MAX_RANKS_PER_DIMM 4
#define I5100_DIMM_ADDR_LINES (6 - 3)	/* 64-bit bus = 2^(6-3) = 8 bytes per access */
#define I5100_MAX_DIMM_SLOTS_PER_CHAN 4
#define I5100_MAX_RANK_INTERLEAVE 4
#define I5100_MAX_DMIRS 5
/* period at which the scrub worker re-arms itself (jiffies) */
#define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ)
0291
/* Per-controller private state, stored in mci->pvt_info. */
struct i5100_priv {
	/* ranks on each DIMM -- 0 means not present -- obtained via SPD */
	int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN];

	/*
	 * Mainboard chip-select map: dimm_csmap[slot][rank] is the
	 * controller chip select wired to that DIMM-slot rank, or -1 for
	 * not connected.  Both channels are assumed to use the same map
	 * (see i5100_init_dimm_csmap()).
	 */
	int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM];

	/* memory interleave range registers, decoded (one per channel) */
	struct {
		u64 limit;
		unsigned way[2];
	} mir[I5100_CHANNELS];

	/* adjusted memory interleave range registers (raw values) */
	unsigned amir[I5100_CHANNELS];

	/* DIMM interleave range registers, decoded */
	struct {
		unsigned rank[I5100_MAX_RANK_INTERLEAVE];
		u64 limit;
	} dmir[I5100_CHANNELS][I5100_MAX_DMIRS];

	/* memory technology registers, decoded (see i5100_init_mtr()) */
	struct {
		unsigned present;	/* rank populated */
		unsigned ethrottle;	/* extended throttling enabled */
		unsigned width;		/* device width: 4 or 8 */
		unsigned numbank;	/* bank address bits */
		unsigned numrow;	/* row address bits */
		unsigned numcol;	/* column address bits */
	} mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN];

	u64 tolm;		/* top of low memory, in bytes */
	unsigned ranksperchan;	/* number of ranks per channel (4 or 6) */

	struct pci_dev *mc;	/* device 16 func 1: memory controller */
	struct pci_dev *einj;	/* device 19 func 0: error injection */
	struct pci_dev *ch0mm;	/* device 21 func 0: channel 0 mapper */
	struct pci_dev *ch1mm;	/* device 22 func 0: channel 1 mapper */

	struct delayed_work i5100_scrubbing;	/* scrub re-arm worker */
	int scrub_enable;			/* scrubbing currently requested */

	/* error-injection controls, exposed via debugfs */
	u8 inject_channel;
	u8 inject_hlinesel;
	u8 inject_deviceptr1;
	u8 inject_deviceptr2;
	u16 inject_eccmask1;
	u16 inject_eccmask2;

	struct dentry *debugfs;	/* per-controller debugfs directory */
};
0352
/* module-level debugfs directory; parent of each controller's entry */
static struct dentry *i5100_debugfs;
0354
0355
0356 static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
0357 int chan, int rank)
0358 {
0359 const struct i5100_priv *priv = mci->pvt_info;
0360 int i;
0361
0362 for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
0363 int j;
0364 const int numrank = priv->dimm_numrank[chan][i];
0365
0366 for (j = 0; j < numrank; j++)
0367 if (priv->dimm_csmap[i][j] == rank)
0368 return i * 2 + chan;
0369 }
0370
0371 return -1;
0372 }
0373
/*
 * Translate an error bitmask into a human-readable description.
 * The lowest set bit wins; bits with no documented meaning map to
 * "unknown", and an empty mask yields "none".
 */
static const char *i5100_err_msg(unsigned err)
{
	static const char *merrs[] = {
		"unknown",
		"uncorrectable data ECC on replay",
		"unknown",
		"unknown",
		"aliased uncorrectable demand data ECC",
		"aliased uncorrectable spare-copy data ECC",
		"aliased uncorrectable patrol data ECC",
		"unknown",
		"unknown",
		"unknown",
		"non-aliased uncorrectable demand data ECC",
		"non-aliased uncorrectable spare-copy data ECC",
		"non-aliased uncorrectable patrol data ECC",
		"unknown",
		"correctable demand data ECC",
		"correctable spare-copy data ECC",
		"correctable patrol data ECC",
		"unknown",
		"SPD protocol error",
		"unknown",
		"spare copy initiated",
		"spare copy completed",
	};
	size_t i;

	for (i = 0; i < sizeof(merrs) / sizeof(merrs[0]); i++) {
		if (err & (1u << i))
			return merrs[i];
	}

	return "none";
}
0408
0409
0410 static unsigned int i5100_csrow_to_rank(const struct mem_ctl_info *mci,
0411 unsigned int csrow)
0412 {
0413 const struct i5100_priv *priv = mci->pvt_info;
0414
0415 return csrow % priv->ranksperchan;
0416 }
0417
0418
0419 static unsigned int i5100_csrow_to_chan(const struct mem_ctl_info *mci,
0420 unsigned int csrow)
0421 {
0422 const struct i5100_priv *priv = mci->pvt_info;
0423
0424 return csrow / priv->ranksperchan;
0425 }
0426
0427 static void i5100_handle_ce(struct mem_ctl_info *mci,
0428 int chan,
0429 unsigned bank,
0430 unsigned rank,
0431 unsigned long syndrome,
0432 unsigned cas,
0433 unsigned ras,
0434 const char *msg)
0435 {
0436 char detail[80];
0437
0438
0439 snprintf(detail, sizeof(detail),
0440 "bank %u, cas %u, ras %u\n",
0441 bank, cas, ras);
0442
0443 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
0444 0, 0, syndrome,
0445 chan, rank, -1,
0446 msg, detail);
0447 }
0448
0449 static void i5100_handle_ue(struct mem_ctl_info *mci,
0450 int chan,
0451 unsigned bank,
0452 unsigned rank,
0453 unsigned long syndrome,
0454 unsigned cas,
0455 unsigned ras,
0456 const char *msg)
0457 {
0458 char detail[80];
0459
0460
0461 snprintf(detail, sizeof(detail),
0462 "bank %u, cas %u, ras %u\n",
0463 bank, cas, ras);
0464
0465 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
0466 0, 0, syndrome,
0467 chan, rank, -1,
0468 msg, detail);
0469 }
0470
/*
 * Decode one channel's error-log registers and feed the results to the
 * EDAC core.  @ferr/@nerr are the already-latched FERR_NF_MEM /
 * NERR_NF_MEM values.  Writing VALIDLOG back at the end acknowledges
 * the consumed entries (presumably write-1-to-clear -- TODO confirm
 * against the Intel 5100 datasheet).
 */
static void i5100_read_log(struct mem_ctl_info *mci, int chan,
			   u32 ferr, u32 nerr)
{
	struct i5100_priv *priv = mci->pvt_info;
	/* logs live in the per-channel memory-mapper PCI function */
	struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm;
	u32 dw;
	u32 dw2;
	unsigned syndrome = 0;
	unsigned merr;
	unsigned bank;
	unsigned rank;
	unsigned cas;
	unsigned ras;

	pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);

	/* recoverable data-error log: only the syndrome (REDMEMA) is used;
	 * REDMEMB is read but its contents are not decoded here */
	if (i5100_validlog_redmemvalid(dw)) {
		pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
		syndrome = dw2;
		pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
	}

	/* recoverable error log -> reported as a corrected error (CE) */
	if (i5100_validlog_recmemvalid(dw)) {
		const char *msg;

		pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
		merr = i5100_recmema_merr(dw2);
		bank = i5100_recmema_bank(dw2);
		rank = i5100_recmema_rank(dw2);

		pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
		cas = i5100_recmemb_cas(dw2);
		ras = i5100_recmemb_ras(dw2);

		/* pick the description from FERR when the log's own error
		 * code is clear, otherwise from NERR (matches original
		 * logic; NOTE(review): exact merr semantics unverified) */
		if (!merr)
			msg = i5100_err_msg(ferr);
		else
			msg = i5100_err_msg(nerr);

		i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
	}

	/* non-recoverable error log -> reported as uncorrected (UE) */
	if (i5100_validlog_nrecmemvalid(dw)) {
		const char *msg;

		pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
		merr = i5100_nrecmema_merr(dw2);
		bank = i5100_nrecmema_bank(dw2);
		rank = i5100_nrecmema_rank(dw2);

		pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
		cas = i5100_nrecmemb_cas(dw2);
		ras = i5100_nrecmemb_ras(dw2);

		/* same FERR/NERR selection as the CE path above */
		if (!merr)
			msg = i5100_err_msg(ferr);
		else
			msg = i5100_err_msg(nerr);

		i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
	}

	/* acknowledge the log entries we just consumed */
	pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
}
0539
/*
 * EDAC poll callback: check the global non-fatal error state, decode
 * any pending logs for the faulting channel, and clear what we saw.
 */
static void i5100_check_error(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	u32 dw, dw2;

	pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
	if (i5100_ferr_nf_mem_any(dw)) {

		pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);

		/* decode the per-channel logs for the channel that faulted */
		i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw),
			       i5100_ferr_nf_mem_any(dw),
			       i5100_nerr_nf_mem_any(dw2));

		/* write back to clear the "next error" state */
		pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM, dw2);
	}
	/* write back to clear the "first error" state */
	pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);
}
0558
0559
0560
0561
0562
0563
0564
0565 static void i5100_refresh_scrubbing(struct work_struct *work)
0566 {
0567 struct delayed_work *i5100_scrubbing = to_delayed_work(work);
0568 struct i5100_priv *priv = container_of(i5100_scrubbing,
0569 struct i5100_priv,
0570 i5100_scrubbing);
0571 u32 dw;
0572
0573 pci_read_config_dword(priv->mc, I5100_MC, &dw);
0574
0575 if (priv->scrub_enable) {
0576
0577 pci_read_config_dword(priv->mc, I5100_MC, &dw);
0578
0579 if (i5100_mc_scrbdone(dw)) {
0580 dw |= I5100_MC_SCRBEN_MASK;
0581 pci_write_config_dword(priv->mc, I5100_MC, dw);
0582 pci_read_config_dword(priv->mc, I5100_MC, &dw);
0583 }
0584
0585 schedule_delayed_work(&(priv->i5100_scrubbing),
0586 I5100_SCRUB_REFRESH_RATE);
0587 }
0588 }
0589
0590
0591
/*
 * EDAC scrub-rate setter.  The i5100 scrubs at a fixed rate when
 * enabled, so any non-zero @bandwidth simply turns scrubbing on and
 * zero turns it off.  Returns the bandwidth actually in effect.
 */
static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
{
	struct i5100_priv *priv = mci->pvt_info;
	u32 dw;

	pci_read_config_dword(priv->mc, I5100_MC, &dw);
	if (bandwidth) {
		priv->scrub_enable = 1;
		dw |= I5100_MC_SCRBEN_MASK;
		/* start the worker that re-arms scrubbing after each pass */
		schedule_delayed_work(&(priv->i5100_scrubbing),
				      I5100_SCRUB_REFRESH_RATE);
	} else {
		priv->scrub_enable = 0;
		dw &= ~I5100_MC_SCRBEN_MASK;
		cancel_delayed_work(&(priv->i5100_scrubbing));
	}
	pci_write_config_dword(priv->mc, I5100_MC, dw);

	/* re-read to report what the hardware actually accepted */
	pci_read_config_dword(priv->mc, I5100_MC, &dw);

	/* fixed hardware scrub rate when enabled; value taken from the
	 * original code -- presumably bytes/s, TODO confirm units */
	bandwidth = 5900000 * i5100_mc_scrben(dw);

	return bandwidth;
}
0616
0617 static int i5100_get_scrub_rate(struct mem_ctl_info *mci)
0618 {
0619 struct i5100_priv *priv = mci->pvt_info;
0620 u32 dw;
0621
0622 pci_read_config_dword(priv->mc, I5100_MC, &dw);
0623
0624 return 5900000 * i5100_mc_scrben(dw);
0625 }
0626
0627 static struct pci_dev *pci_get_device_func(unsigned vendor,
0628 unsigned device,
0629 unsigned func)
0630 {
0631 struct pci_dev *ret = NULL;
0632
0633 while (1) {
0634 ret = pci_get_device(vendor, device, ret);
0635
0636 if (!ret)
0637 break;
0638
0639 if (PCI_FUNC(ret->devfn) == func)
0640 break;
0641 }
0642
0643 return ret;
0644 }
0645
0646 static unsigned long i5100_npages(struct mem_ctl_info *mci, unsigned int csrow)
0647 {
0648 struct i5100_priv *priv = mci->pvt_info;
0649 const unsigned int chan_rank = i5100_csrow_to_rank(mci, csrow);
0650 const unsigned int chan = i5100_csrow_to_chan(mci, csrow);
0651 unsigned addr_lines;
0652
0653
0654 if (!priv->mtr[chan][chan_rank].present)
0655 return 0ULL;
0656
0657 addr_lines =
0658 I5100_DIMM_ADDR_LINES +
0659 priv->mtr[chan][chan_rank].numcol +
0660 priv->mtr[chan][chan_rank].numrow +
0661 priv->mtr[chan][chan_rank].numbank;
0662
0663 return (unsigned long)
0664 ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
0665 }
0666
/*
 * Cache and decode the Memory Technology Registers (MTR) for every
 * possible rank on both channels.  Registers for ranks 0-3 start at
 * I5100_MTR_0 and ranks 4-5 at I5100_MTR_4, each 2 bytes apart.
 */
static void i5100_init_mtr(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
	int i;

	for (i = 0; i < I5100_CHANNELS; i++) {
		int j;
		struct pci_dev *pdev = mms[i];

		for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
			const unsigned addr =
				(j < 4) ? I5100_MTR_0 + j * 2 :
				I5100_MTR_4 + (j - 4) * 2;
			u16 w;

			pci_read_config_word(pdev, addr, &w);

			priv->mtr[i][j].present = i5100_mtr_present(w);
			priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w);
			/* raw fields are biased: width 4/8 bits per device,
			 * 2-3 bank bits, 13-16 row bits, 10-13 column bits */
			priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w);
			priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w);
			priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w);
			priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w);
		}
	}
}
0694
0695
0696
0697
0698
/*
 * Read one byte from a DIMM's SPD EEPROM through the MC's SPD engine.
 * Returns 0 on success with *byte filled in, -1 if the interface is
 * busy up front or the transaction failed.
 *
 * NOTE(review): the completion wait below has no upper bound; it
 * relies on the hardware eventually clearing the busy flag.
 */
static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
			       u8 ch, u8 slot, u8 addr, u8 *byte)
{
	struct i5100_priv *priv = mci->pvt_info;
	u16 w;

	pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
	if (i5100_spddata_busy(w))
		return -1;

	/* dti 0xa is presumably the SMBus device-type code for SPD
	 * EEPROMs (0b1010) -- TODO confirm; socket address selects one
	 * of the 8 DIMM slots (4 per channel) */
	pci_write_config_dword(priv->mc, I5100_SPDCMD,
			       i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr,
						   0, 0));

	/* wait for the command to complete */
	udelay(100);
	while (1) {
		pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
		if (!i5100_spddata_busy(w))
			break;
		udelay(100);
	}

	/* fail unless read-data-valid is set and no single-bit error */
	if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w))
		return -1;

	*byte = i5100_spddata_data(w);

	return 0;
}
0729
0730
0731
0732
0733
0734
0735
0736
/*
 * Build the mainboard chip-select map: dimm_csmap[slot][rank] gives the
 * controller chip select for a given DIMM slot/rank, with -1 meaning
 * not connected.  The two layouts below correspond to the 4- and
 * 6-rank-per-channel configurations; they assume a particular board
 * wiring (presumably the reference design -- TODO confirm for other
 * mainboards).
 */
static void i5100_init_dimm_csmap(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	int i;

	/* start with every chip select unmapped */
	for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
		int j;

		for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
			priv->dimm_csmap[i][j] = -1;
	}

	if (priv->ranksperchan == 4) {
		/* 4 ranks per channel: dual-rank DIMMs in slots 0/1 share
		 * chip selects with single-rank DIMMs in slots 2/3 */
		priv->dimm_csmap[0][0] = 0;
		priv->dimm_csmap[0][1] = 3;
		priv->dimm_csmap[1][0] = 1;
		priv->dimm_csmap[1][1] = 2;
		priv->dimm_csmap[2][0] = 2;
		priv->dimm_csmap[3][0] = 3;
	} else {
		/* 6 ranks per channel: three dual-rank slots, sequential */
		priv->dimm_csmap[0][0] = 0;
		priv->dimm_csmap[0][1] = 1;
		priv->dimm_csmap[1][0] = 2;
		priv->dimm_csmap[1][1] = 3;
		priv->dimm_csmap[2][0] = 4;
		priv->dimm_csmap[2][1] = 5;
	}
}
0766
0767 static void i5100_init_dimm_layout(struct pci_dev *pdev,
0768 struct mem_ctl_info *mci)
0769 {
0770 struct i5100_priv *priv = mci->pvt_info;
0771 int i;
0772
0773 for (i = 0; i < I5100_CHANNELS; i++) {
0774 int j;
0775
0776 for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) {
0777 u8 rank;
0778
0779 if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
0780 priv->dimm_numrank[i][j] = 0;
0781 else
0782 priv->dimm_numrank[i][j] = (rank & 3) + 1;
0783 }
0784 }
0785
0786 i5100_init_dimm_csmap(mci);
0787 }
0788
0789 static void i5100_init_interleaving(struct pci_dev *pdev,
0790 struct mem_ctl_info *mci)
0791 {
0792 u16 w;
0793 u32 dw;
0794 struct i5100_priv *priv = mci->pvt_info;
0795 struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
0796 int i;
0797
0798 pci_read_config_word(pdev, I5100_TOLM, &w);
0799 priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024;
0800
0801 pci_read_config_word(pdev, I5100_MIR0, &w);
0802 priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28;
0803 priv->mir[0].way[1] = i5100_mir_way1(w);
0804 priv->mir[0].way[0] = i5100_mir_way0(w);
0805
0806 pci_read_config_word(pdev, I5100_MIR1, &w);
0807 priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28;
0808 priv->mir[1].way[1] = i5100_mir_way1(w);
0809 priv->mir[1].way[0] = i5100_mir_way0(w);
0810
0811 pci_read_config_word(pdev, I5100_AMIR_0, &w);
0812 priv->amir[0] = w;
0813 pci_read_config_word(pdev, I5100_AMIR_1, &w);
0814 priv->amir[1] = w;
0815
0816 for (i = 0; i < I5100_CHANNELS; i++) {
0817 int j;
0818
0819 for (j = 0; j < 5; j++) {
0820 int k;
0821
0822 pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);
0823
0824 priv->dmir[i][j].limit =
0825 (u64) i5100_dmir_limit(dw) << 28;
0826 for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++)
0827 priv->dmir[i][j].rank[k] =
0828 i5100_dmir_rank(dw, k);
0829 }
0830 }
0831
0832 i5100_init_mtr(mci);
0833 }
0834
/*
 * Populate the EDAC dimm_info for every rank that is present: page
 * count, grain, device width (x4/x8), registered DDR2, SECDED, and a
 * "DIMMn" label derived from the board chip-select map.
 */
static void i5100_init_csrows(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	struct dimm_info *dimm;

	mci_for_each_dimm(mci, dimm) {
		const unsigned long npages = i5100_npages(mci, dimm->idx);
		const unsigned int chan = i5100_csrow_to_chan(mci, dimm->idx);
		const unsigned int rank = i5100_csrow_to_rank(mci, dimm->idx);

		/* skip unpopulated ranks */
		if (!npages)
			continue;

		dimm->nr_pages = npages;
		dimm->grain = 32;
		dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
			      DEV_X4 : DEV_X8;
		dimm->mtype = MEM_RDDR2;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "DIMM%u",
			 i5100_rank_to_slot(mci, chan, rank));

		edac_dbg(2, "dimm channel %d, rank %d, size %ld\n",
			 chan, rank, (long)PAGES_TO_MiB(npages));
	}
}
0861
0862
0863
0864
0865
/*
 * Program the error-injection mask registers for the selected channel
 * and arm an injection via the DINJ0 trigger.
 */
static void i5100_do_inject(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	u32 mask0;
	u16 mask1;

	/*
	 * MEMxEINJMSK0 layout as packed below (field meanings inferred
	 * from this packing -- TODO confirm against the 5100 datasheet):
	 *   bits 29:28  hlinesel   (half-cache-line select?)
	 *   bit  27     EINJEN     injection enable
	 *   bits 25:10  eccmask1   ECC check bits to corrupt
	 *   bits  9:5   deviceptr2
	 *   bits  4:0   deviceptr1
	 */
	mask0 = ((priv->inject_hlinesel & 0x3) << 28) |
		I5100_MEMXEINJMSK0_EINJEN |
		((priv->inject_eccmask1 & 0xffff) << 10) |
		((priv->inject_deviceptr2 & 0x1f) << 5) |
		(priv->inject_deviceptr1 & 0x1f);

	/* MEMxEINJMSK1 carries the second ECC mask verbatim */
	mask1 = priv->inject_eccmask2;

	if (priv->inject_channel == 0) {
		pci_write_config_dword(priv->mc, I5100_MEM0EINJMSK0, mask0);
		pci_write_config_word(priv->mc, I5100_MEM0EINJMSK1, mask1);
	} else {
		pci_write_config_dword(priv->mc, I5100_MEM1EINJMSK0, mask0);
		pci_write_config_word(priv->mc, I5100_MEM1EINJMSK1, mask1);
	}

	/*
	 * Arm the injection by writing to DINJ0 on the injection device.
	 * NOTE(review): the 0xaa/0xab byte pair is reproduced from the
	 * original code; its exact semantics are undocumented here --
	 * TODO confirm against the datasheet.
	 */
	pci_write_config_byte(priv->einj, I5100_DINJ0, 0xaa);
	pci_write_config_byte(priv->einj, I5100_DINJ0, 0xab);
}
0925
/* Recover the mem_ctl_info from its embedded struct device. */
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

/* debugfs "inject_enable" write handler: any write fires an injection. */
static ssize_t inject_enable_write(struct file *file, const char __user *data,
				   size_t count, loff_t *ppos)
{
	struct device *dev = file->private_data;
	struct mem_ctl_info *mci = to_mci(dev);

	i5100_do_inject(mci);

	/* the written payload is ignored; consume it all */
	return count;
}
0937
/* File operations for the debugfs injection trigger. */
static const struct file_operations i5100_inject_enable_fops = {
	.open = simple_open,
	.write = inject_enable_write,
	.llseek = generic_file_llseek,
};
0943
/*
 * Create this controller's debugfs directory with the error-injection
 * knobs.  Returns 0 on success, -ENODEV when the module-level debugfs
 * directory was never created, -ENOMEM when the per-MC directory
 * cannot be made.  Failure is tolerated by the caller (probe).
 */
static int i5100_setup_debugfs(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;

	if (!i5100_debugfs)
		return -ENODEV;

	priv->debugfs = edac_debugfs_create_dir_at(mci->bus->name, i5100_debugfs);

	if (!priv->debugfs)
		return -ENOMEM;

	/* raw injection parameters consumed by i5100_do_inject() */
	edac_debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR, priv->debugfs,
			       &priv->inject_channel);
	edac_debugfs_create_x8("inject_hlinesel", S_IRUGO | S_IWUSR, priv->debugfs,
			       &priv->inject_hlinesel);
	edac_debugfs_create_x8("inject_deviceptr1", S_IRUGO | S_IWUSR, priv->debugfs,
			       &priv->inject_deviceptr1);
	edac_debugfs_create_x8("inject_deviceptr2", S_IRUGO | S_IWUSR, priv->debugfs,
			       &priv->inject_deviceptr2);
	edac_debugfs_create_x16("inject_eccmask1", S_IRUGO | S_IWUSR, priv->debugfs,
				&priv->inject_eccmask1);
	edac_debugfs_create_x16("inject_eccmask2", S_IRUGO | S_IWUSR, priv->debugfs,
				&priv->inject_eccmask2);
	/* write-only trigger file */
	edac_debugfs_create_file("inject_enable", S_IWUSR, priv->debugfs,
				 &mci->dev, &i5100_inject_enable_fops);

	return 0;

}
0974
/*
 * Probe one i5100 memory-controller hub.  The MC spans several PCI
 * functions: @pdev is device 16 func 1 (main MC registers); we also
 * grab device 19 func 0 (error injection) and devices 21/22 func 0
 * (the two channel memory mappers).  All acquired resources are
 * released in reverse order through the bail_* labels on failure.
 */
static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct i5100_priv *priv;
	struct pci_dev *ch0mm, *ch1mm, *einj;
	int ret = 0;
	u32 dw;
	int ranksperch;

	/* the id table matches by device only; we want function 1 */
	if (PCI_FUNC(pdev->devfn) != 1)
		return -ENODEV;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		ret = rc;
		goto bail;
	}

	/* ECC detection must already be enabled (by firmware) to be useful */
	pci_read_config_dword(pdev, I5100_MC, &dw);
	if (!i5100_mc_errdeten(dw)) {
		printk(KERN_INFO "i5100_edac: ECC not enabled.\n");
		ret = -ENODEV;
		goto bail_pdev;
	}

	/* ranks per channel: 6 when MS bit 8 is set, else 4 */
	pci_read_config_dword(pdev, I5100_MS, &dw);
	ranksperch = !!(dw & (1 << 8)) * 2 + 4;

	/* unmask the non-fatal memory errors we want to observe */
	pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
	dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
	pci_write_config_dword(pdev, I5100_EMASK_MEM, dw);

	/* device 21, func 0 -- channel 0 memory mapper */
	ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
				    PCI_DEVICE_ID_INTEL_5100_21, 0);
	if (!ch0mm) {
		ret = -ENODEV;
		goto bail_pdev;
	}

	rc = pci_enable_device(ch0mm);
	if (rc < 0) {
		ret = rc;
		goto bail_ch0;
	}

	/* device 22, func 0 -- channel 1 memory mapper */
	ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
				    PCI_DEVICE_ID_INTEL_5100_22, 0);
	if (!ch1mm) {
		ret = -ENODEV;
		goto bail_disable_ch0;
	}

	rc = pci_enable_device(ch1mm);
	if (rc < 0) {
		ret = rc;
		goto bail_ch1;
	}

	/* two channels, ranksperch slots per channel */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = 2;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = ranksperch;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			    sizeof(*priv));
	if (!mci) {
		ret = -ENOMEM;
		goto bail_disable_ch1;
	}

	/* device 19, func 0 -- error-injection device */
	einj = pci_get_device_func(PCI_VENDOR_ID_INTEL,
				   PCI_DEVICE_ID_INTEL_5100_19, 0);
	if (!einj) {
		ret = -ENODEV;
		goto bail_mc_free;
	}

	rc = pci_enable_device(einj);
	if (rc < 0) {
		ret = rc;
		goto bail_einj;
	}

	mci->pdev = &pdev->dev;

	priv = mci->pvt_info;
	priv->ranksperchan = ranksperch;
	priv->mc = pdev;
	priv->ch0mm = ch0mm;
	priv->ch1mm = ch1mm;
	priv->einj = einj;

	INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);

	/* if scrubbing was left enabled (e.g. by firmware), keep it armed */
	pci_read_config_dword(pdev, I5100_MC, &dw);
	if (i5100_mc_scrben(dw)) {
		priv->scrub_enable = 1;
		schedule_delayed_work(&(priv->i5100_scrubbing),
				      I5100_SCRUB_REFRESH_RATE);
	}

	i5100_init_dimm_layout(pdev, mci);
	i5100_init_interleaving(pdev, mci);

	mci->mtype_cap = MEM_FLAG_FB_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = "i5100_edac.c";
	mci->ctl_name = "i5100";
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	mci->edac_check = i5100_check_error;
	mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
	mci->get_sdram_scrub_rate = i5100_get_scrub_rate;

	priv->inject_channel = 0;
	priv->inject_hlinesel = 0;
	priv->inject_deviceptr1 = 0;
	priv->inject_deviceptr2 = 0;
	priv->inject_eccmask1 = 0;
	priv->inject_eccmask2 = 0;

	i5100_init_csrows(mci);

	/* this driver supports only poll and NMI operation */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_NMI:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_POLL;
		break;
	}

	if (edac_mc_add_mc(mci)) {
		ret = -ENODEV;
		goto bail_scrub;
	}

	/* debugfs failure is non-fatal: injection just won't be available */
	i5100_setup_debugfs(mci);

	return ret;

bail_scrub:
	priv->scrub_enable = 0;
	cancel_delayed_work_sync(&(priv->i5100_scrubbing));
	pci_disable_device(einj);

bail_einj:
	pci_dev_put(einj);

bail_mc_free:
	edac_mc_free(mci);

bail_disable_ch1:
	pci_disable_device(ch1mm);

bail_ch1:
	pci_dev_put(ch1mm);

bail_disable_ch0:
	pci_disable_device(ch0mm);

bail_ch0:
	pci_dev_put(ch0mm);

bail_pdev:
	pci_disable_device(pdev);

bail:
	return ret;
}
1159
/* Tear down in reverse probe order: debugfs, scrub worker, PCI devices. */
static void i5100_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct i5100_priv *priv;

	mci = edac_mc_del_mc(&pdev->dev);

	if (!mci)
		return;

	priv = mci->pvt_info;

	edac_debugfs_remove_recursive(priv->debugfs);

	/* stop the scrub-refresh worker before the devices go away */
	priv->scrub_enable = 0;
	cancel_delayed_work_sync(&(priv->i5100_scrubbing));

	pci_disable_device(pdev);
	pci_disable_device(priv->ch0mm);
	pci_disable_device(priv->ch1mm);
	pci_disable_device(priv->einj);
	/* drop the references taken via pci_get_device_func() in probe */
	pci_dev_put(priv->ch0mm);
	pci_dev_put(priv->ch1mm);
	pci_dev_put(priv->einj);

	edac_mc_free(mci);
}
1187
/* Bind to the i5100 MCH (device 16); the probe routine additionally
 * requires PCI function 1 -- see i5100_init_one(). */
static const struct pci_device_id i5100_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);
1194
/* PCI driver glue for the i5100 EDAC module. */
static struct pci_driver i5100_driver = {
	.name = KBUILD_BASENAME,
	.probe = i5100_init_one,
	.remove = i5100_remove_one,
	.id_table = i5100_pci_tbl,
};
1201
1202 static int __init i5100_init(void)
1203 {
1204 int pci_rc;
1205
1206 i5100_debugfs = edac_debugfs_create_dir_at("i5100_edac", NULL);
1207
1208 pci_rc = pci_register_driver(&i5100_driver);
1209 return (pci_rc < 0) ? pci_rc : 0;
1210 }
1211
/* Module exit: remove the debugfs root and unregister the PCI driver. */
static void __exit i5100_exit(void)
{
	edac_debugfs_remove(i5100_debugfs);

	pci_unregister_driver(&i5100_driver);
}
1218
/* Module registration and metadata. */
module_init(i5100_init);
module_exit(i5100_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR
("Arthur Jones <ajones@riverbed.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");