/*
 * Intel 5400 class Memory Controllers kernel module
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by:
 *	Ben Woodard <woodard@redhat.com>
 *	Mauro Carvalho Chehab
 *
 * Red Hat Inc. (https://www.redhat.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>

#include "edac_module.h"

/* Module name and version string */
#define I5400_REVISION		" Ver: 1.0.0"

#define EDAC_MOD_STR		"i5400_edac"

#define i5400_printk(level, fmt, arg...) \
	edac_printk(level, "i5400", fmt, ##arg)

#define i5400_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)

/* Limits for the i5400 memory controller */
#define MAX_BRANCHES		2
#define CHANNELS_PER_BRANCH	2
#define DIMMS_PER_CHANNEL	4
#define MAX_CHANNELS		(MAX_BRANCHES * CHANNELS_PER_BRANCH)
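
/*
 * PCI configuration-space register offsets used by this driver.
 * The first three live on the "system address" device (function 0 of the
 * 5400 error device, pvt->system_address); everything from TOLM down lives
 * on the branch-map, control and errors device (function 1,
 * pvt->branchmap_werrors).  See i5400_get_mc_regs() and
 * i5400_get_error_info().
 */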
#define AMBASE			0x48	/* AMB mem-mapped register region base */
#define MAXCH			0x56	/* maximum channel number */
#define MAXDIMMPERCH		0x57	/* maximum DIMMs per channel */

#define TOLM			0x6C	/* top of low memory */
#define REDMEMB			0x7C
#define REC_ECC_LOCATOR_ODD(x)	((x) & 0x3fe00)	/* nonzero: error on odd channel */
#define MIR0			0x80
#define MIR1			0x84
#define AMIR0			0x8c
#define AMIR1			0x90

/* FBD error registers */
#define FERR_FAT_FBD		0x98	/* first fatal error */
#define FERR_FAT_FBDCHAN	(3<<28)	/* channel index, bits 29:28 */

#define NERR_FAT_FBD		0x9c	/* next fatal error */
#define FERR_NF_FBD		0xa0	/* first non-fatal error */

#define NERR_NF_FBD		0xa4	/* next non-fatal error */

/* Error mask register */
#define EMASK_FBD		0xa8

#define ERR0_FBD		0xac
#define ERR1_FBD		0xb0
#define ERR2_FBD		0xb4
#define MCERR_FBD		0xb8
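
/*
 * Per-branch registers, read from the two FBD branch devices
 * (pvt->branch_0 / pvt->branch_1) in i5400_get_mc_regs(): the AMB presence
 * bitmaps and the Memory Technology Registers (MTR) describing each
 * DIMM slot.
 */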
#define AMBPRESENT_0		0x64
#define AMBPRESENT_1		0x66
#define MTR0			0x80
#define MTR1			0x82
#define MTR2			0x84
#define MTR3			0x86

/*
 * Error log registers; NRECMEMA/NRECMEMB and RECMEMA/RECMEMB/REDMEMB are
 * read from pvt->branchmap_werrors in i5400_get_error_info().
 */
#define NRECFGLOG		0x74
#define RECFGLOG		0x78
#define NRECMEMA		0xbe
#define NRECMEMB		0xc0
#define NRECFB_DIMMA		0xc4
#define NRECFB_DIMMB		0xc8
#define NRECFB_DIMMC		0xcc
#define NRECFB_DIMMD		0xd0
#define NRECFB_DIMME		0xd4
#define NRECFB_DIMMF		0xd8
#define REDMEMA			0xdC
#define RECMEMA			0xf0
#define RECMEMB			0xf4
#define RECFB_DIMMA		0xf8
#define RECFB_DIMMB		0xec
#define RECFB_DIMMC		0xf0
#define RECFB_DIMMD		0xf4
#define RECFB_DIMME		0xf8
#define RECFB_DIMMF		0xfC
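
/*
 * Bit definitions for the EMASK_FBD register: one bit per FBD error type
 * M1..M29.  The same numbering is used to build the fatal and non-fatal
 * masks below and to index the error_name[] table.
 */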
enum error_mask {
	EMASK_M1  = 1<<0,
	EMASK_M2  = 1<<1,
	EMASK_M3  = 1<<2,
	EMASK_M4  = 1<<3,
	EMASK_M5  = 1<<4,
	EMASK_M6  = 1<<5,
	EMASK_M7  = 1<<6,
	EMASK_M8  = 1<<7,
	EMASK_M9  = 1<<8,
	EMASK_M10 = 1<<9,
	EMASK_M11 = 1<<10,
	EMASK_M12 = 1<<11,
	EMASK_M13 = 1<<12,
	EMASK_M14 = 1<<13,
	EMASK_M15 = 1<<14,
	EMASK_M16 = 1<<15,
	EMASK_M17 = 1<<16,
	EMASK_M18 = 1<<17,
	EMASK_M19 = 1<<18,
	EMASK_M20 = 1<<19,
	EMASK_M21 = 1<<20,
	EMASK_M22 = 1<<21,
	EMASK_M23 = 1<<22,
	EMASK_M24 = 1<<23,
	EMASK_M25 = 1<<24,
	EMASK_M26 = 1<<25,
	EMASK_M27 = 1<<26,
	EMASK_M28 = 1<<27,
	EMASK_M29 = 1<<28,
};

/*
 * Names used to translate an error bit into something useful.
 * Entries are indexed by error bit number; indices with no name are
 * left unset.
 */
static const char *error_name[] = {
	[0]  = "Memory Write error on non-redundant retry",
	[1]  = "Memory or FB-DIMM configuration CRC read error",
	[3]  = "Uncorrectable Data ECC on Replay",
	[4]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	[6]  = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[7]  = "Aliased Uncorrectable Patrol Data ECC",
	[8]  = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	[10] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[11] = "Non-Aliased Uncorrectable Patrol Data ECC",
	[12] = "Memory Write error on first attempt",
	[13] = "FB-DIMM Configuration Write error on first attempt",
	[14] = "Memory or FB-DIMM configuration CRC read error",
	[15] = "Channel Failed-Over Occurred",
	[16] = "Correctable Non-Mirrored Demand Data ECC",
	[18] = "Correctable Resilver- or Spare-Copy Data ECC",
	[19] = "Correctable Patrol Data ECC",
	[20] = "FB-DIMM Northbound parity error on FB-DIMM Sync Status",
	[21] = "SPD protocol Error",
	[22] = "Non-Redundant Fast Reset Timeout",
	[23] = "Refresh error",
	[24] = "Memory Write error on redundant retry",
	[25] = "Redundant Fast Reset Timeout",
	[26] = "Correctable Counter Threshold Exceeded",
	[27] = "DIMM-Spare Copy Completed",
	[28] = "DIMM-Isolation Completed",
};

/* Fatal errors */
#define ERROR_FAT_MASK		(EMASK_M1 | \
				 EMASK_M2 | \
				 EMASK_M23)

/* Correctable errors */
#define ERROR_NF_CORRECTABLE	(EMASK_M27 | \
				 EMASK_M20 | \
				 EMASK_M19 | \
				 EMASK_M18 | \
				 EMASK_M17 | \
				 EMASK_M16)
#define ERROR_NF_DIMM_SPARE	(EMASK_M29 | \
				 EMASK_M28)
#define ERROR_NF_SPD_PROTOCOL	(EMASK_M22)
#define ERROR_NF_NORTH_CRC	(EMASK_M21)

/* Recoverable errors */
#define ERROR_NF_RECOVERABLE	(EMASK_M26 | \
				 EMASK_M25 | \
				 EMASK_M24 | \
				 EMASK_M15 | \
				 EMASK_M14 | \
				 EMASK_M13 | \
				 EMASK_M12 | \
				 EMASK_M11 | \
				 EMASK_M9  | \
				 EMASK_M8  | \
				 EMASK_M7  | \
				 EMASK_M5)

/* Uncorrectable errors */
#define ERROR_NF_UNCORRECTABLE	(EMASK_M4)

/* Mask of all non-fatal errors */
#define ERROR_NF_MASK		(ERROR_NF_CORRECTABLE   | \
				 ERROR_NF_UNCORRECTABLE | \
				 ERROR_NF_RECOVERABLE   | \
				 ERROR_NF_DIMM_SPARE    | \
				 ERROR_NF_SPD_PROTOCOL  | \
				 ERROR_NF_NORTH_CRC)

/* All error bits; cleared in EMASK_FBD to enable reporting */
#define ENABLE_EMASK_ALL	(ERROR_FAT_MASK | ERROR_NF_MASK)

/* Mask applied to FERR_FAT_FBD */
#define FERR_FAT_MASK		ERROR_FAT_MASK
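
/*
 * The FERR_NF_FBD register does not use the same bit layout as EMASK_FBD.
 * to_nf_mask() converts an EMASK-style mask into the FERR_NF_FBD layout;
 * from_nf_ferr() is applied to a FERR_NF_FBD value before it is tested
 * against the ERROR_NF_* groups above (see
 * i5400_process_nonfatal_error_info()).
 */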
static inline int to_nf_mask(unsigned int mask)
{
	return (mask & EMASK_M29) | (mask >> 3);
}

static inline int from_nf_ferr(unsigned int mask)
{
	return (mask & EMASK_M29) |
	       (mask & ((1 << 28) - 1) << 3);
}

#define FERR_NF_MASK		to_nf_mask(ERROR_NF_MASK)
#define FERR_NF_CORRECTABLE	to_nf_mask(ERROR_NF_CORRECTABLE)
#define FERR_NF_DIMM_SPARE	to_nf_mask(ERROR_NF_DIMM_SPARE)
#define FERR_NF_SPD_PROTOCOL	to_nf_mask(ERROR_NF_SPD_PROTOCOL)
#define FERR_NF_NORTH_CRC	to_nf_mask(ERROR_NF_NORTH_CRC)
#define FERR_NF_RECOVERABLE	to_nf_mask(ERROR_NF_RECOVERABLE)
#define FERR_NF_UNCORRECTABLE	to_nf_mask(ERROR_NF_UNCORRECTABLE)
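
/*
 * Field extraction helpers for the Memory Technology Registers (MTR):
 * DIMM presence, DRAM width, and bank/rank/row/column geometry.
 */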
#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (1 << 10))
#define MTR_DIMMS_ETHROTTLE(mtr)	((mtr) & (1 << 9))
#define MTR_DRAM_WIDTH(mtr)		(((mtr) & (1 << 8)) ? 8 : 4)
#define MTR_DRAM_BANKS(mtr)		(((mtr) & (1 << 6)) ? 8 : 4)
#define MTR_DRAM_BANKS_ADDR_BITS(mtr)	((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
#define MTR_DIMM_RANK(mtr)		(((mtr) >> 5) & 0x1)
#define MTR_DIMM_RANK_ADDR_BITS(mtr)	(MTR_DIMM_RANK(mtr) ? 2 : 1)
#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)

/* Extract the channel index (bits 29:28) from a FERR_*_FBD value */
static inline int extract_fbdchan_indx(u32 x)
{
	return (x>>28) & 0x3;
}

/* Table of controllers supported by this driver */
struct i5400_dev_info {
	const char *ctl_name;		/* controller name */
	u16 fsb_mapping_errors;		/* PCI device id of the error device */
};

static const struct i5400_dev_info i5400_devs[] = {
	{
		.ctl_name = "I5400",
		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_5400_ERR,
	},
};

struct i5400_dimm_info {
	int megabytes;			/* size, 0 means not present */
};

/* Driver private data */
struct i5400_pvt {
	struct pci_dev *system_address;		/* error device, func 0 (probed) */
	struct pci_dev *branchmap_werrors;	/* error device, func 1 */
	struct pci_dev *fsb_error_regs;		/* error device, func 2 */
	struct pci_dev *branch_0;		/* FBD branch 0 */
	struct pci_dev *branch_1;		/* FBD branch 1 */

	u16 tolm;				/* top of low memory */
	union {
		u64 ambase;			/* AMB base address */
		struct {
			u32 ambase_bottom;
			u32 ambase_top;
		} u __packed;
	};

	u16 mir0, mir1;

	u16 b0_mtr[DIMMS_PER_CHANNEL];		/* branch 0 Memory Technology Regs */
	u16 b0_ambpresent0;			/* branch 0, even channel */
	u16 b0_ambpresent1;			/* branch 0, odd channel */

	u16 b1_mtr[DIMMS_PER_CHANNEL];		/* branch 1 Memory Technology Regs */
	u16 b1_ambpresent0;			/* branch 1, even channel */
	u16 b1_ambpresent1;			/* branch 1, odd channel */

	/* DIMM information matrix, sized for the architecture maximums */
	struct i5400_dimm_info dimm_info[DIMMS_PER_CHANNEL][MAX_CHANNELS];

	/* Actual values for this controller */
	int maxch;				/* max channels */
	int maxdimmperch;			/* max DIMMs per channel */
};

/* Error information retrieved from the hardware registers */
struct i5400_error_info {
	/* FBD first/next error registers */
	u32 ferr_fat_fbd;	/* first fatal error */
	u32 nerr_fat_fbd;	/* next fatal error */
	u32 ferr_nf_fbd;	/* first non-fatal error */
	u32 nerr_nf_fbd;	/* next non-fatal error */

	/* Valid only when a non-fatal error was latched */
	u32 redmemb;		/* recoverable ECC locator */
	u16 recmema;		/* recoverable memory error log A */
	u32 recmemb;		/* recoverable memory error log B */

	/* Valid only when a fatal error was latched */
	u16 nrecmema;		/* non-recoverable memory error log A */
	u32 nrecmemb;		/* non-recoverable memory error log B */
};

/*
 * Bit-field accessors for the NRECMEM/RECMEM log registers: bank, rank,
 * buffer id, read/write flag, RAS and CAS of the failing access.
 */
static inline int nrec_bank(struct i5400_error_info *info)
{
	return ((info->nrecmema) >> 12) & 0x7;
}
static inline int nrec_rank(struct i5400_error_info *info)
{
	return ((info->nrecmema) >> 8) & 0xf;
}
static inline int nrec_buf_id(struct i5400_error_info *info)
{
	return ((info->nrecmema)) & 0xff;
}
static inline int nrec_rdwr(struct i5400_error_info *info)
{
	return (info->nrecmemb) >> 31;
}

/* Applies to both the NREC and REC read/write flags */
static inline const char *rdwr_str(int rdwr)
{
	return rdwr ? "Write" : "Read";
}
static inline int nrec_cas(struct i5400_error_info *info)
{
	return ((info->nrecmemb) >> 16) & 0x1fff;
}
static inline int nrec_ras(struct i5400_error_info *info)
{
	return (info->nrecmemb) & 0xffff;
}
static inline int rec_bank(struct i5400_error_info *info)
{
	return ((info->recmema) >> 12) & 0x7;
}
static inline int rec_rank(struct i5400_error_info *info)
{
	return ((info->recmema) >> 8) & 0xf;
}
static inline int rec_rdwr(struct i5400_error_info *info)
{
	return (info->recmemb) >> 31;
}
static inline int rec_cas(struct i5400_error_info *info)
{
	return ((info->recmemb) >> 16) & 0x1fff;
}
static inline int rec_ras(struct i5400_error_info *info)
{
	return (info->recmemb) & 0xffff;
}

/* EDAC PCI control structure, registered in i5400_probe1() */
static struct edac_pci_ctl_info *i5400_pci;
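
/*
 * i5400_get_error_info - fetch (and clear) the error state from the hardware
 *
 * Reads the FERR/NERR fatal and non-fatal registers from the branch-map
 * device, latches the associated log registers when an error is present,
 * and writes the FERR value back to clear it.
 */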
static void i5400_get_error_info(struct mem_ctl_info *mci,
				 struct i5400_error_info *info)
{
	struct i5400_pvt *pvt;
	u32 value;

	pvt = mci->pvt_info;

	/* Read in the first FATAL error register */
	pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);

	/* Mask only the bits this driver handles */
	value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);

	/* If there is a fatal error, collect the associated log registers */
	if (value & FERR_FAT_MASK) {
		info->ferr_fat_fbd = value;

		pci_read_config_dword(pvt->branchmap_werrors,
				      NERR_FAT_FBD, &info->nerr_fat_fbd);
		pci_read_config_word(pvt->branchmap_werrors,
				     NRECMEMA, &info->nrecmema);
		pci_read_config_dword(pvt->branchmap_werrors,
				      NRECMEMB, &info->nrecmemb);

		/* Clear the error bits by writing them back */
		pci_write_config_dword(pvt->branchmap_werrors,
				       FERR_FAT_FBD, value);
	} else {
		info->ferr_fat_fbd = 0;
		info->nerr_fat_fbd = 0;
		info->nrecmema = 0;
		info->nrecmemb = 0;
	}

	/* Read in the first NON-FATAL error register */
	pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);

	/* If there is a non-fatal error, collect the associated log registers */
	if (value & FERR_NF_MASK) {
		info->ferr_nf_fbd = value;

		pci_read_config_dword(pvt->branchmap_werrors,
				      NERR_NF_FBD, &info->nerr_nf_fbd);
		pci_read_config_word(pvt->branchmap_werrors,
				     RECMEMA, &info->recmema);
		pci_read_config_dword(pvt->branchmap_werrors,
				      RECMEMB, &info->recmemb);
		pci_read_config_dword(pvt->branchmap_werrors,
				      REDMEMB, &info->redmemb);

		/* Clear the error bits by writing them back */
		pci_write_config_dword(pvt->branchmap_werrors,
				       FERR_NF_FBD, value);
	} else {
		info->ferr_nf_fbd = 0;
		info->nerr_nf_fbd = 0;
		info->recmema = 0;
		info->recmemb = 0;
		info->redmemb = 0;
	}
}
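
/*
 * i5400_process_non_recoverable_info - report a fatal or non-recoverable error
 *
 * Decodes the branch/channel, bank, rank, buffer id, RAS and CAS from the
 * non-recoverable log registers and forwards the event to the EDAC core
 * via edac_mc_handle_error().
 */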
static void i5400_process_non_recoverable_info(struct mem_ctl_info *mci,
					struct i5400_error_info *info,
					unsigned long allErrors)
{
	char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
	int branch;
	int channel;
	int bank;
	int buf_id;
	int rank;
	int rdwr;
	int ras, cas;
	int errnum;
	char *type = NULL;
	enum hw_event_mc_err_type tp_event = HW_EVENT_ERR_UNCORRECTED;

	if (!allErrors)
		return;			/* no error to report */

	if (allErrors & ERROR_FAT_MASK) {
		type = "FATAL";
		tp_event = HW_EVENT_ERR_FATAL;
	} else if (allErrors & FERR_NF_UNCORRECTABLE)
		type = "NON-FATAL uncorrected";
	else
		type = "NON-FATAL recoverable";

	branch = extract_fbdchan_indx(info->ferr_fat_fbd);
	channel = branch;

	/* Use the non-recoverable log accessors to extract the data */
	bank = nrec_bank(info);
	rank = nrec_rank(info);
	buf_id = nrec_buf_id(info);
	rdwr = nrec_rdwr(info);
	ras = nrec_ras(info);
	cas = nrec_cas(info);

	edac_dbg(0, "\t\t%s DIMM= %d Channels= %d,%d (Branch= %d DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
		 type, rank, channel, channel + 1, branch >> 1, bank,
		 buf_id, rdwr_str(rdwr), ras, cas);

	/* Use the lowest error bit that is set to pick the message */
	errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));

	/* Form the message */
	snprintf(msg, sizeof(msg),
		 "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)",
		 bank, buf_id, ras, cas, allErrors, error_name[errnum]);

	edac_mc_handle_error(tp_event, mci, 1, 0, 0, 0,
			     branch >> 1, -1, rank,
			     rdwr ? "Write error" : "Read error",
			     msg);
}
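
/*
 * i5400_process_nonfatal_error_info - handle the non-fatal error registers
 *
 * Uncorrectable and recoverable errors are passed on to
 * i5400_process_non_recoverable_info(); correctable ECC errors are decoded
 * here (branch, channel, bank, rank, RAS/CAS) and reported as corrected
 * events; anything else is logged as a miscellaneous non-fatal error.
 */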
static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
					      struct i5400_error_info *info)
{
	char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
	unsigned long allErrors;
	int branch;
	int channel;
	int bank;
	int rank;
	int rdwr;
	int ras, cas;
	int errnum;

	/* Mask off the error bits that are possible */
	allErrors = from_nf_ferr(info->ferr_nf_fbd & FERR_NF_MASK);
	if (!allErrors)
		return;			/* no error to report */

	/* Uncorrectable and recoverable errors use the non-recoverable path */
	if (allErrors & (ERROR_NF_UNCORRECTABLE | ERROR_NF_RECOVERABLE)) {
		i5400_process_non_recoverable_info(mci, info, allErrors);
		return;
	}

	/* Correctable errors */
	if (allErrors & ERROR_NF_CORRECTABLE) {
		edac_dbg(0, "\tCorrected bits= 0x%lx\n", allErrors);

		branch = extract_fbdchan_indx(info->ferr_nf_fbd);

		/* REC_ECC_LOCATOR_ODD selects the odd channel of the pair */
		channel = 0;
		if (REC_ECC_LOCATOR_ODD(info->redmemb))
			channel = 1;

		/* Offset by the channel index reported in FERR_NF_FBD */
		channel += branch;

		bank = rec_bank(info);
		rank = rec_rank(info);
		rdwr = rec_rdwr(info);
		ras = rec_ras(info);
		cas = rec_cas(info);

		/* Use the lowest error bit that is set to pick the message */
		errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));

		edac_dbg(0, "\t\tDIMM= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
			 rank, channel, branch >> 1, bank,
			 rdwr_str(rdwr), ras, cas);

		/* Form the message */
		snprintf(msg, sizeof(msg),
			 "Corrected error (Branch=%d DRAM-Bank=%d RDWR=%s "
			 "RAS=%d CAS=%d, CE Err=0x%lx (%s))",
			 branch >> 1, bank, rdwr_str(rdwr), ras, cas,
			 allErrors, error_name[errnum]);

		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
				     branch >> 1, channel % 2, rank,
				     rdwr ? "Write error" : "Read error",
				     msg);

		return;
	}

	/* Anything else is a miscellaneous non-fatal error */
	errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));

	branch = extract_fbdchan_indx(info->ferr_nf_fbd);

	i5400_mc_printk(mci, KERN_EMERG,
			"Non-Fatal misc error (Branch=%d Err=%#lx (%s))",
			branch >> 1, allErrors, error_name[errnum]);
}

/*
 * i5400_process_error_info - dispatch both the fatal and non-fatal paths
 */
static void i5400_process_error_info(struct mem_ctl_info *mci,
				     struct i5400_error_info *info)
{
	u32 allErrors;

	/* First handle any fatal errors that occurred */
	allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK);
	i5400_process_non_recoverable_info(mci, info, allErrors);

	/* Then handle any non-fatal errors that occurred */
	i5400_process_nonfatal_error_info(mci, info);
}

/*
 * i5400_clear_error - clear any pending error by reading (and discarding)
 * the error registers
 */
static void i5400_clear_error(struct mem_ctl_info *mci)
{
	struct i5400_error_info info;

	i5400_get_error_info(mci, &info);
}

/*
 * i5400_check_error - periodic polling entry point (mci->edac_check)
 */
static void i5400_check_error(struct mem_ctl_info *mci)
{
	struct i5400_error_info info;

	i5400_get_error_info(mci, &info);
	i5400_process_error_info(mci, &info);
}

/*
 * i5400_put_devices - release the PCI devices taken by i5400_get_devices()
 */
static void i5400_put_devices(struct mem_ctl_info *mci)
{
	struct i5400_pvt *pvt;

	pvt = mci->pvt_info;

	/* Decrement usage count for devices */
	pci_dev_put(pvt->branch_1);
	pci_dev_put(pvt->branch_0);
	pci_dev_put(pvt->fsb_error_regs);
	pci_dev_put(pvt->branchmap_werrors);
}
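
/*
 * i5400_get_devices - find and reserve the PCI devices this driver needs
 *
 * Locates function 1 (branch map, control, errors) and function 2 (FSB
 * error registers) of the 5400 error device, plus the branch 0 and, when a
 * second branch is configured, branch 1 FBD devices.  Returns 0 on success
 * or -ENODEV, dropping any references already taken.
 */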
static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
{
	struct i5400_pvt *pvt;
	struct pci_dev *pdev;

	pvt = mci->pvt_info;
	pvt->branchmap_werrors = NULL;
	pvt->fsb_error_regs = NULL;
	pvt->branch_0 = NULL;
	pvt->branch_1 = NULL;

	/* Look for function 1 of the error device */
	pdev = NULL;
	while (1) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_5400_ERR, pdev);
		if (!pdev) {
			/* End of list, leave */
			i5400_printk(KERN_ERR,
				"'system address,Process Bus' device not found: vendor 0x%x device 0x%x ERR func 1 (broken BIOS?)\n",
				PCI_VENDOR_ID_INTEL,
				PCI_DEVICE_ID_INTEL_5400_ERR);
			return -ENODEV;
		}

		if (PCI_FUNC(pdev->devfn) == 1)
			break;
	}
	pvt->branchmap_werrors = pdev;

	/* Look for function 2 of the error device */
	pdev = NULL;
	while (1) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_5400_ERR, pdev);
		if (!pdev) {
			/* End of list, leave */
			i5400_printk(KERN_ERR,
				"'system address,Process Bus' device not found: vendor 0x%x device 0x%x ERR func 2 (broken BIOS?)\n",
				PCI_VENDOR_ID_INTEL,
				PCI_DEVICE_ID_INTEL_5400_ERR);

			pci_dev_put(pvt->branchmap_werrors);
			return -ENODEV;
		}

		if (PCI_FUNC(pdev->devfn) == 2)
			break;
	}
	pvt->fsb_error_regs = pdev;

	edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
		 pci_name(pvt->system_address),
		 pvt->system_address->vendor, pvt->system_address->device);
	edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
		 pci_name(pvt->branchmap_werrors),
		 pvt->branchmap_werrors->vendor,
		 pvt->branchmap_werrors->device);
	edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
		 pci_name(pvt->fsb_error_regs),
		 pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);

	pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL,
				       PCI_DEVICE_ID_INTEL_5400_FBD0, NULL);
	if (!pvt->branch_0) {
		i5400_printk(KERN_ERR,
			"MC: 'BRANCH 0' device not found: vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0);

		pci_dev_put(pvt->fsb_error_regs);
		pci_dev_put(pvt->branchmap_werrors);
		return -ENODEV;
	}

	/* With a single branch there is no branch 1 device to get */
	if (pvt->maxch < CHANNELS_PER_BRANCH)
		return 0;

	pvt->branch_1 = pci_get_device(PCI_VENDOR_ID_INTEL,
				       PCI_DEVICE_ID_INTEL_5400_FBD1, NULL);
	if (!pvt->branch_1) {
		i5400_printk(KERN_ERR,
			"MC: 'BRANCH 1' device not found: vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL,
			PCI_DEVICE_ID_INTEL_5400_FBD1);

		pci_dev_put(pvt->branch_0);
		pci_dev_put(pvt->fsb_error_regs);
		pci_dev_put(pvt->branchmap_werrors);
		return -ENODEV;
	}

	return 0;
}
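
/*
 * determine_amb_present_reg - pick the AMBPRESENT bitmap for a channel
 *
 * Channels 0/1 belong to branch 0 and channels 2/3 to branch 1; the odd
 * channel of each pair uses the *_ambpresent1 register.
 */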
static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
{
	int amb_present;

	if (channel < CHANNELS_PER_BRANCH) {
		if (channel & 0x1)
			amb_present = pvt->b0_ambpresent1;
		else
			amb_present = pvt->b0_ambpresent0;
	} else {
		if (channel & 0x1)
			amb_present = pvt->b1_ambpresent1;
		else
			amb_present = pvt->b1_ambpresent0;
	}

	return amb_present;
}

/*
 * determine_mtr - return the MTR value for a (dimm, channel) pair
 */
static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel)
{
	int mtr;
	int n;

	/* Each branch has one MTR per DIMM slot */
	n = dimm;

	if (n >= DIMMS_PER_CHANNEL) {
		edac_dbg(0, "ERROR: trying to access an invalid dimm: %d\n",
			 dimm);
		return 0;
	}

	if (channel < CHANNELS_PER_BRANCH)
		mtr = pvt->b0_mtr[n];
	else
		mtr = pvt->b1_mtr[n];

	return mtr;
}

/* Dump the MTR contents for one DIMM slot at debug level 2 */
static void decode_mtr(int slot_row, u16 mtr)
{
	int ans;

	ans = MTR_DIMMS_PRESENT(mtr);

	edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n",
		 slot_row, mtr, ans ? "" : "NOT ");
	if (!ans)
		return;

	edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));

	edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
		 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");

	edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
	edac_dbg(2, "\t\tNUMRANK: %s\n",
		 MTR_DIMM_RANK(mtr) ? "double" : "single");
	edac_dbg(2, "\t\tNUMROW: %s\n",
		 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
		 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
		 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
		 "65,536 - 16 rows");
	edac_dbg(2, "\t\tNUMCOL: %s\n",
		 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
		 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
		 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
		 "reserved");
}

/*
 * handle_channel - compute the size, in megabytes, of the DIMM in a given
 * (dimm, channel) slot, if one is present
 */
static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
			   struct i5400_dimm_info *dinfo)
{
	int mtr;
	int amb_present_reg;
	int addrBits;

	mtr = determine_mtr(pvt, dimm, channel);
	if (MTR_DIMMS_PRESENT(mtr)) {
		amb_present_reg = determine_amb_present_reg(pvt, channel);

		/* Determine if there is a DIMM present in this slot */
		if (amb_present_reg & (1 << dimm)) {
			/* Start with the number of bank address bits */
			addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
			/* Add the number of row address bits */
			addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
			/* Add the number of column address bits */
			addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
			/* Add one bit for dual-rank DIMMs */
			addrBits += MTR_DIMM_RANK(mtr);

			addrBits += 6;	/* 64 bits (2^6) per address */
			addrBits -= 20;	/* bytes -> megabytes (2^20) */
			addrBits -= 3;	/* bits -> bytes (2^3) */

			dinfo->megabytes = 1 << addrBits;
		}
	}
}
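
/*
 * calculate_dimm_size - fill in pvt->dimm_info[][].megabytes for every
 * (dimm, channel) slot and log a per-branch/per-channel size table at
 * debug level 2.
 */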
static void calculate_dimm_size(struct i5400_pvt *pvt)
{
	struct i5400_dimm_info *dinfo;
	int dimm, max_dimms;
	char *p, *mem_buffer;
	int space, n;
	int channel, branch;

	/* Buffer used to assemble one line of debug output at a time */
	space = PAGE_SIZE;
	mem_buffer = p = kmalloc(space, GFP_KERNEL);
	if (p == NULL) {
		i5400_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
			     __FILE__, __func__);
		return;
	}

	/*
	 * Scan all the DIMMs on this controller, starting at the highest
	 * slot so the table prints with the top slot first.
	 */
	max_dimms = pvt->maxdimmperch;
	for (dimm = max_dimms - 1; dimm >= 0; dimm--) {

		/* On an odd dimm row, output the table 'boundary' marker */
		if (dimm & 0x1) {
			n = snprintf(p, space, "---------------------------"
				     "-------------------------------");
			p += n;
			space -= n;
			edac_dbg(2, "%s\n", mem_buffer);
			p = mem_buffer;
			space = PAGE_SIZE;
		}
		n = snprintf(p, space, "dimm %2d ", dimm);
		p += n;
		space -= n;

		for (channel = 0; channel < pvt->maxch; channel++) {
			dinfo = &pvt->dimm_info[dimm][channel];
			handle_channel(pvt, dimm, channel, dinfo);
			n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
			p += n;
			space -= n;
		}
		edac_dbg(2, "%s\n", mem_buffer);
		p = mem_buffer;
		space = PAGE_SIZE;
	}

	/* Output the bottom 'boundary' marker */
	n = snprintf(p, space, "---------------------------"
		     "-------------------------------");
	p += n;
	space -= n;
	edac_dbg(2, "%s\n", mem_buffer);
	p = mem_buffer;
	space = PAGE_SIZE;

	/* Output the 'channel' labels */
	n = snprintf(p, space, " ");
	p += n;
	space -= n;
	for (channel = 0; channel < pvt->maxch; channel++) {
		n = snprintf(p, space, "channel %d | ", channel);
		p += n;
		space -= n;
	}

	edac_dbg(2, "%s\n", mem_buffer);
	p = mem_buffer;
	space = PAGE_SIZE;

	/* Output the 'branch' labels */
	n = snprintf(p, space, " ");
	p += n;
	for (branch = 0; branch < MAX_BRANCHES; branch++) {
		n = snprintf(p, space, " branch %d | ", branch);
		p += n;
		space -= n;
	}

	/* Output the last line and free the buffer */
	edac_dbg(2, "%s\n", mem_buffer);
	kfree(mem_buffer);
}
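
/*
 * i5400_get_mc_regs - read the relevant memory-controller registers into pvt
 *
 * Reads AMBASE from the system-address device, TOLM and the MIR window
 * registers from the branch-map device, and the MTR and AMBPRESENT
 * registers from each branch device, then computes the per-DIMM sizes.
 */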
static void i5400_get_mc_regs(struct mem_ctl_info *mci)
{
	struct i5400_pvt *pvt;
	u32 actual_tolm;
	u16 limit;
	int slot_row;
	int way0, way1;

	pvt = mci->pvt_info;

	pci_read_config_dword(pvt->system_address, AMBASE,
			      &pvt->u.ambase_bottom);
	pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
			      &pvt->u.ambase_top);

	edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
		 (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);

	/* Get the branch-map registers */
	pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
	pvt->tolm >>= 12;
	edac_dbg(2, "\nTOLM (number of 256M regions) =%u (0x%x)\n",
		 pvt->tolm, pvt->tolm);

	actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
	edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
		 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);

	pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
	pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);

	/* Decode the MIR[0-1] registers */
	limit = (pvt->mir0 >> 4) & 0x0fff;
	way0 = pvt->mir0 & 0x1;
	way1 = pvt->mir0 & 0x2;
	edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n",
		 limit, way1, way0);
	limit = (pvt->mir1 >> 4) & 0xfff;
	way0 = pvt->mir1 & 0x1;
	way1 = pvt->mir1 & 0x2;
	edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n",
		 limit, way1, way0);

	/* Get the set of MTR[0-3] registers for each branch */
	for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
		int where = MTR0 + (slot_row * sizeof(u16));

		/* Branch 0 set of MTR registers */
		pci_read_config_word(pvt->branch_0, where,
				     &pvt->b0_mtr[slot_row]);

		edac_dbg(2, "MTR%d where=0x%x B0 value=0x%x\n",
			 slot_row, where, pvt->b0_mtr[slot_row]);

		if (pvt->maxch < CHANNELS_PER_BRANCH) {
			pvt->b1_mtr[slot_row] = 0;
			continue;
		}

		/* Branch 1 set of MTR registers */
		pci_read_config_word(pvt->branch_1, where,
				     &pvt->b1_mtr[slot_row]);
		edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n",
			 slot_row, where, pvt->b1_mtr[slot_row]);
	}

	/* Dump branch 0's MTRs and read its AMB presence registers */
	edac_dbg(2, "Memory Technology Registers:\n");
	edac_dbg(2, " Branch 0:\n");
	for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
		decode_mtr(slot_row, pvt->b0_mtr[slot_row]);

	pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
			     &pvt->b0_ambpresent0);
	edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
	pci_read_config_word(pvt->branch_0, AMBPRESENT_1,
			     &pvt->b0_ambpresent1);
	edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);

	/* With a single branch there is no branch 1 to read */
	if (pvt->maxch < CHANNELS_PER_BRANCH) {
		pvt->b1_ambpresent0 = 0;
		pvt->b1_ambpresent1 = 0;
	} else {
		/* Dump branch 1's MTRs and read its AMB presence registers */
		edac_dbg(2, " Branch 1:\n");
		for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
			decode_mtr(slot_row, pvt->b1_mtr[slot_row]);

		pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
				     &pvt->b1_ambpresent0);
		edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n",
			 pvt->b1_ambpresent0);
		pci_read_config_word(pvt->branch_1, AMBPRESENT_1,
				     &pvt->b1_ambpresent1);
		edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n",
			 pvt->b1_ambpresent1);
	}

	/* Determine the size of each DIMM and fill the dimm_info matrix */
	calculate_dimm_size(pvt);
}
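
/*
 * i5400_init_dimms - initialize the 'dimms' table of the mci structure
 *
 * Walks every (channel, slot) pair and, for each DIMM the MTR registers
 * report as present, fills in size, device width, memory type and ECC mode.
 * Returns 0 if at least one DIMM was found, nonzero otherwise.
 */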
static int i5400_init_dimms(struct mem_ctl_info *mci)
{
	struct i5400_pvt *pvt;
	struct dimm_info *dimm;
	int ndimms;
	int mtr;
	int size_mb;
	int channel, slot;

	pvt = mci->pvt_info;

	ndimms = 0;

	for (channel = 0; channel < mci->layers[0].size * mci->layers[1].size;
	     channel++) {
		for (slot = 0; slot < mci->layers[2].size; slot++) {
			mtr = determine_mtr(pvt, slot, channel);

			/* If there is no DIMM in this slot, move to the next one */
			if (!MTR_DIMMS_PRESENT(mtr))
				continue;

			dimm = edac_get_dimm(mci, channel / 2, channel % 2,
					     slot);

			size_mb = pvt->dimm_info[slot][channel].megabytes;

			edac_dbg(2, "dimm (branch %d channel %d slot %d): %d.%03d GB\n",
				 channel / 2, channel % 2, slot,
				 size_mb / 1000, size_mb % 1000);

			dimm->nr_pages = size_mb << 8;	/* MB -> 4 KiB pages */
			dimm->grain = 8;
			dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ?
				      DEV_X8 : DEV_X4;
			dimm->mtype = MEM_FB_DDR2;
			/* ECC mode depends on the DRAM device width */
			dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ?
					  EDAC_S8ECD8ED : EDAC_S4ECD4ED;
			ndimms++;
		}
	}

	/* A single-DIMM configuration is reported as plain SECDED */
	if (ndimms == 1)
		mci->dimms[0]->edac_mode = EDAC_SECDED;

	return (ndimms == 0);
}

/*
 * i5400_enable_error_reporting - turn on error signalling in EMASK_FBD
 *
 * Clearing a bit in EMASK_FBD unmasks the corresponding error.
 */
static void i5400_enable_error_reporting(struct mem_ctl_info *mci)
{
	struct i5400_pvt *pvt;
	u32 fbd_error_mask;

	pvt = mci->pvt_info;

	/* Read the FBD error mask register */
	pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
			      &fbd_error_mask);

	/* Enable with a '0' */
	fbd_error_mask &= ~(ENABLE_EMASK_ALL);

	pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
			       fbd_error_mask);
}
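
/*
 * i5400_probe1 - probe one instance of the memory controller
 *
 * Allocates the mci structure with a branch/channel/slot layer layout,
 * acquires the companion PCI devices, reads the controller registers,
 * initializes the DIMM table, enables error reporting and registers the
 * controller with the EDAC core.
 */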
static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
{
	struct mem_ctl_info *mci;
	struct i5400_pvt *pvt;
	struct edac_mc_layer layers[3];

	if (dev_idx >= ARRAY_SIZE(i5400_devs))
		return -EINVAL;

	edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
		 pdev->bus->number,
		 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

	/* We are only looking for func 0 of the set */
	if (PCI_FUNC(pdev->devfn) != 0)
		return -ENODEV;

	/*
	 * Allocate a new MC control structure with branch, channel and
	 * slot layers; the slot layer acts as the virtual csrow.
	 */
	layers[0].type = EDAC_MC_LAYER_BRANCH;
	layers[0].size = MAX_BRANCHES;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = CHANNELS_PER_BRANCH;
	layers[1].is_virt_csrow = false;
	layers[2].type = EDAC_MC_LAYER_SLOT;
	layers[2].size = DIMMS_PER_CHANNEL;
	layers[2].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (mci == NULL)
		return -ENOMEM;

	edac_dbg(0, "MC: mci = %p\n", mci);

	mci->pdev = &pdev->dev;			/* record the generic device */

	pvt = mci->pvt_info;
	pvt->system_address = pdev;		/* the function-0 device we probed */
	pvt->maxch = MAX_CHANNELS;
	pvt->maxdimmperch = DIMMS_PER_CHANNEL;

	/* 'get' the PCI devices we want to reserve for our use */
	if (i5400_get_devices(mci, dev_idx))
		goto fail0;

	/* Retrieve the hardware registers */
	i5400_get_mc_regs(mci);

	mci->mc_idx = 0;
	mci->mtype_cap = MEM_FLAG_FB_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i5400_edac.c";
	mci->ctl_name = i5400_devs[dev_idx].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	/* Set the polling callback */
	mci->edac_check = i5400_check_error;

	/* Initialize the 'dimms' table with mapping and control information */
	if (i5400_init_dimms(mci)) {
		edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5400_init_dimms() returned nonzero value\n");
		mci->edac_cap = EDAC_FLAG_NONE;	/* no DIMMs found */
	} else {
		edac_dbg(1, "MC: Enable error reporting now\n");
		i5400_enable_error_reporting(mci);
	}

	/* Add this new MC control structure to EDAC's list of MCs */
	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		goto fail1;
	}

	i5400_clear_error(mci);

	/* Allocate the generic PCI control info */
	i5400_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!i5400_pci) {
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n",
			__func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	return 0;

	/* Error exit: unwind */
fail1:
	i5400_put_devices(mci);

fail0:
	edac_mc_free(mci);
	return -ENODEV;
}

/*
 * i5400_init_one - constructor for one instance of the device
 */
static int i5400_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	edac_dbg(0, "MC:\n");

	/* Wake up the device */
	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	/* Now probe and enable the device */
	return i5400_probe1(pdev, id->driver_data);
}

/*
 * i5400_remove_one - destructor for one instance of the device
 */
static void i5400_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;

	edac_dbg(0, "\n");

	if (i5400_pci)
		edac_pci_release_generic_ctl(i5400_pci);

	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	/* Release the reserved PCI devices */
	i5400_put_devices(mci);

	pci_disable_device(pdev);

	edac_mc_free(mci);
}

/*
 * pci_device_id table: devices this driver binds to
 */
static const struct pci_device_id i5400_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
	{0,}			/* 0 terminated list */
};

MODULE_DEVICE_TABLE(pci, i5400_pci_tbl);

/*
 * i5400_driver - pci_driver structure for this module
 */
static struct pci_driver i5400_driver = {
	.name = "i5400_edac",
	.probe = i5400_init_one,
	.remove = i5400_remove_one,
	.id_table = i5400_pci_tbl,
};

/*
 * i5400_init - module entry point: register the PCI driver
 */
static int __init i5400_init(void)
{
	int pci_rc;

	edac_dbg(2, "MC:\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&i5400_driver);

	return (pci_rc < 0) ? pci_rc : 0;
}

/*
 * i5400_exit - module exit point: unregister the PCI driver
 */
static void __exit i5400_exit(void)
{
	edac_dbg(2, "MC:\n");
	pci_unregister_driver(&i5400_driver);
}

module_init(i5400_init);
module_exit(i5400_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Woodard <woodard@redhat.com>");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel I5400 memory controllers - "
		   I5400_REVISION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");