/*
 * Intel X38 Memory Controller kernel module
 * Copyright (C) 2008 Cluster Computing, Inc.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * This file is based on i3200_edac.c
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>

#include <linux/io-64-nonatomic-lo-hi.h>
#include "edac_module.h"

#define EDAC_MOD_STR        "x38_edac"

#define PCI_DEVICE_ID_INTEL_X38_HB  0x29e0

#define X38_RANKS       8
#define X38_RANKS_PER_CHANNEL   4
#define X38_CHANNELS        2

/* Intel X38 register addresses - device 0 function 0 - DRAM Controller */

#define X38_MCHBAR_LOW  0x48    /* MCH Memory Mapped Register BAR */
#define X38_MCHBAR_HIGH 0x4c
#define X38_MCHBAR_MASK 0xfffffc000ULL  /* bits 35:14 */
#define X38_MMR_WINDOW_SIZE 16384

#define X38_TOM 0xa0    /* Top of Memory (16b)
                 *
                 * 15:10 reserved
                 *  9:0  total populated physical memory
                 */
#define X38_TOM_MASK    0x3ff   /* bits 9:0 */
#define X38_TOM_SHIFT 26    /* 64MiB grain */
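/*
 * Illustrative arithmetic, not taken from the datasheet: TOM counts
 * populated memory in 64 MiB units, so a TOM field of 0x040 corresponds
 * to 0x040 << 26 bytes = 4 GiB of DRAM.  The DRB registers below use the
 * same granularity.
 */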

#define X38_ERRSTS  0xc8    /* Error Status Register (16b)
                 *
                 * 15    reserved
                 * 14    Isochronous TBWRR Run Behind FIFO Full
                 *       (ITCV)
                 * 13    Isochronous TBWRR Run Behind FIFO Put
                 *       (ITSTV)
                 * 12    reserved
                 * 11    MCH Thermal Sensor Event
                 *       for SMI/SCI/SERR (GTSE)
                 * 10    reserved
                 *  9    LOCK to non-DRAM Memory Flag (LCKF)
                 *  8    reserved
                 *  7    DRAM Throttle Flag (DTF)
                 *  6:2  reserved
                 *  1    Multi-bit DRAM ECC Error Flag (DMERR)
                 *  0    Single-bit DRAM ECC Error Flag (DSERR)
                 */
#define X38_ERRSTS_UE       0x0002
#define X38_ERRSTS_CE       0x0001
#define X38_ERRSTS_BITS (X38_ERRSTS_UE | X38_ERRSTS_CE)


/* Intel MMIO register space - device 0 function 0 - MMR space */

#define X38_C0DRB   0x200   /* Channel 0 DRAM Rank Boundary (16b x 4)
                 *
                 * 15:10 reserved
                 *  9:0  Channel 0 DRAM Rank Boundary Address
                 */
#define X38_C1DRB   0x600   /* Channel 1 DRAM Rank Boundary (16b x 4) */
#define X38_DRB_MASK    0x3ff   /* bits 9:0 */
#define X38_DRB_SHIFT 26    /* 64MiB grain */

#define X38_C0ECCERRLOG 0x280   /* Channel 0 ECC Error Log (64b)
                 *
                 * 63:48 Error Column Address (ERRCOL)
                 * 47:32 Error Row Address (ERRROW)
                 * 31:29 Error Bank Address (ERRBANK)
                 * 28:27 Error Rank Address (ERRRANK)
                 * 26:24 reserved
                 * 23:16 Error Syndrome (ERRSYND)
                 * 15: 2 reserved
                 *    1  Multiple Bit Error Status (MERRSTS)
                 *    0  Correctable Error Status (CERRSTS)
                 */
#define X38_C1ECCERRLOG 0x680   /* Channel 1 ECC Error Log (64b) */
#define X38_ECCERRLOG_CE    0x1
#define X38_ECCERRLOG_UE    0x2
#define X38_ECCERRLOG_RANK_BITS 0x18000000
#define X38_ECCERRLOG_SYNDROME_BITS 0xff0000

#define X38_CAPID0 0xe0 /* see P.94 of spec for details */

static int x38_channel_num;

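/*
 * Decide between single and dual channel operation by reading the 8th
 * byte of CAPID0 and testing the Dual Channel Disable (DCD) bit.  The
 * bit position checked below (0x20, i.e. bit 5) is what this driver
 * relies on; see the X38 datasheet for the authoritative CAPID0 layout.
 */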
static int how_many_channel(struct pci_dev *pdev)
{
    unsigned char capid0_8b; /* 8th byte of CAPID0 */

    pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
    if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
        edac_dbg(0, "In single channel mode\n");
        x38_channel_num = 1;
    } else {
        edac_dbg(0, "In dual channel mode\n");
        x38_channel_num = 2;
    }

    return x38_channel_num;
}

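/*
 * Helpers for decoding a C[01]ECCERRLOG value.  Worked example with a
 * made-up log of 0x18ab0001: CERRSTS is set, the syndrome (bits 23:16)
 * is 0xab and the rank (bits 28:27) is 3, so on channel 1 the error is
 * attributed to csrow 3 + 4 = 7.
 */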
static unsigned long eccerrlog_syndrome(u64 log)
{
    return (log & X38_ECCERRLOG_SYNDROME_BITS) >> 16;
}

static int eccerrlog_row(int channel, u64 log)
{
    return ((log & X38_ECCERRLOG_RANK_BITS) >> 27) |
        (channel * X38_RANKS_PER_CHANNEL);
}

enum x38_chips {
    X38 = 0,
};

struct x38_dev_info {
    const char *ctl_name;
};

struct x38_error_info {
    u16 errsts;
    u16 errsts2;
    u64 eccerrlog[X38_CHANNELS];
};

static const struct x38_dev_info x38_devs[] = {
    [X38] = {
        .ctl_name = "x38"},
};

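/*
 * mci_pdev holds a reference to the host bridge when x38_init() has to
 * probe it by hand; x38_registered records whether the PCI core did the
 * probing, so x38_exit() knows whether it must also tear the device down
 * itself.
 */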
static struct pci_dev *mci_pdev;
static int x38_registered = 1;


static void x38_clear_error_info(struct mem_ctl_info *mci)
{
    struct pci_dev *pdev;

    pdev = to_pci_dev(mci->pdev);

    /*
     * Clear any error bits.
     * (Yes, we really clear bits by writing 1 to them.)
     */
    pci_write_bits16(pdev, X38_ERRSTS, X38_ERRSTS_BITS,
             X38_ERRSTS_BITS);
}

static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
                 struct x38_error_info *info)
{
    struct pci_dev *pdev;
    void __iomem *window = mci->pvt_info;

    pdev = to_pci_dev(mci->pdev);

    /*
     * This is a mess because there is no atomic way to read all the
     * registers at once, and they can change under us, e.g. a CE can
     * be overwritten by a UE.
     */
    pci_read_config_word(pdev, X38_ERRSTS, &info->errsts);
    if (!(info->errsts & X38_ERRSTS_BITS))
        return;

    info->eccerrlog[0] = lo_hi_readq(window + X38_C0ECCERRLOG);
    if (x38_channel_num == 2)
        info->eccerrlog[1] = lo_hi_readq(window + X38_C1ECCERRLOG);

    pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);

    /*
     * If the error is the same for both reads then the first set
     * of reads is valid.  If there is a change then there is a CE
     * with no info and the second set of reads is valid and
     * should be UE info.
     */
    if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
        info->eccerrlog[0] = lo_hi_readq(window + X38_C0ECCERRLOG);
        if (x38_channel_num == 2)
            info->eccerrlog[1] =
                lo_hi_readq(window + X38_C1ECCERRLOG);
    }

    x38_clear_error_info(mci);
}

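/*
 * Turn a snapshot gathered above into EDAC core events.  The csrow
 * computed by eccerrlog_row() (which already folds the channel into the
 * rank number) is reported as the top layer; the remaining layers are
 * passed as -1, i.e. not specified.
 */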
static void x38_process_error_info(struct mem_ctl_info *mci,
                struct x38_error_info *info)
{
    int channel;
    u64 log;

    if (!(info->errsts & X38_ERRSTS_BITS))
        return;

    if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
        edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
                     -1, -1, -1,
                     "UE overwrote CE", "");
        info->errsts = info->errsts2;
    }

    for (channel = 0; channel < x38_channel_num; channel++) {
        log = info->eccerrlog[channel];
        if (log & X38_ECCERRLOG_UE) {
            edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
                         0, 0, 0,
                         eccerrlog_row(channel, log),
                         -1, -1,
                         "x38 UE", "");
        } else if (log & X38_ECCERRLOG_CE) {
            edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                         0, 0, eccerrlog_syndrome(log),
                         eccerrlog_row(channel, log),
                         -1, -1,
                         "x38 CE", "");
        }
    }
}

static void x38_check(struct mem_ctl_info *mci)
{
    struct x38_error_info info;

    x38_get_and_clear_error_info(mci, &info);
    x38_process_error_info(mci, &info);
}

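/*
 * Map the 16 KiB MCH memory-mapped register window.  MCHBAR is split
 * across two 32-bit config registers; bit 0 of the low dword is written
 * back as 1, which appears to act as the MCHBAR enable bit on this
 * chipset.
 */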
static void __iomem *x38_map_mchbar(struct pci_dev *pdev)
{
    union {
        u64 mchbar;
        struct {
            u32 mchbar_low;
            u32 mchbar_high;
        };
    } u;
    void __iomem *window;

    pci_read_config_dword(pdev, X38_MCHBAR_LOW, &u.mchbar_low);
    pci_write_config_dword(pdev, X38_MCHBAR_LOW, u.mchbar_low | 0x1);
    pci_read_config_dword(pdev, X38_MCHBAR_HIGH, &u.mchbar_high);
    u.mchbar &= X38_MCHBAR_MASK;

    if (u.mchbar != (resource_size_t)u.mchbar) {
        printk(KERN_ERR
            "x38: mmio space beyond accessible range (0x%llx)\n",
            (unsigned long long)u.mchbar);
        return NULL;
    }

    window = ioremap(u.mchbar, X38_MMR_WINDOW_SIZE);
    if (!window)
        printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
            (unsigned long long)u.mchbar);

    return window;
}


static void x38_get_drbs(void __iomem *window,
            u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
{
    int i;

    for (i = 0; i < X38_RANKS_PER_CHANNEL; i++) {
        drbs[0][i] = readw(window + X38_C0DRB + 2*i) & X38_DRB_MASK;
        drbs[1][i] = readw(window + X38_C1DRB + 2*i) & X38_DRB_MASK;
    }
}

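/*
 * In stacked mode the channel 1 address range sits on top of channel 0
 * instead of being interleaved with it, so the last rank boundary of
 * channel 1 coincides with Top of Memory; that equality is the check
 * used here.
 */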
static bool x38_is_stacked(struct pci_dev *pdev,
            u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
{
    u16 tom;

    pci_read_config_word(pdev, X38_TOM, &tom);
    tom &= X38_TOM_MASK;

    return drbs[X38_CHANNELS - 1][X38_RANKS_PER_CHANNEL - 1] == tom;
}

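/*
 * DRB values are cumulative 64 MiB boundaries, so a rank's size is the
 * difference between its DRB and the previous one.  Worked example with
 * made-up values: drbs[0] = { 0x10, 0x20, 0x20, 0x20 } gives rank 1 a
 * span of 0x10 * 64 MiB = 1 GiB, i.e. 0x10 << (26 - 12) = 0x40000 pages
 * with 4 KiB pages.  In stacked mode channel 0's total is subtracted
 * once so the memory below the channel 1 ranks is not counted twice.
 */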
static unsigned long drb_to_nr_pages(
            u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL],
            bool stacked, int channel, int rank)
{
    int n;

    n = drbs[channel][rank];
    if (rank > 0)
        n -= drbs[channel][rank - 1];
    if (stacked && (channel == 1) && drbs[channel][rank] ==
                drbs[channel][X38_RANKS_PER_CHANNEL - 1]) {
        n -= drbs[0][X38_RANKS_PER_CHANNEL - 1];
    }

    n <<= (X38_DRB_SHIFT - PAGE_SHIFT);
    return n;
}

static int x38_probe1(struct pci_dev *pdev, int dev_idx)
{
    int rc;
    int i, j;
    struct mem_ctl_info *mci = NULL;
    struct edac_mc_layer layers[2];
    u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
    bool stacked;
    void __iomem *window;

    edac_dbg(0, "MC:\n");

    window = x38_map_mchbar(pdev);
    if (!window)
        return -ENODEV;

    x38_get_drbs(window, drbs);

    how_many_channel(pdev);

    /* FIXME: unconventional pvt_info usage */
    layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
    layers[0].size = X38_RANKS;
    layers[0].is_virt_csrow = true;
    layers[1].type = EDAC_MC_LAYER_CHANNEL;
    layers[1].size = x38_channel_num;
    layers[1].is_virt_csrow = false;
    mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
    if (!mci)
        return -ENOMEM;

    edac_dbg(3, "MC: init mci\n");

    mci->pdev = &pdev->dev;
    mci->mtype_cap = MEM_FLAG_DDR2;

    mci->edac_ctl_cap = EDAC_FLAG_SECDED;
    mci->edac_cap = EDAC_FLAG_SECDED;

    mci->mod_name = EDAC_MOD_STR;
    mci->ctl_name = x38_devs[dev_idx].ctl_name;
    mci->dev_name = pci_name(pdev);
    mci->edac_check = x38_check;
    mci->ctl_page_to_phys = NULL;
    mci->pvt_info = window;

    stacked = x38_is_stacked(pdev, drbs);

    /*
     * The dram rank boundary (DRB) reg values are boundary addresses
     * for each DRAM rank with a granularity of 64MB.  DRB regs are
     * cumulative; the last one will contain the total memory
     * contained in all ranks.
     */
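    /*
     * csrow i corresponds to channel i / X38_RANKS_PER_CHANNEL and
     * rank i % X38_RANKS_PER_CHANNEL; the pages of a populated rank
     * are split evenly across the per-channel DIMM entries below.
     */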
    for (i = 0; i < mci->nr_csrows; i++) {
        unsigned long nr_pages;
        struct csrow_info *csrow = mci->csrows[i];

        nr_pages = drb_to_nr_pages(drbs, stacked,
            i / X38_RANKS_PER_CHANNEL,
            i % X38_RANKS_PER_CHANNEL);

        if (nr_pages == 0)
            continue;

        for (j = 0; j < x38_channel_num; j++) {
            struct dimm_info *dimm = csrow->channels[j]->dimm;

            dimm->nr_pages = nr_pages / x38_channel_num;
            dimm->grain = nr_pages << PAGE_SHIFT;
            dimm->mtype = MEM_DDR2;
            dimm->dtype = DEV_UNKNOWN;
            dimm->edac_mode = EDAC_UNKNOWN;
        }
    }

    x38_clear_error_info(mci);

    rc = -ENODEV;
    if (edac_mc_add_mc(mci)) {
        edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
        goto fail;
    }

    /* get this far and it's successful */
    edac_dbg(3, "MC: success\n");
    return 0;

fail:
    iounmap(window);
    if (mci)
        edac_mc_free(mci);

    return rc;
}

static int x38_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    int rc;

    edac_dbg(0, "MC:\n");

    if (pci_enable_device(pdev) < 0)
        return -EIO;

    rc = x38_probe1(pdev, ent->driver_data);
    if (!mci_pdev)
        mci_pdev = pci_dev_get(pdev);

    return rc;
}

static void x38_remove_one(struct pci_dev *pdev)
{
    struct mem_ctl_info *mci;

    edac_dbg(0, "\n");

    mci = edac_mc_del_mc(&pdev->dev);
    if (!mci)
        return;

    iounmap(mci->pvt_info);

    edac_mc_free(mci);
}

static const struct pci_device_id x38_pci_tbl[] = {
    {
     PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
     X38},
    {
     0,
     }          /* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, x38_pci_tbl);

static struct pci_driver x38_driver = {
    .name = EDAC_MOD_STR,
    .probe = x38_init_one,
    .remove = x38_remove_one,
    .id_table = x38_pci_tbl,
};

static int __init x38_init(void)
{
    int pci_rc;

    edac_dbg(3, "MC:\n");

    /* Ensure that the OPSTATE is set correctly for POLL or NMI */
    opstate_init();

    pci_rc = pci_register_driver(&x38_driver);
    if (pci_rc < 0)
        goto fail0;

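    /*
     * If the PCI core never called our probe (e.g. the host bridge was
     * already claimed elsewhere), fall back to looking the device up by
     * ID and probing it by hand; clearing x38_registered tells
     * x38_exit() to undo this manual setup as well.
     */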
    if (!mci_pdev) {
        x38_registered = 0;
        mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
                    PCI_DEVICE_ID_INTEL_X38_HB, NULL);
        if (!mci_pdev) {
            edac_dbg(0, "x38 pci_get_device fail\n");
            pci_rc = -ENODEV;
            goto fail1;
        }

        pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
        if (pci_rc < 0) {
            edac_dbg(0, "x38 init fail\n");
            pci_rc = -ENODEV;
            goto fail1;
        }
    }

    return 0;

fail1:
    pci_unregister_driver(&x38_driver);

fail0:
    pci_dev_put(mci_pdev);

    return pci_rc;
}

static void __exit x38_exit(void)
{
    edac_dbg(3, "MC:\n");

    pci_unregister_driver(&x38_driver);
    if (!x38_registered) {
        x38_remove_one(mci_pdev);
        pci_dev_put(mci_pdev);
    }
}

module_init(x38_init);
module_exit(x38_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cluster Computing, Inc. Hitoshi Mitake");
MODULE_DESCRIPTION("MC support for Intel X38 memory hub controllers");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");