Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Driver for Intel client SoC with integrated memory controller using IBECC
0004  *
0005  * Copyright (C) 2020 Intel Corporation
0006  *
0007  * The In-Band ECC (IBECC) IP provides ECC protection to all or specific
0008  * regions of the physical memory space. It's used for memory controllers
0009  * that don't support the out-of-band ECC which often needs an additional
0010  * storage device to each channel for storing ECC data.
0011  */
0012 
0013 #include <linux/module.h>
0014 #include <linux/init.h>
0015 #include <linux/pci.h>
0016 #include <linux/slab.h>
0017 #include <linux/irq_work.h>
0018 #include <linux/llist.h>
0019 #include <linux/genalloc.h>
0020 #include <linux/edac.h>
0021 #include <linux/bits.h>
0022 #include <linux/io.h>
0023 #include <asm/mach_traps.h>
0024 #include <asm/nmi.h>
0025 #include <asm/mce.h>
0026 
0027 #include "edac_mc.h"
0028 #include "edac_module.h"
0029 
0030 #define IGEN6_REVISION  "v2.5"
0031 
0032 #define EDAC_MOD_STR    "igen6_edac"
0033 #define IGEN6_NMI_NAME  "igen6_ibecc"
0034 
0035 /* Debug macros */
0036 #define igen6_printk(level, fmt, arg...)        \
0037     edac_printk(level, "igen6", fmt, ##arg)
0038 
0039 #define igen6_mc_printk(mci, level, fmt, arg...)    \
0040     edac_mc_chipset_printk(mci, level, "igen6", fmt, ##arg)
0041 
0042 #define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
0043 
0044 #define NUM_IMC             2 /* Max memory controllers */
0045 #define NUM_CHANNELS            2 /* Max channels */
0046 #define NUM_DIMMS           2 /* Max DIMMs per channel */
0047 
0048 #define _4GB                BIT_ULL(32)
0049 
0050 /* Size of physical memory */
0051 #define TOM_OFFSET          0xa0
0052 /* Top of low usable DRAM */
0053 #define TOLUD_OFFSET            0xbc
0054 /* Capability register C */
0055 #define CAPID_C_OFFSET          0xec
0056 #define CAPID_C_IBECC           BIT(15)
0057 
0058 /* Capability register E */
0059 #define CAPID_E_OFFSET          0xf0
0060 #define CAPID_E_IBECC           BIT(12)
0061 
0062 /* Error Status */
0063 #define ERRSTS_OFFSET           0xc8
0064 #define ERRSTS_CE           BIT_ULL(6)
0065 #define ERRSTS_UE           BIT_ULL(7)
0066 
0067 /* Error Command */
0068 #define ERRCMD_OFFSET           0xca
0069 #define ERRCMD_CE           BIT_ULL(6)
0070 #define ERRCMD_UE           BIT_ULL(7)
0071 
0072 /* IBECC MMIO base address */
0073 #define IBECC_BASE          (res_cfg->ibecc_base)
0074 #define IBECC_ACTIVATE_OFFSET       IBECC_BASE
0075 #define IBECC_ACTIVATE_EN       BIT(0)
0076 
0077 /* IBECC error log */
0078 #define ECC_ERROR_LOG_OFFSET        (IBECC_BASE + res_cfg->ibecc_error_log_offset)
0079 #define ECC_ERROR_LOG_CE        BIT_ULL(62)
0080 #define ECC_ERROR_LOG_UE        BIT_ULL(63)
0081 #define ECC_ERROR_LOG_ADDR_SHIFT    5
0082 #define ECC_ERROR_LOG_ADDR(v)       GET_BITFIELD(v, 5, 38)
0083 #define ECC_ERROR_LOG_SYND(v)       GET_BITFIELD(v, 46, 61)
0084 
0085 /* Host MMIO base address */
0086 #define MCHBAR_OFFSET           0x48
0087 #define MCHBAR_EN           BIT_ULL(0)
0088 #define MCHBAR_BASE(v)          (GET_BITFIELD(v, 16, 38) << 16)
0089 #define MCHBAR_SIZE         0x10000
0090 
0091 /* Parameters for the channel decode stage */
0092 #define IMC_BASE            (res_cfg->imc_base)
0093 #define MAD_INTER_CHANNEL_OFFSET    IMC_BASE
0094 #define MAD_INTER_CHANNEL_DDR_TYPE(v)   GET_BITFIELD(v, 0, 2)
0095 #define MAD_INTER_CHANNEL_ECHM(v)   GET_BITFIELD(v, 3, 3)
0096 #define MAD_INTER_CHANNEL_CH_L_MAP(v)   GET_BITFIELD(v, 4, 4)
0097 #define MAD_INTER_CHANNEL_CH_S_SIZE(v)  ((u64)GET_BITFIELD(v, 12, 19) << 29)
0098 
0099 /* Parameters for DRAM decode stage */
0100 #define MAD_INTRA_CH0_OFFSET        (IMC_BASE + 4)
0101 #define MAD_INTRA_CH_DIMM_L_MAP(v)  GET_BITFIELD(v, 0, 0)
0102 
0103 /* DIMM characteristics */
0104 #define MAD_DIMM_CH0_OFFSET     (IMC_BASE + 0xc)
0105 #define MAD_DIMM_CH_DIMM_L_SIZE(v)  ((u64)GET_BITFIELD(v, 0, 6) << 29)
0106 #define MAD_DIMM_CH_DLW(v)      GET_BITFIELD(v, 7, 8)
0107 #define MAD_DIMM_CH_DIMM_S_SIZE(v)  ((u64)GET_BITFIELD(v, 16, 22) << 29)
0108 #define MAD_DIMM_CH_DSW(v)      GET_BITFIELD(v, 24, 25)
0109 
0110 /* Hash for memory controller selection */
0111 #define MAD_MC_HASH_OFFSET      (IMC_BASE + 0x1b8)
0112 #define MAC_MC_HASH_LSB(v)      GET_BITFIELD(v, 1, 3)
0113 
0114 /* Hash for channel selection */
0115 #define CHANNEL_HASH_OFFSET     (IMC_BASE + 0x24)
0116 /* Hash for enhanced channel selection */
0117 #define CHANNEL_EHASH_OFFSET        (IMC_BASE + 0x28)
0118 #define CHANNEL_HASH_MASK(v)        (GET_BITFIELD(v, 6, 19) << 6)
0119 #define CHANNEL_HASH_LSB_MASK_BIT(v)    GET_BITFIELD(v, 24, 26)
0120 #define CHANNEL_HASH_MODE(v)        GET_BITFIELD(v, 28, 28)
0121 
0122 /* Parameters for memory slice decode stage */
0123 #define MEM_SLICE_HASH_MASK(v)      (GET_BITFIELD(v, 6, 19) << 6)
0124 #define MEM_SLICE_HASH_LSB_MASK_BIT(v)  GET_BITFIELD(v, 24, 26)
0125 
/*
 * Per-SoC resource/configuration descriptor. One instance is selected
 * via the PCI device table at probe time and published through the
 * file-scope pointer res_cfg, which the register-offset macros
 * (IBECC_BASE, IMC_BASE, ...) dereference.
 */
static struct res_config {
	bool machine_check;		/* set on TGL/ADL; presumably selects MCE-based error notification — confirm against probe code */
	int num_imc;			/* number of integrated memory controllers */
	u32 imc_base;			/* MCHBAR offset of the IMC register block */
	u32 cmf_base;			/* MCHBAR offset of the CMF block (TGL only in this file) */
	u32 cmf_size;			/* size of the CMF block */
	u32 ms_hash_offset;		/* offset of the memory-slice hash register */
	u32 ibecc_base;			/* MCHBAR offset of the IBECC register block */
	u32 ibecc_error_log_offset;	/* offset of ECC_ERROR_LOG within the IBECC block */
	bool (*ibecc_available)(struct pci_dev *pdev);	/* probe IBECC presence */
	/* Convert error address logged in IBECC to system physical address */
	u64 (*err_addr_to_sys_addr)(u64 eaddr, int mc);
	/* Convert error address logged in IBECC to integrated memory controller address */
	u64 (*err_addr_to_imc_addr)(u64 eaddr, int mc);
} *res_cfg;
0141 
/* Per-memory-controller state */
struct igen6_imc {
	int mc;				/* memory controller index */
	struct mem_ctl_info *mci;	/* EDAC MC instance for this controller */
	struct pci_dev *pdev;		/* host bridge device (config-space access) */
	struct device dev;
	void __iomem *window;		/* mapped MCHBAR window for this controller */
	u64 size;			/* total DRAM size behind this controller */
	u64 ch_s_size;			/* smaller channel size (MAD "S" field) */
	int ch_l_map;			/* which channel is the larger one (MAD "L" field) */
	u64 dimm_s_size[NUM_CHANNELS];	/* per-channel smaller DIMM size */
	u64 dimm_l_size[NUM_CHANNELS];	/* per-channel larger DIMM size */
	int dimm_l_map[NUM_CHANNELS];	/* which DIMM slot holds the larger DIMM */
};
0155 
/* Driver-private state: all IMCs plus memory-slice interleave parameters */
static struct igen6_pvt {
	struct igen6_imc imc[NUM_IMC];
	u64 ms_hash;	/* memory-slice hash register value (TGL/ADL) */
	u64 ms_s_size;	/* smaller memory-slice size */
	int ms_l_map;	/* which slice is the larger one */
} *igen6_pvt;

/* The top of low usable DRAM */
static u32 igen6_tolud;
/* The size of physical memory */
static u64 igen6_tom;
0167 
/* Result of decoding one logged error address through the address stages */
struct decoded_addr {
	int mc;			/* memory controller the error came from */
	u64 imc_addr;		/* address in IMC address space */
	u64 sys_addr;		/* system physical address */
	int channel_idx;	/* decoded channel */
	u64 channel_addr;	/* address within the channel */
	int sub_channel_idx;	/* decoded sub-channel/DIMM */
	u64 sub_channel_addr;	/* address within the sub-channel/DIMM */
};
0177 
/* One queued IBECC error-log entry, linked on the lock-less list */
struct ecclog_node {
	struct llist_node llnode;
	int mc;		/* source memory controller */
	u64 ecclog;	/* raw ECC_ERROR_LOG register value */
};
0183 
0184 /*
0185  * In the NMI handler, the driver uses the lock-less memory allocator
0186  * to allocate memory to store the IBECC error logs and links the logs
0187  * to the lock-less list. Delay printk() and the work of error reporting
0188  * to EDAC core in a worker.
0189  */
#define ECCLOG_POOL_SIZE	PAGE_SIZE
static LLIST_HEAD(ecclog_llist);		/* pending error logs (NMI -> worker) */
static struct gen_pool *ecclog_pool;		/* lock-less allocator over ecclog_buf */
static char ecclog_buf[ECCLOG_POOL_SIZE];	/* static backing store for the pool */
static struct irq_work ecclog_irq_work;		/* defers non-NMI-safe work out of NMI */
static struct work_struct ecclog_work;		/* decodes and reports queued logs */
0196 
0197 /* Compute die IDs for Elkhart Lake with IBECC */
0198 #define DID_EHL_SKU5    0x4514
0199 #define DID_EHL_SKU6    0x4528
0200 #define DID_EHL_SKU7    0x452a
0201 #define DID_EHL_SKU8    0x4516
0202 #define DID_EHL_SKU9    0x452c
0203 #define DID_EHL_SKU10   0x452e
0204 #define DID_EHL_SKU11   0x4532
0205 #define DID_EHL_SKU12   0x4518
0206 #define DID_EHL_SKU13   0x451a
0207 #define DID_EHL_SKU14   0x4534
0208 #define DID_EHL_SKU15   0x4536
0209 
0210 /* Compute die IDs for ICL-NNPI with IBECC */
0211 #define DID_ICL_SKU8    0x4581
0212 #define DID_ICL_SKU10   0x4585
0213 #define DID_ICL_SKU11   0x4589
0214 #define DID_ICL_SKU12   0x458d
0215 
0216 /* Compute die IDs for Tiger Lake with IBECC */
0217 #define DID_TGL_SKU 0x9a14
0218 
0219 /* Compute die IDs for Alder Lake with IBECC */
0220 #define DID_ADL_SKU1    0x4601
0221 #define DID_ADL_SKU2    0x4602
0222 #define DID_ADL_SKU3    0x4621
0223 #define DID_ADL_SKU4    0x4641
0224 
0225 static bool ehl_ibecc_available(struct pci_dev *pdev)
0226 {
0227     u32 v;
0228 
0229     if (pci_read_config_dword(pdev, CAPID_C_OFFSET, &v))
0230         return false;
0231 
0232     return !!(CAPID_C_IBECC & v);
0233 }
0234 
/* EHL: the logged error address is used as the system address unchanged. */
static u64 ehl_err_addr_to_sys_addr(u64 eaddr, int mc)
{
	return eaddr;
}
0239 
0240 static u64 ehl_err_addr_to_imc_addr(u64 eaddr, int mc)
0241 {
0242     if (eaddr < igen6_tolud)
0243         return eaddr;
0244 
0245     if (igen6_tom <= _4GB)
0246         return eaddr + igen6_tolud - _4GB;
0247 
0248     if (eaddr < _4GB)
0249         return eaddr + igen6_tolud - igen6_tom;
0250 
0251     return eaddr;
0252 }
0253 
0254 static bool icl_ibecc_available(struct pci_dev *pdev)
0255 {
0256     u32 v;
0257 
0258     if (pci_read_config_dword(pdev, CAPID_C_OFFSET, &v))
0259         return false;
0260 
0261     return !(CAPID_C_IBECC & v) &&
0262         (boot_cpu_data.x86_stepping >= 1);
0263 }
0264 
0265 static bool tgl_ibecc_available(struct pci_dev *pdev)
0266 {
0267     u32 v;
0268 
0269     if (pci_read_config_dword(pdev, CAPID_E_OFFSET, &v))
0270         return false;
0271 
0272     return !(CAPID_E_IBECC & v);
0273 }
0274 
0275 static u64 mem_addr_to_sys_addr(u64 maddr)
0276 {
0277     if (maddr < igen6_tolud)
0278         return maddr;
0279 
0280     if (igen6_tom <= _4GB)
0281         return maddr - igen6_tolud + _4GB;
0282 
0283     if (maddr < _4GB)
0284         return maddr - igen6_tolud + igen6_tom;
0285 
0286     return maddr;
0287 }
0288 
0289 static u64 mem_slice_hash(u64 addr, u64 mask, u64 hash_init, int intlv_bit)
0290 {
0291     u64 hash_addr = addr & mask, hash = hash_init;
0292     u64 intlv = (addr >> intlv_bit) & 1;
0293     int i;
0294 
0295     for (i = 6; i < 20; i++)
0296         hash ^= (hash_addr >> i) & 1;
0297 
0298     return hash ^ intlv;
0299 }
0300 
/*
 * TGL: convert a logged error address to a memory address by re-inserting
 * the memory-slice interleave bit that hardware stripped from the log.
 */
static u64 tgl_err_addr_to_mem_addr(u64 eaddr, int mc)
{
	u64 maddr, hash, mask, ms_s_size;
	int intlv_bit;
	u32 ms_hash;

	ms_s_size = igen6_pvt->ms_s_size;
	/* Beyond the smaller slice only the larger slice remains */
	if (eaddr >= ms_s_size)
		return eaddr + ms_s_size;

	ms_hash = igen6_pvt->ms_hash;

	mask = MEM_SLICE_HASH_MASK(ms_hash);
	intlv_bit = MEM_SLICE_HASH_LSB_MASK_BIT(ms_hash) + 6;

	/* Open a gap at intlv_bit: shift the upper part up by one */
	maddr = GET_BITFIELD(eaddr, intlv_bit, 63) << (intlv_bit + 1) |
		GET_BITFIELD(eaddr, 0, intlv_bit - 1);

	/* The slice index (mc) seeds the parity hash that fills the gap */
	hash = mem_slice_hash(maddr, mask, mc, intlv_bit);

	return maddr | (hash << intlv_bit);
}
0323 
0324 static u64 tgl_err_addr_to_sys_addr(u64 eaddr, int mc)
0325 {
0326     u64 maddr = tgl_err_addr_to_mem_addr(eaddr, mc);
0327 
0328     return mem_addr_to_sys_addr(maddr);
0329 }
0330 
/* TGL: the logged error address is used as the IMC address unchanged. */
static u64 tgl_err_addr_to_imc_addr(u64 eaddr, int mc)
{
	return eaddr;
}
0335 
/* ADL: only the TOLUD/TOM hole adjustment is needed for the system address. */
static u64 adl_err_addr_to_sys_addr(u64 eaddr, int mc)
{
	return mem_addr_to_sys_addr(eaddr);
}
0340 
/*
 * ADL: convert a logged error address to an IMC address by removing the
 * MC interleave bit selected by the MC hash register.
 */
static u64 adl_err_addr_to_imc_addr(u64 eaddr, int mc)
{
	u64 imc_addr, ms_s_size = igen6_pvt->ms_s_size;
	struct igen6_imc *imc = &igen6_pvt->imc[mc];
	int intlv_bit;
	u32 mc_hash;

	/* Beyond twice the smaller slice: non-interleaved region */
	if (eaddr >= 2 * ms_s_size)
		return eaddr - ms_s_size;

	mc_hash = readl(imc->window + MAD_MC_HASH_OFFSET);

	intlv_bit = MAC_MC_HASH_LSB(mc_hash) + 6;

	/* Remove the interleave bit; upper bits shift down by one */
	imc_addr = GET_BITFIELD(eaddr, intlv_bit + 1, 63) << intlv_bit |
		   GET_BITFIELD(eaddr, 0, intlv_bit - 1);

	return imc_addr;
}
0360 
/* Elkhart Lake */
static struct res_config ehl_cfg = {
	.num_imc		= 1,
	.imc_base		= 0x5000,
	.ibecc_base		= 0xdc00,
	.ibecc_available	= ehl_ibecc_available,
	.ibecc_error_log_offset	= 0x170,
	.err_addr_to_sys_addr	= ehl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr	= ehl_err_addr_to_imc_addr,
};

/* Ice Lake NNPI (shares the EHL address translation) */
static struct res_config icl_cfg = {
	.num_imc		= 1,
	.imc_base		= 0x5000,
	.ibecc_base		= 0xd800,
	.ibecc_error_log_offset	= 0x170,
	.ibecc_available	= icl_ibecc_available,
	.err_addr_to_sys_addr	= ehl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr	= ehl_err_addr_to_imc_addr,
};

/* Tiger Lake */
static struct res_config tgl_cfg = {
	.machine_check		= true,
	.num_imc		= 2,
	.imc_base		= 0x5000,
	.cmf_base		= 0x11000,
	.cmf_size		= 0x800,
	.ms_hash_offset		= 0xac,
	.ibecc_base		= 0xd400,
	.ibecc_error_log_offset	= 0x170,
	.ibecc_available	= tgl_ibecc_available,
	.err_addr_to_sys_addr	= tgl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr	= tgl_err_addr_to_imc_addr,
};

/* Alder Lake (reuses the TGL IBECC availability check) */
static struct res_config adl_cfg = {
	.machine_check		= true,
	.num_imc		= 2,
	.imc_base		= 0xd800,
	.ibecc_base		= 0xd400,
	.ibecc_error_log_offset	= 0x68,
	.ibecc_available	= tgl_ibecc_available,
	.err_addr_to_sys_addr	= adl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr	= adl_err_addr_to_imc_addr,
};
0405 
/* Supported compute-die IDs, each bound to its per-SoC resource config */
static const struct pci_device_id igen6_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU5), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU6), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU7), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU8), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU9), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU10), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU11), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU12), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU13), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU14), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU15), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ICL_SKU8), (kernel_ulong_t)&icl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ICL_SKU10), (kernel_ulong_t)&icl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ICL_SKU11), (kernel_ulong_t)&icl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ICL_SKU12), (kernel_ulong_t)&icl_cfg },
	{ PCI_VDEVICE(INTEL, DID_TGL_SKU), (kernel_ulong_t)&tgl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_SKU1), (kernel_ulong_t)&adl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_SKU2), (kernel_ulong_t)&adl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_SKU3), (kernel_ulong_t)&adl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_SKU4), (kernel_ulong_t)&adl_cfg },
	{ },
};
MODULE_DEVICE_TABLE(pci, igen6_pci_tbl);
0430 
0431 static enum dev_type get_width(int dimm_l, u32 mad_dimm)
0432 {
0433     u32 w = dimm_l ? MAD_DIMM_CH_DLW(mad_dimm) :
0434              MAD_DIMM_CH_DSW(mad_dimm);
0435 
0436     switch (w) {
0437     case 0:
0438         return DEV_X8;
0439     case 1:
0440         return DEV_X16;
0441     case 2:
0442         return DEV_X32;
0443     default:
0444         return DEV_UNKNOWN;
0445     }
0446 }
0447 
0448 static enum mem_type get_memory_type(u32 mad_inter)
0449 {
0450     u32 t = MAD_INTER_CHANNEL_DDR_TYPE(mad_inter);
0451 
0452     switch (t) {
0453     case 0:
0454         return MEM_DDR4;
0455     case 1:
0456         return MEM_DDR3;
0457     case 2:
0458         return MEM_LPDDR3;
0459     case 3:
0460         return MEM_LPDDR4;
0461     case 4:
0462         return MEM_WIO2;
0463     default:
0464         return MEM_UNKNOWN;
0465     }
0466 }
0467 
0468 static int decode_chan_idx(u64 addr, u64 mask, int intlv_bit)
0469 {
0470     u64 hash_addr = addr & mask, hash = 0;
0471     u64 intlv = (addr >> intlv_bit) & 1;
0472     int i;
0473 
0474     for (i = 6; i < 20; i++)
0475         hash ^= (hash_addr >> i) & 1;
0476 
0477     return (int)hash ^ intlv;
0478 }
0479 
0480 static u64 decode_channel_addr(u64 addr, int intlv_bit)
0481 {
0482     u64 channel_addr;
0483 
0484     /* Remove the interleave bit and shift upper part down to fill gap */
0485     channel_addr  = GET_BITFIELD(addr, intlv_bit + 1, 63) << intlv_bit;
0486     channel_addr |= GET_BITFIELD(addr, 0, intlv_bit - 1);
0487 
0488     return channel_addr;
0489 }
0490 
/*
 * Decode one interleave stage: split @addr between the two ways
 * (channels or DIMMs) of a size-asymmetric interleave.
 *
 * @addr:     address at this decode stage
 * @hash:     CHANNEL_HASH/EHASH register value controlling the hashing
 * @s_size:   size of the smaller way
 * @l_map:    index of the larger way
 * @idx:      output - selected way index
 * @sub_addr: output - address within the selected way
 */
static void decode_addr(u64 addr, u32 hash, u64 s_size, int l_map,
			int *idx, u64 *sub_addr)
{
	int intlv_bit = CHANNEL_HASH_LSB_MASK_BIT(hash) + 6;

	/* Above twice the smaller size only the larger way remains */
	if (addr > 2 * s_size) {
		*sub_addr = addr - s_size;
		*idx = l_map;
		return;
	}

	if (CHANNEL_HASH_MODE(hash)) {
		/* Hashed interleave: parity of masked bits picks the way */
		*sub_addr = decode_channel_addr(addr, intlv_bit);
		*idx = decode_chan_idx(addr, CHANNEL_HASH_MASK(hash), intlv_bit);
	} else {
		/* Plain interleave on address bit 6 */
		*sub_addr = decode_channel_addr(addr, 6);
		*idx = GET_BITFIELD(addr, 6, 6);
	}
}
0510 
/*
 * Decode an IMC address into channel and sub-channel/DIMM coordinates,
 * filling res->channel_* and res->sub_channel_*.
 *
 * Returns 0 on success, -EINVAL if the address is above top of memory.
 */
static int igen6_decode(struct decoded_addr *res)
{
	struct igen6_imc *imc = &igen6_pvt->imc[res->mc];
	u64 addr = res->imc_addr, sub_addr, s_size;
	int idx, l_map;
	u32 hash;

	if (addr >= igen6_tom) {
		edac_dbg(0, "Address 0x%llx out of range\n", addr);
		return -EINVAL;
	}

	/* Decode channel */
	hash   = readl(imc->window + CHANNEL_HASH_OFFSET);
	s_size = imc->ch_s_size;
	l_map  = imc->ch_l_map;
	decode_addr(addr, hash, s_size, l_map, &idx, &sub_addr);
	res->channel_idx  = idx;
	res->channel_addr = sub_addr;

	/* Decode sub-channel/DIMM (per-channel sizes/maps, EHASH register) */
	hash   = readl(imc->window + CHANNEL_EHASH_OFFSET);
	s_size = imc->dimm_s_size[idx];
	l_map  = imc->dimm_l_map[idx];
	decode_addr(res->channel_addr, hash, s_size, l_map, &idx, &sub_addr);
	res->sub_channel_idx  = idx;
	res->sub_channel_addr = sub_addr;

	return 0;
}
0541 
0542 static void igen6_output_error(struct decoded_addr *res,
0543                    struct mem_ctl_info *mci, u64 ecclog)
0544 {
0545     enum hw_event_mc_err_type type = ecclog & ECC_ERROR_LOG_UE ?
0546                      HW_EVENT_ERR_UNCORRECTED :
0547                      HW_EVENT_ERR_CORRECTED;
0548 
0549     edac_mc_handle_error(type, mci, 1,
0550                  res->sys_addr >> PAGE_SHIFT,
0551                  res->sys_addr & ~PAGE_MASK,
0552                  ECC_ERROR_LOG_SYND(ecclog),
0553                  res->channel_idx, res->sub_channel_idx,
0554                  -1, "", "");
0555 }
0556 
0557 static struct gen_pool *ecclog_gen_pool_create(void)
0558 {
0559     struct gen_pool *pool;
0560 
0561     pool = gen_pool_create(ilog2(sizeof(struct ecclog_node)), -1);
0562     if (!pool)
0563         return NULL;
0564 
0565     if (gen_pool_add(pool, (unsigned long)ecclog_buf, ECCLOG_POOL_SIZE, -1)) {
0566         gen_pool_destroy(pool);
0567         return NULL;
0568     }
0569 
0570     return pool;
0571 }
0572 
0573 static int ecclog_gen_pool_add(int mc, u64 ecclog)
0574 {
0575     struct ecclog_node *node;
0576 
0577     node = (void *)gen_pool_alloc(ecclog_pool, sizeof(*node));
0578     if (!node)
0579         return -ENOMEM;
0580 
0581     node->mc = mc;
0582     node->ecclog = ecclog;
0583     llist_add(&node->llnode, &ecclog_llist);
0584 
0585     return 0;
0586 }
0587 
0588 /*
0589  * Either the memory-mapped I/O status register ECC_ERROR_LOG or the PCI
0590  * configuration space status register ERRSTS can indicate whether a
0591  * correctable error or an uncorrectable error occurred. We only use the
0592  * ECC_ERROR_LOG register to check error type, but need to clear both
0593  * registers to enable future error events.
0594  */
0595 static u64 ecclog_read_and_clear(struct igen6_imc *imc)
0596 {
0597     u64 ecclog = readq(imc->window + ECC_ERROR_LOG_OFFSET);
0598 
0599     if (ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)) {
0600         /* Clear CE/UE bits by writing 1s */
0601         writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET);
0602         return ecclog;
0603     }
0604 
0605     return 0;
0606 }
0607 
0608 static void errsts_clear(struct igen6_imc *imc)
0609 {
0610     u16 errsts;
0611 
0612     if (pci_read_config_word(imc->pdev, ERRSTS_OFFSET, &errsts)) {
0613         igen6_printk(KERN_ERR, "Failed to read ERRSTS\n");
0614         return;
0615     }
0616 
0617     /* Clear CE/UE bits by writing 1s */
0618     if (errsts & (ERRSTS_CE | ERRSTS_UE))
0619         pci_write_config_word(imc->pdev, ERRSTS_OFFSET, errsts);
0620 }
0621 
0622 static int errcmd_enable_error_reporting(bool enable)
0623 {
0624     struct igen6_imc *imc = &igen6_pvt->imc[0];
0625     u16 errcmd;
0626     int rc;
0627 
0628     rc = pci_read_config_word(imc->pdev, ERRCMD_OFFSET, &errcmd);
0629     if (rc)
0630         return rc;
0631 
0632     if (enable)
0633         errcmd |= ERRCMD_CE | ERRSTS_UE;
0634     else
0635         errcmd &= ~(ERRCMD_CE | ERRSTS_UE);
0636 
0637     rc = pci_write_config_word(imc->pdev, ERRCMD_OFFSET, errcmd);
0638     if (rc)
0639         return rc;
0640 
0641     return 0;
0642 }
0643 
0644 static int ecclog_handler(void)
0645 {
0646     struct igen6_imc *imc;
0647     int i, n = 0;
0648     u64 ecclog;
0649 
0650     for (i = 0; i < res_cfg->num_imc; i++) {
0651         imc = &igen6_pvt->imc[i];
0652 
0653         /* errsts_clear() isn't NMI-safe. Delay it in the IRQ context */
0654 
0655         ecclog = ecclog_read_and_clear(imc);
0656         if (!ecclog)
0657             continue;
0658 
0659         if (!ecclog_gen_pool_add(i, ecclog))
0660             irq_work_queue(&ecclog_irq_work);
0661 
0662         n++;
0663     }
0664 
0665     return n;
0666 }
0667 
/*
 * Worker: drain the lock-less list, translate each logged address to
 * system/IMC addresses, decode it, report it to EDAC, and free the node.
 */
static void ecclog_work_cb(struct work_struct *work)
{
	struct ecclog_node *node, *tmp;
	struct mem_ctl_info *mci;
	struct llist_node *head;
	struct decoded_addr res;
	u64 eaddr;

	/* Atomically take ownership of all queued entries */
	head = llist_del_all(&ecclog_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(node, tmp, head, llnode) {
		memset(&res, 0, sizeof(res));
		/* Reconstruct the byte address from the logged field */
		eaddr = ECC_ERROR_LOG_ADDR(node->ecclog) <<
			ECC_ERROR_LOG_ADDR_SHIFT;
		res.mc	     = node->mc;
		res.sys_addr = res_cfg->err_addr_to_sys_addr(eaddr, res.mc);
		res.imc_addr = res_cfg->err_addr_to_imc_addr(eaddr, res.mc);

		mci = igen6_pvt->imc[res.mc].mci;

		edac_dbg(2, "MC %d, ecclog = 0x%llx\n", node->mc, node->ecclog);
		igen6_mc_printk(mci, KERN_DEBUG, "HANDLING IBECC MEMORY ERROR\n");
		igen6_mc_printk(mci, KERN_DEBUG, "ADDR 0x%llx ", res.sys_addr);

		/* Only report errors that decode successfully */
		if (!igen6_decode(&res))
			igen6_output_error(&res, mci, node->ecclog);

		gen_pool_free(ecclog_pool, (unsigned long)node, sizeof(*node));
	}
}
0700 
/*
 * IRQ work: perform the ERRSTS clearing that was deferred from NMI
 * context, then kick the worker if logs are queued.
 */
static void ecclog_irq_work_cb(struct irq_work *irq_work)
{
	int i;

	for (i = 0; i < res_cfg->num_imc; i++)
		errsts_clear(&igen6_pvt->imc[i]);

	if (!llist_empty(&ecclog_llist))
		schedule_work(&ecclog_work);
}
0711 
/*
 * NMI handler for SERR#-signaled IBECC errors. Claims the NMI only if
 * at least one IMC actually had an error logged.
 */
static int ecclog_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	unsigned char reason;

	if (!ecclog_handler())
		return NMI_DONE;

	/*
	 * Both In-Band ECC correctable error and uncorrectable error are
	 * reported by SERR# NMI. The NMI generic code (see pci_serr_error())
	 * doesn't clear the bit NMI_REASON_CLEAR_SERR (in port 0x61) to
	 * re-enable the SERR# NMI after NMI handling. So clear this bit here
	 * to re-enable SERR# NMI for receiving future In-Band ECC errors.
	 */
	reason  = x86_platform.get_nmi_reason() & NMI_REASON_CLEAR_MASK;
	reason |= NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
	/* Drop the mask bit again so SERR# reporting is re-armed */
	reason &= ~NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);

	return NMI_HANDLED;
}
0734 
/*
 * MCE notifier for SoCs that report IBECC errors via machine check
 * (TGL/ADL). Harvests the IBECC logs and marks the MCE as handled by
 * EDAC; always returns NOTIFY_DONE so other notifiers still run.
 */
static int ecclog_mce_handler(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *mce = (struct mce *)data;
	char *type;

	if (mce->kflags & MCE_HANDLED_CEC)
		return NOTIFY_DONE;

	/*
	 * Ignore unless this is a memory related error.
	 * We don't check the bit MCI_STATUS_ADDRV of MCi_STATUS here,
	 * since this bit isn't set on some CPU (e.g., Tiger Lake UP3).
	 */
	/* MCACOD 0x0080-0x00ff (bit 12 masked out) = memory controller error */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	/* MCIP set means a machine-check exception, not a silent event */
	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	edac_dbg(0, "CPU %d: Machine Check %s: 0x%llx Bank %d: 0x%llx\n",
		 mce->extcpu, type, mce->mcgstatus,
		 mce->bank, mce->status);
	edac_dbg(0, "TSC 0x%llx\n", mce->tsc);
	edac_dbg(0, "ADDR 0x%llx\n", mce->addr);
	edac_dbg(0, "MISC 0x%llx\n", mce->misc);
	edac_dbg(0, "PROCESSOR %u:0x%x TIME %llu SOCKET %u APIC 0x%x\n",
		 mce->cpuvendor, mce->cpuid, mce->time,
		 mce->socketid, mce->apicid);
	/*
	 * We just use the Machine Check for the memory error notification.
	 * Each memory controller is associated with an IBECC instance.
	 * Directly read and clear the error information(error address and
	 * error type) on all the IBECC instances so that we know on which
	 * memory controller the memory error(s) occurred.
	 */
	if (!ecclog_handler())
		return NOTIFY_DONE;

	mce->kflags |= MCE_HANDLED_EDAC;

	return NOTIFY_DONE;
}
0780 
/* MCE decoder chain registration for ecclog_mce_handler */
static struct notifier_block ecclog_mce_dec = {
	.notifier_call	= ecclog_mce_handler,
	.priority	= MCE_PRIO_EDAC,
};
0785 
0786 static bool igen6_check_ecc(struct igen6_imc *imc)
0787 {
0788     u32 activate = readl(imc->window + IBECC_ACTIVATE_OFFSET);
0789 
0790     return !!(activate & IBECC_ACTIVATE_EN);
0791 }
0792 
/*
 * Read the memory address decode (MAD) registers, populate the per-IMC
 * channel/DIMM geometry used by address decoding, and fill in the EDAC
 * DIMM info for every populated slot.
 *
 * Returns 0 on success, -ENODEV if DIMMs are present but IBECC is off.
 */
static int igen6_get_dimm_config(struct mem_ctl_info *mci)
{
	struct igen6_imc *imc = mci->pvt_info;
	u32 mad_inter, mad_intra, mad_dimm;
	int i, j, ndimms, mc = imc->mc;
	struct dimm_info *dimm;
	enum mem_type mtype;
	enum dev_type dtype;
	u64 dsize;
	bool ecc;

	edac_dbg(2, "\n");

	mad_inter = readl(imc->window + MAD_INTER_CHANNEL_OFFSET);
	mtype = get_memory_type(mad_inter);
	ecc = igen6_check_ecc(imc);
	/* Channel interleave parameters, consumed later by igen6_decode() */
	imc->ch_s_size = MAD_INTER_CHANNEL_CH_S_SIZE(mad_inter);
	imc->ch_l_map  = MAD_INTER_CHANNEL_CH_L_MAP(mad_inter);

	for (i = 0; i < NUM_CHANNELS; i++) {
		/* Per-channel intra-channel decode and DIMM registers (stride 4) */
		mad_intra = readl(imc->window + MAD_INTRA_CH0_OFFSET + i * 4);
		mad_dimm  = readl(imc->window + MAD_DIMM_CH0_OFFSET + i * 4);

		imc->dimm_l_size[i] = MAD_DIMM_CH_DIMM_L_SIZE(mad_dimm);
		imc->dimm_s_size[i] = MAD_DIMM_CH_DIMM_S_SIZE(mad_dimm);
		imc->dimm_l_map[i]  = MAD_INTRA_CH_DIMM_L_MAP(mad_intra);
		imc->size += imc->dimm_s_size[i];
		imc->size += imc->dimm_l_size[i];
		ndimms = 0;

		for (j = 0; j < NUM_DIMMS; j++) {
			dimm = edac_get_dimm(mci, i, j, 0);

			/* Slot != large-DIMM slot -> this is the small DIMM */
			if (j ^ imc->dimm_l_map[i]) {
				dtype = get_width(0, mad_dimm);
				dsize = imc->dimm_s_size[i];
			} else {
				dtype = get_width(1, mad_dimm);
				dsize = imc->dimm_l_size[i];
			}

			/* Zero size: slot not populated */
			if (!dsize)
				continue;

			dimm->grain = 64;
			dimm->mtype = mtype;
			dimm->dtype = dtype;
			dimm->nr_pages  = MiB_TO_PAGES(dsize >> 20);
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label),
				 "MC#%d_Chan#%d_DIMM#%d", mc, i, j);
			edac_dbg(0, "MC %d, Channel %d, DIMM %d, Size %llu MiB (%u pages)\n",
				 mc, i, j, dsize >> 20, dimm->nr_pages);

			ndimms++;
		}

		/* Populated channel without IBECC is unusable for EDAC */
		if (ndimms && !ecc) {
			igen6_printk(KERN_ERR, "MC%d In-Band ECC is disabled\n", mc);
			return -ENODEV;
		}
	}

	edac_dbg(0, "MC %d, total size %llu MiB\n", mc, imc->size >> 20);

	return 0;
}
0860 
0861 #ifdef CONFIG_EDAC_DEBUG
0862 /* Top of upper usable DRAM */
0863 static u64 igen6_touud;
0864 #define TOUUD_OFFSET    0xa8
0865 
/* Dump the decode-related registers of one IMC to the EDAC debug log. */
static void igen6_reg_dump(struct igen6_imc *imc)
{
	int i;

	edac_dbg(2, "CHANNEL_HASH     : 0x%x\n",
		 readl(imc->window + CHANNEL_HASH_OFFSET));
	edac_dbg(2, "CHANNEL_EHASH    : 0x%x\n",
		 readl(imc->window + CHANNEL_EHASH_OFFSET));
	edac_dbg(2, "MAD_INTER_CHANNEL: 0x%x\n",
		 readl(imc->window + MAD_INTER_CHANNEL_OFFSET));
	edac_dbg(2, "ECC_ERROR_LOG    : 0x%llx\n",
		 readq(imc->window + ECC_ERROR_LOG_OFFSET));

	for (i = 0; i < NUM_CHANNELS; i++) {
		edac_dbg(2, "MAD_INTRA_CH%d    : 0x%x\n", i,
			 readl(imc->window + MAD_INTRA_CH0_OFFSET + i * 4));
		edac_dbg(2, "MAD_DIMM_CH%d     : 0x%x\n", i,
			 readl(imc->window + MAD_DIMM_CH0_OFFSET + i * 4));
	}
	edac_dbg(2, "TOLUD            : 0x%x", igen6_tolud);
	edac_dbg(2, "TOUUD            : 0x%llx", igen6_touud);
	edac_dbg(2, "TOM              : 0x%llx", igen6_tom);
}
0889 
0890 static struct dentry *igen6_test;
0891 
/*
 * Debugfs write handler: inject a fake correctable error at address
 * @val by queueing a synthetic ECC_ERROR_LOG entry for MC 0.
 * Always returns 0 (invalid addresses are rejected with a debug msg).
 */
static int debugfs_u64_set(void *data, u64 val)
{
	u64 ecclog;

	/* Reject addresses inside the MMIO hole or above top of DRAM */
	if ((val >= igen6_tolud && val < _4GB) || val >= igen6_touud) {
		edac_dbg(0, "Address 0x%llx out of range\n", val);
		return 0;
	}

	pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);

	/* Align to the logged-address granularity and mark as a CE */
	val  >>= ECC_ERROR_LOG_ADDR_SHIFT;
	ecclog = (val << ECC_ERROR_LOG_ADDR_SHIFT) | ECC_ERROR_LOG_CE;

	if (!ecclog_gen_pool_add(0, ecclog))
		irq_work_queue(&ecclog_irq_work);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
0912 
0913 static void igen6_debug_setup(void)
0914 {
0915     igen6_test = edac_debugfs_create_dir("igen6_test");
0916     if (!igen6_test)
0917         return;
0918 
0919     if (!edac_debugfs_create_file("addr", 0200, igen6_test,
0920                       NULL, &fops_u64_wo)) {
0921         debugfs_remove(igen6_test);
0922         igen6_test = NULL;
0923     }
0924 }
0925 
/* Remove the igen6_test debugfs tree (safe if it was never created). */
static void igen6_debug_teardown(void)
{
	debugfs_remove_recursive(igen6_test);
}
0930 #else
/* No-op stubs when CONFIG_EDAC_DEBUG is disabled */
static void igen6_reg_dump(struct igen6_imc *imc) {}
static void igen6_debug_setup(void) {}
static void igen6_debug_teardown(void) {}
0934 #endif
0935 
/*
 * Probe-time PCI setup: verify IBECC is available, read TOLUD/TOM (and
 * TOUUD when debugging), and return the MCHBAR base via @mchbar.
 *
 * Returns 0 on success, -ENODEV on any failure.
 */
static int igen6_pci_setup(struct pci_dev *pdev, u64 *mchbar)
{
	/* Assemble a 64-bit register value from two 32-bit config reads */
	union  {
		u64 v;
		struct {
			u32 v_lo;
			u32 v_hi;
		};
	} u;

	edac_dbg(2, "\n");

	if (!res_cfg->ibecc_available(pdev)) {
		edac_dbg(2, "No In-Band ECC IP\n");
		goto fail;
	}

	if (pci_read_config_dword(pdev, TOLUD_OFFSET, &igen6_tolud)) {
		igen6_printk(KERN_ERR, "Failed to read TOLUD\n");
		goto fail;
	}

	/* TOLUD is 1MB-granular in bits 31:20 */
	igen6_tolud &= GENMASK(31, 20);

	if (pci_read_config_dword(pdev, TOM_OFFSET, &u.v_lo)) {
		igen6_printk(KERN_ERR, "Failed to read lower TOM\n");
		goto fail;
	}

	if (pci_read_config_dword(pdev, TOM_OFFSET + 4, &u.v_hi)) {
		igen6_printk(KERN_ERR, "Failed to read upper TOM\n");
		goto fail;
	}

	/* TOM is 1MB-granular in bits 38:20 */
	igen6_tom = u.v & GENMASK_ULL(38, 20);

	if (pci_read_config_dword(pdev, MCHBAR_OFFSET, &u.v_lo)) {
		igen6_printk(KERN_ERR, "Failed to read lower MCHBAR\n");
		goto fail;
	}

	if (pci_read_config_dword(pdev, MCHBAR_OFFSET + 4, &u.v_hi)) {
		igen6_printk(KERN_ERR, "Failed to read upper MCHBAR\n");
		goto fail;
	}

	/* MCHBAR must be enabled by firmware for MMIO access to work */
	if (!(u.v & MCHBAR_EN)) {
		igen6_printk(KERN_ERR, "MCHBAR is disabled\n");
		goto fail;
	}

	*mchbar = MCHBAR_BASE(u.v);

#ifdef CONFIG_EDAC_DEBUG
	/* TOUUD is only needed for the debugfs error-injection range check */
	if (pci_read_config_dword(pdev, TOUUD_OFFSET, &u.v_lo))
		edac_dbg(2, "Failed to read lower TOUUD\n");
	else if (pci_read_config_dword(pdev, TOUUD_OFFSET + 4, &u.v_hi))
		edac_dbg(2, "Failed to read upper TOUUD\n");
	else
		igen6_touud = u.v & GENMASK_ULL(38, 20);
#endif

	return 0;
fail:
	return -ENODEV;
}
1002 
/*
 * Allocate, configure and register one EDAC memory-controller instance.
 *
 * @mc:     zero-based index of the memory controller within the SoC
 * @mchbar: MCHBAR base for controller #0; each further controller's
 *          register window follows at MCHBAR_SIZE intervals
 * @pdev:   host-bridge PCI device that all controllers hang off
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired here (mapping, mci, ctl_name) is released again.
 */
static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
{
    struct edac_mc_layer layers[2];
    struct mem_ctl_info *mci;
    struct igen6_imc *imc;
    void __iomem *window;
    int rc;

    edac_dbg(2, "\n");

    /* Map this controller's slice of the MCHBAR register space. */
    mchbar += mc * MCHBAR_SIZE;
    window = ioremap(mchbar, MCHBAR_SIZE);
    if (!window) {
        igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", mchbar);
        return -ENODEV;
    }

    layers[0].type = EDAC_MC_LAYER_CHANNEL;
    layers[0].size = NUM_CHANNELS;
    layers[0].is_virt_csrow = false;
    layers[1].type = EDAC_MC_LAYER_SLOT;
    layers[1].size = NUM_DIMMS;
    layers[1].is_virt_csrow = true;

    mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
    if (!mci) {
        rc = -ENOMEM;
        goto fail;
    }

    mci->ctl_name = kasprintf(GFP_KERNEL, "Intel_client_SoC MC#%d", mc);
    if (!mci->ctl_name) {
        rc = -ENOMEM;
        goto fail2;
    }

    mci->mtype_cap = MEM_FLAG_LPDDR4 | MEM_FLAG_DDR4;
    mci->edac_ctl_cap = EDAC_FLAG_SECDED;
    mci->edac_cap = EDAC_FLAG_SECDED;
    mci->mod_name = EDAC_MOD_STR;
    mci->dev_name = pci_name(pdev);
    mci->pvt_info = &igen6_pvt->imc[mc];

    imc = mci->pvt_info;
    device_initialize(&imc->dev);
    /*
     * EDAC core uses mci->pdev(pointer of structure device) as
     * memory controller ID. The client SoCs attach one or more
     * memory controllers to single pci_dev (single pci_dev->dev
     * can be for multiple memory controllers).
     *
     * To make mci->pdev unique, assign pci_dev->dev to mci->pdev
     * for the first memory controller and assign a unique imc->dev
     * to mci->pdev for each non-first memory controller.
     */
    mci->pdev = mc ? &imc->dev : &pdev->dev;
    imc->mc = mc;
    imc->pdev = pdev;
    imc->window = window;

    igen6_reg_dump(imc);

    rc = igen6_get_dimm_config(mci);
    if (rc)
        goto fail3;

    rc = edac_mc_add_mc(mci);
    if (rc) {
        igen6_printk(KERN_ERR, "Failed to register mci#%d\n", mc);
        goto fail3;
    }

    /* Published last: igen6_unregister_mcis() skips entries with NULL mci. */
    imc->mci = mci;
    return 0;
fail3:
    kfree(mci->ctl_name);
fail2:
    edac_mc_free(mci);
fail:
    iounmap(window);
    return rc;
}
1085 
1086 static void igen6_unregister_mcis(void)
1087 {
1088     struct mem_ctl_info *mci;
1089     struct igen6_imc *imc;
1090     int i;
1091 
1092     edac_dbg(2, "\n");
1093 
1094     for (i = 0; i < res_cfg->num_imc; i++) {
1095         imc = &igen6_pvt->imc[i];
1096         mci = imc->mci;
1097         if (!mci)
1098             continue;
1099 
1100         edac_mc_del_mc(mci->pdev);
1101         kfree(mci->ctl_name);
1102         edac_mc_free(mci);
1103         iounmap(imc->window);
1104     }
1105 }
1106 
1107 static int igen6_mem_slice_setup(u64 mchbar)
1108 {
1109     struct igen6_imc *imc = &igen6_pvt->imc[0];
1110     u64 base = mchbar + res_cfg->cmf_base;
1111     u32 offset = res_cfg->ms_hash_offset;
1112     u32 size = res_cfg->cmf_size;
1113     u64 ms_s_size, ms_hash;
1114     void __iomem *cmf;
1115     int ms_l_map;
1116 
1117     edac_dbg(2, "\n");
1118 
1119     if (imc[0].size < imc[1].size) {
1120         ms_s_size = imc[0].size;
1121         ms_l_map  = 1;
1122     } else {
1123         ms_s_size = imc[1].size;
1124         ms_l_map  = 0;
1125     }
1126 
1127     igen6_pvt->ms_s_size = ms_s_size;
1128     igen6_pvt->ms_l_map  = ms_l_map;
1129 
1130     edac_dbg(0, "ms_s_size: %llu MiB, ms_l_map %d\n",
1131          ms_s_size >> 20, ms_l_map);
1132 
1133     if (!size)
1134         return 0;
1135 
1136     cmf = ioremap(base, size);
1137     if (!cmf) {
1138         igen6_printk(KERN_ERR, "Failed to ioremap cmf 0x%llx\n", base);
1139         return -ENODEV;
1140     }
1141 
1142     ms_hash = readq(cmf + offset);
1143     igen6_pvt->ms_hash = ms_hash;
1144 
1145     edac_dbg(0, "MEM_SLICE_HASH: 0x%llx\n", ms_hash);
1146 
1147     iounmap(cmf);
1148 
1149     return 0;
1150 }
1151 
1152 static int register_err_handler(void)
1153 {
1154     int rc;
1155 
1156     if (res_cfg->machine_check) {
1157         mce_register_decode_chain(&ecclog_mce_dec);
1158         return 0;
1159     }
1160 
1161     rc = register_nmi_handler(NMI_SERR, ecclog_nmi_handler,
1162                   0, IGEN6_NMI_NAME);
1163     if (rc) {
1164         igen6_printk(KERN_ERR, "Failed to register NMI handler\n");
1165         return rc;
1166     }
1167 
1168     return 0;
1169 }
1170 
1171 static void unregister_err_handler(void)
1172 {
1173     if (res_cfg->machine_check) {
1174         mce_unregister_decode_chain(&ecclog_mce_dec);
1175         return;
1176     }
1177 
1178     unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
1179 }
1180 
1181 static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1182 {
1183     u64 mchbar;
1184     int i, rc;
1185 
1186     edac_dbg(2, "\n");
1187 
1188     igen6_pvt = kzalloc(sizeof(*igen6_pvt), GFP_KERNEL);
1189     if (!igen6_pvt)
1190         return -ENOMEM;
1191 
1192     res_cfg = (struct res_config *)ent->driver_data;
1193 
1194     rc = igen6_pci_setup(pdev, &mchbar);
1195     if (rc)
1196         goto fail;
1197 
1198     for (i = 0; i < res_cfg->num_imc; i++) {
1199         rc = igen6_register_mci(i, mchbar, pdev);
1200         if (rc)
1201             goto fail2;
1202     }
1203 
1204     if (res_cfg->num_imc > 1) {
1205         rc = igen6_mem_slice_setup(mchbar);
1206         if (rc)
1207             goto fail2;
1208     }
1209 
1210     ecclog_pool = ecclog_gen_pool_create();
1211     if (!ecclog_pool) {
1212         rc = -ENOMEM;
1213         goto fail2;
1214     }
1215 
1216     INIT_WORK(&ecclog_work, ecclog_work_cb);
1217     init_irq_work(&ecclog_irq_work, ecclog_irq_work_cb);
1218 
1219     /* Check if any pending errors before registering the NMI handler */
1220     ecclog_handler();
1221 
1222     rc = register_err_handler();
1223     if (rc)
1224         goto fail3;
1225 
1226     /* Enable error reporting */
1227     rc = errcmd_enable_error_reporting(true);
1228     if (rc) {
1229         igen6_printk(KERN_ERR, "Failed to enable error reporting\n");
1230         goto fail4;
1231     }
1232 
1233     igen6_debug_setup();
1234     return 0;
1235 fail4:
1236     unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
1237 fail3:
1238     gen_pool_destroy(ecclog_pool);
1239 fail2:
1240     igen6_unregister_mcis();
1241 fail:
1242     kfree(igen6_pvt);
1243     return rc;
1244 }
1245 
/*
 * Driver remove: tear down in the reverse order of igen6_probe() —
 * stop new error reports first, then drain in-flight deferred work,
 * then free the pool, the memory controllers and the private data.
 */
static void igen6_remove(struct pci_dev *pdev)
{
    edac_dbg(2, "\n");

    igen6_debug_teardown();
    errcmd_enable_error_reporting(false);
    unregister_err_handler();
    /* Make sure no deferred error handling is still running. */
    irq_work_sync(&ecclog_irq_work);
    flush_work(&ecclog_work);
    gen_pool_destroy(ecclog_pool);
    igen6_unregister_mcis();
    kfree(igen6_pvt);
}
1259 
/* PCI driver glue; matched entries carry their res_config in driver_data. */
static struct pci_driver igen6_driver = {
    .name     = EDAC_MOD_STR,
    .probe    = igen6_probe,
    .remove   = igen6_remove,
    .id_table = igen6_pci_tbl,
};
1266 
1267 static int __init igen6_init(void)
1268 {
1269     const char *owner;
1270     int rc;
1271 
1272     edac_dbg(2, "\n");
1273 
1274     owner = edac_get_owner();
1275     if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
1276         return -ENODEV;
1277 
1278     edac_op_state = EDAC_OPSTATE_NMI;
1279 
1280     rc = pci_register_driver(&igen6_driver);
1281     if (rc)
1282         return rc;
1283 
1284     igen6_printk(KERN_INFO, "%s\n", IGEN6_REVISION);
1285 
1286     return 0;
1287 }
1288 
/* Module exit: unregister the PCI driver (per-device teardown runs via igen6_remove()). */
static void __exit igen6_exit(void)
{
    edac_dbg(2, "\n");

    pci_unregister_driver(&igen6_driver);
}
1295 
/* Module entry/exit points and metadata. */
module_init(igen6_init);
module_exit(igen6_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Qiuxu Zhuo");
MODULE_DESCRIPTION("MC Driver for Intel client SoC using In-Band ECC");