0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/module.h>
0012 #include <linux/init.h>
0013 #include <linux/pci.h>
0014 #include <linux/pci_ids.h>
0015 #include <linux/slab.h>
0016 #include <linux/delay.h>
0017 #include <linux/edac.h>
0018 #include <linux/mmzone.h>
0019 #include <linux/smp.h>
0020 #include <linux/bitmap.h>
0021 #include <linux/math64.h>
0022 #include <linux/mod_devicetable.h>
0023 #include <asm/cpu_device_id.h>
0024 #include <asm/intel-family.h>
0025 #include <asm/processor.h>
0026 #include <asm/mce.h>
0027
0028 #include "edac_module.h"
0029
0030
0031 static LIST_HEAD(sbridge_edac_list);
0032
0033
0034
0035
0036 #define SBRIDGE_REVISION " Ver: 1.1.2 "
0037 #define EDAC_MOD_STR "sb_edac"
0038
0039
0040
0041
0042 #define sbridge_printk(level, fmt, arg...) \
0043 edac_printk(level, "sbridge", fmt, ##arg)
0044
0045 #define sbridge_mc_printk(mci, level, fmt, arg...) \
0046 edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
0047
0048
0049
0050
0051 #define GET_BITFIELD(v, lo, hi) \
0052 (((v) & GENMASK_ULL(hi, lo)) >> (lo))
0053
0054
0055 static const u32 sbridge_dram_rule[] = {
0056 0x80, 0x88, 0x90, 0x98, 0xa0,
0057 0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
0058 };
0059
0060 static const u32 ibridge_dram_rule[] = {
0061 0x60, 0x68, 0x70, 0x78, 0x80,
0062 0x88, 0x90, 0x98, 0xa0, 0xa8,
0063 0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
0064 0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
0065 };
0066
0067 static const u32 knl_dram_rule[] = {
0068 0x60, 0x68, 0x70, 0x78, 0x80,
0069 0x88, 0x90, 0x98, 0xa0, 0xa8,
0070 0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
0071 0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
0072 0x100, 0x108, 0x110, 0x118,
0073 };
0074
0075 #define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
0076 #define A7MODE(reg) GET_BITFIELD(reg, 26, 26)
0077
0078 static char *show_dram_attr(u32 attr)
0079 {
0080 switch (attr) {
0081 case 0:
0082 return "DRAM";
0083 case 1:
0084 return "MMCFG";
0085 case 2:
0086 return "NXM";
0087 default:
0088 return "unknown";
0089 }
0090 }
0091
0092 static const u32 sbridge_interleave_list[] = {
0093 0x84, 0x8c, 0x94, 0x9c, 0xa4,
0094 0xac, 0xb4, 0xbc, 0xc4, 0xcc,
0095 };
0096
0097 static const u32 ibridge_interleave_list[] = {
0098 0x64, 0x6c, 0x74, 0x7c, 0x84,
0099 0x8c, 0x94, 0x9c, 0xa4, 0xac,
0100 0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
0101 0xdc, 0xe4, 0xec, 0xf4, 0xfc,
0102 };
0103
0104 static const u32 knl_interleave_list[] = {
0105 0x64, 0x6c, 0x74, 0x7c, 0x84,
0106 0x8c, 0x94, 0x9c, 0xa4, 0xac,
0107 0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
0108 0xdc, 0xe4, 0xec, 0xf4, 0xfc,
0109 0x104, 0x10c, 0x114, 0x11c,
0110 };
0111 #define MAX_INTERLEAVE \
0112 (max_t(unsigned int, ARRAY_SIZE(sbridge_interleave_list), \
0113 max_t(unsigned int, ARRAY_SIZE(ibridge_interleave_list), \
0114 ARRAY_SIZE(knl_interleave_list))))
0115
0116 struct interleave_pkg {
0117 unsigned char start;
0118 unsigned char end;
0119 };
0120
0121 static const struct interleave_pkg sbridge_interleave_pkg[] = {
0122 { 0, 2 },
0123 { 3, 5 },
0124 { 8, 10 },
0125 { 11, 13 },
0126 { 16, 18 },
0127 { 19, 21 },
0128 { 24, 26 },
0129 { 27, 29 },
0130 };
0131
0132 static const struct interleave_pkg ibridge_interleave_pkg[] = {
0133 { 0, 3 },
0134 { 4, 7 },
0135 { 8, 11 },
0136 { 12, 15 },
0137 { 16, 19 },
0138 { 20, 23 },
0139 { 24, 27 },
0140 { 28, 31 },
0141 };
0142
0143 static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
0144 int interleave)
0145 {
0146 return GET_BITFIELD(reg, table[interleave].start,
0147 table[interleave].end);
0148 }
0149
0150
0151
0152 #define TOLM 0x80
0153 #define TOHM 0x84
0154 #define HASWELL_TOLM 0xd0
0155 #define HASWELL_TOHM_0 0xd4
0156 #define HASWELL_TOHM_1 0xd8
0157 #define KNL_TOLM 0xd0
0158 #define KNL_TOHM_0 0xd4
0159 #define KNL_TOHM_1 0xd8
0160
0161 #define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff)
0162 #define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
0163
0164
0165
0166 #define SAD_TARGET 0xf0
0167
0168 #define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11)
0169
0170 #define SOURCE_ID_KNL(reg) GET_BITFIELD(reg, 12, 14)
0171
0172 #define SAD_CONTROL 0xf4
0173
0174
0175
0176 static const u32 tad_dram_rule[] = {
0177 0x40, 0x44, 0x48, 0x4c,
0178 0x50, 0x54, 0x58, 0x5c,
0179 0x60, 0x64, 0x68, 0x6c,
0180 };
0181 #define MAX_TAD ARRAY_SIZE(tad_dram_rule)
0182
0183 #define TAD_LIMIT(reg) ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
0184 #define TAD_SOCK(reg) GET_BITFIELD(reg, 10, 11)
0185 #define TAD_CH(reg) GET_BITFIELD(reg, 8, 9)
0186 #define TAD_TGT3(reg) GET_BITFIELD(reg, 6, 7)
0187 #define TAD_TGT2(reg) GET_BITFIELD(reg, 4, 5)
0188 #define TAD_TGT1(reg) GET_BITFIELD(reg, 2, 3)
0189 #define TAD_TGT0(reg) GET_BITFIELD(reg, 0, 1)
0190
0191
0192
0193 #define MCMTR 0x7c
0194 #define KNL_MCMTR 0x624
0195
0196 #define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2)
0197 #define IS_LOCKSTEP_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 1, 1)
0198 #define IS_CLOSE_PG(mcmtr) GET_BITFIELD(mcmtr, 0, 0)
0199
0200
0201
0202 #define RASENABLES 0xac
0203 #define IS_MIRROR_ENABLED(reg) GET_BITFIELD(reg, 0, 0)
0204
0205
0206
0207 static const int mtr_regs[] = {
0208 0x80, 0x84, 0x88,
0209 };
0210
0211 static const int knl_mtr_reg = 0xb60;
0212
0213 #define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19)
0214 #define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14)
0215 #define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13)
0216 #define RANK_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 2, 4)
0217 #define COL_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 0, 1)
0218
0219 static const u32 tad_ch_nilv_offset[] = {
0220 0x90, 0x94, 0x98, 0x9c,
0221 0xa0, 0xa4, 0xa8, 0xac,
0222 0xb0, 0xb4, 0xb8, 0xbc,
0223 };
0224 #define CHN_IDX_OFFSET(reg) GET_BITFIELD(reg, 28, 29)
0225 #define TAD_OFFSET(reg) (GET_BITFIELD(reg, 6, 25) << 26)
0226
0227 static const u32 rir_way_limit[] = {
0228 0x108, 0x10c, 0x110, 0x114, 0x118,
0229 };
0230 #define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
0231
0232 #define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31)
0233 #define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29)
0234
0235 #define MAX_RIR_WAY 8
0236
0237 static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
0238 { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
0239 { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
0240 { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
0241 { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
0242 { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
0243 };
0244
0245 #define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
0246 GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
0247
0248 #define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
0249 GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))
0250
0251
0252
0253
0254
0255
0256
0257 #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31)
0258 #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30)
0259 #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15)
0260 #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14)
0261
0262 #if 0
0263 static const u32 correrrcnt[] = {
0264 0x104, 0x108, 0x10c, 0x110,
0265 };
0266
0267 static const u32 correrrthrsld[] = {
0268 0x11c, 0x120, 0x124, 0x128,
0269 };
0270 #endif
0271
0272 #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30)
0273 #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14)
0274
0275
0276
0277
0278 #define SB_RANK_CFG_A 0x0328
0279
0280 #define IB_RANK_CFG_A 0x0320
0281
0282
0283
0284
0285
0286 #define NUM_CHANNELS 6
0287 #define MAX_DIMMS 3
0288 #define KNL_MAX_CHAS 38
0289 #define KNL_MAX_CHANNELS 6
0290 #define KNL_MAX_EDCS 8
0291 #define CHANNEL_UNSPECIFIED 0xf
0292
0293 enum type {
0294 SANDY_BRIDGE,
0295 IVY_BRIDGE,
0296 HASWELL,
0297 BROADWELL,
0298 KNIGHTS_LANDING,
0299 };
0300
0301 enum domain {
0302 IMC0 = 0,
0303 IMC1,
0304 SOCK,
0305 };
0306
0307 enum mirroring_mode {
0308 NON_MIRRORING,
0309 ADDR_RANGE_MIRRORING,
0310 FULL_MIRRORING,
0311 };
0312
0313 struct sbridge_pvt;
0314 struct sbridge_info {
0315 enum type type;
0316 u32 mcmtr;
0317 u32 rankcfgr;
0318 u64 (*get_tolm)(struct sbridge_pvt *pvt);
0319 u64 (*get_tohm)(struct sbridge_pvt *pvt);
0320 u64 (*rir_limit)(u32 reg);
0321 u64 (*sad_limit)(u32 reg);
0322 u32 (*interleave_mode)(u32 reg);
0323 u32 (*dram_attr)(u32 reg);
0324 const u32 *dram_rule;
0325 const u32 *interleave_list;
0326 const struct interleave_pkg *interleave_pkg;
0327 u8 max_sad;
0328 u8 (*get_node_id)(struct sbridge_pvt *pvt);
0329 u8 (*get_ha)(u8 bank);
0330 enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt);
0331 enum dev_type (*get_width)(struct sbridge_pvt *pvt, u32 mtr);
0332 struct pci_dev *pci_vtd;
0333 };
0334
0335 struct sbridge_channel {
0336 u32 ranks;
0337 u32 dimms;
0338 };
0339
0340 struct pci_id_descr {
0341 int dev_id;
0342 int optional;
0343 enum domain dom;
0344 };
0345
0346 struct pci_id_table {
0347 const struct pci_id_descr *descr;
0348 int n_devs_per_imc;
0349 int n_devs_per_sock;
0350 int n_imcs_per_sock;
0351 enum type type;
0352 };
0353
0354 struct sbridge_dev {
0355 struct list_head list;
0356 int seg;
0357 u8 bus, mc;
0358 u8 node_id, source_id;
0359 struct pci_dev **pdev;
0360 enum domain dom;
0361 int n_devs;
0362 int i_devs;
0363 struct mem_ctl_info *mci;
0364 };
0365
0366 struct knl_pvt {
0367 struct pci_dev *pci_cha[KNL_MAX_CHAS];
0368 struct pci_dev *pci_channel[KNL_MAX_CHANNELS];
0369 struct pci_dev *pci_mc0;
0370 struct pci_dev *pci_mc1;
0371 struct pci_dev *pci_mc0_misc;
0372 struct pci_dev *pci_mc1_misc;
0373 struct pci_dev *pci_mc_info;
0374 };
0375
0376 struct sbridge_pvt {
0377
0378 struct pci_dev *pci_ddrio;
0379 struct pci_dev *pci_sad0, *pci_sad1;
0380 struct pci_dev *pci_br0, *pci_br1;
0381
0382 struct pci_dev *pci_ha, *pci_ta, *pci_ras;
0383 struct pci_dev *pci_tad[NUM_CHANNELS];
0384
0385 struct sbridge_dev *sbridge_dev;
0386
0387 struct sbridge_info info;
0388 struct sbridge_channel channel[NUM_CHANNELS];
0389
0390
0391 bool is_cur_addr_mirrored, is_lockstep, is_close_pg;
0392 bool is_chan_hash;
0393 enum mirroring_mode mirror_mode;
0394
0395
0396 u64 tolm, tohm;
0397 struct knl_pvt knl;
0398 };
0399
0400 #define PCI_DESCR(device_id, opt, domain) \
0401 .dev_id = (device_id), \
0402 .optional = opt, \
0403 .dom = domain
0404
0405 static const struct pci_id_descr pci_dev_descr_sbridge[] = {
0406
0407 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0, IMC0) },
0408
0409
0410 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0, IMC0) },
0411 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0, IMC0) },
0412 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0, IMC0) },
0413 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0, IMC0) },
0414 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0, IMC0) },
0415 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0, IMC0) },
0416 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },
0417
0418
0419 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0, SOCK) },
0420 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0, SOCK) },
0421
0422
0423 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0, SOCK) },
0424 };
0425
0426 #define PCI_ID_TABLE_ENTRY(A, N, M, T) { \
0427 .descr = A, \
0428 .n_devs_per_imc = N, \
0429 .n_devs_per_sock = ARRAY_SIZE(A), \
0430 .n_imcs_per_sock = M, \
0431 .type = T \
0432 }
0433
0434 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
0435 PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
0436 {0,}
0437 };
0438
0439
0440
0441
0442
0443
0444
0445 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0 0x0eb8
0446 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0 0x0ebc
0447
0448
0449 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0 0x0ea0
0450 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA 0x0ea8
0451 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS 0x0e71
0452 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0 0x0eaa
0453 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1 0x0eab
0454 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2 0x0eac
0455 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3 0x0ead
0456 #define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD 0x0ec8
0457 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0 0x0ec9
0458 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1 0x0eca
0459 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1 0x0e60
0460 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA 0x0e68
0461 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS 0x0e79
0462 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 0x0e6a
0463 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1 0x0e6b
0464 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2 0x0e6c
0465 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3 0x0e6d
0466
0467 static const struct pci_id_descr pci_dev_descr_ibridge[] = {
0468
0469 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) },
0470 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) },
0471
0472
0473 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) },
0474 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0, IMC0) },
0475 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0, IMC0) },
0476 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0, IMC0) },
0477 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0, IMC0) },
0478 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) },
0479
0480
0481 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) },
0482 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) },
0483 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) },
0484 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1, IMC1) },
0485 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2, 1, IMC1) },
0486 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3, 1, IMC1) },
0487
0488 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
0489 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },
0490
0491
0492 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0, SOCK) },
0493
0494
0495 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1, SOCK) },
0496 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0, SOCK) },
0497
0498 };
0499
0500 static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
0501 PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
0502 {0,}
0503 };
0504
0505
0506
0507
0508
0509
0510
0511
0512
0513
0514
0515
0516
0517
0518
0519
0520
0521 #define HASWELL_DDRCRCLKCONTROLS 0xa10
0522 #define HASWELL_HASYSDEFEATURE2 0x84
0523 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
0524 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0
0525 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60
0526 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8
0527 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM 0x2f71
0528 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68
0529 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM 0x2f79
0530 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
0531 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
0532 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
0533 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
0534 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
0535 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
0536 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
0537 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
0538 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
0539 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
0540 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
0541 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
0542 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
0543 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
0544 static const struct pci_id_descr pci_dev_descr_haswell[] = {
0545
0546 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0, IMC0) },
0547 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1, IMC1) },
0548
0549 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0, IMC0) },
0550 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM, 0, IMC0) },
0551 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
0552 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
0553 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
0554 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },
0555
0556 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1, IMC1) },
0557 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM, 1, IMC1) },
0558 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
0559 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
0560 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
0561 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },
0562
0563 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
0564 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
0565 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1, SOCK) },
0566 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1, SOCK) },
0567 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1, SOCK) },
0568 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1, SOCK) },
0569 };
0570
0571 static const struct pci_id_table pci_dev_descr_haswell_table[] = {
0572 PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
0573 {0,}
0574 };
0575
0576
0577
0578
0579
0580
0581 #define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
0582
0583
0584 #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840
0585
0586 #define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN 0x7843
0587
0588 #define PCI_DEVICE_ID_INTEL_KNL_IMC_TA 0x7844
0589
0590 #define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0 0x782a
0591
0592 #define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1 0x782b
0593
0594 #define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA 0x782c
0595
0596 #define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM 0x7810
0597
0598
0599
0600
0601
0602
0603
0604
0605 static const struct pci_id_descr pci_dev_descr_knl[] = {
0606 [0 ... 1] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0, IMC0)},
0607 [2 ... 7] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN, 0, IMC0) },
0608 [8] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0, IMC0) },
0609 [9] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
0610 [10] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0, SOCK) },
0611 [11] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0, SOCK) },
0612 [12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0, SOCK) },
0613 };
0614
0615 static const struct pci_id_table pci_dev_descr_knl_table[] = {
0616 PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
0617 {0,}
0618 };
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
0639 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0 0x6fa0
0640 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1 0x6f60
0641 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA 0x6fa8
0642 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM 0x6f71
0643 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA 0x6f68
0644 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM 0x6f79
0645 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
0646 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
0647 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
0648 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab
0649 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac
0650 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad
0651 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 0x6f6a
0652 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1 0x6f6b
0653 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2 0x6f6c
0654 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3 0x6f6d
0655 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf
0656
0657 static const struct pci_id_descr pci_dev_descr_broadwell[] = {
0658
0659 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0, IMC0) },
0660 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1, IMC1) },
0661
0662 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0, IMC0) },
0663 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM, 0, IMC0) },
0664 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
0665 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
0666 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
0667 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },
0668
0669 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA, 1, IMC1) },
0670 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM, 1, IMC1) },
0671 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
0672 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
0673 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
0674 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },
0675
0676 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
0677 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
0678 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1, SOCK) },
0679 };
0680
0681 static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
0682 PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
0683 {0,}
0684 };
0685
0686
0687
0688
0689
0690
0691 static inline int numrank(enum type type, u32 mtr)
0692 {
0693 int ranks = (1 << RANK_CNT_BITS(mtr));
0694 int max = 4;
0695
0696 if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
0697 max = 8;
0698
0699 if (ranks > max) {
0700 edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
0701 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
0702 return -EINVAL;
0703 }
0704
0705 return ranks;
0706 }
0707
0708 static inline int numrow(u32 mtr)
0709 {
0710 int rows = (RANK_WIDTH_BITS(mtr) + 12);
0711
0712 if (rows < 13 || rows > 18) {
0713 edac_dbg(0, "Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)\n",
0714 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
0715 return -EINVAL;
0716 }
0717
0718 return 1 << rows;
0719 }
0720
0721 static inline int numcol(u32 mtr)
0722 {
0723 int cols = (COL_WIDTH_BITS(mtr) + 10);
0724
0725 if (cols > 12) {
0726 edac_dbg(0, "Invalid number of cols: %d (max = 4) raw value = %x (%04x)\n",
0727 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
0728 return -EINVAL;
0729 }
0730
0731 return 1 << cols;
0732 }
0733
/*
 * Look up an sbridge_dev on the global list matching (seg, bus, dom).
 *
 * @prev:      resume the scan just after this entry (NULL starts at the
 *             list head), so repeated calls enumerate all matches.
 * @multi_bus: when set, ignore the match keys entirely and return the
 *             first device on the list (devices spread over several
 *             busses are lumped together under one controller).
 *
 * A request for the SOCK domain matches a device of any domain on the
 * bus.  Returns NULL when no (further) match exists.
 */
static struct sbridge_dev *get_sbridge_dev(int seg, u8 bus, enum domain dom,
					   int multi_bus,
					   struct sbridge_dev *prev)
{
	struct sbridge_dev *sbridge_dev;

	if (multi_bus) {
		return list_first_entry_or_null(&sbridge_edac_list,
				struct sbridge_dev, list);
	}

	/*
	 * Start from the entry after @prev; list_entry() on the head node
	 * is fine here because list_for_each_entry_from() stops when the
	 * cursor reaches the head again.
	 */
	sbridge_dev = list_entry(prev ? prev->list.next
				      : sbridge_edac_list.next, struct sbridge_dev, list);

	list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
		if ((sbridge_dev->seg == seg) && (sbridge_dev->bus == bus) &&
		    (dom == SOCK || dom == sbridge_dev->dom))
			return sbridge_dev;
	}

	return NULL;
}
0760
0761 static struct sbridge_dev *alloc_sbridge_dev(int seg, u8 bus, enum domain dom,
0762 const struct pci_id_table *table)
0763 {
0764 struct sbridge_dev *sbridge_dev;
0765
0766 sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
0767 if (!sbridge_dev)
0768 return NULL;
0769
0770 sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
0771 sizeof(*sbridge_dev->pdev),
0772 GFP_KERNEL);
0773 if (!sbridge_dev->pdev) {
0774 kfree(sbridge_dev);
0775 return NULL;
0776 }
0777
0778 sbridge_dev->seg = seg;
0779 sbridge_dev->bus = bus;
0780 sbridge_dev->dom = dom;
0781 sbridge_dev->n_devs = table->n_devs_per_imc;
0782 list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
0783
0784 return sbridge_dev;
0785 }
0786
/* Unlink an sbridge_dev from the global list and free its memory. */
static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
{
	list_del(&sbridge_dev->list);
	kfree(sbridge_dev->pdev);
	kfree(sbridge_dev);
}
0793
0794 static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
0795 {
0796 u32 reg;
0797
0798
0799 pci_read_config_dword(pvt->pci_sad1, TOLM, ®);
0800 return GET_TOLM(reg);
0801 }
0802
0803 static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
0804 {
0805 u32 reg;
0806
0807 pci_read_config_dword(pvt->pci_sad1, TOHM, ®);
0808 return GET_TOHM(reg);
0809 }
0810
0811 static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
0812 {
0813 u32 reg;
0814
0815 pci_read_config_dword(pvt->pci_br1, TOLM, ®);
0816
0817 return GET_TOLM(reg);
0818 }
0819
0820 static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
0821 {
0822 u32 reg;
0823
0824 pci_read_config_dword(pvt->pci_br1, TOHM, ®);
0825
0826 return GET_TOHM(reg);
0827 }
0828
0829 static u64 rir_limit(u32 reg)
0830 {
0831 return ((u64)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff;
0832 }
0833
0834 static u64 sad_limit(u32 reg)
0835 {
0836 return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff;
0837 }
0838
0839 static u32 interleave_mode(u32 reg)
0840 {
0841 return GET_BITFIELD(reg, 1, 1);
0842 }
0843
0844 static u32 dram_attr(u32 reg)
0845 {
0846 return GET_BITFIELD(reg, 2, 3);
0847 }
0848
0849 static u64 knl_sad_limit(u32 reg)
0850 {
0851 return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff;
0852 }
0853
0854 static u32 knl_interleave_mode(u32 reg)
0855 {
0856 return GET_BITFIELD(reg, 1, 2);
0857 }
0858
0859 static const char * const knl_intlv_mode[] = {
0860 "[8:6]", "[10:8]", "[14:12]", "[32:30]"
0861 };
0862
0863 static const char *get_intlv_mode_str(u32 reg, enum type t)
0864 {
0865 if (t == KNIGHTS_LANDING)
0866 return knl_intlv_mode[knl_interleave_mode(reg)];
0867 else
0868 return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]";
0869 }
0870
0871 static u32 dram_attr_knl(u32 reg)
0872 {
0873 return GET_BITFIELD(reg, 3, 4);
0874 }
0875
0876
0877 static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
0878 {
0879 u32 reg;
0880 enum mem_type mtype;
0881
0882 if (pvt->pci_ddrio) {
0883 pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
0884 ®);
0885 if (GET_BITFIELD(reg, 11, 11))
0886
0887 mtype = MEM_RDDR3;
0888 else
0889 mtype = MEM_DDR3;
0890 } else
0891 mtype = MEM_UNKNOWN;
0892
0893 return mtype;
0894 }
0895
0896 static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
0897 {
0898 u32 reg;
0899 bool registered = false;
0900 enum mem_type mtype = MEM_UNKNOWN;
0901
0902 if (!pvt->pci_ddrio)
0903 goto out;
0904
0905 pci_read_config_dword(pvt->pci_ddrio,
0906 HASWELL_DDRCRCLKCONTROLS, ®);
0907
0908 if (GET_BITFIELD(reg, 16, 16))
0909 registered = true;
0910
0911 pci_read_config_dword(pvt->pci_ta, MCMTR, ®);
0912 if (GET_BITFIELD(reg, 14, 14)) {
0913 if (registered)
0914 mtype = MEM_RDDR4;
0915 else
0916 mtype = MEM_DDR4;
0917 } else {
0918 if (registered)
0919 mtype = MEM_RDDR3;
0920 else
0921 mtype = MEM_DDR3;
0922 }
0923
0924 out:
0925 return mtype;
0926 }
0927
0928 static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
0929 {
0930
0931 return DEV_X16;
0932 }
0933
0934 static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
0935 {
0936
0937 return DEV_UNKNOWN;
0938 }
0939
0940 static enum dev_type __ibridge_get_width(u32 mtr)
0941 {
0942 enum dev_type type = DEV_UNKNOWN;
0943
0944 switch (mtr) {
0945 case 2:
0946 type = DEV_X16;
0947 break;
0948 case 1:
0949 type = DEV_X8;
0950 break;
0951 case 0:
0952 type = DEV_X4;
0953 break;
0954 }
0955
0956 return type;
0957 }
0958
0959 static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
0960 {
0961
0962
0963
0964
0965 return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
0966 }
0967
0968 static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
0969 {
0970
0971 return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
0972 }
0973
0974 static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
0975 {
0976
0977 return MEM_RDDR4;
0978 }
0979
0980 static u8 get_node_id(struct sbridge_pvt *pvt)
0981 {
0982 u32 reg;
0983 pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, ®);
0984 return GET_BITFIELD(reg, 0, 2);
0985 }
0986
0987 static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
0988 {
0989 u32 reg;
0990
0991 pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, ®);
0992 return GET_BITFIELD(reg, 0, 3);
0993 }
0994
0995 static u8 knl_get_node_id(struct sbridge_pvt *pvt)
0996 {
0997 u32 reg;
0998
0999 pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, ®);
1000 return GET_BITFIELD(reg, 0, 2);
1001 }
1002
1003
1004
1005
1006
1007
1008
1009 static u8 sbridge_get_ha(u8 bank)
1010 {
1011 return 0;
1012 }
1013
1014
1015
1016
1017
1018
1019 static u8 ibridge_get_ha(u8 bank)
1020 {
1021 switch (bank) {
1022 case 7 ... 8:
1023 return bank - 7;
1024 case 9 ... 16:
1025 return (bank - 9) / 4;
1026 default:
1027 return 0xff;
1028 }
1029 }
1030
1031
1032 static u8 knl_get_ha(u8 bank)
1033 {
1034 return 0xff;
1035 }
1036
1037 static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
1038 {
1039 u32 reg;
1040
1041 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, ®);
1042 return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1043 }
1044
/*
 * Haswell: TOHM is split across two VT-d misc registers — bits 31:26
 * of TOHM_0 form the low 6 bits of the limit field, TOHM_1 the rest.
 */
static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
{
	u64 rc;
	u32 reg;

	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
	rc = GET_BITFIELD(reg, 26, 31);
	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
	/*
	 * NOTE(review): 'reg' is u32, so 'reg << 6' discards bits 31:26 of
	 * TOHM_1 before the value reaches the u64 — presumably those bits
	 * are reserved in hardware; confirm against the uncore datasheet.
	 */
	rc = ((reg << 6) | rc) << 26;

	return rc | 0x3ffffff;
}
1057
1058 static u64 knl_get_tolm(struct sbridge_pvt *pvt)
1059 {
1060 u32 reg;
1061
1062 pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, ®);
1063 return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1064 }
1065
1066 static u64 knl_get_tohm(struct sbridge_pvt *pvt)
1067 {
1068 u64 rc;
1069 u32 reg_lo, reg_hi;
1070
1071 pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, ®_lo);
1072 pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, ®_hi);
1073 rc = ((u64)reg_hi << 32) | reg_lo;
1074 return rc | 0x3ffffff;
1075 }
1076
1077
1078 static u64 haswell_rir_limit(u32 reg)
1079 {
1080 return (((u64)GET_BITFIELD(reg, 1, 11) + 1) << 29) - 1;
1081 }
1082
1083 static inline u8 sad_pkg_socket(u8 pkg)
1084 {
1085
1086 return ((pkg >> 3) << 2) | (pkg & 0x3);
1087 }
1088
1089 static inline u8 sad_pkg_ha(u8 pkg)
1090 {
1091 return (pkg >> 2) & 0x1;
1092 }
1093
1094 static int haswell_chan_hash(int idx, u64 addr)
1095 {
1096 int i;
1097
1098
1099
1100
1101
1102 for (i = 12; i < 28; i += 2)
1103 idx ^= (addr >> i) & 3;
1104
1105 return idx;
1106 }
1107
1108
1109 static const u32 knl_tad_dram_limit_lo[] = {
1110 0x400, 0x500, 0x600, 0x700,
1111 0x800, 0x900, 0xa00, 0xb00,
1112 };
1113
1114
1115 static const u32 knl_tad_dram_offset_lo[] = {
1116 0x404, 0x504, 0x604, 0x704,
1117 0x804, 0x904, 0xa04, 0xb04,
1118 };
1119
1120
1121 static const u32 knl_tad_dram_hi[] = {
1122 0x408, 0x508, 0x608, 0x708,
1123 0x808, 0x908, 0xa08, 0xb08,
1124 };
1125
1126
1127 static const u32 knl_tad_ways[] = {
1128 8, 6, 4, 3, 2, 1,
1129 };
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
/*
 * Read one KNL TAD (target address decode) entry from memory
 * controller @mc (0 or 1).
 *
 * @entry:  TAD table index (indexes the knl_tad_dram_* register arrays)
 * @offset: out — decoded byte offset for this TAD region
 * @limit:  out — decoded inclusive upper address limit
 * @ways:   out — interleave wayness decoded via knl_tad_ways[]
 *
 * Returns 0 on success, -EINVAL for a bad @mc, -ENODEV when the entry
 * is disabled or the wayness encoding is unrecognized.
 */
static int knl_get_tad(const struct sbridge_pvt *pvt,
		       const int entry,
		       const int mc,
		       u64 *offset,
		       u64 *limit,
		       int *ways)
{
	u32 reg_limit_lo, reg_offset_lo, reg_hi;
	struct pci_dev *pci_mc;
	int way_id;

	switch (mc) {
	case 0:
		pci_mc = pvt->knl.pci_mc0;
		break;
	case 1:
		pci_mc = pvt->knl.pci_mc1;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	pci_read_config_dword(pci_mc,
			knl_tad_dram_limit_lo[entry], &reg_limit_lo);
	pci_read_config_dword(pci_mc,
			knl_tad_dram_offset_lo[entry], &reg_offset_lo);
	pci_read_config_dword(pci_mc,
			knl_tad_dram_hi[entry], &reg_hi);

	/* Bit 0 of the limit register is the entry's enable bit. */
	if (!GET_BITFIELD(reg_limit_lo, 0, 0))
		return -ENODEV;

	/* Wayness is encoded as an index into knl_tad_ways[]. */
	way_id = GET_BITFIELD(reg_limit_lo, 3, 5);

	if (way_id < ARRAY_SIZE(knl_tad_ways)) {
		*ways = knl_tad_ways[way_id];
	} else {
		*ways = 0;
		sbridge_printk(KERN_ERR,
				"Unexpected value %d in mc_tad_limit_lo wayness field\n",
				way_id);
		return -ENODEV;
	}

	/*
	 * Offset and limit are each split: bits 31:6 of the low dword
	 * (64-byte granularity) plus a 16-bit extension in the hi dword.
	 * The limit gets its low 6 bits forced to ones (inclusive bound).
	 */
	*offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
				((u64) GET_BITFIELD(reg_hi, 0, 15) << 32);
	*limit = ((u64) GET_BITFIELD(reg_limit_lo, 6, 31) << 6) | 63 |
				((u64) GET_BITFIELD(reg_hi, 16, 31) << 32);

	return 0;
}
1203
1204
/*
 * Which KNL memory controller owns a given channel?
 * Channels 0-2 belong to MC 1, channels 3-5 to MC 0.
 */
static int knl_channel_mc(int channel)
{
	WARN_ON(channel < 0 || channel >= 6);

	if (channel < 3)
		return 1;

	return 0;
}
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227 static u32 knl_get_edc_route(int entry, u32 reg)
1228 {
1229 WARN_ON(entry >= KNL_MAX_EDCS);
1230 return GET_BITFIELD(reg, entry*3, (entry*3)+2);
1231 }
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250 static u32 knl_get_mc_route(int entry, u32 reg)
1251 {
1252 int mc, chan;
1253
1254 WARN_ON(entry >= KNL_MAX_CHANNELS);
1255
1256 mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
1257 chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
1258
1259 return knl_channel_remap(mc, chan);
1260 }
1261
1262
1263
1264
1265
1266 static void knl_show_edc_route(u32 reg, char *s)
1267 {
1268 int i;
1269
1270 for (i = 0; i < KNL_MAX_EDCS; i++) {
1271 s[i*2] = knl_get_edc_route(i, reg) + '0';
1272 s[i*2+1] = '-';
1273 }
1274
1275 s[KNL_MAX_EDCS*2 - 1] = '\0';
1276 }
1277
1278
1279
1280
1281
1282 static void knl_show_mc_route(u32 reg, char *s)
1283 {
1284 int i;
1285
1286 for (i = 0; i < KNL_MAX_CHANNELS; i++) {
1287 s[i*2] = knl_get_mc_route(i, reg) + '0';
1288 s[i*2+1] = '-';
1289 }
1290
1291 s[KNL_MAX_CHANNELS*2 - 1] = '\0';
1292 }
1293
/* CHA config-space offsets of the EDC and MC route-table registers. */
#define KNL_EDC_ROUTE 0xb8
#define KNL_MC_ROUTE 0xb4

/* Bit 29 of a KNL dram rule (EDRAM flag). */
#define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29)

/* Bit 28 of a KNL dram rule (cacheable flag). */
#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)

/*
 * Bit 29 again under a second name — knl_get_dimm_capacity() uses this
 * one to skip rules that are EDRAM-backed only.
 */
#define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29)

/*
 * NOTE(review): duplicate, identical definition of KNL_CACHEABLE —
 * harmless (same expansion) but redundant.
 */
#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)

/* Bit 27: does this rule use mod3 (3-way) interleaving? */
#define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27)
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
/*
 * Work out how much memory each of the six KNL channels contributes.
 *
 * Walks the per-CHA EDC/MC route tables, then each SAD rule, and for
 * each rule sums the TAD regions on both memory controllers (accounting
 * for dead space), finally dividing each rule's footprint among the
 * channels that participate in its interleave.
 *
 * @pvt:	 driver private data (CHA and SAD PCI devices)
 * @mc_sizes:	 out: per-channel byte counts, KNL_MAX_CHANNELS entries
 *
 * Returns 0 on success, -1 if an interleave target looks malformed.
 */
static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
{
	u64 sad_base, sad_limit = 0;
	u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
	int sad_rule = 0;
	int tad_rule = 0;
	int intrlv_ways, tad_ways;
	u32 first_pkg, pkg;
	int i;
	u64 sad_actual_size[2];	/* TAD bytes per MC for the current SAD rule */
	u32 dram_rule, interleave_reg;
	u32 mc_route_reg[KNL_MAX_CHAS];
	u32 edc_route_reg[KNL_MAX_CHAS];
	int edram_only;
	char edc_route_string[KNL_MAX_EDCS*2];
	char mc_route_string[KNL_MAX_CHANNELS*2];
	int cur_reg_start;
	int mc;
	int channel;
	int participants[KNL_MAX_CHANNELS];

	for (i = 0; i < KNL_MAX_CHANNELS; i++)
		mc_sizes[i] = 0;

	/*
	 * Read the EDC route table from every CHA, logging each distinct
	 * run of identical registers once (cur_reg_start marks the start
	 * of the current run).
	 */
	cur_reg_start = 0;
	for (i = 0; i < KNL_MAX_CHAS; i++) {
		pci_read_config_dword(pvt->knl.pci_cha[i],
				KNL_EDC_ROUTE, &edc_route_reg[i]);

		if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
			knl_show_edc_route(edc_route_reg[i-1],
					edc_route_string);
			if (cur_reg_start == i-1)
				edac_dbg(0, "edc route table for CHA %d: %s\n",
					cur_reg_start, edc_route_string);
			else
				edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
					cur_reg_start, i-1, edc_route_string);
			cur_reg_start = i;
		}
	}
	/* Flush the final run (i == KNL_MAX_CHAS here). */
	knl_show_edc_route(edc_route_reg[i-1], edc_route_string);
	if (cur_reg_start == i-1)
		edac_dbg(0, "edc route table for CHA %d: %s\n",
			cur_reg_start, edc_route_string);
	else
		edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
			cur_reg_start, i-1, edc_route_string);

	/* Same run-compressed dump for the MC route tables. */
	cur_reg_start = 0;
	for (i = 0; i < KNL_MAX_CHAS; i++) {
		pci_read_config_dword(pvt->knl.pci_cha[i],
			KNL_MC_ROUTE, &mc_route_reg[i]);

		if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
			knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
			if (cur_reg_start == i-1)
				edac_dbg(0, "mc route table for CHA %d: %s\n",
					cur_reg_start, mc_route_string);
			else
				edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
					cur_reg_start, i-1, mc_route_string);
			cur_reg_start = i;
		}
	}
	knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
	if (cur_reg_start == i-1)
		edac_dbg(0, "mc route table for CHA %d: %s\n",
			cur_reg_start, mc_route_string);
	else
		edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
			cur_reg_start, i-1, mc_route_string);

	/* Walk the SAD rules; each rule's base is the previous rule's limit. */
	for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) {

		sad_base = sad_limit;

		pci_read_config_dword(pvt->pci_sad0,
			pvt->info.dram_rule[sad_rule], &dram_rule);

		/* Rules are allocated in order; first disabled rule ends the table. */
		if (!DRAM_RULE_ENABLE(dram_rule))
			break;

		edram_only = KNL_EDRAM_ONLY(dram_rule);

		sad_limit = pvt->info.sad_limit(dram_rule)+1;

		pci_read_config_dword(pvt->pci_sad0,
			pvt->info.interleave_list[sad_rule], &interleave_reg);

		/*
		 * Determine the interleave wayness: scan interleave targets
		 * until the first one repeats.  Targets without bit 3 set
		 * are unexpected here.
		 */
		first_pkg = sad_pkg(pvt->info.interleave_pkg,
						interleave_reg, 0);
		for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
			pkg = sad_pkg(pvt->info.interleave_pkg,
						interleave_reg, intrlv_ways);

			if ((pkg & 0x8) == 0) {
				/*
				 * Target is not a memory controller —
				 * can't size DIMMs from this rule.
				 */
				edac_dbg(0, "Unexpected interleave target %d\n",
					pkg);
				return -1;
			}

			if (pkg == first_pkg)
				break;
		}
		/* mod3 rules interleave over three times as many targets. */
		if (KNL_MOD3(dram_rule))
			intrlv_ways *= 3;

		edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n",
			sad_rule,
			sad_base,
			sad_limit,
			intrlv_ways,
			edram_only ? ", EDRAM" : "");

		/*
		 * Sum, for each MC, the sizes of the TAD regions that fall
		 * inside this SAD rule, subtracting per-entry dead space.
		 */
		for (mc = 0; mc < 2; mc++) {
			sad_actual_size[mc] = 0;
			tad_livespace = 0;
			for (tad_rule = 0;
					tad_rule < ARRAY_SIZE(
						knl_tad_dram_limit_lo);
					tad_rule++) {
				if (knl_get_tad(pvt,
						tad_rule,
						mc,
						&tad_deadspace,
						&tad_limit,
						&tad_ways))
					break;

				tad_size = (tad_limit+1) -
					(tad_livespace + tad_deadspace);
				tad_livespace += tad_size;
				tad_base = (tad_limit+1) - tad_size;

				if (tad_base < sad_base) {
					if (tad_limit > sad_base)
						edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
				} else if (tad_base < sad_limit) {
					if (tad_limit+1 > sad_limit) {
						edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
					} else {
						/* TAD region is fully inside this SAD rule. */
						edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
							tad_rule, tad_base,
							tad_limit, tad_size,
							mc);
						sad_actual_size[mc] += tad_size;
					}
				}
			}
		}

		for (mc = 0; mc < 2; mc++) {
			edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n",
				mc, sad_actual_size[mc], sad_actual_size[mc]);
		}

		/* EDRAM-only rules contribute nothing to DIMM capacity. */
		if (edram_only)
			continue;

		/* Which channels does this rule's interleave touch? */
		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
			participants[channel] = 0;

		/*
		 * A channel participates if any CHA's MC route table maps
		 * some route slot to it.
		 */
		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
			int target;
			int cha;

			for (target = 0; target < KNL_MAX_CHANNELS; target++) {
				for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
					if (knl_get_mc_route(target,
						mc_route_reg[cha]) == channel
						&& !participants[channel]) {
						participants[channel] = 1;
						break;
					}
				}
			}
		}

		/* Divide each MC's footprint evenly among participating channels. */
		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
			mc = knl_channel_mc(channel);
			if (participants[channel]) {
				edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
					channel,
					sad_actual_size[mc]/intrlv_ways,
					sad_rule);
				mc_sizes[channel] +=
					sad_actual_size[mc]/intrlv_ways;
			}
		}
	}

	return 0;
}
1562
1563 static void get_source_id(struct mem_ctl_info *mci)
1564 {
1565 struct sbridge_pvt *pvt = mci->pvt_info;
1566 u32 reg;
1567
1568 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
1569 pvt->info.type == KNIGHTS_LANDING)
1570 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, ®);
1571 else
1572 pci_read_config_dword(pvt->pci_br0, SAD_TARGET, ®);
1573
1574 if (pvt->info.type == KNIGHTS_LANDING)
1575 pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
1576 else
1577 pvt->sbridge_dev->source_id = SOURCE_ID(reg);
1578 }
1579
/*
 * Probe every channel/DIMM slot and fill in the EDAC dimm_info entries.
 *
 * @mci:	  the memory controller being populated
 * @knl_mc_sizes: per-channel byte counts from knl_get_dimm_capacity();
 *		  only consulted on Knights Landing
 * @mode:	  EDAC correction mode to record on each DIMM
 *
 * Returns 0 on success, -ENODEV if a populated channel has ECC disabled.
 */
static int __populate_dimms(struct mem_ctl_info *mci,
			    u64 knl_mc_sizes[KNL_MAX_CHANNELS],
			    enum edac_type mode)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
							 : NUM_CHANNELS;
	unsigned int i, j, banks, ranks, rows, cols, npages;
	struct dimm_info *dimm;
	enum mem_type mtype;
	u64 size;

	mtype = pvt->info.get_memory_type(pvt);
	if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
		edac_dbg(0, "Memory is registered\n");
	else if (mtype == MEM_UNKNOWN)
		edac_dbg(0, "Cannot determine memory type\n");
	else
		edac_dbg(0, "Memory is unregistered\n");

	/* DDR4 parts have 16 banks, DDR3 parts 8. */
	if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
		banks = 16;
	else
		banks = 8;

	for (i = 0; i < channels; i++) {
		u32 mtr;

		int max_dimms_per_channel;

		if (pvt->info.type == KNIGHTS_LANDING) {
			/* KNL exposes one MTR per channel device. */
			max_dimms_per_channel = 1;
			if (!pvt->knl.pci_channel[i])
				continue;
		} else {
			max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
			if (!pvt->pci_tad[i])
				continue;
		}

		for (j = 0; j < max_dimms_per_channel; j++) {
			dimm = edac_get_dimm(mci, i, j, 0);
			if (pvt->info.type == KNIGHTS_LANDING) {
				pci_read_config_dword(pvt->knl.pci_channel[i],
					knl_mtr_reg, &mtr);
			} else {
				pci_read_config_dword(pvt->pci_tad[i],
					mtr_regs[j], &mtr);
			}
			edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
			if (IS_DIMM_PRESENT(mtr)) {
				/* DIMMs without ECC cannot be driven by EDAC. */
				if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
					sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
						       pvt->sbridge_dev->source_id,
						       pvt->sbridge_dev->dom, i);
					return -ENODEV;
				}
				pvt->channel[i].dimms++;

				ranks = numrank(pvt->info.type, mtr);

				if (pvt->info.type == KNIGHTS_LANDING) {
					/*
					 * KNL doesn't report geometry; derive
					 * rows from the known channel size
					 * assuming 1K columns, x8 width.
					 */
					cols = 1 << 10;
					rows = knl_mc_sizes[i] /
						((u64) cols * ranks * banks * 8);
				} else {
					rows = numrow(mtr);
					cols = numcol(mtr);
				}

				/* Geometry product in bytes, converted to MiB (>> 20 - 3 bits). */
				size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
				npages = MiB_TO_PAGES(size);

				edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
					 pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
					 size, npages,
					 banks, ranks, rows, cols);

				dimm->nr_pages = npages;
				dimm->grain = 32;
				dimm->dtype = pvt->info.get_width(pvt, mtr);
				dimm->mtype = mtype;
				dimm->edac_mode = mode;
				snprintf(dimm->label, sizeof(dimm->label),
					 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
					 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
			}
		}
	}

	return 0;
}
1673
/*
 * Discover mirroring/lockstep/page-mode configuration for this memory
 * controller and then populate its DIMM table.
 *
 * Returns 0 on success; -1 if KNL capacity discovery fails, -ENODEV on
 * failed config-space reads, or any error from __populate_dimms().
 */
static int get_dimm_config(struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
	enum edac_type mode;
	u32 reg;

	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
		 pvt->sbridge_dev->mc,
		 pvt->sbridge_dev->node_id,
		 pvt->sbridge_dev->source_id);

	if (pvt->info.type == KNIGHTS_LANDING) {
		/* KNL has no mirroring/lockstep; always chunked SECDED. */
		mode = EDAC_S4ECD4ED;
		pvt->mirror_mode = NON_MIRRORING;
		pvt->is_cur_addr_mirrored = false;

		if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
			return -1;
		if (pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr)) {
			edac_dbg(0, "Failed to read KNL_MCMTR register\n");
			return -ENODEV;
		}
	} else {
		if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
			if (pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg)) {
				edac_dbg(0, "Failed to read HASWELL_HASYSDEFEATURE2 register\n");
				return -ENODEV;
			}
			pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
			/*
			 * Bit 28: address-range mirroring; skip the
			 * full-mirroring check below when set.
			 */
			if (GET_BITFIELD(reg, 28, 28)) {
				pvt->mirror_mode = ADDR_RANGE_MIRRORING;
				edac_dbg(0, "Address range partial memory mirroring is enabled\n");
				goto next;
			}
		}
		if (pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg)) {
			edac_dbg(0, "Failed to read RASENABLES register\n");
			return -ENODEV;
		}
		if (IS_MIRROR_ENABLED(reg)) {
			pvt->mirror_mode = FULL_MIRRORING;
			edac_dbg(0, "Full memory mirroring is enabled\n");
		} else {
			pvt->mirror_mode = NON_MIRRORING;
			edac_dbg(0, "Memory mirroring is disabled\n");
		}

next:
		if (pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr)) {
			edac_dbg(0, "Failed to read MCMTR register\n");
			return -ENODEV;
		}
		/* Lockstep pairs channels, so correction covers 8 chunks. */
		if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
			edac_dbg(0, "Lockstep is enabled\n");
			mode = EDAC_S8ECD8ED;
			pvt->is_lockstep = true;
		} else {
			edac_dbg(0, "Lockstep is disabled\n");
			mode = EDAC_S4ECD4ED;
			pvt->is_lockstep = false;
		}
		if (IS_CLOSE_PG(pvt->info.mcmtr)) {
			edac_dbg(0, "address map is on closed page mode\n");
			pvt->is_close_pg = true;
		} else {
			edac_dbg(0, "address map is on open page mode\n");
			pvt->is_close_pg = false;
		}
	}

	return __populate_dimms(mci, knl_mc_sizes, mode);
}
1751
/*
 * Dump the full memory address-decode layout (TOLM/TOHM, SAD, TAD and
 * RIR tables) to the EDAC debug log.  Debug aid only — no state besides
 * pvt->tolm/pvt->tohm is modified.
 */
static void get_memory_layout(const struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	int i, j, k, n_sads, n_tads, sad_interl;
	u32 reg;
	u64 limit, prv = 0;
	u64 tmp_mb;
	u32 gb, mb;
	u32 rir_way;

	/* Step 1: top-of-low-memory and top-of-high-memory boundaries. */
	pvt->tolm = pvt->info.get_tolm(pvt);
	tmp_mb = (1 + pvt->tolm) >> 20;

	gb = div_u64_rem(tmp_mb, 1024, &mb);
	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
		gb, (mb*1000)/1024, (u64)pvt->tolm);

	pvt->tohm = pvt->info.get_tohm(pvt);
	tmp_mb = (1 + pvt->tohm) >> 20;

	gb = div_u64_rem(tmp_mb, 1024, &mb);
	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
		gb, (mb*1000)/1024, (u64)pvt->tohm);

	/*
	 * Step 2: SAD rules and their interleave lists.  Rules must have
	 * increasing limits; a non-increasing limit ends the table.
	 */
	prv = 0;
	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
				      &reg);
		limit = pvt->info.sad_limit(reg);

		if (!DRAM_RULE_ENABLE(reg))
			continue;

		if (limit <= prv)
			break;

		tmp_mb = (limit + 1) >> 20;
		gb = div_u64_rem(tmp_mb, 1024, &mb);
		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
			 n_sads,
			 show_dram_attr(pvt->info.dram_attr(reg)),
			 gb, (mb*1000)/1024,
			 ((u64)tmp_mb) << 20L,
			 get_intlv_mode_str(reg, pvt->info.type),
			 reg);
		prv = limit;

		pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
				      &reg);
		/* Targets repeat once the interleave list wraps around. */
		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
		for (j = 0; j < 8; j++) {
			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
			if (j > 0 && sad_interl == pkg)
				break;

			edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
				 n_sads, j, pkg);
		}
	}

	/* KNL has no TAD/RIR tables in this layout; stop here. */
	if (pvt->info.type == KNIGHTS_LANDING)
		return;

	/* Step 3: TAD rules, again terminated by a non-increasing limit. */
	prv = 0;
	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
		pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], &reg);
		limit = TAD_LIMIT(reg);
		if (limit <= prv)
			break;
		tmp_mb = (limit + 1) >> 20;

		gb = div_u64_rem(tmp_mb, 1024, &mb);
		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
			 n_tads, gb, (mb*1000)/1024,
			 ((u64)tmp_mb) << 20L,
			 (u32)(1 << TAD_SOCK(reg)),
			 (u32)TAD_CH(reg) + 1,
			 (u32)TAD_TGT0(reg),
			 (u32)TAD_TGT1(reg),
			 (u32)TAD_TGT2(reg),
			 (u32)TAD_TGT3(reg),
			 reg);
		prv = limit;
	}

	/* Step 4: per-channel TAD offsets, for channels that have DIMMs. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->channel[i].dimms)
			continue;
		for (j = 0; j < n_tads; j++) {
			pci_read_config_dword(pvt->pci_tad[i],
					      tad_ch_nilv_offset[j],
					      &reg);
			tmp_mb = TAD_OFFSET(reg) >> 20;
			gb = div_u64_rem(tmp_mb, 1024, &mb);
			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
				 i, j,
				 gb, (mb*1000)/1024,
				 ((u64)tmp_mb) << 20L,
				 reg);
		}
	}

	/* Step 5: RIR (rank interleave) ranges and their per-way offsets. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->channel[i].dimms)
			continue;
		for (j = 0; j < MAX_RIR_RANGES; j++) {
			pci_read_config_dword(pvt->pci_tad[i],
					      rir_way_limit[j],
					      &reg);

			if (!IS_RIR_VALID(reg))
				continue;

			tmp_mb = pvt->info.rir_limit(reg) >> 20;
			rir_way = 1 << RIR_WAY(reg);
			gb = div_u64_rem(tmp_mb, 1024, &mb);
			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
				 i, j,
				 gb, (mb*1000)/1024,
				 ((u64)tmp_mb) << 20L,
				 rir_way,
				 reg);

			for (k = 0; k < rir_way; k++) {
				pci_read_config_dword(pvt->pci_tad[i],
						      rir_offset[j][k],
						      &reg);
				tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;

				gb = div_u64_rem(tmp_mb, 1024, &mb);
				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
					 i, j, k,
					 gb, (mb*1000)/1024,
					 ((u64)tmp_mb) << 20L,
					 (u32)RIR_RNK_TGT(pvt->info.type, reg),
					 reg);
			}
		}
	}
}
1913
1914 static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
1915 {
1916 struct sbridge_dev *sbridge_dev;
1917
1918 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
1919 if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha)
1920 return sbridge_dev->mci;
1921 }
1922 return NULL;
1923 }
1924
/*
 * Translate a system physical address into its memory topology:
 * socket, home agent, channel mask and rank, following the hardware's
 * SAD -> TAD -> RIR decode pipeline.
 *
 * @mci:	  MC that reported the error (may be swapped for the MC that
 *		  actually owns the address once the SAD decode completes)
 * @addr:	  system physical address of the error
 * @socket/@ha:	  out: CPU socket and home agent owning the address
 * @channel_mask: out: bitmask of channels involved (mirrors/lockstep add bits)
 * @rank:	  out: rank target from the RIR entry
 * @area_type:	  out: name of the SAD region type ("DRAM", "MMCFG", ...)
 * @msg:	  out: human-readable failure reason on error
 *
 * Returns 0 on success, -EINVAL with @msg filled on any decode failure.
 */
static int get_memory_error_data(struct mem_ctl_info *mci,
				 u64 addr,
				 u8 *socket, u8 *ha,
				 long *channel_mask,
				 u8 *rank,
				 char **area_type, char *msg)
{
	struct mem_ctl_info *new_mci;
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pci_ha;
	int n_rir, n_sads, n_tads, sad_way, sck_xch;
	int sad_interl, idx, base_ch;
	int interleave_mode, shiftup = 0;
	unsigned int sad_interleave[MAX_INTERLEAVE];
	u32 reg, dram_rule;
	u8 ch_way, sck_way, pkg, sad_ha = 0;
	u32 tad_offset;
	u32 rir_way;
	u32 mb, gb;
	u64 ch_addr, offset, limit = 0, prv = 0;

	/*
	 * Addresses in the TOLM..4GB hole or above TOHM are not DRAM and
	 * cannot be decoded.
	 */
	if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
		sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
		return -EINVAL;
	}
	if (addr >= (u64)pvt->tohm) {
		sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
		return -EINVAL;
	}

	/* Step 1: find the SAD rule covering @addr. */
	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
				      &reg);

		if (!DRAM_RULE_ENABLE(reg))
			continue;

		limit = pvt->info.sad_limit(reg);
		if (limit <= prv) {
			sprintf(msg, "Can't discover the memory socket");
			return -EINVAL;
		}
		if (addr <= limit)
			break;
		prv = limit;
	}
	if (n_sads == pvt->info.max_sad) {
		sprintf(msg, "Can't discover the memory socket");
		return -EINVAL;
	}
	dram_rule = reg;
	*area_type = show_dram_attr(pvt->info.dram_attr(dram_rule));
	interleave_mode = pvt->info.interleave_mode(dram_rule);

	pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
			      &reg);

	/* Step 2: decode the interleave target — layout differs per generation. */
	if (pvt->info.type == SANDY_BRIDGE) {
		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
		for (sad_way = 0; sad_way < 8; sad_way++) {
			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
			if (sad_way > 0 && sad_interl == pkg)
				break;
			sad_interleave[sad_way] = pkg;
			edac_dbg(0, "SAD interleave #%d: %d\n",
				 sad_way, sad_interleave[sad_way]);
		}
		edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
			 pvt->sbridge_dev->mc,
			 n_sads,
			 addr,
			 limit,
			 sad_way + 7,
			 !interleave_mode ? "" : "XOR[18:16]");
		/* interleave_mode XORs addr bits [18:16] into the index. */
		if (interleave_mode)
			idx = ((addr >> 6) ^ (addr >> 16)) & 7;
		else
			idx = (addr >> 6) & 7;
		switch (sad_way) {
		case 1:
			idx = 0;
			break;
		case 2:
			idx = idx & 1;
			break;
		case 4:
			idx = idx & 3;
			break;
		case 8:
			break;
		default:
			sprintf(msg, "Can't discover socket interleave");
			return -EINVAL;
		}
		*socket = sad_interleave[idx];
		edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
			 idx, sad_way, *socket);
	} else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
		int bits, a7mode = A7MODE(dram_rule);

		if (a7mode) {
			/* A7 mode interleaves on bits [8:7] and 9 instead of [8:6]. */
			bits = GET_BITFIELD(addr, 7, 8) << 1;
			bits |= GET_BITFIELD(addr, 9, 9);
		} else
			bits = GET_BITFIELD(addr, 6, 8);

		if (interleave_mode == 0) {
			/* XOR interleave with address bits [18:16]. */
			idx = GET_BITFIELD(addr, 16, 18);
			idx ^= bits;
		} else
			idx = bits;

		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
		*socket = sad_pkg_socket(pkg);
		sad_ha = sad_pkg_ha(pkg);

		if (a7mode) {
			/* A7 mode may swap a TAD decode bit; see bit 22. */
			pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
			shiftup = GET_BITFIELD(reg, 22, 22);
		}

		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
			 idx, *socket, sad_ha, shiftup);
	} else {
		/* Ivy Bridge and similar: plain [8:6] index into the target list. */
		idx = (addr >> 6) & 7;
		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
		*socket = sad_pkg_socket(pkg);
		sad_ha = sad_pkg_ha(pkg);
		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
			 idx, *socket, sad_ha);
	}

	*ha = sad_ha;

	/*
	 * Switch to the MC that actually owns the address before reading
	 * its TAD/RIR tables.
	 */
	new_mci = get_mci_for_node_id(*socket, sad_ha);
	if (!new_mci) {
		sprintf(msg, "Struct for socket #%u wasn't initialized",
			*socket);
		return -EINVAL;
	}
	mci = new_mci;
	pvt = mci->pvt_info;

	/* Step 3: find the TAD rule covering @addr. */
	prv = 0;
	pci_ha = pvt->pci_ha;
	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
		pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
		limit = TAD_LIMIT(reg);
		if (limit <= prv) {
			sprintf(msg, "Can't discover the memory channel");
			return -EINVAL;
		}
		if (addr <= limit)
			break;
		prv = limit;
	}
	if (n_tads == MAX_TAD) {
		sprintf(msg, "Can't discover the memory channel");
		return -EINVAL;
	}

	ch_way = TAD_CH(reg) + 1;
	sck_way = TAD_SOCK(reg);

	/* 3-way channel interleave indexes directly on bit 6 and up. */
	if (ch_way == 3)
		idx = addr >> 6;
	else {
		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
		if (pvt->is_chan_hash)
			idx = haswell_chan_hash(idx, addr);
	}
	idx = idx % ch_way;

	/* Pick the base channel from the TAD target fields. */
	switch (idx) {
	case 0:
		base_ch = TAD_TGT0(reg);
		break;
	case 1:
		base_ch = TAD_TGT1(reg);
		break;
	case 2:
		base_ch = TAD_TGT2(reg);
		break;
	case 3:
		base_ch = TAD_TGT3(reg);
		break;
	default:
		sprintf(msg, "Can't discover the TAD target");
		return -EINVAL;
	}
	*channel_mask = 1 << base_ch;

	pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);

	/* Mirroring pairs channel N with channel (N+2) % 4. */
	if (pvt->mirror_mode == FULL_MIRRORING ||
	    (pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0)) {
		*channel_mask |= 1 << ((base_ch + 2) % 4);
		switch(ch_way) {
		case 2:
		case 4:
			sck_xch = (1 << sck_way) * (ch_way >> 1);
			break;
		default:
			sprintf(msg, "Invalid mirror set. Can't decode addr");
			return -EINVAL;
		}

		pvt->is_cur_addr_mirrored = true;
	} else {
		sck_xch = (1 << sck_way) * ch_way;
		pvt->is_cur_addr_mirrored = false;
	}

	/* Lockstep pairs channel N with channel (N+1) % 4. */
	if (pvt->is_lockstep)
		*channel_mask |= 1 << ((base_ch + 1) % 4);

	offset = TAD_OFFSET(tad_offset);

	edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
		 n_tads,
		 addr,
		 limit,
		 sck_way,
		 ch_way,
		 offset,
		 idx,
		 base_ch,
		 *channel_mask);

	/* Step 4: convert the system address into a channel address. */
	if (offset > addr) {
		sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
			offset, addr);
		return -EINVAL;
	}

	/*
	 * Remove the interleave bits, divide by the combined interleave
	 * factor and re-append the low bits.
	 */
	ch_addr = addr - offset;
	ch_addr >>= (6 + shiftup);
	ch_addr /= sck_xch;
	ch_addr <<= (6 + shiftup);
	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);

	/* Step 5: find the RIR range covering the channel address. */
	for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
		pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], &reg);

		if (!IS_RIR_VALID(reg))
			continue;

		limit = pvt->info.rir_limit(reg);
		gb = div_u64_rem(limit >> 20, 1024, &mb);
		edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
			 n_rir,
			 gb, (mb*1000)/1024,
			 limit,
			 1 << RIR_WAY(reg));
		if (ch_addr <= limit)
			break;
	}
	if (n_rir == MAX_RIR_RANGES) {
		sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
			ch_addr);
		return -EINVAL;
	}
	rir_way = RIR_WAY(reg);

	/* Index bit differs between closed-page (bit 6) and open-page (bit 13). */
	if (pvt->is_close_pg)
		idx = (ch_addr >> 6);
	else
		idx = (ch_addr >> 13);
	idx %= 1 << rir_way;

	pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg);
	*rank = RIR_RNK_TGT(pvt->info.type, reg);

	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
		 n_rir,
		 ch_addr,
		 limit,
		 rir_way,
		 idx);

	return 0;
}
2238
/*
 * Derive socket/home-agent/channel-mask directly from MCE record fields
 * instead of decoding the address through the SAD/TAD/RIR tables; used
 * when the hardware reports the channel in MCi_STATUS.
 *
 * Returns 0 on success, -EINVAL with @msg filled on any inconsistency.
 */
static int get_memory_error_data_from_mce(struct mem_ctl_info *mci,
					  const struct mce *m, u8 *socket,
					  u8 *ha, long *channel_mask,
					  char *msg)
{
	/* Channel number lives in bits [3:0] of MCi_STATUS. */
	u32 reg, channel = GET_BITFIELD(m->status, 0, 3);
	struct mem_ctl_info *new_mci;
	struct sbridge_pvt *pvt;
	struct pci_dev *pci_ha;
	bool tad0;

	if (channel >= NUM_CHANNELS) {
		sprintf(msg, "Invalid channel 0x%x", channel);
		return -EINVAL;
	}

	pvt = mci->pvt_info;
	/* Only generations that map MCA banks to home agents can use this path. */
	if (!pvt->info.get_ha) {
		sprintf(msg, "No get_ha()");
		return -EINVAL;
	}
	*ha = pvt->info.get_ha(m->bank);
	if (*ha != 0 && *ha != 1) {
		sprintf(msg, "Impossible bank %d", m->bank);
		return -EINVAL;
	}

	*socket = m->socketid;
	new_mci = get_mci_for_node_id(*socket, *ha);
	if (!new_mci) {
		strcpy(msg, "mci socket got corrupted!");
		return -EINVAL;
	}

	/* Address-range mirroring only applies inside TAD rule 0. */
	pvt = new_mci->pvt_info;
	pci_ha = pvt->pci_ha;
	pci_read_config_dword(pci_ha, tad_dram_rule[0], &reg);
	tad0 = m->addr <= TAD_LIMIT(reg);

	*channel_mask = 1 << channel;
	/* Mirroring pairs channel N with (N+2) % 4. */
	if (pvt->mirror_mode == FULL_MIRRORING ||
	    (pvt->mirror_mode == ADDR_RANGE_MIRRORING && tad0)) {
		*channel_mask |= 1 << ((channel + 2) % 4);
		pvt->is_cur_addr_mirrored = true;
	} else {
		pvt->is_cur_addr_mirrored = false;
	}

	/* Lockstep pairs channel N with (N+1) % 4. */
	if (pvt->is_lockstep)
		*channel_mask |= 1 << ((channel + 1) % 4);

	return 0;
}
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301 static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
2302 {
2303 int i;
2304
2305 edac_dbg(0, "\n");
2306 for (i = 0; i < sbridge_dev->n_devs; i++) {
2307 struct pci_dev *pdev = sbridge_dev->pdev[i];
2308 if (!pdev)
2309 continue;
2310 edac_dbg(0, "Removing dev %02x:%02x.%d\n",
2311 pdev->bus->number,
2312 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
2313 pci_dev_put(pdev);
2314 }
2315 }
2316
2317 static void sbridge_put_all_devices(void)
2318 {
2319 struct sbridge_dev *sbridge_dev, *tmp;
2320
2321 list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
2322 sbridge_put_devices(sbridge_dev);
2323 free_sbridge_dev(sbridge_dev);
2324 }
2325 }
2326
/*
 * Find (the next instance of) one PCI device from a device table and
 * attach it to the matching sbridge_dev, allocating one if needed.
 *
 * @prev:	in/out: previous matching pci_dev, for iterating duplicates;
 *		set to the found device (or NULL when exhausted) on return
 * @num_mc:	in/out: incremented when a new memory controller is allocated
 * @table:	per-generation device table
 * @devno:	index of the descriptor within @table
 * @multi_bus:	allow matches across buses (Knights Landing)
 *
 * Returns 0 on success or benign absence (optional device, end of
 * iteration), -ENODEV / -ENOMEM on real failures.
 */
static int sbridge_get_onedevice(struct pci_dev **prev,
				 u8 *num_mc,
				 const struct pci_id_table *table,
				 const unsigned devno,
				 const int multi_bus)
{
	struct sbridge_dev *sbridge_dev = NULL;
	const struct pci_id_descr *dev_descr = &table->descr[devno];
	struct pci_dev *pdev = NULL;
	int seg = 0;
	u8 bus = 0;
	int i = 0;

	sbridge_printk(KERN_DEBUG,
		"Seeking for: PCI ID %04x:%04x\n",
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	/* pci_get_device() returns the device with a reference held. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	if (!pdev) {
		if (*prev) {
			/* End of iteration over duplicates — not an error. */
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		/* Device 0 missing just means this table doesn't apply. */
		if (devno == 0)
			return -ENODEV;

		sbridge_printk(KERN_INFO,
			"Device not found: %04x:%04x\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		/* A required device is missing. */
		return -ENODEV;
	}
	seg = pci_domain_nr(pdev->bus);
	bus = pdev->bus->number;

next_imc:
	sbridge_dev = get_sbridge_dev(seg, bus, dev_descr->dom,
				      multi_bus, sbridge_dev);
	if (!sbridge_dev) {
		/* If HA1 wasn't found, don't create an EDAC second-socket entry. */
		if (dev_descr->dom == IMC1 && devno != 1) {
			edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
				 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
			pci_dev_put(pdev);
			return 0;
		}

		/* Socket-scope devices don't get their own sbridge_dev. */
		if (dev_descr->dom == SOCK)
			goto out_imc;

		sbridge_dev = alloc_sbridge_dev(seg, bus, dev_descr->dom, table);
		if (!sbridge_dev) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
		(*num_mc)++;
	}

	if (sbridge_dev->pdev[sbridge_dev->i_devs]) {
		sbridge_printk(KERN_ERR,
			"Duplicated device for %04x:%04x\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev;

	/* The same pdev can be stored in several IMCs; take an extra ref each time. */
	if (++i > 1)
		pci_dev_get(pdev);

	if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
		goto next_imc;

out_imc:
	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		sbridge_printk(KERN_ERR,
			"Couldn't enable %04x:%04x\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	edac_dbg(0, "Detected %04x:%04x\n",
		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	/*
	 * Hold an extra reference so the iterator in *prev stays valid
	 * across the caller's next pci_get_device() call, which drops one.
	 */
	pci_dev_get(pdev);

	*prev = pdev;

	return 0;
}
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
/*
 * Discover all PCI devices listed in @table (an array of per-generation
 * tables terminated by a NULL descr), attaching each to its sbridge_dev.
 *
 * On Knights Landing duplicated device ids are allowed and devices may
 * live on multiple buses, so iteration continues across duplicates.
 *
 * Returns 0 on success, -ENODEV after releasing everything on failure.
 */
static int sbridge_get_all_devices(u8 *num_mc,
					const struct pci_id_table *table)
{
	int i, rc;
	struct pci_dev *pdev = NULL;
	int allow_dups = 0;
	int multi_bus = 0;

	if (table->type == KNIGHTS_LANDING)
		allow_dups = multi_bus = 1;
	while (table && table->descr) {
		for (i = 0; i < table->n_devs_per_sock; i++) {
			/* Restart iteration unless this id repeats the previous one. */
			if (!allow_dups || i == 0 ||
					table->descr[i].dev_id !=
						table->descr[i-1].dev_id) {
				pdev = NULL;
			}
			do {
				rc = sbridge_get_onedevice(&pdev, num_mc,
							   table, i, multi_bus);
				if (rc < 0) {
					/*
					 * Device 0 absent: this table doesn't
					 * match the hardware — skip to the
					 * next table rather than failing.
					 */
					if (i == 0) {
						i = table->n_devs_per_sock;
						break;
					}
					sbridge_put_all_devices();
					return -ENODEV;
				}
			} while (pdev && !allow_dups);
		}
		table++;
	}

	return 0;
}
2478
2479
2480
2481
2482
2483
/*
 * Map a TAD PCI device id to a 0-based channel index: low nibble of the
 * id minus 0xa (used with the SBRIDGE_IMC_TAD0..TAD3 ids below).
 */
#define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)
2485
/*
 * Sandy Bridge: sort the previously-discovered PCI devices of
 * @sbridge_dev into the pvt's role-specific slots (SAD, HA, TA, RAS,
 * per-channel TAD, DDRIO) and verify all mandatory ones were found.
 *
 * Returns 0 on success, -ENODEV if a required device is missing,
 * -EINVAL if an unexpected device id is present.
 */
static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u8 saw_chan_mask = 0;
	int i;

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
			pvt->pci_sad0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
			pvt->pci_sad1 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
			pvt->pci_br0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
			pvt->pci_ha = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
			pvt->pci_ta = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
			pvt->pci_ras = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
		{
			/* Track which channels we saw via the mask. */
			int id = TAD_DEV_TO_CHAN(pdev->device);
			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
			pvt->pci_ddrio = pdev;
			break;
		default:
			goto error;
		}

		edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
			 pdev->vendor, pdev->device,
			 sbridge_dev->bus,
			 pdev);
	}

	/* Check if everything were registered (DDRIO is optional). */
	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha ||
	    !pvt->pci_ras || !pvt->pci_ta)
		goto enodev;

	/* All four TAD channel devices must be present. */
	if (saw_chan_mask != 0x0f)
		goto enodev;
	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;

error:
	sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
		       PCI_VENDOR_ID_INTEL, pdev->device);
	return -EINVAL;
}
2559
/*
 * ibridge_mci_bind_devs - Attach the discovered Ivy Bridge PCI devices
 *			   to the private data of @mci.
 *
 * Same contract as sbridge_mci_bind_devs(): 0 on success, -ENODEV when a
 * required device is missing, -EINVAL on an unexpected device id.  Ivy
 * Bridge may run with either 4 channels (mask 0x0f) or 2 (mask 0x03).
 */
static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u8 saw_chan_mask = 0;	/* one bit per TAD channel seen */
	int i;

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
			pvt->pci_ha = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
			pvt->pci_ta = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
			pvt->pci_ras = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
		{
			/* TAD devices of either home agent: map to channel. */
			int id = TAD_DEV_TO_CHAN(pdev->device);
			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
			pvt->pci_ddrio = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
			pvt->pci_ddrio = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
			pvt->pci_sad0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
			pvt->pci_br0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
			pvt->pci_br1 = pdev;
			break;
		default:
			goto error;
		}

		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
			 sbridge_dev->bus,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			 pdev);
	}

	/* Check if everything were registered */
	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 ||
	    !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
		goto enodev;

	/* 4-channel (0x0f) and 2-channel (0x03) configs are both valid. */
	if (saw_chan_mask != 0x0f &&
	    saw_chan_mask != 0x03)
		goto enodev;
	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;

error:
	sbridge_printk(KERN_ERR,
		       "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
		       pdev->device);
	return -EINVAL;
}
2645
/*
 * haswell_mci_bind_devs - Attach the discovered Haswell PCI devices to
 *			   the private data of @mci.
 *
 * Unlike the Sandy/Ivy Bridge variants, unknown device ids are silently
 * ignored (default: break) rather than treated as an error.  Returns 0
 * on success, -ENODEV when a required device is missing.
 */
static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u8 saw_chan_mask = 0;	/* one bit per TAD channel seen */
	int i;

	/* there's only one device per system; not tied to any bus */
	if (pvt->info.pci_vtd == NULL)
		/* result will be checked later */
		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
						   PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
						   NULL);

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
			pvt->pci_sad0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
			pvt->pci_sad1 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
			pvt->pci_ha = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
			pvt->pci_ta = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM:
			/* The TM (thermal) device fills the RAS slot here. */
			pvt->pci_ras = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
		{
			int id = TAD_DEV_TO_CHAN(pdev->device);
			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
			/* Keep only the first DDRIO device found. */
			if (!pvt->pci_ddrio)
				pvt->pci_ddrio = pdev;
			break;
		default:
			break;
		}

		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
			 sbridge_dev->bus,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			 pdev);
	}

	/* Check if everything were registered */
	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
	    !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
		goto enodev;

	/* 4-channel (0x0f) and 2-channel (0x03) configs are both valid. */
	if (saw_chan_mask != 0x0f &&
	    saw_chan_mask != 0x03)
		goto enodev;
	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;
}
2730
/*
 * broadwell_mci_bind_devs - Attach the discovered Broadwell PCI devices
 *			     to the private data of @mci.
 *
 * Mirrors haswell_mci_bind_devs(): unknown device ids are ignored.
 * Returns 0 on success, -ENODEV when a required device is missing.
 */
static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
				   struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u8 saw_chan_mask = 0;	/* one bit per TAD channel seen */
	int i;

	/* there's only one device per system; not tied to any bus */
	if (pvt->info.pci_vtd == NULL)
		/* result will be checked later */
		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
						   PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
						   NULL);

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
			pvt->pci_sad0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
			pvt->pci_sad1 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
			pvt->pci_ha = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
			pvt->pci_ta = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM:
			/* The TM (thermal) device fills the RAS slot here. */
			pvt->pci_ras = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
		{
			int id = TAD_DEV_TO_CHAN(pdev->device);
			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
			pvt->pci_ddrio = pdev;
			break;
		default:
			break;
		}

		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
			 sbridge_dev->bus,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			 pdev);
	}

	/* Check if everything were registered */
	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
	    !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
		goto enodev;

	/* 4-channel (0x0f) and 2-channel (0x03) configs are both valid. */
	if (saw_chan_mask != 0x0f &&
	    saw_chan_mask != 0x03)
		goto enodev;
	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;
}
2811
/*
 * knl_mci_bind_devs - Attach the discovered Knights Landing PCI devices
 *			to the private data of @mci.
 *
 * KNL exposes many devices with identical PCI ids, so they are told apart
 * by their PCI device/function numbers rather than by id alone.  Returns
 * 0 on success, -ENODEV when any MC, SAD, TA, channel or CHA is missing.
 */
static int knl_mci_bind_devs(struct mem_ctl_info *mci,
			     struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int dev, func;

	int i;
	int devidx;

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		/* Extract PCI device and function numbers from devfn. */
		dev = (pdev->devfn >> 3) & 0x1f;
		func = pdev->devfn & 0x7;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_KNL_IMC_MC:
			/* MC0 lives at device 8, MC1 at device 9. */
			if (dev == 8)
				pvt->knl.pci_mc0 = pdev;
			else if (dev == 9)
				pvt->knl.pci_mc1 = pdev;
			else {
				sbridge_printk(KERN_ERR,
					"Memory controller in unexpected place! (dev %d, fn %d)\n",
					dev, func);
				continue;
			}
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
			pvt->pci_sad0 = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1:
			pvt->pci_sad1 = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA:
			/*
			 * CHAs start at PCI device 14, 8 functions per
			 * device; index = (dev - 14) * 8 + func, bounded
			 * by KNL_MAX_CHAS.
			 */
			devidx = ((dev-14)*8)+func;

			if (devidx < 0 || devidx >= KNL_MAX_CHAS) {
				sbridge_printk(KERN_ERR,
					"Caching and Home Agent in unexpected place! (dev %d, fn %d)\n",
					dev, func);
				continue;
			}

			WARN_ON(pvt->knl.pci_cha[devidx] != NULL);

			pvt->knl.pci_cha[devidx] = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN:
			devidx = -1;

			/*
			 * Channel registers: device 9 carries channels 0-2
			 * (functions 2-4), device 8 carries channels 3-5
			 * (also functions 2-4).
			 */
			if (dev == 9)
				devidx = func-2;
			else if (dev == 8)
				devidx = 3 + (func-2);

			if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) {
				sbridge_printk(KERN_ERR,
					"DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n",
					dev, func);
				continue;
			}

			WARN_ON(pvt->knl.pci_channel[devidx] != NULL);
			pvt->knl.pci_channel[devidx] = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM:
			pvt->knl.pci_mc_info = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_TA:
			pvt->pci_ta = pdev;
			break;

		default:
			sbridge_printk(KERN_ERR, "Unexpected device %d\n",
				pdev->device);
			break;
		}
	}

	/* Verify every required device was found. */
	if (!pvt->knl.pci_mc0 || !pvt->knl.pci_mc1 ||
	    !pvt->pci_sad0 || !pvt->pci_sad1 ||
	    !pvt->pci_ta) {
		goto enodev;
	}

	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
		if (!pvt->knl.pci_channel[i]) {
			sbridge_printk(KERN_ERR, "Missing channel %d\n", i);
			goto enodev;
		}
	}

	for (i = 0; i < KNL_MAX_CHAS; i++) {
		if (!pvt->knl.pci_cha[i]) {
			sbridge_printk(KERN_ERR, "Missing CHA %d\n", i);
			goto enodev;
		}
	}

	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;
}
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
/*
 * sbridge_mce_output_error - Decode a machine-check record into an EDAC
 *			      error report.
 * @mci:	memory controller the MCE was initially routed to; may be
 *		swapped for the MC that actually owns the failing address.
 * @m:		the machine-check record to decode.
 *
 * Extracts the MCA status/misc bit-fields, translates the reported
 * address to socket/HA/channel/rank (either via address decode or from
 * the MCE itself, depending on the recoverable-address LSB), and hands
 * the result to edac_mc_handle_error().
 */
static void sbridge_mce_output_error(struct mem_ctl_info *mci,
				     const struct mce *m)
{
	struct mem_ctl_info *new_mci;
	struct sbridge_pvt *pvt = mci->pvt_info;
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[256];
	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);		/* restart IP valid */
	bool overflow = GET_BITFIELD(m->status, 62, 62);
	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
	bool recoverable;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);	/* model-specific code */
	u32 errcode = GET_BITFIELD(m->status, 0, 15);	/* MCA error code */
	u32 channel = GET_BITFIELD(m->status, 0, 3);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);

	/*
	 * Bits 5:0 of MISC give the least-significant valid bit of the
	 * recoverable address; < 12 means the address is precise enough
	 * for a full address-decode.
	 */
	u32 lsb = GET_BITFIELD(m->misc, 0, 5);
	long channel_mask, first_channel;
	u8 rank = 0xff, socket, ha;
	int rc, dimm;
	char *area_type = "DRAM";

	/* Only Sandy Bridge reports recoverability in status bit 56. */
	if (pvt->info.type != SANDY_BRIDGE)
		recoverable = true;
	else
		recoverable = GET_BITFIELD(m->status, 56, 56);

	if (uncorrected_error) {
		core_err_cnt = 1;
		if (ripv) {
			tp_event = HW_EVENT_ERR_UNCORRECTED;
		} else {
			/* No valid restart IP: the error is fatal. */
			tp_event = HW_EVENT_ERR_FATAL;
		}
	} else {
		tp_event = HW_EVENT_ERR_CORRECTED;
	}

	/* Decode the memory operation type from status bits 6:4. */
	switch (optypenum) {
	case 0:
		optype = "generic undef request error";
		break;
	case 1:
		optype = "memory read error";
		break;
	case 2:
		optype = "memory write error";
		break;
	case 3:
		optype = "addr/cmd error";
		break;
	case 4:
		optype = "memory scrubbing error";
		break;
	default:
		optype = "reserved";
		break;
	}

	if (pvt->info.type == KNIGHTS_LANDING) {
		/* Channel 14 denotes EDRAM on KNL: only log, no decode. */
		if (channel == 14) {
			edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
				overflow ? " OVERFLOW" : "",
				(uncorrected_error && recoverable)
				? " recoverable" : "",
				mscod, errcode,
				m->bank);
		} else {
			char A = *("A");

			/*
			 * KNL banks report channels in a different order
			 * than the EDAC layout; remap before reporting.
			 */
			channel = knl_channel_remap(m->bank == 16, channel);
			channel_mask = 1 << channel;

			snprintf(msg, sizeof(msg),
				"%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
				overflow ? " OVERFLOW" : "",
				(uncorrected_error && recoverable)
				? " recoverable" : " ",
				mscod, errcode, channel, A + channel);
			edac_mc_handle_error(tp_event, mci, core_err_cnt,
				m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
				channel, 0, -1,
				optype, msg);
		}
		return;
	} else if (lsb < 12) {
		/* Precise address: do a full SAD/TAD/RIR decode. */
		rc = get_memory_error_data(mci, m->addr, &socket, &ha,
					   &channel_mask, &rank,
					   &area_type, msg);
	} else {
		/* Imprecise address: decode from the MCE record instead. */
		rc = get_memory_error_data_from_mce(mci, m, &socket, &ha,
						    &channel_mask, msg);
	}

	if (rc < 0)
		goto err_parsing;
	/* The error may belong to a different MC than the one notified. */
	new_mci = get_mci_for_node_id(socket, ha);
	if (!new_mci) {
		strcpy(msg, "Error: socket got corrupted!");
		goto err_parsing;
	}
	mci = new_mci;
	pvt = mci->pvt_info;

	first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);

	/* Map rank to DIMM slot: 0-3 -> 0, 4-7 -> 1, 8+ -> 2. */
	if (rank == 0xff)
		dimm = -1;
	else if (rank < 4)
		dimm = 0;
	else if (rank < 8)
		dimm = 1;
	else
		dimm = 2;

	/*
	 * Without lockstep/mirroring/close-page, a single channel owns
	 * the error; otherwise keep the raw channel field from status.
	 */
	if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
		channel = first_channel;

	snprintf(msg, sizeof(msg),
		 "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d",
		 overflow ? " OVERFLOW" : "",
		 (uncorrected_error && recoverable) ? " recoverable" : "",
		 area_type,
		 mscod, errcode,
		 socket, ha,
		 channel_mask,
		 rank);

	edac_dbg(0, "%s\n", msg);

	/* EDAC expects -1 when the channel could not be determined. */
	if (channel == CHANNEL_UNSPECIFIED)
		channel = -1;

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt,
			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
			     channel, dimm, -1,
			     optype, msg);
	return;
err_parsing:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
			     -1, -1, -1,
			     msg, "");

}
3122
3123
3124
3125
3126
/*
 * sbridge_mce_check_error - MCE decoder-chain callback.
 *
 * Filters out machine checks that are not decodable memory errors, then
 * logs the record and forwards it to sbridge_mce_output_error().  Returns
 * NOTIFY_DONE for records we ignore, NOTIFY_OK after handling one.
 */
static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
				   void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	char *type;

	/* Already consumed by the corrected-error collector. */
	if (mce->kflags & MCE_HANDLED_CEC)
		return NOTIFY_DONE;

	/*
	 * Only handle memory errors: those have bit 7 of the MCA error
	 * code set and bits 8-11, 13-15 clear (mask 0xefff deliberately
	 * ignores bit 12, which has a special meaning).
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	/* Need a valid address (ADDRV, status bit 58)... */
	if (!GET_BITFIELD(mce->status, 58, 58))
		return NOTIFY_DONE;

	/* ...and a valid MISC register (MISCV, status bit 59). */
	if (!GET_BITFIELD(mce->status, 59, 59))
		return NOTIFY_DONE;

	/* MISC address mode (bits 8:6) must be 2 = physical address. */
	if (GET_BITFIELD(mce->misc, 6, 8) != 2)
		return NOTIFY_DONE;

	mci = get_mci_for_node_id(mce->socketid, IMC0);
	if (!mci)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");

	sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
			  "Bank %d: %016Lx\n", mce->extcpu, type,
			  mce->mcgstatus, mce->bank, mce->status);
	sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
	sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
	sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);

	sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
			  "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
			  mce->time, mce->socketid, mce->apicid);

	sbridge_mce_output_error(mci, mce);

	/* Advise mcelog that the errors were handled */
	mce->kflags |= MCE_HANDLED_EDAC;
	return NOTIFY_OK;
}
3186
/* MCE decode-chain hook; registered at EDAC priority in sbridge_init(). */
static struct notifier_block sbridge_mce_dec = {
	.notifier_call	= sbridge_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};
3191
3192
3193
3194
3195
/*
 * sbridge_unregister_mci - Remove one MC from the EDAC core and free it.
 *
 * Safe to call on a device whose MC registration failed: it only logs an
 * error when no mci (or no private data) is attached.
 */
static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
{
	struct mem_ctl_info *mci = sbridge_dev->mci;

	if (unlikely(!mci || !mci->pvt_info)) {
		edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);

		sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	edac_dbg(0, "MC: mci = %p, dev = %p\n",
		 mci, &sbridge_dev->pdev[0]->dev);

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(mci->pdev);

	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	sbridge_dev->mci = NULL;
}
3218
/*
 * sbridge_register_mci - Allocate, configure and register one EDAC MC.
 * @sbridge_dev:	device group (PCI devices) backing this controller.
 * @type:		platform generation; selects the per-CPU decode ops.
 *
 * Fills pvt->info with the generation-specific callbacks and register
 * tables, binds the PCI devices, reads the DIMM configuration and memory
 * layout, and finally registers the MC with the EDAC core.  Returns 0 on
 * success or a negative errno; on failure everything allocated here is
 * freed and sbridge_dev->mci is left NULL.
 */
static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct sbridge_pvt *pvt;
	struct pci_dev *pdev = sbridge_dev->pdev[0];
	int rc;

	/* allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = type == KNIGHTS_LANDING ?
		KNL_MAX_CHANNELS : NUM_CHANNELS;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
			    sizeof(*pvt));

	if (unlikely(!mci))
		return -ENOMEM;

	edac_dbg(0, "MC: mci = %p, dev = %p\n",
		 mci, &pdev->dev);

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	/* Associate sbridge_dev and mci for future usage */
	pvt->sbridge_dev = sbridge_dev;
	sbridge_dev->mci = mci;

	mci->mtype_cap = type == KNIGHTS_LANDING ?
		MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = EDAC_MOD_STR;
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	/* Set the function pointer to an actual operation function */
	pvt->info.type = type;
	switch (type) {
	case IVY_BRIDGE:
		pvt->info.rankcfgr = IB_RANK_CFG_A;
		pvt->info.get_tolm = ibridge_get_tolm;
		pvt->info.get_tohm = ibridge_get_tohm;
		pvt->info.dram_rule = ibridge_dram_rule;
		pvt->info.get_memory_type = get_memory_type;
		pvt->info.get_node_id = get_node_id;
		pvt->info.get_ha = ibridge_get_ha;
		pvt->info.rir_limit = rir_limit;
		pvt->info.sad_limit = sad_limit;
		pvt->info.interleave_mode = interleave_mode;
		pvt->info.dram_attr = dram_attr;
		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
		pvt->info.interleave_list = ibridge_interleave_list;
		pvt->info.interleave_pkg = ibridge_interleave_pkg;
		pvt->info.get_width = ibridge_get_width;

		/* Store pci devices at mci for faster access */
		rc = ibridge_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		get_source_id(mci);
		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d",
			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
		break;
	case SANDY_BRIDGE:
		pvt->info.rankcfgr = SB_RANK_CFG_A;
		pvt->info.get_tolm = sbridge_get_tolm;
		pvt->info.get_tohm = sbridge_get_tohm;
		pvt->info.dram_rule = sbridge_dram_rule;
		pvt->info.get_memory_type = get_memory_type;
		pvt->info.get_node_id = get_node_id;
		pvt->info.get_ha = sbridge_get_ha;
		pvt->info.rir_limit = rir_limit;
		pvt->info.sad_limit = sad_limit;
		pvt->info.interleave_mode = interleave_mode;
		pvt->info.dram_attr = dram_attr;
		pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
		pvt->info.interleave_list = sbridge_interleave_list;
		pvt->info.interleave_pkg = sbridge_interleave_pkg;
		pvt->info.get_width = sbridge_get_width;

		/* Store pci devices at mci for faster access */
		rc = sbridge_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		get_source_id(mci);
		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d",
			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
		break;
	case HASWELL:
		/* rankcfgr isn't used */
		pvt->info.get_tolm = haswell_get_tolm;
		pvt->info.get_tohm = haswell_get_tohm;
		pvt->info.dram_rule = ibridge_dram_rule;
		pvt->info.get_memory_type = haswell_get_memory_type;
		pvt->info.get_node_id = haswell_get_node_id;
		pvt->info.get_ha = ibridge_get_ha;
		pvt->info.rir_limit = haswell_rir_limit;
		pvt->info.sad_limit = sad_limit;
		pvt->info.interleave_mode = interleave_mode;
		pvt->info.dram_attr = dram_attr;
		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
		pvt->info.interleave_list = ibridge_interleave_list;
		pvt->info.interleave_pkg = ibridge_interleave_pkg;
		pvt->info.get_width = ibridge_get_width;

		/* Store pci devices at mci for faster access */
		rc = haswell_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		get_source_id(mci);
		mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d",
			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
		break;
	case BROADWELL:
		/* rankcfgr isn't used */
		pvt->info.get_tolm = haswell_get_tolm;
		pvt->info.get_tohm = haswell_get_tohm;
		pvt->info.dram_rule = ibridge_dram_rule;
		pvt->info.get_memory_type = haswell_get_memory_type;
		pvt->info.get_node_id = haswell_get_node_id;
		pvt->info.get_ha = ibridge_get_ha;
		pvt->info.rir_limit = haswell_rir_limit;
		pvt->info.sad_limit = sad_limit;
		pvt->info.interleave_mode = interleave_mode;
		pvt->info.dram_attr = dram_attr;
		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
		pvt->info.interleave_list = ibridge_interleave_list;
		pvt->info.interleave_pkg = ibridge_interleave_pkg;
		pvt->info.get_width = broadwell_get_width;

		/* Store pci devices at mci for faster access */
		rc = broadwell_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		get_source_id(mci);
		mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d",
			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
		break;
	case KNIGHTS_LANDING:
		/* pvt->info.rankcfgr == ??? */
		pvt->info.get_tolm = knl_get_tolm;
		pvt->info.get_tohm = knl_get_tohm;
		pvt->info.dram_rule = knl_dram_rule;
		pvt->info.get_memory_type = knl_get_memory_type;
		pvt->info.get_node_id = knl_get_node_id;
		pvt->info.get_ha = knl_get_ha;
		pvt->info.rir_limit = NULL;
		pvt->info.sad_limit = knl_sad_limit;
		pvt->info.interleave_mode = knl_interleave_mode;
		pvt->info.dram_attr = dram_attr_knl;
		pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
		pvt->info.interleave_list = knl_interleave_list;
		pvt->info.interleave_pkg = ibridge_interleave_pkg;
		pvt->info.get_width = knl_get_width;

		rc = knl_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		get_source_id(mci);
		mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d",
			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
		break;
	}

	/* kasprintf() may have failed above. */
	if (!mci->ctl_name) {
		rc = -ENOMEM;
		goto fail0;
	}

	/* Get dimm basic config and the memory layout */
	rc = get_dimm_config(mci);
	if (rc < 0) {
		edac_dbg(0, "MC: failed to get_dimm_config()\n");
		goto fail;
	}
	get_memory_layout(mci);

	/* record ptr to the generic device */
	mci->pdev = &pdev->dev;

	/* add this new MC control structure to EDAC's list of MCs */
	if (unlikely(edac_mc_add_mc(mci))) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		rc = -EINVAL;
		goto fail;
	}

	return 0;

fail:
	kfree(mci->ctl_name);
fail0:
	edac_mc_free(mci);
	sbridge_dev->mci = NULL;	/* marker to know whether the device is allocated */
	return rc;
}
3419
/*
 * CPU models supported by this driver; driver_data points at the PCI id
 * table used to discover that platform's uncore devices.
 */
static const struct x86_cpu_id sbridge_cpuids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &pci_dev_descr_sbridge_table),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &pci_dev_descr_ibridge_table),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &pci_dev_descr_haswell_table),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &pci_dev_descr_broadwell_table),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &pci_dev_descr_broadwell_table),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &pci_dev_descr_knl_table),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &pci_dev_descr_knl_table),
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
3431
3432
3433
3434
3435
3436
3437
3438
3439
/*
 * sbridge_probe - Discover all devices and register every MC found.
 * @id:		matched CPU id; driver_data carries the PCI id table.
 *
 * Returns 0 on success, negative errno on failure.  On a partial failure
 * every MC registered so far is unregistered and all PCI devices are
 * released.
 */
static int sbridge_probe(const struct x86_cpu_id *id)
{
	int rc;
	u8 mc, num_mc = 0;
	struct sbridge_dev *sbridge_dev;
	struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;

	/* get the pci devices we want to reserve for our use */
	rc = sbridge_get_all_devices(&num_mc, ptable);

	if (unlikely(rc < 0)) {
		edac_dbg(0, "couldn't get all devices\n");
		goto fail0;
	}

	mc = 0;

	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
		edac_dbg(0, "Registering MC#%d (%d of %d)\n",
			 mc, mc + 1, num_mc);

		sbridge_dev->mc = mc++;
		rc = sbridge_register_mci(sbridge_dev, ptable->type);
		if (unlikely(rc < 0))
			goto fail1;
	}

	sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);

	return 0;

fail1:
	/* Unwind: unregister what was registered, release all devices. */
	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
		sbridge_unregister_mci(sbridge_dev);

	sbridge_put_all_devices();
fail0:
	return rc;
}
3479
3480
3481
3482
3483
3484 static void sbridge_remove(void)
3485 {
3486 struct sbridge_dev *sbridge_dev;
3487
3488 edac_dbg(0, "\n");
3489
3490 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3491 sbridge_unregister_mci(sbridge_dev);
3492
3493
3494 sbridge_put_all_devices();
3495 }
3496
3497
3498
3499
3500
3501 static int __init sbridge_init(void)
3502 {
3503 const struct x86_cpu_id *id;
3504 const char *owner;
3505 int rc;
3506
3507 edac_dbg(2, "\n");
3508
3509 owner = edac_get_owner();
3510 if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3511 return -EBUSY;
3512
3513 if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
3514 return -ENODEV;
3515
3516 id = x86_match_cpu(sbridge_cpuids);
3517 if (!id)
3518 return -ENODEV;
3519
3520
3521 opstate_init();
3522
3523 rc = sbridge_probe(id);
3524
3525 if (rc >= 0) {
3526 mce_register_decode_chain(&sbridge_mce_dec);
3527 return 0;
3528 }
3529
3530 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
3531 rc);
3532
3533 return rc;
3534 }
3535
3536
3537
3538
3539
/*
 * sbridge_exit - Module exit point: tear down MCs, then detach from the
 * MCE decode chain.
 */
static void __exit sbridge_exit(void)
{
	edac_dbg(2, "\n");
	sbridge_remove();
	mce_unregister_decode_chain(&sbridge_mce_dec);
}
3546
module_init(sbridge_init);
module_exit(sbridge_exit);

/* 0=Poll, 1=NMI — see EDAC core opstate handling. */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
		   SBRIDGE_REVISION);