/*
 * AMD64 class Memory Controller kernel module
 *
 * Copyright (c) 2009 SoftwareBitMaker.
 * Copyright (c) 2009-15 Advanced Micro Devices, Inc.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/edac.h>
#include <asm/cpu_device_id.h>
#include <asm/msr.h>
#include "edac_module.h"
#include "mce_amd.h"

#define amd64_info(fmt, arg...) \
    edac_printk(KERN_INFO, "amd64", fmt, ##arg)

#define amd64_warn(fmt, arg...) \
    edac_printk(KERN_WARNING, "amd64", "Warning: " fmt, ##arg)

#define amd64_err(fmt, arg...) \
    edac_printk(KERN_ERR, "amd64", "Error: " fmt, ##arg)

#define amd64_mc_warn(mci, fmt, arg...) \
    edac_mc_chipset_printk(mci, KERN_WARNING, "amd64", fmt, ##arg)

#define amd64_mc_err(mci, fmt, arg...) \
    edac_mc_chipset_printk(mci, KERN_ERR, "amd64", fmt, ##arg)
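
/*
 * Example usage (illustrative only; "nid" and "mci" stand for a node id and
 * a struct mem_ctl_info pointer in the caller):
 *
 *     amd64_warn("ECC is disabled on node %d\n", nid);
 *     amd64_mc_err(mci, "unable to map the error address\n");
 */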

/*
 * Throughout the comments in this code, the following terms are used:
 *
 *  SysAddr, DramAddr, and InputAddr
 *
 *  These terms come directly from the amd64 documentation
 * (AMD publication #26094).  They are defined as follows:
 *
 *     SysAddr:
 *         This is a physical address generated by a CPU core or a device
 *         doing DMA.  If generated by a CPU core, a SysAddr is the result of
 *         a virtual to physical address translation by the CPU core's address
 *         translation mechanism (MMU).
 *
 *     DramAddr:
 *         A DramAddr is derived from a SysAddr by subtracting an offset that
 *         depends on which node the SysAddr maps to and whether the SysAddr
 *         is within a range affected by memory hoisting.  The DRAM Base
 *         (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers
 *         determine which node a SysAddr maps to.
 *
 *         If the DRAM Hole Address Register (DHAR) is enabled and the SysAddr
 *         is within the range of addresses specified by this register, then
 *         a value x from the DHAR is subtracted from the SysAddr to produce a
 *         DramAddr.  Here, x represents the base address for the node that
 *         the SysAddr maps to plus an offset due to memory hoisting.  See
 *         section 3.4.8 and the comments in amd64_get_dram_hole_info() and
 *         sys_addr_to_dram_addr() below for more information.
 *
 *         If the SysAddr is not affected by the DHAR then a value y is
 *         subtracted from the SysAddr to produce a DramAddr.  Here, y is the
 *         base address for the node that the SysAddr maps to.  See section
 *         3.4.4 and the comments in sys_addr_to_dram_addr() below for more
 *         information.
 *
 *     InputAddr:
 *         A DramAddr is translated to an InputAddr before being passed to the
 *         memory controller for the node that the DramAddr is associated
 *         with.  The memory controller then maps the InputAddr to a csrow.
 *         If node interleaving is not in use, then the InputAddr has the same
 *         value as the DramAddr.  Otherwise, the InputAddr is produced by
 *         discarding the bits used for node interleaving from the DramAddr.
 *         See section 3.4.4 for more information.
 *
 *         The memory controller for a given node uses its DRAM CS Base and
 *         DRAM CS Mask registers to map an InputAddr to a csrow.  See
 *         sections 3.5.4 and 3.5.5 for more information.
 */
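
/*
 * Worked example (illustrative numbers, not taken from the documentation):
 * assume a SysAddr of 0x1_0004_0000 falls in a DRAM range whose base is
 * 0x1_0000_0000 and which maps to node 1, with memory hoisting not in
 * effect for this address.  Then:
 *
 *     DramAddr  = SysAddr - node base
 *               = 0x1_0004_0000 - 0x1_0000_0000 = 0x0004_0000
 *     InputAddr = DramAddr             (node interleaving disabled)
 *
 * The node 1 memory controller then matches 0x0004_0000 against its DRAM CS
 * Base/Mask register pairs to select the csrow.
 */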

#define EDAC_AMD64_VERSION      "3.5.0"
#define EDAC_MOD_STR            "amd64_edac"

/* Extended Model from CPUID, for CPU Revision numbers */
#define K8_REV_D            1
#define K8_REV_E            2
#define K8_REV_F            4

/* Hardware limit on ChipSelect rows per MC and processors per system */
#define NUM_CHIPSELECTS         8
#define DRAM_RANGES         8
#define NUM_CONTROLLERS         12

#define ON true
#define OFF false

/*
 * PCI-defined configuration space registers
 */
#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F1 0x141b
#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F2 0x141c
#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F1 0x1571
#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F2 0x1572
#define PCI_DEVICE_ID_AMD_16H_NB_F1 0x1531
#define PCI_DEVICE_ID_AMD_16H_NB_F2 0x1532
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
#define PCI_DEVICE_ID_AMD_17H_DF_F0 0x1460
#define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F0 0x1448
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F6 0x144e
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650
#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F0 0x14ad
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F6 0x14b3
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F0 0x166a
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F6 0x1670

/*
 * Function 1 - Address Map
 */
#define DRAM_BASE_LO            0x40
#define DRAM_LIMIT_LO           0x44

/*
 * F15 M30h D18F1x2[1C:00]
 */
#define DRAM_CONT_BASE          0x200
#define DRAM_CONT_LIMIT         0x204

/*
 * F15 M30h D18F1x2[4C:40]
 */
#define DRAM_CONT_HIGH_OFF      0x240

#define dram_rw(pvt, i)         ((u8)(pvt->ranges[i].base.lo & 0x3))
#define dram_intlv_sel(pvt, i)      ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7))
#define dram_dst_node(pvt, i)       ((u8)(pvt->ranges[i].lim.lo & 0x7))

#define DHAR                0xf0
#define dhar_mem_hoist_valid(pvt)   ((pvt)->dhar & BIT(1))
#define dhar_base(pvt)          ((pvt)->dhar & 0xff000000)
#define k8_dhar_offset(pvt)     (((pvt)->dhar & 0x0000ff00) << 16)

                    /* NOTE: Extra mask bit vs K8 */
#define f10_dhar_offset(pvt)        (((pvt)->dhar & 0x0000ff80) << 16)

#define DCT_CFG_SEL         0x10C

#define DRAM_LOCAL_NODE_BASE        0x120
#define DRAM_LOCAL_NODE_LIM     0x124

#define DRAM_BASE_HI            0x140
#define DRAM_LIMIT_HI           0x144


/*
 * Function 2 - DRAM controller
 */
#define DCSB0               0x40
#define DCSB1               0x140
#define DCSB_CS_ENABLE          BIT(0)

#define DCSM0               0x60
#define DCSM1               0x160

#define csrow_enabled(i, dct, pvt)  ((pvt)->csels[(dct)].csbases[(i)]     & DCSB_CS_ENABLE)
#define csrow_sec_enabled(i, dct, pvt)  ((pvt)->csels[(dct)].csbases_sec[(i)] & DCSB_CS_ENABLE)

#define DRAM_CONTROL            0x78

#define DBAM0               0x80
#define DBAM1               0x180

/* Extract the 'type' of the i'th DIMM from the DBAM reg value passed */
#define DBAM_DIMM(i, reg)       ((((reg) >> (4*(i)))) & 0xF)
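
/*
 * For example (illustrative value): with a DBAM register value of 0x8765,
 * DBAM_DIMM(2, 0x8765) selects bits [11:8] and yields 0x7.
 */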

#define DBAM_MAX_VALUE          11

#define DCLR0               0x90
#define DCLR1               0x190
#define REVE_WIDTH_128          BIT(16)
#define WIDTH_128           BIT(11)

#define DCHR0               0x94
#define DCHR1               0x194
#define DDR3_MODE           BIT(8)

#define DCT_SEL_LO          0x110
#define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0))
#define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2))

#define dct_ganging_enabled(pvt)    ((boot_cpu_data.x86 == 0x10) && ((pvt)->dct_sel_lo & BIT(4)))

#define dct_data_intlv_enabled(pvt) ((pvt)->dct_sel_lo & BIT(5))
#define dct_memory_cleared(pvt)     ((pvt)->dct_sel_lo & BIT(10))

#define SWAP_INTLV_REG          0x10c

#define DCT_SEL_HI          0x114

#define F15H_M60H_SCRCTRL       0x1C8
#define F17H_SCR_BASE_ADDR      0x48
#define F17H_SCR_LIMIT_ADDR     0x4C

/*
 * Function 3 - Misc Control
 */
#define NBCTL               0x40

#define NBCFG               0x44
#define NBCFG_CHIPKILL          BIT(23)
#define NBCFG_ECC_ENABLE        BIT(22)

/* F3x48: NBSL */
#define F10_NBSL_EXT_ERR_ECC        0x8
#define NBSL_PP_OBS         0x2

#define SCRCTRL             0x58

#define F10_ONLINE_SPARE        0xB0
#define online_spare_swap_done(pvt, c)  (((pvt)->online_spare >> (1 + 2 * (c))) & 0x1)
#define online_spare_bad_dramcs(pvt, c) (((pvt)->online_spare >> (4 + 4 * (c))) & 0x7)

#define F10_NB_ARRAY_ADDR       0xB8
#define F10_NB_ARRAY_DRAM       BIT(31)

/* Bits [2:1] are used to select a 16-byte section within a 64-byte cacheline */
#define SET_NB_ARRAY_ADDR(section)  (((section) & 0x3) << 1)

#define F10_NB_ARRAY_DATA       0xBC
#define F10_NB_ARR_ECC_WR_REQ       BIT(17)
#define SET_NB_DRAM_INJECTION_WRITE(inj)  \
                    (BIT(((inj.word) & 0xF) + 20) | \
                    F10_NB_ARR_ECC_WR_REQ | inj.bit_map)
#define SET_NB_DRAM_INJECTION_READ(inj)  \
                    (BIT(((inj.word) & 0xF) + 20) | \
                    BIT(16) |  inj.bit_map)
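
/*
 * For example (illustrative values): with inj.word = 2 and inj.bit_map = 0x5,
 * SET_NB_DRAM_INJECTION_WRITE(inj) evaluates to
 * BIT(22) | BIT(17) | 0x5 = 0x00420005.
 */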


#define NBCAP               0xE8
#define NBCAP_CHIPKILL          BIT(4)
#define NBCAP_SECDED            BIT(3)
#define NBCAP_DCT_DUAL          BIT(0)

#define EXT_NB_MCA_CFG          0x180

/* MSRs */
#define MSR_MCGCTL_NBE          BIT(4)

/* F17h */

/* F0: */
#define DF_DHAR             0x104

/* UMC CH register offsets */
#define UMCCH_BASE_ADDR         0x0
#define UMCCH_BASE_ADDR_SEC     0x10
#define UMCCH_ADDR_MASK         0x20
#define UMCCH_ADDR_MASK_SEC     0x28
#define UMCCH_ADDR_MASK_SEC_DDR5    0x30
#define UMCCH_ADDR_CFG          0x30
#define UMCCH_ADDR_CFG_DDR5     0x40
#define UMCCH_DIMM_CFG          0x80
#define UMCCH_DIMM_CFG_DDR5     0x90
#define UMCCH_UMC_CFG           0x100
#define UMCCH_SDP_CTRL          0x104
#define UMCCH_ECC_CTRL          0x14C
#define UMCCH_ECC_BAD_SYMBOL        0xD90
#define UMCCH_UMC_CAP           0xDF0
#define UMCCH_UMC_CAP_HI        0xDF4

/* UMC CH bitfields */
#define UMC_ECC_CHIPKILL_CAP        BIT(31)
#define UMC_ECC_ENABLED         BIT(30)

#define UMC_SDP_INIT            BIT(31)

enum amd_families {
    K8_CPUS = 0,
    F10_CPUS,
    F15_CPUS,
    F15_M30H_CPUS,
    F15_M60H_CPUS,
    F16_CPUS,
    F16_M30H_CPUS,
    F17_CPUS,
    F17_M10H_CPUS,
    F17_M30H_CPUS,
    F17_M60H_CPUS,
    F17_M70H_CPUS,
    F19_CPUS,
    F19_M10H_CPUS,
    F19_M50H_CPUS,
    NUM_FAMILIES,
};

/* Error injection control structure */
struct error_injection {
    u32  section;
    u32  word;
    u32  bit_map;
};

/* low and high part of PCI config space regs */
struct reg_pair {
    u32 lo, hi;
};

/*
 * See F1x[1, 0][7C:40] DRAM Base/Limit Registers
 */
struct dram_range {
    struct reg_pair base;
    struct reg_pair lim;
};

/* A DCT's collection of chip selects */
struct chip_select {
    u32 csbases[NUM_CHIPSELECTS];
    u32 csbases_sec[NUM_CHIPSELECTS];
    u8 b_cnt;

    u32 csmasks[NUM_CHIPSELECTS];
    u32 csmasks_sec[NUM_CHIPSELECTS];
    u8 m_cnt;
};

struct amd64_umc {
    u32 dimm_cfg;       /* DIMM Configuration reg */
    u32 umc_cfg;        /* Configuration reg */
    u32 sdp_ctrl;       /* SDP Control reg */
    u32 ecc_ctrl;       /* DRAM ECC Control reg */
    u32 umc_cap_hi;     /* Capabilities High reg */

    /* cache the dram_type */
    enum mem_type dram_type;
};

struct amd64_pvt {
    struct low_ops *ops;

    /* pci_device handles which we utilize */
    struct pci_dev *F0, *F1, *F2, *F3, *F6;

    u16 mc_node_id;     /* MC index of this MC node */
    u8 fam;         /* CPU family */
    u8 model;       /* ... model */
    u8 stepping;        /* ... stepping */

    int ext_model;      /* extended model value of this node */
    int channel_count;

    /* Raw registers */
    u32 dclr0;      /* DRAM Configuration Low DCT0 reg */
    u32 dclr1;      /* DRAM Configuration Low DCT1 reg */
    u32 dchr0;      /* DRAM Configuration High DCT0 reg */
    u32 dchr1;      /* DRAM Configuration High DCT1 reg */
    u32 nbcap;      /* North Bridge Capabilities */
    u32 nbcfg;      /* F10 North Bridge Configuration */
    u32 ext_nbcfg;      /* Extended F10 North Bridge Configuration */
    u32 dhar;       /* DRAM Hoist reg */
    u32 dbam0;      /* DRAM Base Address Mapping reg for DCT0 */
    u32 dbam1;      /* DRAM Base Address Mapping reg for DCT1 */

    /* one for each DCT/UMC */
    struct chip_select csels[NUM_CONTROLLERS];

    /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
    struct dram_range ranges[DRAM_RANGES];

    u64 top_mem;        /* top of memory below 4GB */
    u64 top_mem2;       /* top of memory above 4GB */

    u32 dct_sel_lo;     /* DRAM Controller Select Low */
    u32 dct_sel_hi;     /* DRAM Controller Select High */
    u32 online_spare;   /* On-Line spare Reg */

    /* x4, x8, or x16 syndromes in use */
    u8 ecc_sym_sz;

    /* place to store error injection parameters prior to issue */
    struct error_injection injection;

    /*
     * cache the dram_type
     *
     * NOTE: Don't use this for Family 17h and later.
     *   Use dram_type in struct amd64_umc instead.
     */
    enum mem_type dram_type;

    struct amd64_umc *umc;  /* UMC registers */
};

enum err_codes {
    DECODE_OK   =  0,
    ERR_NODE    = -1,
    ERR_CSROW   = -2,
    ERR_CHANNEL = -3,
    ERR_SYND    = -4,
    ERR_NORM_ADDR   = -5,
};

struct err_info {
    int err_code;
    struct mem_ctl_info *src_mci;
    int csrow;
    int channel;
    u16 syndrome;
    u32 page;
    u32 offset;
};

static inline u32 get_umc_base(u8 channel)
{
    /* chY: 0xY50000 */
    return 0x50000 + (channel << 20);
}
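
/*
 * Minimal sketch (umc_chan_reg() is a hypothetical helper, not part of the
 * driver): a full per-channel register address is the channel base plus one
 * of the UMCCH_* offsets above, e.g. umc_chan_reg(2, UMCCH_UMC_CAP_HI)
 * == 0x250000 + 0xDF4.
 */
static inline u32 umc_chan_reg(u8 channel, u32 reg_offset)
{
    return get_umc_base(channel) + reg_offset;
}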

static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
{
    u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;

    if (boot_cpu_data.x86 == 0xf)
        return addr;

    return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
}

static inline u64 get_dram_limit(struct amd64_pvt *pvt, u8 i)
{
    u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;

    if (boot_cpu_data.x86 == 0xf)
        return lim;

    return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim;
}
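
/*
 * Minimal sketch (sys_addr_in_range() is a hypothetical helper, not part of
 * the driver): combining the accessors above to test whether a SysAddr falls
 * inside DRAM range @i.  The low two bits of the base register are the
 * read/write enable bits checked by dram_rw().
 */
static inline bool sys_addr_in_range(struct amd64_pvt *pvt, u8 i, u64 sys_addr)
{
    /* Skip ranges that have neither read nor write enabled. */
    if (!dram_rw(pvt, i))
        return false;

    return sys_addr >= get_dram_base(pvt, i) &&
           sys_addr <= get_dram_limit(pvt, i);
}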

static inline u16 extract_syndrome(u64 status)
{
    return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00);
}
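
/*
 * For example (illustrative value): a status word carrying 0x12 in bits
 * [54:47] and 0x34 in bits [31:24] yields a syndrome of 0x3412.
 */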

static inline u8 dct_sel_interleave_addr(struct amd64_pvt *pvt)
{
    if (pvt->fam == 0x15 && pvt->model >= 0x30)
        return (((pvt->dct_sel_hi >> 9) & 0x1) << 2) |
            ((pvt->dct_sel_lo >> 6) & 0x3);

    return  ((pvt)->dct_sel_lo >> 6) & 0x3;
}

/*
 * per-node ECC settings descriptor
 */
struct ecc_settings {
    u32 old_nbctl;
    bool nbctl_valid;

    struct flags {
        unsigned long nb_mce_enable:1;
        unsigned long nb_ecc_prev:1;
    } flags;
};

/*
 * Each of the PCI device ID types has its own set of hardware accessor
 * functions and per-device encoding/decoding logic.
 */
struct low_ops {
    int (*early_channel_count)  (struct amd64_pvt *pvt);
    void (*map_sysaddr_to_csrow)    (struct mem_ctl_info *mci, u64 sys_addr,
                     struct err_info *);
    int (*dbam_to_cs)       (struct amd64_pvt *pvt, u8 dct,
                     unsigned cs_mode, int cs_mask_nr);
};

struct amd64_family_flags {
    /*
     * Indicates that the system supports the new register offsets, etc.
     * first introduced with Family 19h Model 10h.
     */
    __u64 zn_regs_v2    : 1,

          __reserved    : 63;
};

struct amd64_family_type {
    const char *ctl_name;
    u16 f0_id, f1_id, f2_id, f6_id;
    /* Maximum number of memory controllers per die/node. */
    u8 max_mcs;
    struct amd64_family_flags flags;
    struct low_ops ops;
};
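
/*
 * Illustrative sketch (hypothetical callback names; the real per-family
 * tables live in amd64_edac.c): a family descriptor ties the PCI device IDs
 * above to the low_ops callbacks, roughly:
 *
 *     static const struct amd64_family_type example_f15 = {
 *         .ctl_name = "F15h",
 *         .f1_id    = PCI_DEVICE_ID_AMD_15H_NB_F1,
 *         .f2_id    = PCI_DEVICE_ID_AMD_15H_NB_F2,
 *         .max_mcs  = 2,
 *         .ops = {
 *             .early_channel_count  = f15_early_channel_count,
 *             .map_sysaddr_to_csrow = f15_map_sysaddr_to_csrow,
 *             .dbam_to_cs           = f15_dbam_to_chip_select,
 *         },
 *     };
 */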

int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
                   u32 *val, const char *func);
int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
                u32 val, const char *func);

#define amd64_read_pci_cfg(pdev, offset, val)   \
    __amd64_read_pci_cfg_dword(pdev, offset, val, __func__)

#define amd64_write_pci_cfg(pdev, offset, val)  \
    __amd64_write_pci_cfg_dword(pdev, offset, val, __func__)

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

/* Injection helpers */
static inline void disable_caches(void *dummy)
{
    write_cr0(read_cr0() | X86_CR0_CD);
    wbinvd();
}

static inline void enable_caches(void *dummy)
{
    write_cr0(read_cr0() & ~X86_CR0_CD);
}
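
/*
 * Usage sketch (illustrative, not copied from the driver): the error
 * injection path is expected to bracket the ECC array write with these
 * helpers on every CPU, so that cached data does not mask the injected
 * error:
 *
 *     on_each_cpu(disable_caches, NULL, 1);
 *     ...program F10_NB_ARRAY_ADDR / F10_NB_ARRAY_DATA...
 *     on_each_cpu(enable_caches, NULL, 1);
 */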

static inline u8 dram_intlv_en(struct amd64_pvt *pvt, unsigned int i)
{
    if (pvt->fam == 0x15 && pvt->model >= 0x30) {
        u32 tmp;
        amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &tmp);
        return (u8) tmp & 0xF;
    }
    return (u8) (pvt->ranges[i].base.lo >> 8) & 0x7;
}

static inline u8 dhar_valid(struct amd64_pvt *pvt)
{
    if (pvt->fam == 0x15 && pvt->model >= 0x30) {
        u32 tmp;
        amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &tmp);
        return (tmp >> 1) & BIT(0);
    }
    return (pvt)->dhar & BIT(0);
}

static inline u32 dct_sel_baseaddr(struct amd64_pvt *pvt)
{
    if (pvt->fam == 0x15 && pvt->model >= 0x30) {
        u32 tmp;
        amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &tmp);
        return (tmp >> 11) & 0x1FFF;
    }
    return (pvt)->dct_sel_lo & 0xFFFFF800;
}