#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_0.h"

#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "vega10_enum.h"
#include <uapi/linux/kfd_ioctl.h>

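/* SMN address of the NBIF MGCG control register; it is reached through the
 * PCIE index/data pair (RREG32_PCIE/WREG32_PCIE) rather than direct MMIO.
 */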
#define smnNBIF_MGCG_CTRL_LCLK	0x1013a05c

static void nbio_v7_0_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}

static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
					  bool use_doorbell, int doorbell_index, int doorbell_size)
{
	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);

	u32 doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

static void nbio_v7_0_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
					 int doorbell_index, int instance)
{
	u32 reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);

	u32 doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

static void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}

static void nbio_v7_0_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							 bool enable)
{

}

static void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

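/* SYSHUB registers are not mapped directly in the register aperture; they
 * are accessed indirectly by programming SYSHUB_INDEX and then reading or
 * writing SYSHUB_DATA.
 */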
static uint32_t nbio_7_0_read_syshub_ind_mmr(struct amdgpu_device *adev, uint32_t offset)
{
	uint32_t data;

	WREG32_SOC15(NBIO, 0, mmSYSHUB_INDEX, offset);
	data = RREG32_SOC15(NBIO, 0, mmSYSHUB_DATA);

	return data;
}

static void nbio_7_0_write_syshub_ind_mmr(struct amdgpu_device *adev, uint32_t offset,
					  uint32_t data)
{
	WREG32_SOC15(NBIO, 0, mmSYSHUB_INDEX, offset);
	WREG32_SOC15(NBIO, 0, mmSYSHUB_DATA, data);
}

static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data;

	/* NBIF_MGCG_CTRL_LCLK */
	def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
		data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK_MASK;
	else
		data &= ~NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK_MASK;

	if (def != data)
		WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);

	/* SYSHUB_MGCG_CTRL_SOCCLK */
	def = data = nbio_7_0_read_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SOCCLK);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
		data |= SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_EN_SOCCLK_MASK;
	else
		data &= ~SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_EN_SOCCLK_MASK;

	if (def != data)
		nbio_7_0_write_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SOCCLK, data);

	/* SYSHUB_MGCG_CTRL_SHUBCLK */
	def = data = nbio_7_0_read_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SHUBCLK);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
		data |= SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_EN_SHUBCLK_MASK;
	else
		data &= ~SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_EN_SHUBCLK_MASK;

	if (def != data)
		nbio_7_0_write_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SHUBCLK, data);
}

static void nbio_v7_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}

static void nbio_v7_0_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

static void nbio_v7_0_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

static u32 nbio_v7_0_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
}

static u32 nbio_v7_0_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
}

static u32 nbio_v7_0_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v7_0_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}

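/* Per-client reference/mask bits for GPU_HDP_FLUSH_REQ/DONE: when a CP or
 * SDMA engine emits an HDP flush it writes its bit to the REQ register and
 * waits for the matching bit to be set in the DONE register.
 */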
const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};

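/* Under SR-IOV the HDP flush registers are not remapped for the VF, so point
 * the KFD MMIO remap offset at the real HDP_MEM_COHERENCY_FLUSH_CNTL register
 * (byte offset, hence the << 2).
 */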
static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		adev->rmmio_remap.reg_offset =
			SOC15_REG_OFFSET(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}

const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
	.get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v7_0_get_pcie_data_offset,
	.get_rev_id = nbio_v7_0_get_rev_id,
	.mc_access_enable = nbio_v7_0_mc_access_enable,
	.get_memsize = nbio_v7_0_get_memsize,
	.sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v7_0_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v7_0_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_0_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_0_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbio_v7_0_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_0_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_0_get_clockgating_state,
	.ih_control = nbio_v7_0_ih_control,
	.init_registers = nbio_v7_0_init_registers,
	.remap_hdp_registers = nbio_v7_0_remap_hdp_registers,
};