/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_7.h"

#include "nbio/nbio_7_7_0_offset.h"
#include "nbio/nbio_7_7_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
0030
0031 static void nbio_v7_7_remap_hdp_registers(struct amdgpu_device *adev)
0032 {
0033 WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
0034 adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
0035 WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
0036 adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
0037 }
0038
0039 static u32 nbio_v7_7_get_rev_id(struct amdgpu_device *adev)
0040 {
0041 u32 tmp;
0042
0043 tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
0044 tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
0045 tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
0046
0047 return tmp;
0048 }
0049
0050 static void nbio_v7_7_mc_access_enable(struct amdgpu_device *adev, bool enable)
0051 {
0052 if (enable)
0053 WREG32_SOC15(NBIO, 0, regBIF_BX1_BIF_FB_EN,
0054 BIF_BX1_BIF_FB_EN__FB_READ_EN_MASK |
0055 BIF_BX1_BIF_FB_EN__FB_WRITE_EN_MASK);
0056 else
0057 WREG32_SOC15(NBIO, 0, regBIF_BX1_BIF_FB_EN, 0);
0058 }
0059
/* Return the VRAM size reported by the RCC config register (in MB per
 * amdgpu convention elsewhere — TODO confirm units against callers). */
static u32 nbio_v7_7_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_CONFIG_MEMSIZE);
}
0064
0065 static void nbio_v7_7_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
0066 bool use_doorbell, int doorbell_index,
0067 int doorbell_size)
0068 {
0069 u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_CSDMA_DOORBELL_RANGE);
0070 u32 doorbell_range = RREG32_PCIE_PORT(reg);
0071
0072 if (use_doorbell) {
0073 doorbell_range = REG_SET_FIELD(doorbell_range,
0074 GDC0_BIF_CSDMA_DOORBELL_RANGE,
0075 OFFSET, doorbell_index);
0076 doorbell_range = REG_SET_FIELD(doorbell_range,
0077 GDC0_BIF_CSDMA_DOORBELL_RANGE,
0078 SIZE, doorbell_size);
0079 } else {
0080 doorbell_range = REG_SET_FIELD(doorbell_range,
0081 GDC0_BIF_SDMA0_DOORBELL_RANGE,
0082 SIZE, 0);
0083 }
0084
0085 WREG32_PCIE_PORT(reg, doorbell_range);
0086 }
0087
0088 static void nbio_v7_7_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
0089 int doorbell_index, int instance)
0090 {
0091 u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN0_DOORBELL_RANGE);
0092 u32 doorbell_range = RREG32_PCIE_PORT(reg);
0093
0094 if (use_doorbell) {
0095 doorbell_range = REG_SET_FIELD(doorbell_range,
0096 GDC0_BIF_VCN0_DOORBELL_RANGE, OFFSET,
0097 doorbell_index);
0098 doorbell_range = REG_SET_FIELD(doorbell_range,
0099 GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 8);
0100 } else {
0101 doorbell_range = REG_SET_FIELD(doorbell_range,
0102 GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 0);
0103 }
0104
0105 WREG32_PCIE_PORT(reg, doorbell_range);
0106 }
0107
0108 static void nbio_v7_7_enable_doorbell_aperture(struct amdgpu_device *adev,
0109 bool enable)
0110 {
0111 u32 reg;
0112
0113 reg = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN);
0114 reg = REG_SET_FIELD(reg, RCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN,
0115 BIF_DOORBELL_APER_EN, enable ? 1 : 0);
0116
0117 WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN, reg);
0118 }
0119
0120 static void nbio_v7_7_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
0121 bool enable)
0122 {
0123 u32 tmp = 0;
0124
0125 if (enable) {
0126 tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
0127 DOORBELL_SELFRING_GPA_APER_EN, 1) |
0128 REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
0129 DOORBELL_SELFRING_GPA_APER_MODE, 1) |
0130 REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
0131 DOORBELL_SELFRING_GPA_APER_SIZE, 0);
0132
0133 WREG32_SOC15(NBIO, 0,
0134 regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
0135 lower_32_bits(adev->doorbell.base));
0136 WREG32_SOC15(NBIO, 0,
0137 regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
0138 upper_32_bits(adev->doorbell.base));
0139 }
0140
0141 WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
0142 tmp);
0143 }
0144
0145
0146 static void nbio_v7_7_ih_doorbell_range(struct amdgpu_device *adev,
0147 bool use_doorbell, int doorbell_index)
0148 {
0149 u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0,
0150 regGDC0_BIF_IH_DOORBELL_RANGE);
0151
0152 if (use_doorbell) {
0153 ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
0154 GDC0_BIF_IH_DOORBELL_RANGE, OFFSET,
0155 doorbell_index);
0156 ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
0157 GDC0_BIF_IH_DOORBELL_RANGE, SIZE,
0158 2);
0159 } else {
0160 ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
0161 GDC0_BIF_IH_DOORBELL_RANGE, SIZE,
0162 0);
0163 }
0164
0165 WREG32_SOC15(NBIO, 0, regGDC0_BIF_IH_DOORBELL_RANGE,
0166 ih_doorbell_range);
0167 }
0168
/* One-time IH setup: program the dummy-read page and snoop behavior. */
static void nbio_v7_7_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control: point INTERRUPT_CNTL2 at the dummy page
	 * (address is in units of 256 bytes, hence the >> 8) */
	WREG32_SOC15(NBIO, 0, regBIF_BX1_INTERRUPT_CNTL2,
		     adev->dummy_page_addr >> 8);

	interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX1_INTERRUPT_CNTL);

	/*
	 * IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX1_INTERRUPT_CNTL,
				       IH_DUMMY_RD_OVERRIDE, 0);

	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX1_INTERRUPT_CNTL,
				       IH_REQ_NONSNOOP_EN, 0);

	WREG32_SOC15(NBIO, 0, regBIF_BX1_INTERRUPT_CNTL, interrupt_cntl);
}
0191
/* Offset of the GPU HDP flush request register, for ring-level flushes. */
static u32 nbio_v7_7_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
}
0196
/* Offset of the GPU HDP flush done register, polled against the req bits. */
static u32 nbio_v7_7_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
}
0201
/* Offset of the indirect PCIE index register (pairs with PCIE_DATA2). */
static u32 nbio_v7_7_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_INDEX2);
}
0206
/* Offset of the indirect PCIE data register (pairs with PCIE_INDEX2). */
static u32 nbio_v7_7_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_DATA2);
}
0211
/* Offset of the RSMU index register used for PCIE-port indirect access. */
static u32 nbio_v7_7_get_pcie_port_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_INDEX);
}
0216
/* Offset of the RSMU data register used for PCIE-port indirect access. */
static u32 nbio_v7_7_get_pcie_port_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_DATA);
}
0221
/*
 * Per-client reference/mask bits in GPU_HDP_FLUSH_DONE: each CP queue and
 * SDMA instance sets its bit when its HDP flush completes; ring code waits
 * on the matching mask after writing GPU_HDP_FLUSH_REQ.
 */
const struct nbio_hdp_flush_reg nbio_v7_7_hdp_flush_reg = {
	.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
0236
0237 static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
0238 {
0239 uint32_t def, data;
0240
0241 def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3);
0242 data = REG_SET_FIELD(data, BIF0_PCIE_MST_CTRL_3,
0243 CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
0244 data = REG_SET_FIELD(data, BIF0_PCIE_MST_CTRL_3,
0245 CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
0246
0247 if (def != data)
0248 WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data);
0249
0250 }
0251
0252 static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
0253 bool enable)
0254 {
0255 uint32_t def, data;
0256
0257 if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
0258 return;
0259
0260 def = data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
0261 if (enable) {
0262 data |= (BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
0263 BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
0264 BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
0265 BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
0266 BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
0267 BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
0268 } else {
0269 data &= ~(BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
0270 BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
0271 BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
0272 BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
0273 BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
0274 BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
0275 }
0276
0277 if (def != data)
0278 WREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL, data);
0279 }
0280
0281 static void nbio_v7_7_update_medium_grain_light_sleep(struct amdgpu_device *adev,
0282 bool enable)
0283 {
0284 uint32_t def, data;
0285
0286 if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
0287 return;
0288
0289 def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
0290 if (enable)
0291 data |= BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
0292 else
0293 data &= ~BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
0294
0295 if (def != data)
0296 WREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2, data);
0297
0298 def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1);
0299 if (enable) {
0300 data |= (BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
0301 BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
0302 } else {
0303 data &= ~(BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
0304 BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
0305 }
0306
0307 if (def != data)
0308 WREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1, data);
0309 }
0310
/* Report which BIF clock/power gating features are currently active by
 * sampling the live registers; sets bits in *flags, never clears them. */
static void nbio_v7_7_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
	uint32_t data;

	/* AMD_CG_SUPPORT_BIF_MGCG: LCLK gate bit stands in for the group */
	data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
	if (data & BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS: slave-memory LS bit stands in for the group */
	data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
	if (data & BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}
0326
/* NBIO 7.7 callback table wired into the common amdgpu NBIO layer. */
const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
	.get_hdp_flush_req_offset = nbio_v7_7_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_7_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v7_7_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v7_7_get_pcie_data_offset,
	.get_pcie_port_index_offset = nbio_v7_7_get_pcie_port_index_offset,
	.get_pcie_port_data_offset = nbio_v7_7_get_pcie_port_data_offset,
	.get_rev_id = nbio_v7_7_get_rev_id,
	.mc_access_enable = nbio_v7_7_mc_access_enable,
	.get_memsize = nbio_v7_7_get_memsize,
	.sdma_doorbell_range = nbio_v7_7_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v7_7_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v7_7_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_7_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_7_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbio_v7_7_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_7_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_7_get_clockgating_state,
	.ih_control = nbio_v7_7_ih_control,
	.init_registers = nbio_v7_7_init_registers,
	.remap_hdp_registers = nbio_v7_7_remap_hdp_registers,
};