/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v4_3.h"

#include "nbio/nbio_4_3_0_offset.h"
#include "nbio/nbio_4_3_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>

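/*
 * Remap the HDP MEM/REG flush control registers into the driver's MMIO
 * remap window (offsets from kfd_ioctl.h) so HDP flushes can be triggered
 * through the remapped page.
 */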
static void nbio_v4_3_remap_hdp_registers(struct amdgpu_device *adev)
{
    WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
        adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
    WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
        adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

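/* Read the ATI revision ID strap for device 0, function 0. */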
static u32 nbio_v4_3_get_rev_id(struct amdgpu_device *adev)
{
    u32 tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);

    tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
    tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

    return tmp;
}

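/* Enable or disable framebuffer read/write access through the BIF. */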
static void nbio_v4_3_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
    if (enable)
        WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN,
                 BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK |
                 BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
    else
        WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
}

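/* Return the framebuffer memory size (in MB) reported by RCC CONFIG_MEMSIZE. */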
static u32 nbio_v4_3_get_memsize(struct amdgpu_device *adev)
{
    return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
}

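/*
 * Route the SDMA doorbell range through S2A doorbell entry 2 (SDMA instance 0
 * only); the range is disabled when doorbells are not in use.
 */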
static void nbio_v4_3_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
                      bool use_doorbell, int doorbell_index,
                      int doorbell_size)
{
    if (instance == 0) {
        u32 doorbell_range = RREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_2_CTRL);

        if (use_doorbell) {
            doorbell_range = REG_SET_FIELD(doorbell_range,
                               S2A_DOORBELL_ENTRY_2_CTRL,
                               S2A_DOORBELL_PORT2_ENABLE,
                               0x1);
            doorbell_range = REG_SET_FIELD(doorbell_range,
                               S2A_DOORBELL_ENTRY_2_CTRL,
                               S2A_DOORBELL_PORT2_AWID,
                               0xe);
            doorbell_range = REG_SET_FIELD(doorbell_range,
                               S2A_DOORBELL_ENTRY_2_CTRL,
                               S2A_DOORBELL_PORT2_RANGE_OFFSET,
                               doorbell_index);
            doorbell_range = REG_SET_FIELD(doorbell_range,
                               S2A_DOORBELL_ENTRY_2_CTRL,
                               S2A_DOORBELL_PORT2_RANGE_SIZE,
                               doorbell_size);
            doorbell_range = REG_SET_FIELD(doorbell_range,
                               S2A_DOORBELL_ENTRY_2_CTRL,
                               S2A_DOORBELL_PORT2_AWADDR_31_28_VALUE,
                               0x3);
        } else
            doorbell_range = REG_SET_FIELD(doorbell_range,
                               S2A_DOORBELL_ENTRY_2_CTRL,
                               S2A_DOORBELL_PORT2_RANGE_SIZE,
                               0);

        WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_2_CTRL, doorbell_range);
    }
}

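/*
 * Route VCN doorbells through S2A doorbell entry 4 (instance 0) or entry 5
 * (instance 1); both entries share the ENTRY_4 field layout used below.
 */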
static void nbio_v4_3_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
                     int doorbell_index, int instance)
{
    u32 doorbell_range;

    if (instance)
        doorbell_range = RREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_5_CTRL);
    else
        doorbell_range = RREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL);

    if (use_doorbell) {
        doorbell_range = REG_SET_FIELD(doorbell_range,
                           S2A_DOORBELL_ENTRY_4_CTRL,
                           S2A_DOORBELL_PORT4_ENABLE,
                           0x1);
        doorbell_range = REG_SET_FIELD(doorbell_range,
                           S2A_DOORBELL_ENTRY_4_CTRL,
                           S2A_DOORBELL_PORT4_AWID,
                           instance ? 0x7 : 0x4);
        doorbell_range = REG_SET_FIELD(doorbell_range,
                           S2A_DOORBELL_ENTRY_4_CTRL,
                           S2A_DOORBELL_PORT4_RANGE_OFFSET,
                           doorbell_index);
        doorbell_range = REG_SET_FIELD(doorbell_range,
                           S2A_DOORBELL_ENTRY_4_CTRL,
                           S2A_DOORBELL_PORT4_RANGE_SIZE,
                           8);
        doorbell_range = REG_SET_FIELD(doorbell_range,
                           S2A_DOORBELL_ENTRY_4_CTRL,
                           S2A_DOORBELL_PORT4_AWADDR_31_28_VALUE,
                           instance ? 0x7 : 0x4);
    } else
        doorbell_range = REG_SET_FIELD(doorbell_range,
                           S2A_DOORBELL_ENTRY_4_CTRL,
                           S2A_DOORBELL_PORT4_RANGE_SIZE,
                           0);

    if (instance)
        WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_5_CTRL, doorbell_range);
    else
        WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL, doorbell_range);
}

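/* Program the fixed S2A doorbell routing entries (0 and 3) used by the GC block. */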
static void nbio_v4_3_gc_doorbell_init(struct amdgpu_device *adev)
{
    WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_0_CTRL, 0x30000007);
    WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_3_CTRL, 0x3000000d);
}

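/* Enable or disable the doorbell aperture for this PCI function. */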
static void nbio_v4_3_enable_doorbell_aperture(struct amdgpu_device *adev,
                           bool enable)
{
    WREG32_FIELD15_PREREG(NBIO, 0, RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN,
            BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}

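/*
 * Enable or disable the self-ring doorbell aperture, which lets the GPU ring
 * its own doorbells; the aperture base is taken from adev->doorbell.base.
 */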
static void nbio_v4_3_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
                            bool enable)
{
    u32 tmp = 0;

    if (enable) {
        tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
                    DOORBELL_SELFRING_GPA_APER_EN, 1) |
              REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
                    DOORBELL_SELFRING_GPA_APER_MODE, 1) |
              REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
                    DOORBELL_SELFRING_GPA_APER_SIZE, 0);

        WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
                 lower_32_bits(adev->doorbell.base));
        WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
                 upper_32_bits(adev->doorbell.base));
    }

    WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
             tmp);
}

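/* Route the IH (interrupt handler) doorbell range through S2A doorbell entry 1. */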
static void nbio_v4_3_ih_doorbell_range(struct amdgpu_device *adev,
                    bool use_doorbell, int doorbell_index)
{
    u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_1_CTRL);

    if (use_doorbell) {
        ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
                          S2A_DOORBELL_ENTRY_1_CTRL,
                          S2A_DOORBELL_PORT1_ENABLE,
                          0x1);
        ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
                          S2A_DOORBELL_ENTRY_1_CTRL,
                          S2A_DOORBELL_PORT1_AWID,
                          0x0);
        ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
                          S2A_DOORBELL_ENTRY_1_CTRL,
                          S2A_DOORBELL_PORT1_RANGE_OFFSET,
                          doorbell_index);
        ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
                          S2A_DOORBELL_ENTRY_1_CTRL,
                          S2A_DOORBELL_PORT1_RANGE_SIZE,
                          2);
        ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
                          S2A_DOORBELL_ENTRY_1_CTRL,
                          S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
                          0x0);
    } else
        ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
                          S2A_DOORBELL_ENTRY_1_CTRL,
                          S2A_DOORBELL_PORT1_RANGE_SIZE,
                          0);

    WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_1_CTRL, ih_doorbell_range);
}

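/* Basic BIF interrupt control setup; INTERRUPT_CNTL2 is pointed at the dummy page. */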
static void nbio_v4_3_ih_control(struct amdgpu_device *adev)
{
    u32 interrupt_cntl;

    /* setup interrupt control */
    WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);

    interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL);
    /*
     * BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
     * BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
     */
    interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL,
                       IH_DUMMY_RD_OVERRIDE, 0);

    /* BIF_BX0_INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
    interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL,
                       IH_REQ_NONSNOOP_EN, 0);

    WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL, interrupt_cntl);
}

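/* Enable or disable BIF medium-grain clock gating via the CPM_CONTROL gate bits. */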
static void nbio_v4_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                               bool enable)
{
    uint32_t def, data;

    if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
        return;

    def = data = RREG32_SOC15(NBIO, 0, regCPM_CONTROL);
    if (enable) {
        data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
             CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
             CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
             CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
             CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
             CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
    } else {
        data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
              CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
              CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
              CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
              CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
              CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
    }

    if (def != data)
        WREG32_SOC15(NBIO, 0, regCPM_CONTROL, data);
}

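/* Enable or disable BIF memory light sleep (PCIE_CNTL2 slave memory LS). */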
static void nbio_v4_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                              bool enable)
{
    uint32_t def, data;

    if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
        return;

    /* TODO: need update in future */
    def = data = RREG32_SOC15(NBIO, 0, regPCIE_CNTL2);
    if (enable) {
        data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
    } else {
        data &= ~PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
    }

    if (def != data)
        WREG32_SOC15(NBIO, 0, regPCIE_CNTL2, data);
}

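/* Report which BIF clock gating features are currently enabled. */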
static void nbio_v4_3_get_clockgating_state(struct amdgpu_device *adev,
                        u64 *flags)
{
    int data;

    /* AMD_CG_SUPPORT_BIF_MGCG */
    data = RREG32_SOC15(NBIO, 0, regCPM_CONTROL);
    if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
        *flags |= AMD_CG_SUPPORT_BIF_MGCG;

    /* AMD_CG_SUPPORT_BIF_LS */
    data = RREG32_SOC15(NBIO, 0, regPCIE_CNTL2);
    if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
        *flags |= AMD_CG_SUPPORT_BIF_LS;
}

static u32 nbio_v4_3_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
    return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
}

static u32 nbio_v4_3_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
    return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
}

static u32 nbio_v4_3_get_pcie_index_offset(struct amdgpu_device *adev)
{
    return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_INDEX);
}

static u32 nbio_v4_3_get_pcie_data_offset(struct amdgpu_device *adev)
{
    return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_DATA);
}

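/*
 * Reference/mask bits used by the ring code when polling GPU_HDP_FLUSH_REQ /
 * GPU_HDP_FLUSH_DONE to complete an HDP flush.
 */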
const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg = {
    .ref_and_mask_cp0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP0_MASK,
    .ref_and_mask_cp1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP1_MASK,
    .ref_and_mask_cp2 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP2_MASK,
    .ref_and_mask_cp3 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP3_MASK,
    .ref_and_mask_cp4 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP4_MASK,
    .ref_and_mask_cp5 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP5_MASK,
    .ref_and_mask_cp6 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP6_MASK,
    .ref_and_mask_cp7 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP7_MASK,
    .ref_and_mask_cp8 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP8_MASK,
    .ref_and_mask_cp9 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP9_MASK,
    .ref_and_mask_sdma0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
    .ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};

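/* No NBIO-specific register initialization is needed for this IP version. */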
static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
{
    return;
}

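/* Return the VBIOS ROM offset exposed through REGS_ROM_OFFSET_CTRL. */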
static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
{
    u32 data, rom_offset;

    data = RREG32_SOC15(NBIO, 0, regREGS_ROM_OFFSET_CTRL);
    rom_offset = REG_GET_FIELD(data, REGS_ROM_OFFSET_CTRL, ROM_OFFSET);

    return rom_offset;
}

#ifdef CONFIG_PCIEASPM
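/*
 * Program PCIe Latency Tolerance Reporting (LTR); LTR is only enabled in
 * DEVICE_CNTL2 when the upstream path supports it (pdev->ltr_path).
 */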
static void nbio_v4_3_program_ltr(struct amdgpu_device *adev)
{
    uint32_t def, data;

    def = RREG32_SOC15(NBIO, 0, regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
    data = 0x35EB;
    data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
    data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);

    def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP2);
    data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP2, data);

    def = data = RREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
    if (adev->pdev->ltr_path)
        data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
    else
        data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
#endif

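/* Program PCIe ASPM (L0s/L1) behavior; only applies to PCIE IP v7.4.0 and v7.6.0. */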
static void nbio_v4_3_program_aspm(struct amdgpu_device *adev)
{
#ifdef CONFIG_PCIEASPM
    uint32_t def, data;

    if (!(adev->ip_versions[PCIE_HWIP][0] == IP_VERSION(7, 4, 0)) &&
          !(adev->ip_versions[PCIE_HWIP][0] == IP_VERSION(7, 6, 0)))
        return;

    def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL);
    data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
    data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
    data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL, data);

    def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL7);
    data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL7, data);

    def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL3);
    data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL3, data);

    def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3);
    data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
    data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3, data);

    def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5);
    data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5, data);

    def = data = RREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
    data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);

    WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);

    def = data = RREG32_SOC15(NBIO, 0, regPSWUSP0_PCIE_LC_CNTL2);
    data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
        PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
    data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regPSWUSP0_PCIE_LC_CNTL2, data);

    def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL4);
    data |= PCIE_LC_CNTL4__LC_L1_POWERDOWN_MASK;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL4, data);

    def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_RXRECOVER_RXSTANDBY_CNTL);
    data |= PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RX_L0S_STANDBY_EN_MASK;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regPCIE_LC_RXRECOVER_RXSTANDBY_CNTL, data);

    nbio_v4_3_program_ltr(adev);

    def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3);
    data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
    data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3, data);

    def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5);
    data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5, data);

    def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL);
    data |= 0x0 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
    data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
    data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL, data);

    def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL3);
    data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
    if (def != data)
        WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL3, data);
#endif
}

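/* NBIO callbacks registered with the amdgpu core for NBIO 4.3 parts. */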
const struct amdgpu_nbio_funcs nbio_v4_3_funcs = {
    .get_hdp_flush_req_offset = nbio_v4_3_get_hdp_flush_req_offset,
    .get_hdp_flush_done_offset = nbio_v4_3_get_hdp_flush_done_offset,
    .get_pcie_index_offset = nbio_v4_3_get_pcie_index_offset,
    .get_pcie_data_offset = nbio_v4_3_get_pcie_data_offset,
    .get_rev_id = nbio_v4_3_get_rev_id,
    .mc_access_enable = nbio_v4_3_mc_access_enable,
    .get_memsize = nbio_v4_3_get_memsize,
    .sdma_doorbell_range = nbio_v4_3_sdma_doorbell_range,
    .vcn_doorbell_range = nbio_v4_3_vcn_doorbell_range,
    .gc_doorbell_init = nbio_v4_3_gc_doorbell_init,
    .enable_doorbell_aperture = nbio_v4_3_enable_doorbell_aperture,
    .enable_doorbell_selfring_aperture = nbio_v4_3_enable_doorbell_selfring_aperture,
    .ih_doorbell_range = nbio_v4_3_ih_doorbell_range,
    .update_medium_grain_clock_gating = nbio_v4_3_update_medium_grain_clock_gating,
    .update_medium_grain_light_sleep = nbio_v4_3_update_medium_grain_light_sleep,
    .get_clockgating_state = nbio_v4_3_get_clockgating_state,
    .ih_control = nbio_v4_3_ih_control,
    .init_registers = nbio_v4_3_init_registers,
    .remap_hdp_registers = nbio_v4_3_remap_hdp_registers,
    .get_rom_offset = nbio_v4_3_get_rom_offset,
    .program_aspm = nbio_v4_3_program_aspm,
};