Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright 2018 Advanced Micro Devices, Inc.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  */
0023 #include "amdgpu.h"
0024 #include "amdgpu_atombios.h"
0025 #include "nbio_v7_4.h"
0026 #include "amdgpu_ras.h"
0027 
0028 #include "nbio/nbio_7_4_offset.h"
0029 #include "nbio/nbio_7_4_sh_mask.h"
0030 #include "nbio/nbio_7_4_0_smn.h"
0031 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
0032 #include <uapi/linux/kfd_ioctl.h>
0033 
/*
 * SMN addresses and strap-field masks/shifts used by the ASPM/LTR code
 * below.  These are defined locally because they are not present in the
 * nbio 7.4 register headers included above.
 */
#define smnPCIE_LC_CNTL		0x11140280
#define smnPCIE_LC_CNTL3	0x111402d4
#define smnPCIE_LC_CNTL6	0x111402ec
#define smnPCIE_LC_CNTL7	0x111402f0
#define smnNBIF_MGCG_CTRL_LCLK	0x1013a21c
#define smnRCC_BIF_STRAP3	0x1012348c
#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK	0x0000FFFFL
#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK	0xFFFF0000L
#define smnRCC_BIF_STRAP5	0x10123494
#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK	0x0000FFFFL
#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2	0x1014008c
#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK		0x0400L
#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP	0x10140324
#define smnPSWUSP0_PCIE_LC_CNTL2		0x111402c4
#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL	0x10123538
#define smnRCC_BIF_STRAP2	0x10123488
#define RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK	0x00004000L
#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT	0x0
#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT	0x10
#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT	0x0
0054 
/*
 * These are nbio v7_4_1 registers mask. Temporarily define these here since
 * nbio v7_4_1 header is incomplete.
 */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK	0x00001000L /* Don't use.  Firmware uses this bit internally */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK	0x00002000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK	0x00004000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK	0x00008000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK	0x00010000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK	0x00020000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK	0x00040000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK	0x00080000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK	0x00100000L
0068 
/*
 * BIF_MMSCH1_DOORBELL_RANGE register and its Aldebaran (ALDE) relocation.
 * Note: the OFFSET/SIZE masks were previously defined twice; the duplicate
 * pair has been removed.
 */
#define mmBIF_MMSCH1_DOORBELL_RANGE			0x01dc
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX		2
//BIF_MMSCH1_DOORBELL_RANGE
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET__SHIFT	0x2
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE__SHIFT		0x10
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK		0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK		0x001F0000L

#define mmBIF_MMSCH1_DOORBELL_RANGE_ALDE		0x01d8
#define mmBIF_MMSCH1_DOORBELL_RANGE_ALDE_BASE_IDX	2
//BIF_MMSCH1_DOORBELL_ALDE_RANGE
#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__OFFSET__SHIFT	0x2
#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__SIZE__SHIFT	0x10
#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__OFFSET_MASK	0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__SIZE_MASK	0x001F0000L
/*
 * Aldebaran (ALDE) relocated copies of registers that live at different
 * offsets than on other nbio v7.4 parts; selected at runtime by checking
 * adev->asic_type == CHIP_ALDEBARAN.
 */
#define mmRCC_DEV0_EPF0_STRAP0_ALDE		0x0015
#define mmRCC_DEV0_EPF0_STRAP0_ALDE_BASE_IDX	2

#define mmBIF_DOORBELL_INT_CNTL_ALDE		0x00fe
#define mmBIF_DOORBELL_INT_CNTL_ALDE_BASE_IDX	2
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE__SHIFT	0x18
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE_MASK	0x01000000L

#define mmBIF_INTR_CNTL_ALDE			0x0101
#define mmBIF_INTR_CNTL_ALDE_BASE_IDX		2
0098 
0099 static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
0100                     void *ras_error_status);
0101 
/*
 * Point the two HDP flush remap registers at the KFD MMIO remap window
 * (adev->rmmio_remap.reg_offset plus the per-register KFD offset), so the
 * HDP mem/reg flush controls are reachable through the remapped aperture.
 */
static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}
0109 
0110 static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
0111 {
0112     u32 tmp;
0113 
0114     if (adev->asic_type == CHIP_ALDEBARAN)
0115         tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_ALDE);
0116     else
0117         tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
0118 
0119     tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
0120     tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
0121 
0122     return tmp;
0123 }
0124 
0125 static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
0126 {
0127     if (enable)
0128         WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
0129             BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
0130     else
0131         WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
0132 }
0133 
/* Read the configured framebuffer memory size from RCC_CONFIG_MEMSIZE. */
static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}
0138 
/*
 * Program the BIF doorbell range register for one SDMA instance.
 *
 * @instance:       SDMA engine index; the register offset is derived from it
 *                  (see the address table in the comment below)
 * @use_doorbell:   true to program offset/size, false to disable (size = 0)
 * @doorbell_index: doorbell offset to program when enabled
 * @doorbell_size:  number of doorbells in the range when enabled
 */
static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
			bool use_doorbell, int doorbell_index, int doorbell_size)
{
	u32 reg, doorbell_range;

	if (instance < 2) {
		reg = instance +
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
	} else {
		/*
		 * These registers address of SDMA2~7 is not consecutive
		 * from SDMA0~1. Need plus 4 dwords offset.
		 *
		 *   BIF_SDMA0_DOORBELL_RANGE:  0x3bc0
		 *   BIF_SDMA1_DOORBELL_RANGE:  0x3bc4
		 *   BIF_SDMA2_DOORBELL_RANGE:  0x3bd8
		 *   BIF_SDMA4_DOORBELL_RANGE:
		 *     ARCTURUS:  0x3be0
		 *     ALDEBARAN: 0x3be4
		 */
		if (adev->asic_type == CHIP_ALDEBARAN && instance == 4)
			/* Aldebaran's SDMA4 range is one extra dword along. */
			reg = instance + 0x4 + 0x1 +
				SOC15_REG_OFFSET(NBIO, 0,
						 mmBIF_SDMA0_DOORBELL_RANGE);
		else
			reg = instance + 0x4 +
				SOC15_REG_OFFSET(NBIO, 0,
						 mmBIF_SDMA0_DOORBELL_RANGE);
	}

	doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}
0179 
/*
 * Program the MMSCH doorbell range for a VCN instance.  Instance 0 uses
 * MMSCH0; any non-zero instance uses MMSCH1 (at the relocated ALDE offset
 * on Aldebaran).  The field layout matches MMSCH0, so the MMSCH0 field
 * macros are used for all variants.  Size is fixed at 8 when enabled.
 */
static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
					 int doorbell_index, int instance)
{
	u32 reg;
	u32 doorbell_range;

	if (instance) {
		if (adev->asic_type == CHIP_ALDEBARAN)
			reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE_ALDE);
		else
			reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE);
	} else
		reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);

	doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}
0208 
/* Enable/disable the BIF doorbell aperture for this function. */
static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
0214 
0215 static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
0216                             bool enable)
0217 {
0218     u32 tmp = 0;
0219 
0220     if (enable) {
0221         tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
0222               REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
0223               REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
0224 
0225         WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
0226                  lower_32_bits(adev->doorbell.base));
0227         WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
0228                  upper_32_bits(adev->doorbell.base));
0229     }
0230 
0231     WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
0232 }
0233 
0234 static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
0235                     bool use_doorbell, int doorbell_index)
0236 {
0237     u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE);
0238 
0239     if (use_doorbell) {
0240         ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
0241         ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 4);
0242     } else
0243         ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
0244 
0245     WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
0246 }
0247 
0248 
/* Medium-grain clock gating control: intentionally a no-op on v7.4. */
static void nbio_v7_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	//TODO: Add support for v7.4
}
0254 
0255 static void nbio_v7_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
0256                               bool enable)
0257 {
0258     uint32_t def, data;
0259 
0260     def = data = RREG32_PCIE(smnPCIE_CNTL2);
0261     if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
0262         data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
0263              PCIE_CNTL2__MST_MEM_LS_EN_MASK |
0264              PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
0265     } else {
0266         data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
0267               PCIE_CNTL2__MST_MEM_LS_EN_MASK |
0268               PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
0269     }
0270 
0271     if (def != data)
0272         WREG32_PCIE(smnPCIE_CNTL2, data);
0273 }
0274 
/*
 * Report the currently active BIF clock-gating features by OR-ing the
 * corresponding AMD_CG_SUPPORT_* bits into *flags.
 */
static void nbio_v7_4_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}
0290 
/*
 * One-time interrupt controller setup: point the dummy-read page at the
 * device's dummy page and configure dummy-read / non-snoop behavior.
 */
static void nbio_v7_4_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
0306 
/* Register offset of GPU_HDP_FLUSH_REQ, for engines that poll/poke it. */
static u32 nbio_v7_4_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
}
0311 
/* Register offset of GPU_HDP_FLUSH_DONE, the completion side of the above. */
static u32 nbio_v7_4_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
}
0316 
/* Register offset of the PCIE indirect-access index register (PCIE_INDEX2). */
static u32 nbio_v7_4_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}
0321 
/* Register offset of the PCIE indirect-access data register (PCIE_DATA2). */
static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
0326 
/*
 * Per-client reference masks used when waiting on GPU_HDP_FLUSH_DONE.
 * Each engine (CP queues, SDMA instances) checks its own bit.
 */
const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
0341 
/*
 * Early register init: under SR-IOV, set up the HDP remap offset for the
 * VF; on nbio 7.4.4 bare metal, make sure any leftover BACO (dummy) enable
 * bits from a previous driver instance are cleared.
 */
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
	uint32_t baco_cntl;

	if (amdgpu_sriov_vf(adev))
		adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
			mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;

	if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4) &&
	    !amdgpu_sriov_vf(adev)) {
		baco_cntl = RREG32_SOC15(NBIO, 0, mmBACO_CNTL);
		if (baco_cntl &
		    (BACO_CNTL__BACO_DUMMY_EN_MASK | BACO_CNTL__BACO_EN_MASK)) {
			baco_cntl &= ~(BACO_CNTL__BACO_DUMMY_EN_MASK |
				       BACO_CNTL__BACO_EN_MASK);
			dev_dbg(adev->dev, "Unsetting baco dummy mode %x",
				baco_cntl);
			WREG32_SOC15(NBIO, 0, mmBACO_CNTL, baco_cntl);
		}
	}
}
0363 
0364 static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
0365 {
0366     uint32_t bif_doorbell_intr_cntl;
0367     struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
0368     struct ras_err_data err_data = {0, 0, 0, NULL};
0369     struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
0370 
0371     if (adev->asic_type == CHIP_ALDEBARAN)
0372         bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
0373     else
0374         bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
0375 
0376     if (REG_GET_FIELD(bif_doorbell_intr_cntl,
0377         BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
0378         /* driver has to clear the interrupt status when bif ring is disabled */
0379         bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
0380                         BIF_DOORBELL_INT_CNTL,
0381                         RAS_CNTLR_INTERRUPT_CLEAR, 1);
0382         if (adev->asic_type == CHIP_ALDEBARAN)
0383             WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl);
0384         else
0385             WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
0386 
0387         if (!ras->disable_ras_err_cnt_harvest) {
0388             /*
0389              * clear error status after ras_controller_intr
0390              * according to hw team and count ue number
0391              * for query
0392              */
0393             nbio_v7_4_query_ras_error_count(adev, &err_data);
0394 
0395             /* logging on error cnt and printing for awareness */
0396             obj->err_data.ue_count += err_data.ue_count;
0397             obj->err_data.ce_count += err_data.ce_count;
0398 
0399             if (err_data.ce_count)
0400                 dev_info(adev->dev, "%ld correctable hardware "
0401                         "errors detected in %s block, "
0402                         "no user action is needed.\n",
0403                         obj->err_data.ce_count,
0404                         get_ras_block_str(adev->nbio.ras_if));
0405 
0406             if (err_data.ue_count)
0407                 dev_info(adev->dev, "%ld uncorrectable hardware "
0408                         "errors detected in %s block\n",
0409                         obj->err_data.ue_count,
0410                         get_ras_block_str(adev->nbio.ras_if));
0411         }
0412 
0413         dev_info(adev->dev, "RAS controller interrupt triggered "
0414                     "by NBIF error\n");
0415 
0416         /* ras_controller_int is dedicated for nbif ras error,
0417          * not the global interrupt for sync flood
0418          */
0419         amdgpu_ras_reset_gpu(adev);
0420     }
0421 }
0422 
/*
 * Handle an ATHUB err_event interrupt delivered without the BIF ring:
 * acknowledge it in BIF_DOORBELL_INT_CNTL (ALDE variant on Aldebaran)
 * and forward to the global RAS ISR.
 */
static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_intr_cntl;

	if (adev->asic_type == CHIP_ALDEBARAN)
		bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
	else
		bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);

	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
		BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
		/* driver has to clear the interrupt status when bif ring is disabled */
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						BIF_DOORBELL_INT_CNTL,
						RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);

		if (adev->asic_type == CHIP_ALDEBARAN)
			WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl);
		else
			WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		amdgpu_ras_global_ras_isr(adev);
	}
}
0447 
0448 
/*
 * irq_src .set callback for the RAS controller interrupt.  Always returns 0.
 */
static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
						  struct amdgpu_irq_src *src,
						  unsigned type,
						  enum amdgpu_interrupt_state state)
{
	/* The ras_controller_irq enablement should be done in psp bl when it
	 * tries to enable ras feature. Driver only need to set the correct interrupt
	 * vector for bare-metal and sriov use case respectively
	 */
	uint32_t bif_intr_cntl;

	if (adev->asic_type == CHIP_ALDEBARAN)
		bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
	else
		bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE) {
		/* set interrupt vector select bit to 0 to select
		 * vector 1 for bare metal case */
		bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
					      BIF_INTR_CNTL,
					      RAS_INTR_VEC_SEL, 0);

		if (adev->asic_type == CHIP_ALDEBARAN)
			WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
		else
			WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);

	}

	return 0;
}
0481 
/* irq_src .process callback for the RAS controller interrupt (dummy). */
static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev,
						struct amdgpu_irq_src *source,
						struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for ras_controller_irq should be written
	 * to BIFring instead of general iv ring. However, due to known bif ring
	 * hw bug, it has to be disabled. There is no chance the process function
	 * will be invoked. Just left it as a dummy one.
	 */
	return 0;
}
0493 
/*
 * irq_src .set callback for the ATHUB err_event interrupt.  Mirrors the
 * RAS controller variant above.  Always returns 0.
 */
static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
						       struct amdgpu_irq_src *src,
						       unsigned type,
						       enum amdgpu_interrupt_state state)
{
	/* The ras_controller_irq enablement should be done in psp bl when it
	 * tries to enable ras feature. Driver only need to set the correct interrupt
	 * vector for bare-metal and sriov use case respectively
	 */
	uint32_t bif_intr_cntl;

	if (adev->asic_type == CHIP_ALDEBARAN)
		bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
	else
		bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE) {
		/* set interrupt vector select bit to 0 to select
		 * vector 1 for bare metal case */
		bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
					      BIF_INTR_CNTL,
					      RAS_INTR_VEC_SEL, 0);

		if (adev->asic_type == CHIP_ALDEBARAN)
			WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
		else
			WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
	}

	return 0;
}
0525 
/* irq_src .process callback for the ATHUB err_event interrupt (dummy). */
static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *source,
						 struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for err_event_athub_irq should be written
	 * to BIFring instead of general iv ring. However, due to known bif ring
	 * hw bug, it has to be disabled. There is no chance the process function
	 * will be invoked. Just left it as a dummy one.
	 */
	return 0;
}
0537 
/* irq_src callback tables for the two NBIF RAS interrupt sources. */
static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = {
	.set = nbio_v7_4_set_ras_controller_irq_state,
	.process = nbio_v7_4_process_ras_controller_irq,
};

static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = {
	.set = nbio_v7_4_set_ras_err_event_athub_irq_state,
	.process = nbio_v7_4_process_err_event_athub_irq,
};
0547 
0548 static int nbio_v7_4_init_ras_controller_interrupt (struct amdgpu_device *adev)
0549 {
0550     int r;
0551 
0552     /* init the irq funcs */
0553     adev->nbio.ras_controller_irq.funcs =
0554         &nbio_v7_4_ras_controller_irq_funcs;
0555     adev->nbio.ras_controller_irq.num_types = 1;
0556 
0557     /* register ras controller interrupt */
0558     r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
0559                   NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
0560                   &adev->nbio.ras_controller_irq);
0561 
0562     return r;
0563 }
0564 
0565 static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *adev)
0566 {
0567 
0568     int r;
0569 
0570     /* init the irq funcs */
0571     adev->nbio.ras_err_event_athub_irq.funcs =
0572         &nbio_v7_4_ras_err_event_athub_irq_funcs;
0573     adev->nbio.ras_err_event_athub_irq.num_types = 1;
0574 
0575     /* register ras err event athub interrupt */
0576     r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
0577                   NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
0578                   &adev->nbio.ras_err_event_athub_irq);
0579 
0580     return r;
0581 }
0582 
/* SMN addresses of the RAS parity / global status registers; the _ALDE
 * variants are the relocated Aldebaran addresses. */
#define smnPARITY_ERROR_STATUS_UNCORR_GRP2	0x13a20030
#define smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE	0x13b20030
#define smnRAS_GLOBAL_STATUS_LO_ALDE		0x13b20020
0586 
/*
 * Count NBIF parity errors and clear the hardware status.
 *
 * @ras_error_status: a struct ras_err_data; ce_count is bumped on a
 *                    correctable parity error, ue_count on a fatal one.
 *
 * If any error bit was set, the global status (and, for fatal errors, the
 * parity status) registers are cleared by writing back the values read,
 * and a pending central RAS interrupt is acknowledged via IOHC EOI.
 */
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
					    void *ras_error_status)
{
	uint32_t global_sts, central_sts, int_eoi, parity_sts;
	uint32_t corr, fatal, non_fatal;
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	if (adev->asic_type == CHIP_ALDEBARAN)
		global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE);
	else
		global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);

	corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
	fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
	non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
				ParityErrNonFatal);

	/* parity status is read up front so the clear below uses the value
	 * that was current when the error was observed */
	if (adev->asic_type == CHIP_ALDEBARAN)
		parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE);
	else
		parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);

	if (corr)
		err_data->ce_count++;
	if (fatal)
		err_data->ue_count++;

	if (corr || fatal || non_fatal) {
		central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);

		/* clear error status register */
		if (adev->asic_type == CHIP_ALDEBARAN)
			WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE, global_sts);
		else
			WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);

		if (fatal)
		{
			/* clear parity fatal error indication field */
			if (adev->asic_type == CHIP_ALDEBARAN)
				WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE, parity_sts);
			else
				WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, parity_sts);
		}

		if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
				BIFL_RasContller_Intr_Recv)) {
			/* clear interrupt status register */
			WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts);
			int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI);
			int_eoi = REG_SET_FIELD(int_eoi,
					IOHC_INTERRUPT_EOI, SMI_EOI, 1);
			WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi);
		}
	}
}
0643 
/*
 * Enable/disable the doorbell interrupt.  The register bit is a DISABLE
 * flag, hence the inverted enable ? 0 : 1 mapping.  Aldebaran uses the
 * relocated ALDE register.
 */
static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	if (adev->asic_type == CHIP_ALDEBARAN)
		WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL_ALDE,
			       DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
	else
		WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
			       DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}
0654 
/* RAS hardware ops exposed by this block: error counting only. */
const struct amdgpu_ras_block_hw_ops nbio_v7_4_ras_hw_ops = {
	.query_ras_error_count = nbio_v7_4_query_ras_error_count,
};
0658 
/* RAS block descriptor for the PCIe/BIF block on nbio v7.4. */
struct amdgpu_nbio_ras nbio_v7_4_ras = {
	.ras_block = {
		.ras_comm = {
			.name = "pcie_bif",
			.block = AMDGPU_RAS_BLOCK__PCIE_BIF,
			.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
		},
		.hw_ops = &nbio_v7_4_ras_hw_ops,
		.ras_late_init = amdgpu_nbio_ras_late_init,
	},
	.handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
	.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
	.init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
	.init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
};
0674 
0675 
0676 #ifdef CONFIG_PCIEASPM
/*
 * Enable PCIe Latency Tolerance Reporting: allow LTR messages in ASPM L1,
 * allow LTR private messages outside D0, and set the device LTR_EN bit.
 * The initial 0x75EB write seeds EP_PCIE_TX_LTR_CNTL; the exact meaning of
 * that value is not derivable from this file — presumably a tuned default
 * from the hw team (TODO confirm against register spec).
 */
static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
{
	uint32_t def, data;

	WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);

	def = data = RREG32_PCIE(smnRCC_BIF_STRAP2);
	data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
	if (def != data)
		WREG32_PCIE(smnRCC_BIF_STRAP2, data);

	def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
	data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
	if (def != data)
		WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);

	def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
	data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
0698 #endif
0699 
/*
 * Program PCIe Active State Power Management for nbio v7.4.  Skipped
 * entirely on 7.4.4 and when CONFIG_PCIEASPM is off.  The sequence is
 * order-dependent: timers are first cleared, power-down/LTR settings
 * applied, then the strap timers and L1 inactivity are programmed with
 * their final values.  LTR setup is only done if the upstream path
 * advertises LTR (adev->pdev->ltr_path).
 */
static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
{
#ifdef CONFIG_PCIEASPM
	uint32_t def, data;

	if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
		return;

	/* disable L0s/L1 inactivity timers and PMI-to-L1 while configuring */
	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
	data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
	data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
	data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL7, data);

	def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
	data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
	if (def != data)
		WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
	data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL3, data);

	/* zero the vlink ASPM/L1-entry timers before the final values below */
	def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
	data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
	data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
	if (def != data)
		WREG32_PCIE(smnRCC_BIF_STRAP3, data);

	def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
	data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
	if (def != data)
		WREG32_PCIE(smnRCC_BIF_STRAP5, data);

	def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
	data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);

	WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);

	def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
	data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
		PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
	data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
	if (def != data)
		WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
	data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
		PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL6, data);

	/* Don't bother about LTR if LTR is not enabled
	 * in the path */
	if (adev->pdev->ltr_path)
		nbio_v7_4_program_ltr(adev);

	/* final vlink timer values */
	def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
	data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
	data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
	if (def != data)
		WREG32_PCIE(smnRCC_BIF_STRAP3, data);

	def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
	data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
	if (def != data)
		WREG32_PCIE(smnRCC_BIF_STRAP5, data);

	/* re-enable L1 entry (inactivity 0x9), keep L0s disabled */
	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
	data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
	data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
	data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL3, data);
#endif
}
0790 
/* nbio callback table for v7.4 parts (Vega20 / Arcturus / Aldebaran). */
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
	.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
	.get_rev_id = nbio_v7_4_get_rev_id,
	.mc_access_enable = nbio_v7_4_mc_access_enable,
	.get_memsize = nbio_v7_4_get_memsize,
	.sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
	.enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt,
	.update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_4_get_clockgating_state,
	.ih_control = nbio_v7_4_ih_control,
	.init_registers = nbio_v7_4_init_registers,
	.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
	.program_aspm =  nbio_v7_4_program_aspm,
};