/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "umc_v6_1.h"
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu.h"

#include "rsmu/rsmu_0_0_2_offset.h"
#include "rsmu/rsmu_0_0_2_sh_mask.h"
#include "umc/umc_6_1_1_offset.h"
#include "umc/umc_6_1_1_sh_mask.h"
#include "umc/umc_6_1_2_offset.h"

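/* register address distance between two consecutive UMC instances */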
#define UMC_6_INST_DIST         0x40000

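/* DRAM channel index for each (UMC instance, channel instance) pair,
 * used when translating a UMC channel address to a SoC physical address */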
const uint32_t
    umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = {
        {2, 18, 11, 27},    {4, 20, 13, 29},
        {1, 17, 8, 24},     {7, 23, 14, 30},
        {10, 26, 3, 19},    {12, 28, 5, 21},
        {9, 25, 0, 16},     {15, 31, 6, 22}
};

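/* set RSMU_UMC_INDEX_MODE_EN in the RSMU UMC index register */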
static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev)
{
    uint32_t rsmu_umc_addr, rsmu_umc_val;

    rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
            mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
    rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);

    rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
            RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
            RSMU_UMC_INDEX_MODE_EN, 1);

    WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
}

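/* clear RSMU_UMC_INDEX_MODE_EN in the RSMU UMC index register */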
static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
{
    uint32_t rsmu_umc_addr, rsmu_umc_val;

    rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
            mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
    rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);

    rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
            RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
            RSMU_UMC_INDEX_MODE_EN, 0);

    WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
}

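/* return the current value of RSMU_UMC_INDEX_MODE_EN (nonzero while index mode is enabled) */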
static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev)
{
    uint32_t rsmu_umc_addr, rsmu_umc_val;

    rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
            mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
    rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);

    return REG_GET_FIELD(rsmu_umc_val,
            RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
            RSMU_UMC_INDEX_MODE_EN);
}

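/* register offset of a channel: per-channel stride plus per-instance distance */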
static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
                        uint32_t umc_inst,
                        uint32_t ch_inst)
{
    return adev->umc.channel_offs * ch_inst + UMC_6_INST_DIST * umc_inst;
}

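/* reset the ECC error counters of both chips (chip selects 0 and 1) on one channel */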
static void umc_v6_1_clear_error_count_per_channel(struct amdgpu_device *adev,
                    uint32_t umc_reg_offset)
{
    uint32_t ecc_err_cnt_addr;
    uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;

    if (adev->asic_type == CHIP_ARCTURUS) {
        /* UMC 6_1_2 registers */
        ecc_err_cnt_sel_addr =
            SOC15_REG_OFFSET(UMC, 0,
                    mmUMCCH0_0_EccErrCntSel_ARCT);
        ecc_err_cnt_addr =
            SOC15_REG_OFFSET(UMC, 0,
                    mmUMCCH0_0_EccErrCnt_ARCT);
    } else {
        /* UMC 6_1_1 registers */
        ecc_err_cnt_sel_addr =
            SOC15_REG_OFFSET(UMC, 0,
                    mmUMCCH0_0_EccErrCntSel);
        ecc_err_cnt_addr =
            SOC15_REG_OFFSET(UMC, 0,
                    mmUMCCH0_0_EccErrCnt);
    }

    /* select the lower chip */
    ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
                    umc_reg_offset) * 4);
    ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
                    UMCCH0_0_EccErrCntSel,
                    EccErrCntCsSel, 0);
    WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
            ecc_err_cnt_sel);

    /* clear lower chip error count */
    WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
            UMC_V6_1_CE_CNT_INIT);

    /* select the higher chip */
    ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
                    umc_reg_offset) * 4);
    ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
                    UMCCH0_0_EccErrCntSel,
                    EccErrCntCsSel, 1);
    WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
            ecc_err_cnt_sel);

    /* clear higher chip error count */
    WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
            UMC_V6_1_CE_CNT_INIT);
}

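/* reset the error counters on every UMC channel, temporarily leaving
 * index mode so the registers can be addressed directly */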
static void umc_v6_1_clear_error_count(struct amdgpu_device *adev)
{
    uint32_t umc_inst        = 0;
    uint32_t ch_inst         = 0;
    uint32_t umc_reg_offset  = 0;
    uint32_t rsmu_umc_index_state =
                umc_v6_1_get_umc_index_mode_state(adev);

    if (rsmu_umc_index_state)
        umc_v6_1_disable_umc_index_mode(adev);

    LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
        umc_reg_offset = get_umc_6_reg_offset(adev,
                        umc_inst,
                        ch_inst);

        umc_v6_1_clear_error_count_per_channel(adev,
                        umc_reg_offset);
    }

    if (rsmu_umc_index_state)
        umc_v6_1_enable_umc_index_mode(adev);
}

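/* accumulate correctable (CE) errors for one channel: read the counters of
 * both chip selects and check MCUMC_STATUS for an SRAM correctable error */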
static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
                           uint32_t umc_reg_offset,
                           unsigned long *error_count)
{
    uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
    uint32_t ecc_err_cnt, ecc_err_cnt_addr;
    uint64_t mc_umc_status;
    uint32_t mc_umc_status_addr;

    if (adev->asic_type == CHIP_ARCTURUS) {
        /* UMC 6_1_2 registers */
        ecc_err_cnt_sel_addr =
            SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
        ecc_err_cnt_addr =
            SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
        mc_umc_status_addr =
            SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
    } else {
        /* UMC 6_1_1 registers */
        ecc_err_cnt_sel_addr =
            SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
        ecc_err_cnt_addr =
            SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
        mc_umc_status_addr =
            SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
    }

    /* select the lower chip and check the error count */
    ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
    ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
                    EccErrCntCsSel, 0);
    WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);

    ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
    *error_count +=
        (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
         UMC_V6_1_CE_CNT_INIT);

    /* select the higher chip and check the error counter */
    ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
                    EccErrCntCsSel, 1);
    WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);

    ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
    *error_count +=
        (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
         UMC_V6_1_CE_CNT_INIT);

    /* check for SRAM correctable error;
     * MCUMC_STATUS is a 64-bit register */
    mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
    if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 &&
        REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
        REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
        *error_count += 1;
}

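/* count one uncorrectable (UE) error for the channel if MCUMC_STATUS reports
 * a valid error with any of the Deferred/UECC/PCC/UC/TCC bits set */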
static void umc_v6_1_query_uncorrectable_error_count(struct amdgpu_device *adev,
                              uint32_t umc_reg_offset,
                              unsigned long *error_count)
{
    uint64_t mc_umc_status;
    uint32_t mc_umc_status_addr;

    if (adev->asic_type == CHIP_ARCTURUS) {
        /* UMC 6_1_2 registers */
        mc_umc_status_addr =
            SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
    } else {
        /* UMC 6_1_1 registers */
        mc_umc_status_addr =
            SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
    }

    /* check the MCUMC_STATUS */
    mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
    if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
        (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
        REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
        REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
        REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
        REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
        *error_count += 1;
}

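/* RAS hook: tally CE and UE counts across every UMC channel,
 * then clear the hardware error counters */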
static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
                       void *ras_error_status)
{
    struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

    uint32_t umc_inst        = 0;
    uint32_t ch_inst         = 0;
    uint32_t umc_reg_offset  = 0;

    uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);

    if (rsmu_umc_index_state)
        umc_v6_1_disable_umc_index_mode(adev);

    if ((adev->asic_type == CHIP_ARCTURUS) &&
        amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
        DRM_WARN("Failed to disable DF-Cstate.\n");

    LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
        umc_reg_offset = get_umc_6_reg_offset(adev,
                              umc_inst,
                              ch_inst);

        umc_v6_1_query_correctable_error_count(adev,
                               umc_reg_offset,
                               &(err_data->ce_count));
        umc_v6_1_query_uncorrectable_error_count(adev,
                              umc_reg_offset,
                              &(err_data->ue_count));
    }

    if ((adev->asic_type == CHIP_ARCTURUS) &&
        amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
        DRM_WARN("Failed to enable DF-Cstate.\n");

    if (rsmu_umc_index_state)
        umc_v6_1_enable_umc_index_mode(adev);

    umc_v6_1_clear_error_count(adev);
}

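/* read the failing address from MCUMC_ADDRT0, translate it to a SoC
 * physical address and record a retired page for UE errors */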
static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
                     struct ras_err_data *err_data,
                     uint32_t umc_reg_offset,
                     uint32_t ch_inst,
                     uint32_t umc_inst)
{
    uint32_t lsb, mc_umc_status_addr;
    uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
    uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];

    if (adev->asic_type == CHIP_ARCTURUS) {
        /* UMC 6_1_2 registers */
        mc_umc_status_addr =
            SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
        mc_umc_addrt0 =
            SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT);
    } else {
        /* UMC 6_1_1 registers */
        mc_umc_status_addr =
            SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
        mc_umc_addrt0 =
            SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
    }

    mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);

    if (mc_umc_status == 0)
        return;

    if (!err_data->err_addr) {
        /* clear umc status */
        WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
        return;
    }

    /* calculate the error address if a UE/CE error is detected */
    if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
        (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
        REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {

        err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
        /* the lowest lsb bits should be ignored */
        lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB);
        err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
        err_addr &= ~((0x1ULL << lsb) - 1);

        /* translate the UMC channel address to a SoC physical address,
         * assembled from 3 parts */
        retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
                ADDR_OF_256B_BLOCK(channel_index) |
                OFFSET_IN_256B_BLOCK(err_addr);

        /* only UE error information is saved currently, CE is skipped */
        if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
                == 1)
            amdgpu_umc_fill_error_record(err_data, err_addr,
                    retired_page, channel_index, umc_inst);
    }

    /* clear umc status */
    WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
}

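/* RAS hook: query and clear the error address on every UMC channel */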
static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
                         void *ras_error_status)
{
    struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

    uint32_t umc_inst        = 0;
    uint32_t ch_inst         = 0;
    uint32_t umc_reg_offset  = 0;

    uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);

    if (rsmu_umc_index_state)
        umc_v6_1_disable_umc_index_mode(adev);

    if ((adev->asic_type == CHIP_ARCTURUS) &&
        amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
        DRM_WARN("Failed to disable DF-Cstate.\n");

    LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
        umc_reg_offset = get_umc_6_reg_offset(adev,
                              umc_inst,
                              ch_inst);

        umc_v6_1_query_error_address(adev,
                         err_data,
                         umc_reg_offset,
                         ch_inst,
                         umc_inst);
    }

    if ((adev->asic_type == CHIP_ARCTURUS) &&
        amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
        DRM_WARN("Failed to enable DF-Cstate.\n");

    if (rsmu_umc_index_state)
        umc_v6_1_enable_umc_index_mode(adev);
}

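/* program one channel: route CE error interrupts to the APIC and preset
 * both chip-select error counters to UMC_V6_1_CE_CNT_INIT */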
static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
                          uint32_t umc_reg_offset)
{
    uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
    uint32_t ecc_err_cnt_addr;

    if (adev->asic_type == CHIP_ARCTURUS) {
        /* UMC 6_1_2 registers */
        ecc_err_cnt_sel_addr =
            SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
        ecc_err_cnt_addr =
            SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
    } else {
        /* UMC 6_1_1 registers */
        ecc_err_cnt_sel_addr =
            SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
        ecc_err_cnt_addr =
            SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
    }

    /* select the lower chip */
    ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
    ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
                    EccErrCntCsSel, 0);
    /* set the CE error interrupt type to APIC based interrupt */
    ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
                    EccErrInt, 0x1);
    WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
    /* set the error count to its initial value */
    WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);

    /* select the higher chip */
    ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
                    EccErrCntCsSel, 1);
    WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
    WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
}

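/* RAS init hook: initialize the ECC error counters on all UMC channels */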
static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
{
    uint32_t umc_inst        = 0;
    uint32_t ch_inst         = 0;
    uint32_t umc_reg_offset  = 0;

    uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);

    if (rsmu_umc_index_state)
        umc_v6_1_disable_umc_index_mode(adev);

    LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
        umc_reg_offset = get_umc_6_reg_offset(adev,
                              umc_inst,
                              ch_inst);

        umc_v6_1_err_cnt_init_per_channel(adev, umc_reg_offset);
    }

    if (rsmu_umc_index_state)
        umc_v6_1_enable_umc_index_mode(adev);
}

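/* hardware ops consumed by the amdgpu RAS core for the UMC block */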
const struct amdgpu_ras_block_hw_ops umc_v6_1_ras_hw_ops = {
    .query_ras_error_count = umc_v6_1_query_ras_error_count,
    .query_ras_error_address = umc_v6_1_query_ras_error_address,
};

struct amdgpu_umc_ras umc_v6_1_ras = {
    .ras_block = {
        .hw_ops = &umc_v6_1_ras_hw_ops,
    },
    .err_cnt_init = umc_v6_1_err_cnt_init,
};