/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "mmhub_v1_0.h"

#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "mmhub/mmhub_1_0_default.h"
#include "vega10_enum.h"
#include "soc15.h"
#include "soc15_common.h"

#define mmDAGB0_CNTL_MISC2_RV 0x008f
#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0

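/**
 * mmhub_v1_0_get_fb_location - read back the MMHUB framebuffer aperture
 *
 * @adev: amdgpu_device pointer
 *
 * Decodes MC_VM_FB_LOCATION_BASE/TOP, caches the resulting range in
 * adev->gmc.fb_start/fb_end and returns the base address.
 */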
static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
    u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
    u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);

    base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
    base <<= 24;

    top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
    top <<= 24;

    adev->gmc.fb_start = base;
    adev->gmc.fb_end = top;

    return base;
}

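/**
 * mmhub_v1_0_setup_vm_pt_regs - program the page directory base for a VMID
 *
 * @adev: amdgpu_device pointer
 * @vmid: context/VMID to program
 * @page_table_base: base address of the page directory
 */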
static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
                uint64_t page_table_base)
{
    struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

    WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
                hub->ctx_addr_distance * vmid,
                lower_32_bits(page_table_base));

    WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
                hub->ctx_addr_distance * vmid,
                upper_32_bits(page_table_base));
}

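/**
 * mmhub_v1_0_init_gart_aperture_regs - set up the GART aperture
 *
 * @adev: amdgpu_device pointer
 *
 * Points VM context 0 at the GART page table and programs the GART
 * start/end addresses (expressed as 4K page numbers).
 */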
static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
    uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

    mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);

    WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
             (u32)(adev->gmc.gart_start >> 12));
    WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
             (u32)(adev->gmc.gart_start >> 44));

    WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
             (u32)(adev->gmc.gart_end >> 12));
    WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
             (u32)(adev->gmc.gart_end >> 44));
}

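/**
 * mmhub_v1_0_init_system_aperture_regs - set up the system aperture
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the AGP and system apertures, the default page address used
 * for accesses outside the apertures and the protection fault default
 * address. The aperture registers themselves are programmed even under
 * SRIOV; the remaining registers are left to the host in that case.
 */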
static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
    uint64_t value;
    uint32_t tmp;

    /* Program the AGP BAR */
    WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
    WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
    WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

    /* Program the system aperture low logical page number. */
    WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
             min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

    if (adev->apu_flags & AMD_APU_IS_RAVEN2)
        /*
         * Raven2 has a HW issue that prevents it from using VRAM
         * located beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a
         * workaround, raise the system aperture high address by 1
         * to avoid the VM fault and hardware hang.
         */
        WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                 max((adev->gmc.fb_end >> 18) + 0x1,
                 adev->gmc.agp_end >> 18));
    else
        WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

    if (amdgpu_sriov_vf(adev))
        return;

    /* Set default page address. */
    value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
    WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
             (u32)(value >> 12));
    WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
             (u32)(value >> 44));

    /* Program "protection fault". */
    WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
             (u32)(adev->dummy_page_addr >> 12));
    WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
             (u32)((u64)adev->dummy_page_addr >> 44));

    tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
    tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
                ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
    WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}

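/**
 * mmhub_v1_0_init_tlb_regs - set up MC_VM_MX_L1_TLB_CNTL
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the L1 TLB, the advanced driver model and the ATC.
 */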
static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
{
    uint32_t tmp;

    /* Setup TLB control */
    tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);

    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                ENABLE_ADVANCED_DRIVER_MODEL, 1);
    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                MTYPE, MTYPE_UC); /* XXX for emulation. */
    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

    WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
}

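/**
 * mmhub_v1_0_init_cache_regs - set up the VM L2 cache
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the L2 cache and fragment processing and picks the big-K
 * fragment size and bank select based on adev->gmc.translate_further.
 * Skipped under SRIOV where the host owns these registers.
 */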
static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
    uint32_t tmp;

    if (amdgpu_sriov_vf(adev))
        return;

    /* Setup L2 cache */
    tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
    /* XXX for emulation, Refer to closed source code. */
    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
                0);
    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
    WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);

    tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2);
    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
    WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);

    tmp = mmVM_L2_CNTL3_DEFAULT;
    if (adev->gmc.translate_further) {
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
                    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
    } else {
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
                    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
    }
    WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);

    tmp = mmVM_L2_CNTL4_DEFAULT;
    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
    WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL4, tmp);
}

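/**
 * mmhub_v1_0_enable_system_domain - enable VM context 0
 *
 * @adev: amdgpu_device pointer
 *
 * Context 0 (the system domain) is enabled with a page table depth of 0,
 * i.e. a single-level page table, and retry faults disabled.
 */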
static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
{
    uint32_t tmp;

    tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
    tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
    tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
    tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
                RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
    WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
}

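/**
 * mmhub_v1_0_disable_identity_aperture - disable the identity aperture
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the context 1 identity aperture with a low address above the
 * high address, leaving it effectively disabled.
 */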
static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
    if (amdgpu_sriov_vf(adev))
        return;

    WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
             0xFFFFFFFF);
    WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
             0x0000000F);

    WREG32_SOC15(MMHUB, 0,
             mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
    WREG32_SOC15(MMHUB, 0,
             mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

    WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
             0);
    WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
             0);
}

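/**
 * mmhub_v1_0_setup_vmid_config - set up VM contexts 1-15
 *
 * @adev: amdgpu_device pointer
 *
 * Programs page table depth, page table block size, fault handling
 * defaults and the page table address range for each user VMID.
 */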
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
    struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
    unsigned num_level, block_size;
    uint32_t tmp;
    int i;

    num_level = adev->vm_manager.num_level;
    block_size = adev->vm_manager.block_size;
    if (adev->gmc.translate_further)
        num_level -= 1;
    else
        block_size -= 9;

    for (i = 0; i <= 14; i++) {
        tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
                    num_level);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
                    1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                    PAGE_TABLE_BLOCK_SIZE,
                    block_size);
        /* Send no-retry XNACK on fault to suppress VM fault storm. */
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
                    !adev->gmc.noretry);
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL,
                    i * hub->ctx_distance, tmp);
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
                    i * hub->ctx_addr_distance, 0);
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
                    i * hub->ctx_addr_distance, 0);
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
                    i * hub->ctx_addr_distance,
                    lower_32_bits(adev->vm_manager.max_pfn - 1));
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
                    i * hub->ctx_addr_distance,
                    upper_32_bits(adev->vm_manager.max_pfn - 1));
    }
}

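/**
 * mmhub_v1_0_program_invalidation - set up the VM invalidation engines
 *
 * @adev: amdgpu_device pointer
 *
 * Opens the full address range on all 18 invalidation engines.
 */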
static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
{
    struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
    unsigned i;

    for (i = 0; i < 18; ++i) {
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
                    i * hub->eng_addr_distance, 0xffffffff);
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
                    i * hub->eng_addr_distance, 0x1f);
    }
}

static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
                bool enable)
{
    if (amdgpu_sriov_vf(adev))
        return;

    if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
        amdgpu_dpm_set_powergating_by_smu(adev,
                          AMD_IP_BLOCK_TYPE_GMC,
                          enable);
}

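/**
 * mmhub_v1_0_gart_enable - set up and enable GART address translation
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the apertures, TLB, L2 cache, VM contexts and invalidation
 * engines. Returns 0 on success.
 */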
static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
    if (amdgpu_sriov_vf(adev)) {
        /*
         * MC_VM_FB_LOCATION_BASE/TOP are NULL for the VF because they
         * are VF-copy registers, so the VBIOS post does not program
         * them. Under SRIOV the driver has to program them instead.
         */
        WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
                 adev->gmc.vram_start >> 24);
        WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
                 adev->gmc.vram_end >> 24);
    }

    /* GART Enable. */
    mmhub_v1_0_init_gart_aperture_regs(adev);
    mmhub_v1_0_init_system_aperture_regs(adev);
    mmhub_v1_0_init_tlb_regs(adev);
    mmhub_v1_0_init_cache_regs(adev);

    mmhub_v1_0_enable_system_domain(adev);
    mmhub_v1_0_disable_identity_aperture(adev);
    mmhub_v1_0_setup_vmid_config(adev);
    mmhub_v1_0_program_invalidation(adev);

    return 0;
}

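/**
 * mmhub_v1_0_gart_disable - disable GART address translation
 *
 * @adev: amdgpu_device pointer
 *
 * Disables all VM contexts, the L1 TLB and, on bare metal, the L2 cache.
 */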
static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
    struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
    u32 tmp;
    u32 i;

    /* Disable all tables */
    for (i = 0; i < AMDGPU_NUM_VMID; i++)
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL,
                    i * hub->ctx_distance, 0);

    /* Setup TLB control */
    tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
    tmp = REG_SET_FIELD(tmp,
                MC_VM_MX_L1_TLB_CNTL,
                ENABLE_ADVANCED_DRIVER_MODEL,
                0);
    WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);

    if (!amdgpu_sriov_vf(adev)) {
        /* Setup L2 cache */
        tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
    }
}

/**
 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
    u32 tmp;

    if (amdgpu_sriov_vf(adev))
        return;

    tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
    tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
            RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
            PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
            PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
            PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    tmp = REG_SET_FIELD(tmp,
            VM_L2_PROTECTION_FAULT_CNTL,
            TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
            value);
    tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
            NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
            DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
            VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
            READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
            WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
            EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    if (!value) {
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                CRASH_ON_NO_RETRY_FAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                CRASH_ON_RETRY_FAULT, 1);
    }

    WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

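/**
 * mmhub_v1_0_init - fill in the amdgpu_vmhub register offsets
 *
 * @adev: amdgpu_device pointer
 *
 * Records the MMHUB register offsets and per-context/per-engine register
 * strides used by the common GMC code.
 */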
static void mmhub_v1_0_init(struct amdgpu_device *adev)
{
    struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

    hub->ctx0_ptb_addr_lo32 =
        SOC15_REG_OFFSET(MMHUB, 0,
                 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
    hub->ctx0_ptb_addr_hi32 =
        SOC15_REG_OFFSET(MMHUB, 0,
                 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
    hub->vm_inv_eng0_sem =
        SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
    hub->vm_inv_eng0_req =
        SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
    hub->vm_inv_eng0_ack =
        SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
    hub->vm_context0_cntl =
        SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
    hub->vm_l2_pro_fault_status =
        SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
    hub->vm_l2_pro_fault_cntl =
        SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);

    hub->ctx_distance = mmVM_CONTEXT1_CNTL - mmVM_CONTEXT0_CNTL;
    hub->ctx_addr_distance = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
        mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
    hub->eng_distance = mmVM_INVALIDATE_ENG1_REQ - mmVM_INVALIDATE_ENG0_REQ;
    hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
        mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}

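/**
 * mmhub_v1_0_update_medium_grain_clock_gating - toggle MGCG
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable medium grain clock gating
 *
 * On Raven only DAGB0 is touched (at a Raven-specific register offset);
 * the other SOC15 parts also update DAGB1.
 */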
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                            bool enable)
{
    uint32_t def, data, def1, data1, def2 = 0, data2 = 0;

    def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

    if (adev->asic_type != CHIP_RAVEN) {
        def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
        def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2);
    } else
        def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV);

    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
        data |= ATC_L2_MISC_CG__ENABLE_MASK;

        data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

        if (adev->asic_type != CHIP_RAVEN)
            data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                       DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                       DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                       DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                       DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                       DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
    } else {
        data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

        data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
              DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
              DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
              DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
              DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
              DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

        if (adev->asic_type != CHIP_RAVEN)
            data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                      DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                      DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                      DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                      DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                      DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
    }

    if (def != data)
        WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);

    if (def1 != data1) {
        if (adev->asic_type != CHIP_RAVEN)
            WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
        else
            WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1);
    }

    if (adev->asic_type != CHIP_RAVEN && def2 != data2)
        WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
}

static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                               bool enable)
{
    uint32_t def, data;

    def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
        data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
    else
        data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

    if (def != data)
        WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
}

static int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
                   enum amd_clockgating_state state)
{
    if (amdgpu_sriov_vf(adev))
        return 0;

    switch (adev->asic_type) {
    case CHIP_VEGA10:
    case CHIP_VEGA12:
    case CHIP_VEGA20:
    case CHIP_RAVEN:
    case CHIP_RENOIR:
        mmhub_v1_0_update_medium_grain_clock_gating(adev,
                state == AMD_CG_STATE_GATE);
        mmhub_v1_0_update_medium_grain_light_sleep(adev,
                state == AMD_CG_STATE_GATE);
        break;
    default:
        break;
    }

    return 0;
}

static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
    int data, data1;

    if (amdgpu_sriov_vf(adev))
        *flags = 0;

    data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

    data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

    /* AMD_CG_SUPPORT_MC_MGCG */
    if ((data & ATC_L2_MISC_CG__ENABLE_MASK) &&
        !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
               DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
               DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
               DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
               DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
               DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
        *flags |= AMD_CG_SUPPORT_MC_MGCG;

    /* AMD_CG_SUPPORT_MC_LS */
    if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
        *flags |= AMD_CG_SUPPORT_MC_LS;
}

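/*
 * SEC/DED (correctable/uncorrectable) EDC counter fields in the Vega20
 * MMEA0/MMEA1 EDC count registers, decoded for RAS error reporting.
 * Entries with a zero DED mask only expose a single-error (SED) count.
 */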
static const struct soc15_ras_field_entry mmhub_v1_0_ras_fields[] = {
    { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
    },
    { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
    },
    { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
    },
    { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
    },
    { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
    },
    { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
    0, 0,
    },
    { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
    0, 0,
    },
    { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
    0, 0,
    },
    { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
    0, 0,
    },
    { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
    0, 0,
    },
    { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
    },
    { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
    },
    { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
    },
    { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
    0, 0,
    },
    { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
    SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
    0, 0,
    },
    { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
    },
    { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
    },
    { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
    },
    { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
    },
    { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
    },
    { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
    0, 0,
    },
    { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
    0, 0,
    },
    { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
    0, 0,
    },
    { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
    0, 0,
    },
    { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
    0, 0,
    },
    { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
    },
    { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
    },
    { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
    SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
    },
    { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
    0, 0,
    },
    { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
    SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
    0, 0,
    }
};

static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
};

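/**
 * mmhub_v1_0_get_ras_error_count - decode one EDC count register
 *
 * @adev: amdgpu_device pointer
 * @reg: EDC count register that was read
 * @value: raw register value
 * @sec_count: accumulated correctable (SEC) error count
 * @ded_count: accumulated uncorrectable (DED) error count
 *
 * Matches the register against mmhub_v1_0_ras_fields, logs any non-zero
 * counters and adds them to the running totals.
 */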
static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev,
    const struct soc15_reg_entry *reg,
    uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
{
    uint32_t i;
    uint32_t sec_cnt, ded_cnt;

    for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_ras_fields); i++) {
        if (mmhub_v1_0_ras_fields[i].reg_offset != reg->reg_offset)
            continue;

        sec_cnt = (value &
                mmhub_v1_0_ras_fields[i].sec_count_mask) >>
                mmhub_v1_0_ras_fields[i].sec_count_shift;
        if (sec_cnt) {
            dev_info(adev->dev,
                "MMHUB SubBlock %s, SEC %d\n",
                mmhub_v1_0_ras_fields[i].name,
                sec_cnt);
            *sec_count += sec_cnt;
        }

        ded_cnt = (value &
                mmhub_v1_0_ras_fields[i].ded_count_mask) >>
                mmhub_v1_0_ras_fields[i].ded_count_shift;
        if (ded_cnt) {
            dev_info(adev->dev,
                "MMHUB SubBlock %s, DED %d\n",
                mmhub_v1_0_ras_fields[i].name,
                ded_cnt);
            *ded_count += ded_cnt;
        }
    }

    return 0;
}

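/**
 * mmhub_v1_0_query_ras_error_count - collect MMHUB EDC error counts
 *
 * @adev: amdgpu_device pointer
 * @ras_error_status: ras_err_data structure to fill in
 *
 * Reads every EDC count register and reports the totals as correctable
 * (ce_count) and uncorrectable (ue_count) errors.
 */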
static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
                       void *ras_error_status)
{
    struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
    uint32_t sec_count = 0, ded_count = 0;
    uint32_t i;
    uint32_t reg_value;

    err_data->ue_count = 0;
    err_data->ce_count = 0;

    for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) {
        reg_value =
            RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
        if (reg_value)
            mmhub_v1_0_get_ras_error_count(adev,
                &mmhub_v1_0_edc_cnt_regs[i],
                reg_value, &sec_count, &ded_count);
    }

    err_data->ce_count += sec_count;
    err_data->ue_count += ded_count;
}

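/**
 * mmhub_v1_0_reset_ras_error_count - clear the MMHUB EDC counters
 *
 * @adev: amdgpu_device pointer
 *
 * The EDC counters are cleared on read, so reading them back resets
 * them to 0.
 */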
static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
{
    uint32_t i;

    /* read back edc counter registers to reset the counters to 0 */
    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
        for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++)
            RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
    }
}

struct amdgpu_ras_block_hw_ops mmhub_v1_0_ras_hw_ops = {
    .query_ras_error_count = mmhub_v1_0_query_ras_error_count,
    .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
};

struct amdgpu_mmhub_ras mmhub_v1_0_ras = {
    .ras_block = {
        .hw_ops = &mmhub_v1_0_ras_hw_ops,
    },
};

const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
    .get_fb_location = mmhub_v1_0_get_fb_location,
    .init = mmhub_v1_0_init,
    .gart_enable = mmhub_v1_0_gart_enable,
    .set_fault_enable_default = mmhub_v1_0_set_fault_enable_default,
    .gart_disable = mmhub_v1_0_gart_disable,
    .set_clockgating = mmhub_v1_0_set_clockgating,
    .get_clockgating = mmhub_v1_0_get_clockgating,
    .setup_vm_pt_regs = mmhub_v1_0_setup_vm_pt_regs,
    .update_power_gating = mmhub_v1_0_update_power_gating,
};