#include "amdgpu.h"
#include "mmhub_v2_0.h"

#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_default.h"
#include "navi10_enum.h"

#include "gc/gc_10_1_0_offset.h"
#include "soc15_common.h"

#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid		0x064d
#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid_BASE_IDX	0
#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid		0x0070
#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid_BASE_IDX	0

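/*
 * UTCL2 client ID decode tables, indexed as [client_id][rw], where rw is
 * taken from the RW bit of MMVM_L2_PROTECTION_FAULT_STATUS (read faults
 * index column 0, write faults column 1).  They are only used to turn the
 * CID field of the fault status into a readable name when printing faults.
 */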
static const char *mmhub_client_ids_navi1x[][2] = {
	[3][0] = "DCEDMC",
	[4][0] = "DCEVGA",
	[5][0] = "MP0",
	[6][0] = "MP1",
	[13][0] = "VMC",
	[14][0] = "HDP",
	[15][0] = "OSS",
	[16][0] = "VCNU",
	[17][0] = "JPEG",
	[18][0] = "VCN",
	[3][1] = "DCEDMC",
	[4][1] = "DCEXFC",
	[5][1] = "DCEVGA",
	[6][1] = "DCEDWB",
	[7][1] = "MP0",
	[8][1] = "MP1",
	[9][1] = "DBGU1",
	[10][1] = "DBGU0",
	[11][1] = "XDP",
	[14][1] = "HDP",
	[15][1] = "OSS",
	[16][1] = "VCNU",
	[17][1] = "JPEG",
	[18][1] = "VCN",
};

static const char *mmhub_client_ids_sienna_cichlid[][2] = {
	[3][0] = "DCEDMC",
	[4][0] = "DCEVGA",
	[5][0] = "MP0",
	[6][0] = "MP1",
	[8][0] = "VMC",
	[9][0] = "VCNU0",
	[10][0] = "JPEG",
	[12][0] = "VCNU1",
	[13][0] = "VCN1",
	[14][0] = "HDP",
	[15][0] = "OSS",
	[32+11][0] = "VCN0",
	[0][1] = "DBGU0",
	[1][1] = "DBGU1",
	[2][1] = "DCEDWB",
	[3][1] = "DCEDMC",
	[4][1] = "DCEVGA",
	[5][1] = "MP0",
	[6][1] = "MP1",
	[7][1] = "XDP",
	[9][1] = "VCNU0",
	[10][1] = "JPEG",
	[11][1] = "VCN0",
	[12][1] = "VCNU1",
	[13][1] = "VCN1",
	[14][1] = "HDP",
	[15][1] = "OSS",
};

static const char *mmhub_client_ids_beige_goby[][2] = {
	[3][0] = "DCEDMC",
	[4][0] = "DCEVGA",
	[5][0] = "MP0",
	[6][0] = "MP1",
	[8][0] = "VMC",
	[9][0] = "VCNU0",
	[11][0] = "VCN0",
	[14][0] = "HDP",
	[15][0] = "OSS",
	[0][1] = "DBGU0",
	[1][1] = "DBGU1",
	[2][1] = "DCEDWB",
	[3][1] = "DCEDMC",
	[4][1] = "DCEVGA",
	[5][1] = "MP0",
	[6][1] = "MP1",
	[7][1] = "XDP",
	[9][1] = "VCNU0",
	[11][1] = "VCN0",
	[14][1] = "HDP",
	[15][1] = "OSS",
};

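/*
 * Compose the payload written to MMVM_INVALIDATE_ENG*_REQ for a TLB flush
 * of @vmid: the request targets a single VMID, carries the requested flush
 * type and invalidates the L1 PTEs as well as the cached L2 PTE and
 * PDE0/1/2 entries.
 */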
static uint32_t mmhub_v2_0_get_invalidate_req(unsigned int vmid,
					      uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid */
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

static void
mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
					    uint32_t status)
{
	uint32_t cid, rw;
	const char *mmhub_cid = NULL;

	cid = REG_GET_FIELD(status,
			    MMVM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status,
			   MMVM_L2_PROTECTION_FAULT_STATUS, RW);

	dev_err(adev->dev,
		"MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(2, 0, 0):
	case IP_VERSION(2, 0, 2):
		mmhub_cid = mmhub_client_ids_navi1x[cid][rw];
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
		mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw];
		break;
	case IP_VERSION(2, 1, 2):
		mmhub_cid = mmhub_client_ids_beige_goby[cid][rw];
		break;
	default:
		mmhub_cid = NULL;
		break;
	}
	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
		mmhub_cid ? mmhub_cid : "unknown", cid);
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
}

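/*
 * Program the page table base address for @vmid.  The context 0 LO32/HI32
 * register pair is used as the base and the per-VMID register stride comes
 * from hub->ctx_addr_distance.
 */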
static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
					uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

	WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				hub->ctx_addr_distance * vmid,
				lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				hub->ctx_addr_distance * vmid,
				upper_32_bits(page_table_base));
}

static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			 (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			 (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			 (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			 (u32)(adev->gmc.gart_end >> 44));
}

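/*
 * Program the AGP aperture, the system aperture covering FB plus AGP, the
 * default page used for out-of-range accesses and the protection fault
 * default address.  The aperture registers are skipped for SRIOV VFs,
 * which leave them to the host.
 */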
static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	if (!amdgpu_sriov_vf(adev)) {
		/* Program the AGP BAR */
		WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
		WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
		WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

		/* Program the system aperture low logical page number. */
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
	}

	/* Set default page address. */
	value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault". */
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}

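/*
 * Set up the MC L1 TLB: enable it, program the system access mode and the
 * advanced driver model, and mark system memory accesses as uncached
 * (MTYPE_UC).
 */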
static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* UC, uncached */

	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);
}

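/*
 * Configure the MMHUB L2 page table cache (MMVM_L2_CNTL..CNTL5).  Bank
 * select and the big-K fragment size depend on adev->gmc.translate_further.
 * Skipped entirely for SRIOV VFs, which cannot access these registers.
 */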
static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
			    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	/* XXX for emulation, Refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
			    0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);

	tmp = mmMMVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);

	tmp = mmMMVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL4, tmp);

	tmp = mmMMVM_L2_CNTL5_DEFAULT;
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
	WREG32_SOC15(GC, 0, mmMMVM_L2_CNTL5, tmp);
}

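/*
 * Context 0 is the kernel/system domain used for GART: a depth 0 page
 * table with retry on permission or invalid page faults disabled.
 */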
static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
}

static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
{
	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
		     0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
		     0);
}

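/*
 * Configure VM contexts 1..15 (the per-process VMIDs): page table depth and
 * block size from the VM manager, all protection fault defaults enabled,
 * and the address range clamped to max_pfn.  The resulting CONTEXT_CNTL
 * value is cached in hub->vm_cntx_cntl.
 */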
static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
	int i;
	uint32_t tmp;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    adev->vm_manager.block_size - 9);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !adev->gmc.noretry);
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
					i * hub->ctx_distance, tmp);
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
					i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
					i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
					i * hub->ctx_addr_distance,
					lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
					i * hub->ctx_addr_distance,
					upper_32_bits(adev->vm_manager.max_pfn - 1));
	}

	hub->vm_cntx_cntl = tmp;
}

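/*
 * Open up the address range of all 18 VM invalidation engines so a flush
 * request may target any address.
 */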
static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
					i * hub->eng_addr_distance, 0xffffffff);
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
					i * hub->eng_addr_distance, 0x1f);
	}
}

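/*
 * GART enable: program apertures, TLB and L2 cache first, then the system
 * domain, the identity aperture, the per-VMID contexts and finally the
 * invalidation engines.
 */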
static int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
	mmhub_v2_0_init_gart_aperture_regs(adev);
	mmhub_v2_0_init_system_aperture_regs(adev);
	mmhub_v2_0_init_tlb_regs(adev);
	mmhub_v2_0_init_cache_regs(adev);

	mmhub_v2_0_enable_system_domain(adev);
	mmhub_v2_0_disable_identity_aperture(adev);
	mmhub_v2_0_setup_vmid_config(adev);
	mmhub_v2_0_program_invalidation(adev);

	return 0;
}

static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < AMDGPU_NUM_VMID; i++)
		WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL,
					i * hub->ctx_distance, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, 0);
}

/**
 * mmhub_v2_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

static const struct amdgpu_vmhub_funcs mmhub_v2_0_vmhub_funcs = {
	.print_l2_protection_fault_status = mmhub_v2_0_print_l2_protection_fault_status,
	.get_invalidate_req = mmhub_v2_0_get_invalidate_req,
};

static void mmhub_v2_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);

	hub->ctx_distance = mmMMVM_CONTEXT1_CNTL - mmMMVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmMMVM_INVALIDATE_ENG1_REQ -
		mmMMVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;

	hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	hub->vmhub_funcs = &mmhub_v2_0_vmhub_funcs;
}

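/*
 * Medium grain clock gating: toggled through MM_ATC_L2_MISC_CG and the
 * DAGB0_CNTL_MISC2 disable bits.  MMHUB 2.1.x parts use the
 * *_Sienna_Cichlid register offsets defined at the top of this file, hence
 * the per IP version switches.
 */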
static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data, def1, data1;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
		return;

	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 1, 2):
		def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
		break;
	default:
		def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
		break;
	}

	if (enable) {
		data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

	} else {
		data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 1, 2):
		if (def != data)
			WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
		if (def1 != data1)
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid, data1);
		break;
	default:
		if (def != data)
			WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
		if (def1 != data1)
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
		break;
	}
}

static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
		return;

	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 1, 2):
		def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
		break;
	default:
		def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
		break;
	}

	if (enable)
		data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data) {
		switch (adev->ip_versions[MMHUB_HWIP][0]) {
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(2, 1, 1):
		case IP_VERSION(2, 1, 2):
			WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
			break;
		default:
			WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
			break;
		}
	}
}

static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(2, 0, 0):
	case IP_VERSION(2, 0, 2):
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 1, 2):
		mmhub_v2_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		mmhub_v2_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

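/*
 * Report the clock gating state actually programmed in hardware by reading
 * back MM_ATC_L2_MISC_CG and DAGB0_CNTL_MISC2 and translating them into
 * AMD_CG_SUPPORT_MC_MGCG / AMD_CG_SUPPORT_MC_LS flags.
 */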
static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
	int data, data1;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 1, 2):
		data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
		data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
		break;
	default:
		data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
		data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
		break;
	}

	/* AMD_CG_SUPPORT_MC_MGCG */
	if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) &&
	    !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs = {
	.init = mmhub_v2_0_init,
	.gart_enable = mmhub_v2_0_gart_enable,
	.set_fault_enable_default = mmhub_v2_0_set_fault_enable_default,
	.gart_disable = mmhub_v2_0_gart_disable,
	.set_clockgating = mmhub_v2_0_set_clockgating,
	.get_clockgating = mmhub_v2_0_get_clockgating,
	.setup_vm_pt_regs = mmhub_v2_0_setup_vm_pt_regs,
};