#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gem.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "si_enums.h"

static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
MODULE_FIRMWARE("amdgpu/verde_mc.bin");
MODULE_FIRMWARE("amdgpu/oland_mc.bin");
MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
MODULE_FIRMWARE("amdgpu/si58_mc.bin");

#define MC_SEQ_MISC0__MT__MASK	0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1	0x10000000
#define MC_SEQ_MISC0__MT__DDR2	0x20000000
#define MC_SEQ_MISC0__MT__GDDR3	0x30000000
#define MC_SEQ_MISC0__MT__GDDR4	0x40000000
#define MC_SEQ_MISC0__MT__GDDR5	0x50000000
#define MC_SEQ_MISC0__MT__HBM	0x60000000
#define MC_SEQ_MISC0__MT__DDR3	0xB0000000

static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v6_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
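		/* Block CPU access to the framebuffer */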
		WREG32(mmBIF_FB_EN, 0);
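		/* blackout the MC */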
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
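	/* wait for the MC to settle */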
	udelay(100);
}

static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

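	/* unblackout the MC */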
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
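	/* allow CPU access to the framebuffer again */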
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	bool is_58_fw = false;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TAHITI:
		chip_name = "tahiti";
		break;
	case CHIP_PITCAIRN:
		chip_name = "pitcairn";
		break;
	case CHIP_VERDE:
		chip_name = "verde";
		break;
	case CHIP_OLAND:
		chip_name = "oland";
		break;
	case CHIP_HAINAN:
		chip_name = "hainan";
		break;
	default: BUG();
	}

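	/* this memory configuration requires special firmware */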
	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
		is_58_fw = true;

	if (is_58_fw)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		dev_err(adev->dev,
			"si_mc: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const __le32 *new_fw_data = NULL;
	u32 running;
	const __le32 *new_io_mc_regs = NULL;
	int i, regs_size, ucode_size;
	const struct mc_firmware_header_v1_0 *hdr;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;

	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	new_io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	new_fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;

	if (running == 0) {

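		/* reset the engine and set to writable */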
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

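		/* load mc io regs */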
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
		}
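		/* load the MC ucode */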
		for (i = 0; i < ucode_size; i++) {
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
		}

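		/* put the engine back into the active state */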
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

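		/* wait for training to complete */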
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
				break;
			udelay(1);
		}

	}

	return 0;
}

static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
}

static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
	int i, j;

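	/* Update configuration */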
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v6_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
	}

	if (adev->mode_info.num_crtc) {
		u32 tmp;

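		/* Lockout access through VGA aperture */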
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
		WREG32(mmVGA_HDP_CONTROL, tmp);

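		/* disable VGA render */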
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp &= ~VGA_VSTATUS_CNTL;
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}

	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);

	if (gmc_v6_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
	}
}

static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
{

	u32 tmp;
	int chansize, numchan;
	int r;

	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (tmp & (1 << 11)) {
		chansize = 16;
	} else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->gmc.vram_width = numchan * chansize;

	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
	adev->gmc.visible_vram_size = adev->gmc.aper_size;

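	/* set the gart size */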
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_HAINAN:
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_VERDE:
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_OLAND:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
	gmc_v6_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

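	/* write new base address */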
	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

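	/* bits 0-15 are the VM contexts0-15 */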
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags &= ~AMDGPU_PTE_PRT;
}

static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

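/**
 * gmc_v6_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */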
static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES,
			    enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	u32 field;
	int i;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

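	/* Setup TLB control */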
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
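	/* Setup L2 cache */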
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2,
	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);

	field = adev->vm_manager.fragment_size;
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
	       (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
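	/* setup context0 */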
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	WREG32(mmVM_CONTEXT0_CNTL,
	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

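	/* empty context1-15 */
	/* set vm size, must be a multiple of 4 */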
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
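	/* Assign the pt base to something valid for now; the real page
	 * tables for contexts 1-15 are set up on the fly by the VM code
	 * when a VM is bound to a VMID.
	 */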
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

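	/* enable context1-15 */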
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	WREG32(mmVM_CONTEXT1_CNTL,
	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       ((adev->vm_manager.block_size - 9)
		<< VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v6_0_set_fault_enable_default(adev, false);
	else
		gmc_v6_0_set_fault_enable_default(adev, true);

	gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	return 0;
}

static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
		return 0;
	}
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
{
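	/* Disable all tables */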
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
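	/* Setup TLB control */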
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
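	/* Setup L2 cache */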
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2, 0);
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
}

static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_set_gmc_funcs(adev);
	gmc_v6_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v6_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);
		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}
	return size;
}

static int gmc_v6_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->num_vmhubs = 1;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
	if (r)
		return r;

	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	adev->gmc.mc_mask = 0xffffffffffULL;

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		dev_warn(adev->dev, "No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v6_0_init_microcode(adev);
	if (r) {
		dev_err(adev->dev, "Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v6_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v6_0_gart_init(adev);
	if (r)
		return r;

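	/*
	 * number of VMs
	 * VMID 0 is reserved for system
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */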
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

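	/* base offset of vram pages */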
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}

static int gmc_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v6_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v6_0_mc_load_microcode(adev);
		if (r) {
			dev_err(adev->dev, "Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v6_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1)
		return amdgpu_gmc_vram_checking(adev);
	else
		return r;
}

static int gmc_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v6_0_gart_disable(adev);

	return 0;
}

static int gmc_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_hw_fini(adev);

	return 0;
}

static int gmc_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v6_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gmc_v6_0_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gmc_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v6_0_mc_stop(adev);
		if (gmc_v6_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
		}

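		/* assert the soft reset bits, wait, then release them */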
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		gmc_v6_0_mc_resume(adev);
		udelay(50);
	}

	return 0;
}

static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v6_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
			addr);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
	}

	return 0;
}

static int gmc_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
	.name = "gmc_v6_0",
	.early_init = gmc_v6_0_early_init,
	.late_init = gmc_v6_0_late_init,
	.sw_init = gmc_v6_0_sw_init,
	.sw_fini = gmc_v6_0_sw_fini,
	.hw_init = gmc_v6_0_hw_init,
	.hw_fini = gmc_v6_0_hw_fini,
	.suspend = gmc_v6_0_suspend,
	.resume = gmc_v6_0_resume,
	.is_idle = gmc_v6_0_is_idle,
	.wait_for_idle = gmc_v6_0_wait_for_idle,
	.soft_reset = gmc_v6_0_soft_reset,
	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
	.set_powergating_state = gmc_v6_0_set_powergating_state,
};

static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
	.set_prt = gmc_v6_0_set_prt,
	.get_vm_pde = gmc_v6_0_get_vm_pde,
	.get_vm_pte = gmc_v6_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
	.set = gmc_v6_0_vm_fault_interrupt_state,
	.process = gmc_v6_0_process_interrupt,
};

static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
}

static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v6_0_ip_funcs,
};