/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v11_0.h"
#include "umc_v8_10.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "athub/athub_3_0_0_offset.h"
#include "oss/osssys_6_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "nbio_v4_3.h"
#include "gfxhub_v3_0.h"
#include "mmhub_v3_0.h"
#include "mmhub_v3_0_1.h"
#include "mmhub_v3_0_2.h"
#include "athub_v3_0.h"

static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

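/*
 * Enable or disable VM fault reporting by programming the fault masks on
 * both the MMHUB and the GFXHUB.
 */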
static int
gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
		break;
	default:
		break;
	}

	return 0;
}

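/*
 * VM fault interrupt handler: decode the faulting address from the IV entry,
 * read and clear the L2 protection fault status (bare-metal only) and log a
 * rate-limited description of the fault and the offending process.
 */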
static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = {
	.set = gmc_v11_0_vm_fault_interrupt_state,
	.process = gmc_v11_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = {
	.set = gmc_v11_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs;
	}
}

/**
 * gmc_v11_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0) &&
		(!amdgpu_sriov_vf(adev)));
}

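/*
 * Look up the PASID currently mapped to @vmid in the IH VMID LUT.
 * Returns true if a non-zero PASID mapping exists.
 */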
static bool gmc_v11_0_get_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

	return !!(*p_pasid);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * It may lose gpuvm invalidate acknowldege state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to WA the Issue
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
					    hub->eng_distance * eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
				    hub->eng_distance * eng);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
			      hub->eng_distance * eng, 0);

	/* Issue additional private vm invalidation to MMHUB */
	if ((vmhub != AMDGPU_GFXHUB_0) &&
	    (hub->vm_l2_bank_select_reserved_cid2)) {
		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* bit 25: RSERVED_CACHE_PRIVATE_INVALIDATION */
		inv_req |= (1 << 25);
		/* Issue private invalidation */
		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure invalidation is done */
		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron)
		return;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* For SRIOV run time, driver shouldn't access the register through MMIO
	 * Directly use kiq to do the vm invalidation instead
	 */
	if ((adev->gfx.kiq.ring.sched.ready || adev->mes.ring.sched.ready) &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);
	gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
	mutex_unlock(&adev->mman.gtt_window_lock);
}

/**
 * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flush
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					      pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {
		ret = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
							    &queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v11_0_flush_gpu_tlb(adev, vmid,
								i, flush_type);
			} else {
				gmc_v11_0_flush_gpu_tlb(adev, vmid,
							AMDGPU_GFXHUB_0, flush_type);
			}
		}
	}

	return 0;
}

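/*
 * Emit a GPU TLB flush on the given ring: write the page directory address
 * for @vmid and issue an invalidation request, guarded by the invalidation
 * semaphore where required.
 */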
static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * It may lose gpuvm invalidate acknowldege state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to WA the Issue
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

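/*
 * Emit a write that updates the IH VMID-to-PASID LUT entry for @vmid on the
 * hub this ring belongs to.
 */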
static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* MES fw manages IH_VMID_x_LUT updating */
	if (ring->is_mes_queue)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

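/*
 * Translate the AMDGPU_VM_MTYPE_* mapping flag into the corresponding
 * NV10-format PTE memory type bits; unknown values fall back to NC.
 */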
static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

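/*
 * Adjust a PDE for this hardware generation: convert VRAM addresses to MC
 * addresses and set the fragment-size/translate-further bits when two-level
 * translation is enabled.
 */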
static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

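/*
 * Build the final PTE flags for a mapping: take the executable, mtype and
 * no-alloc bits from the mapping and apply the PRT encoding when requested.
 */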
static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}
}

static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	return 0;
}

static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
	.map_mtype = gmc_v11_0_map_mtype,
	.get_vm_pde = gmc_v11_0_get_vm_pde,
	.get_vm_pte = gmc_v11_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
};

static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs;
}

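/* Hook up UMC (memory controller) RAS handling for the detected UMC IP version. */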
static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(8, 10, 0):
		adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
		adev->umc.node_inst_num = adev->gmc.num_umc;
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
		adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
		adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
		adev->umc.ras = &umc_v8_10_ras;
		break;
	case IP_VERSION(8, 11, 0):
		break;
	default:
		break;
	}

	if (adev->umc.ras) {
		amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);

		strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
		adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
		adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;

		/* If no special ras_late_init is defined, use the default */
		if (!adev->umc.ras->ras_block.ras_late_init)
			adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

		/* If no special ras_cb is defined, use the default */
		if (!adev->umc.ras->ras_block.ras_cb)
			adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
	}
}

static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(3, 0, 1):
		adev->mmhub.funcs = &mmhub_v3_0_1_funcs;
		break;
	case IP_VERSION(3, 0, 2):
		adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v3_0_funcs;
		break;
	}
}

static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
}

static int gmc_v11_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_set_gfxhub_funcs(adev);
	gmc_v11_0_set_mmhub_funcs(adev);
	gmc_v11_0_set_gmc_funcs(adev);
	gmc_v11_0_set_irq_funcs(adev);
	gmc_v11_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v11_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

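/* Place VRAM and GART in the GPU's MC address space, based on the FB location reported by the MMHUB. */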
static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}

/**
 * gmc_v11_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v11_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

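/* Allocate the GART table in VRAM and set the default PTE flags for GART pages. */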
static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v11_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	adev->gmc.vram_width = vram_width;

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
		adev->num_vmhubs = 2;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask.
	 * This is the max address of the GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v11_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v11_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v11_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v11_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v11_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v11_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v11_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v11_0_init_golden_registers(adev);

	r = gmc_v11_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v11_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.
 */
static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
{
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v11_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v11_0_gart_disable(adev);

	return 0;
}

static int gmc_v11_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_hw_fini(adev);

	return 0;
}

static int gmc_v11_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v11_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v11_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v11. */
	return true;
}

static int gmc_v11_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v11. */
	return 0;
}

static int gmc_v11_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v11_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v3_0_set_clockgating(adev, state);
}

static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v3_0_get_clockgating(adev, flags);
}

static int gmc_v11_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
	.name = "gmc_v11_0",
	.early_init = gmc_v11_0_early_init,
	.sw_init = gmc_v11_0_sw_init,
	.hw_init = gmc_v11_0_hw_init,
	.late_init = gmc_v11_0_late_init,
	.sw_fini = gmc_v11_0_sw_fini,
	.hw_fini = gmc_v11_0_hw_fini,
	.suspend = gmc_v11_0_suspend,
	.resume = gmc_v11_0_resume,
	.is_idle = gmc_v11_0_is_idle,
	.wait_for_idle = gmc_v11_0_wait_for_idle,
	.soft_reset = gmc_v11_0_soft_reset,
	.set_clockgating_state = gmc_v11_0_set_clockgating_state,
	.set_powergating_state = gmc_v11_0_set_powergating_state,
	.get_clockgating_state = gmc_v11_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v11_0_ip_funcs,
};