/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"

/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

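/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the current hardware encode support status
 */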
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
	return ((adev->asic_type >= CHIP_POLARIS10) &&
		(adev->asic_type <= CHIP_VEGAM) &&
		(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}

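/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */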
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

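/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */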
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_RPTR);
	else
		return RREG32(mmUVD_RB_RPTR2);
}

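/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */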
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

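/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */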
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_WPTR);
	else
		return RREG32(mmUVD_RB_WPTR2);
}

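/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */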
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

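/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */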
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		WREG32(mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32(mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

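/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */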
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

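/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */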
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

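/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */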
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

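/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */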
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
	long r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	adev->uvd.num_uvd_inst = 1;

	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (!uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			adev->uvd.inst->ring_enc[i].funcs = NULL;

		adev->uvd.inst->irq.num_types = 1;
		adev->uvd.num_enc_rings = 0;

		DRM_INFO("UVD ENC is disabled\n");
	}

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			sprintf(ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst->irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}

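/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */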
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}

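/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */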
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work
	 *   - enable powergating
	 *   - enable clockgating
	 *   - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(adev);
}

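/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */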
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
					     bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

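/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */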
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	if (uvd_v6_0_enc_support(adev)) {
		ring = &adev->uvd.inst->ring_enc[0];
		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst->ring_enc[1];
		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}

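/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */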
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

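/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */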
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

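/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */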
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

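/**
 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */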
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

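/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */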
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

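/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */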
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

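/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */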
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst->srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool int_handled = true;
	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124: /* VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE */
		amdgpu_fence_process(&adev->uvd.inst->ring);
		break;
	case 119: /* VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP */
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
		else
			int_handled = false;
		break;
	case 120: /* VISLANDS30_IV_SRCID_UVD_ENC_LOW_LATENCY */
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
		else
			int_handled = false;
		break;
	}

	if (!int_handled)
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);

	return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		 UVD_SUVD_CGC_GATE__SIT_MASK |
		 UVD_SUVD_CGC_GATE__SMP_MASK |
		 UVD_SUVD_CGC_GATE__SCM_MASK |
		 UVD_SUVD_CGC_GATE__SDB_MASK |
		 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK |
			UVD_CGC_GATE__JPEG2_MASK);

		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
	} else {
		/* disable HW gating and enable Sw gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
	} else {
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.inst->irq.num_types = 1;

	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 3,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};