/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/delay.h>
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

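/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */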
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

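/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */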
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

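/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */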
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
			      VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE,
			      &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return amdgpu_uvd_entity_init(adev);
}

static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

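/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */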
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

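/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */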
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v5_0_stop(adev);

	return 0;
}

static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

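	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work
	 *   - enable powergating
	 *   - enable clockgating
	 *   - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */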
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v5_0_hw_init(adev);
}

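/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know it's offsets
 */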
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
	       lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
	       upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

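/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */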
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			       (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
	       lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
	       upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

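/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */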
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

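/**
 * uvd_v5_0_ring_emit_fence - emit an fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */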
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

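/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */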
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

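/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */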
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3, suvd_flags;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			  UVD_CGC_GATE__UDEC_MASK |
			  UVD_CGC_GATE__MPEG2_MASK |
			  UVD_CGC_GATE__RBC_MASK |
			  UVD_CGC_GATE__LMI_MC_MASK |
			  UVD_CGC_GATE__IDCT_MASK |
			  UVD_CGC_GATE__MPRD_MASK |
			  UVD_CGC_GATE__MPC_MASK |
			  UVD_CGC_GATE__LBSI_MASK |
			  UVD_CGC_GATE__LRBBM_MASK |
			  UVD_CGC_GATE__UDEC_RE_MASK |
			  UVD_CGC_GATE__UDEC_CM_MASK |
			  UVD_CGC_GATE__UDEC_IT_MASK |
			  UVD_CGC_GATE__UDEC_DB_MASK |
			  UVD_CGC_GATE__UDEC_MP_MASK |
			  UVD_CGC_GATE__WCB_MASK |
			  UVD_CGC_GATE__JPEG_MASK |
			  UVD_CGC_GATE__SCPU_MASK);
		/* only in pg enabled, we can gate clock to vcpu */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;
		data3 &= ~UVD_CGC_GATE__REGS_MASK;
		data1 |= suvd_flags;
	} else {
		data3 = 0;
		data1 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v5_0_enable_clock_gating(adev, true);
	} else {
		uvd_v5_0_enable_clock_gating(adev, false);
	}

	uvd_v5_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
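	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */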
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
	} else {
		ret = uvd_v5_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v5_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
	    CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v5_0_ring_emit_fence  x1 no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v5_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v5_0_ip_funcs,
};