#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"
#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

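/* Sizes of the firmware image, stack and per-handle data segments that
 * are carved out of the VCE BO and programmed into the VCPU cache
 * regions in vce_v2_0_mc_resume().
 */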
#define VCE_V2_0_FW_SIZE	(256 * 1024)
#define VCE_V2_0_STACK_SIZE	(64 * 1024)
#define VCE_V2_0_DATA_SIZE	(23552 * AMDGPU_MAX_VCE_HANDLES)
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->me == 0)
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v2_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->me == 0)
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v2_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->me == 0)
		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	else
		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
}

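/* Poll VCE_LMI_STATUS until the LMI reports one of its clean/idle
 * status bits (mask 0x337f), or give up after roughly ten seconds.
 */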
static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_LMI_STATUS);

			if (status & 0x337f)
				return 0;
			mdelay(10);
		}
	}

	return -ETIMEDOUT;
}

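/* Wait for the VCPU to report that the firmware is loaded; if it does
 * not come up within a second, kick the ECPU with a soft reset before
 * retrying.
 */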
static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(mmVCE_SOFT_RESET,
			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmVCE_SOFT_RESET, 0,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

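/* Keep all VCE clocks running by overriding the clock gating controls */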
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
	WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

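/* Program the static clock gating defaults for the VCE block */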
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32(mmVCE_CLOCK_GATING_A);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	tmp |= 0x40000;
	WREG32(mmVCE_CLOCK_GATING_A, tmp);

	tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp |= 0x10;
	tmp &= ~0x100000;
	WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

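/* Point the VCPU at the VCE BO: set up the LMI, program the 40-bit
 * base address and the firmware/stack/data cache regions.
 */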
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size, offset;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);

	WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V2_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = VCE_V2_0_STACK_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = VCE_V2_0_DATA_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

static bool vce_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

static int vce_v2_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned i;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (vce_v2_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	/* set BUSY flag */
	WREG32_P(mmVCE_STATUS, 1, ~1);

	vce_v2_0_init_cg(adev);
	vce_v2_0_disable_cg(adev);

	vce_v2_0_mc_resume(adev);

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
	mdelay(100);
	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);

	r = vce_v2_0_firmware_loaded(adev);

	/* clear BUSY flag */
	WREG32_P(mmVCE_STATUS, 0, ~1);

	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		return r;
	}

	return 0;
}

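/* Stop the VCE block: wait for the LMI and the engine to go idle,
 * stall the UMC and register bus, then put the VCPU into reset.
 */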
static int vce_v2_0_stop(struct amdgpu_device *adev)
{
	int i;
	int status;

	if (vce_v2_0_lmi_clean(adev)) {
		DRM_INFO("VCE is not idle\n");
		return 0;
	}

	if (vce_v2_0_wait_for_idle(adev)) {
		DRM_INFO("VCE is busy, can't set clock gating\n");
		return 0;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));

	for (i = 0; i < 100; ++i) {
		status = RREG32(mmVCE_LMI_STATUS);
		if (status & 0x240)
			break;
		mdelay(1);
	}

	WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);

	WREG32(mmVCE_STATUS, 0);

	return 0;
}

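/* Static (software controlled) clock gating: force the gating bits on
 * or off directly instead of letting the hardware gate dynamically.
 */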
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
	u32 tmp;

	if (gated) {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp &= ~0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
	} else {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe7;
		tmp &= ~0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0x1fe000;
		tmp &= ~0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp |= 0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
	}
}

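/* Dynamic (hardware controlled) clock gating counterpart of
 * vce_v2_0_set_sw_cg().
 */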
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
	u32 orig, tmp;

	/* LMI_MC/LMI_UMC always set in dynamic,
	 * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
	 */
	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp &= ~0x00060006;

	/* Exception for ECPU, IH, SEM, SYS blocks needs to be turned on/off by SW */
	if (gated) {
		tmp |= 0xe10000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);
	} else {
		tmp |= 0xe1;
		tmp &= ~0xe10000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);
	}

	orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0x1fe000;
	tmp &= ~0xff000000;
	if (tmp != orig)
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
	tmp &= ~0x3fc;
	if (tmp != orig)
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

	/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
	WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);

	if (gated)
		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
}

static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
				 bool sw_cg)
{
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
		if (sw_cg)
			vce_v2_0_set_sw_cg(adev, true);
		else
			vce_v2_0_set_dyn_cg(adev, true);
	} else {
		vce_v2_0_disable_cg(adev);

		if (sw_cg)
			vce_v2_0_set_sw_cg(adev, false);
		else
			vce_v2_0_set_dyn_cg(adev, false);
	}
}

static int vce_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.num_rings = 2;

	vce_v2_0_set_ring_funcs(adev);
	vce_v2_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCE */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
		VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);

		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
				     hw_prio, NULL);
		if (r)
			return r;
	}

	r = amdgpu_vce_entity_init(adev);

	return r;
}

static int vce_v2_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}

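/* Raise the VCE clocks and enable clock gating, then make sure both
 * rings can execute commands.
 */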
static int vce_v2_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
	vce_v2_0_enable_mgcg(adev, true, false);

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
		if (r)
			return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v2_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	return 0;
}

static int vce_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work
	 *   - enable powergating
	 *   - enable clockgating
	 *   - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_vce(adev, false);
	} else {
		amdgpu_asic_set_vce_clocks(adev, 0, 0);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_CG_STATE_GATE);
	}

	r = vce_v2_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}

static int vce_v2_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v2_0_hw_init(adev);
}

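/* Assert the SRBM soft reset for the VCE block, then restart it */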
static int vce_v2_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
	mdelay(5);

	return vce_v2_0_start(adev);
}

static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

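/* IRQ handler: src_data[0] selects which of the two VCE rings
 * signalled a fence.
 */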
static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");
	switch (entry->src_data[0]) {
	case 0:
	case 1:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static int vce_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	bool sw_cg = false;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE) {
		gate = true;
		sw_cg = true;
	}

	vce_v2_0_enable_mgcg(adev, gate, sw_cg);

	return 0;
}

static int vce_v2_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		return vce_v2_0_stop(adev);
	else
		return vce_v2_0_start(adev);
}

static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
	.name = "vce_v2_0",
	.early_init = vce_v2_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v2_0_sw_init,
	.sw_fini = vce_v2_0_sw_fini,
	.hw_init = vce_v2_0_hw_init,
	.hw_fini = vce_v2_0_hw_fini,
	.suspend = vce_v2_0_suspend,
	.resume = vce_v2_0_resume,
	.is_idle = vce_v2_0_is_idle,
	.wait_for_idle = vce_v2_0_wait_for_idle,
	.soft_reset = vce_v2_0_soft_reset,
	.set_clockgating_state = vce_v2_0_set_clockgating_state,
	.set_powergating_state = vce_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = vce_v2_0_ring_get_rptr,
	.get_wptr = vce_v2_0_ring_get_wptr,
	.set_wptr = vce_v2_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1, no user fence */
	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vce.num_rings; i++) {
		adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
		adev->vce.ring[i].me = i;
	}
}

static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
	.set = vce_v2_0_set_interrupt_state,
	.process = vce_v2_0_process_interrupt,
};

static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
}

const struct amdgpu_ip_block_version vce_v2_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &vce_v2_0_ip_funcs,
};