#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "amdgpu_cs.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

#include "amdgpu_ras.h"

/* Timeout before the idle worker clock/power gates the UVD block */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware versions used for the context-buffer checks on VI parts */
#define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))

/* Minimum firmware version for Polaris10/11 */
#define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))

#ifdef CONFIG_DRM_AMDGPU_SI
#define FIRMWARE_TAHITI		"amdgpu/tahiti_uvd.bin"
#define FIRMWARE_VERDE		"amdgpu/verde_uvd.bin"
#define FIRMWARE_PITCAIRN	"amdgpu/pitcairn_uvd.bin"
#define FIRMWARE_OLAND		"amdgpu/oland_uvd.bin"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_uvd.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"

/* UVD command and register offsets, relative to the per-ASIC UVD base */
#define UVD_GPCOM_VCPU_CMD	0x03c3
#define UVD_GPCOM_VCPU_DATA0	0x03c4
#define UVD_GPCOM_VCPU_DATA1	0x03c5
#define UVD_NO_OP		0x03ff
#define UVD_BASE_SI		0x3800

/* Parser context used while validating UVD command streams */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	struct amdgpu_ib *ib;

	/* does the IB contain a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_SI
MODULE_FIRMWARE(FIRMWARE_TAHITI);
MODULE_FIRMWARE(FIRMWARE_VERDE);
MODULE_FIRMWARE(FIRMWARE_PITCAIRN);
MODULE_FIRMWARE(FIRMWARE_OLAND);
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo);

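/*
 * Allocate a kmapped message buffer object in GTT, or in VRAM inside the
 * UVD-addressable segment when the ASIC lacks 64-bit UVD addressing.
 */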
static int amdgpu_uvd_create_msg_bo_helper(struct amdgpu_device *adev,
					   uint32_t size,
					   struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo *bo = NULL;
	void *addr;
	int r;

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &bo, NULL, &addr);
	if (r)
		return r;

	if (adev->uvd.address_64_bit)
		goto succ;

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
	amdgpu_uvd_force_into_uvd_segment(bo);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		goto err;
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r)
		goto err_pin;
	r = amdgpu_bo_kmap(bo, &addr);
	if (r)
		goto err_kmap;
succ:
	amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;
	return 0;
err_kmap:
	amdgpu_bo_unpin(bo);
err_pin:
err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

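/**
 * amdgpu_uvd_sw_init - load the UVD firmware and allocate the VCPU BOs
 *
 * @adev: amdgpu_device pointer
 *
 * Requests and validates the UVD microcode for the current ASIC, derives
 * the firmware version and handle count from its header, and allocates
 * one VCPU buffer object per UVD instance.
 */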
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned family_id;
	int i, j, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
		fw_name = FIRMWARE_TAHITI;
		break;
	case CHIP_VERDE:
		fw_name = FIRMWARE_VERDE;
		break;
	case CHIP_PITCAIRN:
		fw_name = FIRMWARE_PITCAIRN;
		break;
	case CHIP_OLAND:
		fw_name = FIRMWARE_OLAND;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	/* Set the default number of UVD handles the firmware can manage */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;

	if (adev->asic_type < CHIP_VEGA20) {
		unsigned version_major, version_minor;

		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
			 version_major, version_minor, family_id);

		/*
		 * Limit the number of UVD handles depending on the microcode
		 * major and minor versions: firmware 1.80 (0x01/0x50) and
		 * newer supports the larger handle count.
		 */
		if ((version_major > 0x01) ||
		    ((version_major == 0x01) && (version_minor >= 0x50)))
			adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

		adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
					(family_id << 8));

		if ((adev->asic_type == CHIP_POLARIS10 ||
		     adev->asic_type == CHIP_POLARIS11) &&
		    (adev->uvd.fw_version < FW_1_66_16))
			DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
				  version_major, version_minor);
	} else {
		unsigned int enc_major, enc_minor, dec_minor;

		dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
		enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
		DRM_INFO("Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n",
			 enc_major, enc_minor, dec_minor, family_id);

		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

		adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
	}

	bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
			return r;
		}
	}

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from UVD v5.0 the HW addressing capacity increased to 64 bits */
	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
		break;
	case CHIP_CARRIZO:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
		break;
	case CHIP_FIJI:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
		break;
	case CHIP_STONEY:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
		break;
	default:
		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
	}

	return 0;
}

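/**
 * amdgpu_uvd_sw_fini - free the UVD firmware, rings and buffer objects
 *
 * @adev: amdgpu_device pointer
 */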
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo);
	int i, j;

	drm_sched_entity_destroy(&adev->uvd.entity);

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		kvfree(adev->uvd.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
				      &adev->uvd.inst[j].gpu_addr,
				      (void **)&adev->uvd.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->uvd.inst[j].ring);

		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
	release_firmware(adev->uvd.fw);

	return 0;
}

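/**
 * amdgpu_uvd_entity_init - init the UVD scheduler entity
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the kernel entity used to submit UVD jobs on the first
 * instance's decode ring.
 */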
int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_gpu_scheduler *sched;
	int r;

	ring = &adev->uvd.inst[0].ring;
	sched = &ring->sched;
	r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);
	if (r) {
		DRM_ERROR("Failed setting up UVD kernel entity.\n");
		return r;
	}

	return 0;
}

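/**
 * amdgpu_uvd_suspend - save the UVD VCPU state before suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Cancels the idle work and copies the VCPU BO contents of every active
 * instance to a CPU-side backup so they can be restored on resume.
 */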
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, j, idx;
	bool in_ras_intr = amdgpu_ras_intr_triggered();

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	/* pre-Polaris10: nothing needs to be saved when no handles are open */
	if (adev->asic_type < CHIP_POLARIS10) {
		for (i = 0; i < adev->uvd.max_handles; ++i)
			if (atomic_read(&adev->uvd.handles[i]))
				break;

		if (i == adev->uvd.max_handles)
			return 0;
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (adev->uvd.inst[j].vcpu_bo == NULL)
			continue;

		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
		ptr = adev->uvd.inst[j].cpu_addr;

		adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->uvd.inst[j].saved_bo)
			return -ENOMEM;

		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
			/* VCPU contents are unreliable after a RAS ATHUB interrupt */
			if (in_ras_intr)
				memset(adev->uvd.inst[j].saved_bo, 0, size);
			else
				memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);

			drm_dev_exit(idx);
		}
	}

	if (in_ras_intr)
		DRM_WARN("UVD VCPU state may be lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");

	return 0;
}

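/**
 * amdgpu_uvd_resume - restore the UVD VCPU state after resume
 *
 * @adev: amdgpu_device pointer
 *
 * Restores the saved VCPU BO contents, or reloads the firmware image and
 * clears the rest of the BO when no saved state exists.
 */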
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, idx;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->uvd.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
		ptr = adev->uvd.inst[i].cpu_addr;

		if (adev->uvd.inst[i].saved_bo != NULL) {
			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
				memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
				drm_dev_exit(idx);
			}
			kvfree(adev->uvd.inst[i].saved_bo);
			adev->uvd.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
					memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
						    le32_to_cpu(hdr->ucode_size_bytes));
					drm_dev_exit(idx);
				}
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
			/* re-sync the fence sequence after losing the VCPU state */
			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
		}
	}
	return 0;
}

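/**
 * amdgpu_uvd_free_handles - free all handles still owned by a file
 *
 * @adev: amdgpu_device pointer
 * @filp: DRM file private
 *
 * Sends a destroy message for every session handle that is still owned by
 * @filp, e.g. when the process closes the DRM file descriptor.
 */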
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
	int i, r;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);

		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct dma_fence *fence;

			r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
						       &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD %d!\n", r);
				continue;
			}

			dma_fence_wait(fence, false);
			dma_fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
	int i;

	/* limit all placements to the first 256MB that UVD can address */
	for (i = 0; i < abo->placement.num_placement; ++i) {
		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
	uint32_t lo, hi;
	uint64_t addr;

	lo = amdgpu_ib_get_value(ctx->ib, ctx->data0);
	hi = amdgpu_ib_get_value(ctx->ib, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	return addr;
}

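/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are placed in VRAM within
 * the segment the UVD block can address.
 */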
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct ttm_operation_ctx tctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r = 0;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

			amdgpu_bo_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
	}

	return r;
}

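/**
 * amdgpu_uvd_cs_msg_decode - handle a UVD decode message
 *
 * @adev: amdgpu_device pointer
 * @msg: pointer to the mapped message
 * @buf_sizes: minimum buffer sizes, filled in from the message contents
 *
 * Peek into the decode message and calculate the necessary buffer sizes
 * for the reported codec, resolution and level.
 */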
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
				    unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = ~0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H.264 */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC-1 */
		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* context buffer */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */
		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */
		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 7: /* H.264 (performance path) */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		if (!adev->uvd.use_ctx_buf) {
			/* macroblock context buffer */
			min_dpb_size +=
				width_in_mb * height_in_mb * num_dpb_buffer * 192;

			/* IT surface buffer */
			min_dpb_size += width_in_mb * height_in_mb * 32;
		} else {
			/* macroblock context buffer */
			min_ctx_size =
				width_in_mb * height_in_mb * num_dpb_buffer * 192;
		}
		break;

	case 8: /* MJPEG */
		min_dpb_size = 0;
		break;

	case 16: /* H.265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;

	adev->uvd.decode_image_width = width;
	return 0;
}

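/**
 * amdgpu_uvd_cs_msg - handle a UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset of the message within @bo
 *
 * Peek into the UVD message, validate the session handle and track
 * create/decode/destroy messages so the number of open sessions stays
 * within the handle limit.
 */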
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, allocate a session handle */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n",
					  handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
				adev->uvd.filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -ENOSPC;

	case 1:
		/* it's a decode msg, calculate the buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				if (adev->uvd.filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
	}

	amdgpu_bo_kunmap(bo);
	return -EINVAL;
}

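/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses with the resolved GPU offsets, verify buffer
 * sizes and 256MB segment limits, and handle any message command.
 */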
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t start, end;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->last + 1 - mapping->start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_ib_set_value(ctx->ib, ctx->data0, lower_32_bits(start));
	amdgpu_ib_set_value(ctx->ib, ctx->data1, upper_32_bits(start));

	cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

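/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback invoked for each command register write
 *
 * Parse the register writes of a type-0 packet, remembering the DATA0/DATA1
 * offsets and calling @cb whenever the command register is written.
 */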
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ctx->ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
		case mmUVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

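/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback invoked for each command register write
 *
 * Parse all packets in the IB, dispatching type-0 packets to
 * amdgpu_uvd_cs_reg() and skipping type-2 padding packets.
 */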
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	int r;

	for (ctx->idx = 0 ; ctx->idx < ctx->ib->length_dw; ) {
		uint32_t cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);

		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

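/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: command submission parser
 * @job: the job owning the IB
 * @ib: the IB to parse
 *
 * Validate and patch a UVD command stream before it is submitted to the
 * hardware ring.
 */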
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	0xFFFFFFFF,
		[0x00000002]	=	0xFFFFFFFF,
		[0x00000003]	=	2048,
		[0x00000004]	=	0xFFFFFFFF,
	};
	int r;

	job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib = ib;

	/* first round only required on chips without UVD 64 bit address support */
	if (!parser->adev->uvd.address_64_bit) {
		/* make sure all message and feedback buffers are in VRAM */
		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
		if (r)
			return r;
	}

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

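/*
 * Build a small IB that points the VCPU command registers at the message
 * buffer and submit it, either directly on the ring or through the UVD
 * scheduler entity.
 */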
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint32_t data[4];
	uint64_t addr;
	long r;
	int i;
	unsigned offset_idx = 0;
	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };

	r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	if (adev->asic_type >= CHIP_VEGA10) {
		offset_idx = 1 + ring->me;
		offset[1] = adev->reg_offset[UVD_HWIP][0][1];
		offset[2] = adev->reg_offset[UVD_HWIP][1][1];
	}

	data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
	data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
	data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
	data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = data[0];
	ib->ptr[1] = addr;
	ib->ptr[2] = data[1];
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = data[2];
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = data[3];
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = dma_resv_wait_timeout(bo->tbo.base.resv,
					  DMA_RESV_USAGE_KERNEL, false,
					  msecs_to_jiffies(10));
		if (r == 0)
			r = -ETIMEDOUT;
		if (r < 0)
			goto err_free;

		r = amdgpu_job_submit_direct(job, ring, &f);
		if (r)
			goto err_free;
	} else {
		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
				     AMDGPU_SYNC_ALWAYS,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r)
			goto err_free;

		r = amdgpu_job_submit(job, &adev->uvd.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
	return r;
}

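/*
 * amdgpu_uvd_get_create_msg - write a UVD create message for @handle into
 * the shared message BO and submit it directly on the ring.
 */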
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = adev->uvd.ib_bo;
	uint32_t *msg;
	int i;

	msg = amdgpu_bo_kptr(bo);

	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	if (direct) {
		bo = adev->uvd.ib_bo;
	} else {
		r = amdgpu_uvd_create_msg_bo_helper(adev, 4096, &bo);
		if (r)
			return r;
	}

	msg = amdgpu_bo_kptr(bo);

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	r = amdgpu_uvd_send_msg(ring, bo, direct, fence);

	if (!direct)
		amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);

	return r;
}

static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned fences = 0, i, j;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
		for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
		}
	}

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
			/* shutdown the UVD block */
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
	}
}

void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_UNGATE);
		}
	}
}

void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}

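/**
 * amdgpu_uvd_ring_test_ib - test IB execution
 *
 * @ring: amdgpu_ring pointer
 * @timeout: timeout in jiffies for each fence wait
 *
 * Submit a create and a destroy message and wait for the resulting fences
 * to verify that IB execution works on the ring.
 */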
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_uvd_get_create_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	dma_fence_put(fence);
	if (r == 0)
		r = -ETIMEDOUT;
	if (r < 0)
		goto error;

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);

error:
	return r;
}

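/**
 * amdgpu_uvd_used_handles - count the currently open UVD sessions
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of UVD handles in use.
 */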
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
	unsigned i;
	uint32_t used_handles = 0;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		/*
		 * Handles can be freed in any order, so count every
		 * non-zero entry rather than stopping at the first gap.
		 */
		if (atomic_read(&adev->uvd.handles[i]))
			used_handles++;
	}

	return used_handles;
}