#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER	"amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
#define FIRMWARE_ALDEBARAN	"amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY	"amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP	"amdgpu/yellow_carp_vcn.bin"
#define FIRMWARE_VCN_3_1_2	"amdgpu/vcn_3_1_2.bin"
#define FIRMWARE_VCN4_0_0	"amdgpu/vcn_4_0_0.bin"
#define FIRMWARE_VCN4_0_2	"amdgpu/vcn_4_0_2.bin"
#define FIRMWARE_VCN4_0_4	"amdgpu/vcn_4_0_4.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

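/*
 * One-time software init for the VCN block: picks the firmware image for the
 * detected VCN IP version, validates it, and allocates the per-instance VCPU
 * BOs (firmware image, stack/context, shared memory and optional fw log).
 */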
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	unsigned int fw_shared_size, log_offset;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
	mutex_init(&adev->vcn.vcn_pg_lock);
	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
	atomic_set(&adev->vcn.total_submission_cnt, 0);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

	switch (adev->ip_versions[UVD_HWIP][0]) {
	case IP_VERSION(1, 0, 0):
	case IP_VERSION(1, 0, 1):
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case IP_VERSION(2, 5, 0):
		fw_name = FIRMWARE_ARCTURUS;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 2, 0):
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			fw_name = FIRMWARE_RENOIR;
		else
			fw_name = FIRMWARE_GREEN_SARDINE;

		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 6, 0):
		fw_name = FIRMWARE_ALDEBARAN;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 0, 0):
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 0, 2):
		if (adev->asic_type == CHIP_NAVI12)
			fw_name = FIRMWARE_NAVI12;
		else
			fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 0, 64):
	case IP_VERSION(3, 0, 192):
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
			fw_name = FIRMWARE_SIENNA_CICHLID;
		else
			fw_name = FIRMWARE_NAVY_FLOUNDER;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 2):
		fw_name = FIRMWARE_VANGOGH;
		break;
	case IP_VERSION(3, 0, 16):
		fw_name = FIRMWARE_DIMGREY_CAVEFISH;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 33):
		fw_name = FIRMWARE_BEIGE_GOBY;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 1, 1):
		fw_name = FIRMWARE_YELLOW_CARP;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 1, 2):
		fw_name = FIRMWARE_VCN_3_1_2;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(4, 0, 0):
		fw_name = FIRMWARE_VCN4_0_0;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(4, 0, 2):
		fw_name = FIRMWARE_VCN4_0_2;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(4, 0, 4):
		fw_name = FIRMWARE_VCN4_0_4;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

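	/*
	 * Bits 20-23 of ucode_version are non-zero only for the newer version
	 * encoding: the word then carries ENC major/minor, DEC version, VEP
	 * and a revision.  Otherwise fall back to the legacy
	 * major/minor/family layout decoded in the else branch below.
	 */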
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			 enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			 version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
	} else {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
	}

	bo_size += fw_shared_size;

	if (amdgpu_vcnfw_log)
		bo_size += AMDGPU_VCNFW_LOG_SIZE;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
					    &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
			return r;
		}

		adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
				bo_size - fw_shared_size;
		adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
				bo_size - fw_shared_size;

		adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;

		if (amdgpu_vcnfw_log) {
			adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
			adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
			adev->vcn.inst[i].fw_shared.log_offset = log_offset;
		}

		if (adev->vcn.indirect_sram) {
			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
					AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
					&adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
			if (r) {
				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
				return r;
			}
		}
	}

	return 0;
}

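/*
 * Software teardown counterpart of amdgpu_vcn_sw_init(): frees the
 * per-instance DPG SRAM and VCPU BOs, the decode/encode rings, the saved
 * firmware copy and the firmware request, then destroys the VCN mutexes.
 */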
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (adev->vcn.indirect_sram) {
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
					      &adev->vcn.inst[j].dpg_sram_gpu_addr,
					      (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
		}
		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
				      &adev->vcn.inst[j].gpu_addr,
				      (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);
	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
	mutex_destroy(&adev->vcn.vcn_pg_lock);

	return 0;
}

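/*
 * VCN 4.x exposes a single unified queue that handles both decode and encode
 * submissions, instead of the separate rings used by earlier VCN versions.
 */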
static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool ret = false;

	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
		ret = true;

	return ret;
}

bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
	bool ret = false;
	int vcn_config = adev->vcn.vcn_config[vcn_instance];

	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
		ret = true;
	} else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) {
		ret = true;
	} else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
		ret = true;
	}

	return ret;
}

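/*
 * Suspend: stop the idle worker and save the VCPU BO contents to system
 * memory so they can be restored on resume.
 */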
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, idx;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return 0;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
			memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
			drm_dev_exit(idx);
		}
	}
	return 0;
}

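/*
 * Resume: restore the VCPU BO either from the copy saved at suspend time or,
 * if none exists, by re-uploading the firmware image and clearing the rest.
 */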
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, idx;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
				memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
				drm_dev_exit(idx);
			}
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
					memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
						    le32_to_cpu(hdr->ucode_size_bytes));
					drm_dev_exit(idx);
				}
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}
	}
	return 0;
}

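/*
 * Delayed-work handler that checks for outstanding fences on every VCN ring.
 * When everything is idle it gates the block and leaves the VIDEO power
 * profile; otherwise it re-arms itself and, for DPG, pauses or unpauses the
 * firmware-based power state as needed.
 */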
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;
	int r = 0;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			struct dpg_pause_state new_state;

			if (fence[j] ||
			    unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, j, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
						       AMD_PG_STATE_GATE);
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
						    false);
		if (r)
			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

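/*
 * Called before a submission touches a VCN ring: bumps the submission count,
 * switches to the VIDEO power profile if the idle worker was not pending,
 * ungates the block and updates the DPG pause state for the target instance.
 */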
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r = 0;

	atomic_inc(&adev->vcn.total_submission_cnt);

	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
						    true);
		if (r)
			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
	}

	mutex_lock(&adev->vcn.vcn_pg_lock);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
					       AMD_PG_STATE_UNGATE);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
	}
	mutex_unlock(&adev->vcn.vcn_pg_lock);
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

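/*
 * Basic decode ring test: write a known value through the ring to the
 * scratch9 register and poll until it reads back, or time out.
 */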
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

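/*
 * Build and directly submit a small decode IB that points the firmware at the
 * message buffer in ib_msg, optionally returning the submission fence.
 */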
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib_msg,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(adev, ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(adev, ib_msg, f);
	return r;
}

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

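/*
 * Helpers for the unified (VCN 4.x) queue: the submission is prefixed with a
 * small header recording how many dwords follow and whether they target
 * encode or decode, plus a checksum over those dwords that is filled in once
 * the IB has been fully built.
 */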
static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
						   uint32_t ib_pack_in_dw, bool enc)
{
	uint32_t *ib_checksum;

	ib->ptr[ib->length_dw++] = 0x00000010;
	ib->ptr[ib->length_dw++] = 0x30000002;
	ib_checksum = &ib->ptr[ib->length_dw++];
	ib->ptr[ib->length_dw++] = ib_pack_in_dw;

	ib->ptr[ib->length_dw++] = 0x00000010;
	ib->ptr[ib->length_dw++] = 0x30000001;
	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);

	return ib_checksum;
}

static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
						uint32_t ib_pack_in_dw)
{
	uint32_t i;
	uint32_t checksum = 0;

	for (i = 0; i < ib_pack_in_dw; i++)
		checksum += *(*ib_checksum + 2 + i);

	**ib_checksum = checksum;
}

static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib_msg,
				      struct dma_fence **fence)
{
	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
	unsigned int ib_size_dw = 64;
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	bool sq = amdgpu_vcn_using_unified_queue(ring);
	uint32_t *ib_checksum;
	uint32_t ib_pack_in_dw;
	int i, r;

	if (sq)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->length_dw = 0;

	if (sq) {
		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
				+ 4 + 2;
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
	}

	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));

	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (sq)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(adev, ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(adev, ib_msg, f);
	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

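/*
 * Minimal encoder "create" and "destroy" messages used only by the IB test
 * below; both are built in place and submitted directly, with the unified
 * queue header and checksum added when running on a VCN 4.x style queue.
 */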
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_ib *ib_msg,
					 struct dma_fence **fence)
{
	unsigned int ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
	bool sq = amdgpu_vcn_using_unified_queue(ring);
	int i, r;

	if (sq)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;

	if (sq)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (sq)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib_msg,
					  struct dma_fence **fence)
{
	unsigned int ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
	bool sq = amdgpu_vcn_using_unified_queue(ring);
	int i, r;

	if (sq)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;

	if (sq)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (sq)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
			  AMDGPU_IB_POOL_DIRECT,
			  &ib);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	amdgpu_ib_free(adev, &ib, fence);
	dma_fence_put(fence);

	return r;
}

int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	long r;

	r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);

error:
	return r;
}

enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

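/*
 * Register the VCN firmware with the PSP front-door loader: one ucode entry
 * per (non-harvested) instance, all pointing at the same firmware image.
 */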
void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
{
	int i;
	unsigned int idx;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			if (i >= 2) {
				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
				break;
			}
			idx = AMDGPU_UCODE_ID_VCN + i;
			adev->firmware.ucode[idx].ucode_id = idx;
			adev->firmware.ucode[idx].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
	}
}

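/*
 * debugfs interface for reading the VCN firmware log ring buffer that the
 * firmware writes into the shared-memory region (see amdgpu_vcn_fwlog_init()).
 */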
#if defined(CONFIG_DEBUG_FS)
static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_vcn_inst *vcn;
	void *log_buf;
	volatile struct amdgpu_vcn_fwlog *plog;
	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
	unsigned int read_num[2] = {0};

	vcn = file_inode(f)->i_private;
	if (!vcn)
		return -ENODEV;

	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
		return -EFAULT;

	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;

	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
	read_pos = plog->rptr;
	write_pos = plog->wptr;

	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
		return -EFAULT;

	if (!size || (read_pos == write_pos))
		return 0;

	if (write_pos > read_pos) {
		available = write_pos - read_pos;
		read_num[0] = min(size, (size_t)available);
	} else {
		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
		available = read_num[0] + write_pos - plog->header_size;
		if (size > available)
			read_num[1] = write_pos - plog->header_size;
		else if (size > read_num[0])
			read_num[1] = size - read_num[0];
		else
			read_num[0] = size;
	}

	for (i = 0; i < 2; i++) {
		if (read_num[i]) {
			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
				read_pos = plog->header_size;
			if (read_num[i] == copy_to_user((buf + read_bytes),
							(log_buf + read_pos), read_num[i]))
				return -EFAULT;

			read_bytes += read_num[i];
			read_pos += read_num[i];
		}
	}

	plog->rptr = read_pos;
	*pos += read_bytes;
	return read_bytes;
}

static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_vcn_fwlog_read,
	.llseek = default_llseek
};
#endif

void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
				   struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
	debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, vcn,
				 &amdgpu_debugfs_vcnfwlog_fops,
				 AMDGPU_VCNFW_LOG_SIZE);
#endif
}

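/*
 * Point the firmware at the log buffer that sw_init reserved at the end of
 * the shared-memory region and initialise the log ring-buffer header.
 */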
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
							      + vcn->fw_shared.log_offset;

	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
	fw_log->is_enabled = 1;
	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);

	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
	log_buf->rptr = log_buf->header_size;
	log_buf->wptr = log_buf->header_size;
	log_buf->wrapped = 0;
#endif
}

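/*
 * Forward a VCN poison interrupt to the RAS framework, if RAS is enabled for
 * this block.
 */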
int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->vcn.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);

	return 0;
}