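/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
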
0024 #include <linux/firmware.h>
0025 #include <drm/drm_drv.h>
0026
0027 #include "amdgpu.h"
0028 #include "amdgpu_vcn.h"
0029 #include "amdgpu_pm.h"
0030 #include "soc15.h"
0031 #include "soc15d.h"
0032 #include "vcn_v2_0.h"
0033 #include "mmsch_v1_0.h"
0034 #include "vcn_v2_5.h"
0035
0036 #include "vcn/vcn_2_5_offset.h"
0037 #include "vcn/vcn_2_5_sh_mask.h"
0038 #include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
0039
0040 #define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
0041 #define VCN1_VID_SOC_ADDRESS_3_0 0x48200
0042
0043 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
0044 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
0045 #define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x10
0046 #define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET 0x11
0047 #define mmUVD_NO_OP_INTERNAL_OFFSET 0x29
0048 #define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET 0x66
0049 #define mmUVD_SCRATCH9_INTERNAL_OFFSET 0xc01d
0050
0051 #define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x431
0052 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x3b4
0053 #define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5
0054 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c
0055
0056 #define VCN25_MAX_HW_INSTANCES_ARCTURUS 2
0057
0058 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
0059 static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
0060 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
0061 static int vcn_v2_5_set_powergating_state(void *handle,
0062 enum amd_powergating_state state);
0063 static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
0064 int inst_idx, struct dpg_pause_state *new_state);
0065 static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
0066 static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
0067
0068 static int amdgpu_ih_clientid_vcns[] = {
0069 SOC15_IH_CLIENTID_VCN,
0070 SOC15_IH_CLIENTID_VCN1
0071 };
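/**
 * vcn_v2_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */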
0080 static int vcn_v2_5_early_init(void *handle)
0081 {
0082 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0083
0084 if (amdgpu_sriov_vf(adev)) {
0085 adev->vcn.num_vcn_inst = 2;
0086 adev->vcn.harvest_config = 0;
0087 adev->vcn.num_enc_rings = 1;
0088 } else {
0089 u32 harvest;
0090 int i;
0091
0092 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
0093 harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
0094 if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
0095 adev->vcn.harvest_config |= 1 << i;
0096 }
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
0101
0102 adev->vcn.num_enc_rings = 2;
0103 }
0104
0105 vcn_v2_5_set_dec_ring_funcs(adev);
0106 vcn_v2_5_set_enc_ring_funcs(adev);
0107 vcn_v2_5_set_irq_funcs(adev);
0108 vcn_v2_5_set_ras_funcs(adev);
0109
0110 return 0;
0111 }
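/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */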
0120 static int vcn_v2_5_sw_init(void *handle)
0121 {
0122 struct amdgpu_ring *ring;
0123 int i, j, r;
0124 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0125
0126 for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
0127 if (adev->vcn.harvest_config & (1 << j))
0128 continue;
0129
		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
			VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq);
		if (r)
			return r;
0148 }
0149
0150 r = amdgpu_vcn_sw_init(adev);
0151 if (r)
0152 return r;
0153
0154 amdgpu_vcn_setup_ucode(adev);
0155
0156 r = amdgpu_vcn_resume(adev);
0157 if (r)
0158 return r;
0159
0160 for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
0161 volatile struct amdgpu_fw_shared *fw_shared;
0162
0163 if (adev->vcn.harvest_config & (1 << j))
0164 continue;
0165 adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
0166 adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
0167 adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
0168 adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
0169 adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
0170 adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
0171
0172 adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
0173 adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
0174 adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
0175 adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
0176 adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
0177 adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
0178 adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
0179 adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
0180 adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
0181 adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
0182
0183 ring = &adev->vcn.inst[j].ring_dec;
0184 ring->use_doorbell = true;
0185
0186 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
0187 (amdgpu_sriov_vf(adev) ? 2*j : 8*j);
0188 sprintf(ring->name, "vcn_dec_%d", j);
0189 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
0190 0, AMDGPU_RING_PRIO_DEFAULT, NULL);
0191 if (r)
0192 return r;
0193
0194 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
0195 enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
0196
0197 ring = &adev->vcn.inst[j].ring_enc[i];
0198 ring->use_doorbell = true;
0199
0200 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
0201 (amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
0202
0203 sprintf(ring->name, "vcn_enc_%d.%d", j, i);
0204 r = amdgpu_ring_init(adev, ring, 512,
0205 &adev->vcn.inst[j].irq, 0,
0206 hw_prio, NULL);
0207 if (r)
0208 return r;
0209 }
0210
0211 fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr;
0212 fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
0213
		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[j]);
0216 }
0217
0218 if (amdgpu_sriov_vf(adev)) {
0219 r = amdgpu_virt_alloc_mm_table(adev);
0220 if (r)
0221 return r;
0222 }
0223
0224 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
0225 adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
0226
0227 return 0;
0228 }
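/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */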
0237 static int vcn_v2_5_sw_fini(void *handle)
0238 {
0239 int i, r, idx;
0240 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0241 volatile struct amdgpu_fw_shared *fw_shared;
0242
0243 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
0244 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
0245 if (adev->vcn.harvest_config & (1 << i))
0246 continue;
0247 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
0248 fw_shared->present_flag_0 = 0;
0249 }
0250 drm_dev_exit(idx);
0251 }
0252
0253
0254 if (amdgpu_sriov_vf(adev))
0255 amdgpu_virt_free_mm_table(adev);
0256
0257 r = amdgpu_vcn_suspend(adev);
0258 if (r)
0259 return r;
0260
0261 r = amdgpu_vcn_sw_fini(adev);
0262
0263 return r;
0264 }
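/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */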
0273 static int vcn_v2_5_hw_init(void *handle)
0274 {
0275 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0276 struct amdgpu_ring *ring;
0277 int i, j, r = 0;
0278
0279 if (amdgpu_sriov_vf(adev))
0280 r = vcn_v2_5_sriov_start(adev);
0281
0282 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
0283 if (adev->vcn.harvest_config & (1 << j))
0284 continue;
0285
0286 if (amdgpu_sriov_vf(adev)) {
0287 adev->vcn.inst[j].ring_enc[0].sched.ready = true;
0288 adev->vcn.inst[j].ring_enc[1].sched.ready = false;
0289 adev->vcn.inst[j].ring_enc[2].sched.ready = false;
0290 adev->vcn.inst[j].ring_dec.sched.ready = true;
0291 } else {
0292
0293 ring = &adev->vcn.inst[j].ring_dec;
0294
0295 adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
0296 ring->doorbell_index, j);
0297
0298 r = amdgpu_ring_test_helper(ring);
0299 if (r)
0300 goto done;
0301
0302 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
0303 ring = &adev->vcn.inst[j].ring_enc[i];
0304 r = amdgpu_ring_test_helper(ring);
0305 if (r)
0306 goto done;
0307 }
0308 }
0309 }
0310
0311 done:
0312 if (!r)
0313 DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
0314 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
0315
0316 return r;
0317 }
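/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */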
0326 static int vcn_v2_5_hw_fini(void *handle)
0327 {
0328 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0329 int i;
0330
0331 cancel_delayed_work_sync(&adev->vcn.idle_work);
0332
0333 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
0334 if (adev->vcn.harvest_config & (1 << i))
0335 continue;
0336
0337 if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
0338 (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
0339 RREG32_SOC15(VCN, i, mmUVD_STATUS)))
0340 vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
0341 }
0342
0343 return 0;
0344 }
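/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */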
0353 static int vcn_v2_5_suspend(void *handle)
0354 {
0355 int r;
0356 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0357
0358 r = vcn_v2_5_hw_fini(adev);
0359 if (r)
0360 return r;
0361
0362 r = amdgpu_vcn_suspend(adev);
0363
0364 return r;
0365 }
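/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */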
0374 static int vcn_v2_5_resume(void *handle)
0375 {
0376 int r;
0377 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0378
0379 r = amdgpu_vcn_resume(adev);
0380 if (r)
0381 return r;
0382
0383 r = vcn_v2_5_hw_init(adev);
0384
0385 return r;
0386 }
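/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */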
0395 static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
0396 {
0397 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
0398 uint32_t offset;
0399 int i;
0400
0401 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
0402 if (adev->vcn.harvest_config & (1 << i))
0403 continue;
		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
0406 WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
0407 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
0408 WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
0409 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
0410 WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
0411 offset = 0;
0412 } else {
0413 WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
0414 lower_32_bits(adev->vcn.inst[i].gpu_addr));
0415 WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
0416 upper_32_bits(adev->vcn.inst[i].gpu_addr));
0417 offset = size;
0418 WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
0419 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
0420 }
0421 WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
0425 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
0426 WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
0427 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
0428 WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
0429 WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
0433 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
0434 WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
0435 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
0436 WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
0437 WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

		/* non-cache window */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
0441 lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
0442 WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
0443 upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
0444 WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
0445 WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
0446 AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
0447 }
0448 }
0449
0450 static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
0451 {
0452 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
0453 uint32_t offset;
0454
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
0457 if (!indirect) {
0458 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0459 VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
0460 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
0461 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0462 VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
0463 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
0464 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0465 VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
0466 } else {
0467 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0468 VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
0469 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0470 VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
0471 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0472 VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
0473 }
0474 offset = 0;
0475 } else {
0476 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0477 VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
0478 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
0479 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0480 VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
0481 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
0482 offset = size;
0483 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0484 VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
0485 AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
0486 }
0487
0488 if (!indirect)
0489 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0490 VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
0491 else
0492 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0493 VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
0494
	/* cache window 1: stack */
	if (!indirect) {
0497 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0498 VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
0499 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
0500 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0501 VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
0502 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
0503 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0504 VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
0505 } else {
0506 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0507 VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
0508 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0509 VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
0510 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0511 VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
0512 }
0513 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0514 VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
0515
	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0518 VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
0519 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
0520 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0521 VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
0522 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
0523 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0524 VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
0525 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0526 VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
0527
	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0530 VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
0531 lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
0532 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0533 VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
0534 upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
0535 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0536 VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
0537 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0538 VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
0539 AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
0540
	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0543 VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
0544 }
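/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */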
0553 static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
0554 {
0555 uint32_t data;
0556 int i;
0557
0558 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
0559 if (adev->vcn.harvest_config & (1 << i))
0560 continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
0563 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
0564 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
0565 else
0566 data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
0567 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
0568 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
0569 WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
0570
0571 data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
0572 data &= ~(UVD_CGC_GATE__SYS_MASK
0573 | UVD_CGC_GATE__UDEC_MASK
0574 | UVD_CGC_GATE__MPEG2_MASK
0575 | UVD_CGC_GATE__REGS_MASK
0576 | UVD_CGC_GATE__RBC_MASK
0577 | UVD_CGC_GATE__LMI_MC_MASK
0578 | UVD_CGC_GATE__LMI_UMC_MASK
0579 | UVD_CGC_GATE__IDCT_MASK
0580 | UVD_CGC_GATE__MPRD_MASK
0581 | UVD_CGC_GATE__MPC_MASK
0582 | UVD_CGC_GATE__LBSI_MASK
0583 | UVD_CGC_GATE__LRBBM_MASK
0584 | UVD_CGC_GATE__UDEC_RE_MASK
0585 | UVD_CGC_GATE__UDEC_CM_MASK
0586 | UVD_CGC_GATE__UDEC_IT_MASK
0587 | UVD_CGC_GATE__UDEC_DB_MASK
0588 | UVD_CGC_GATE__UDEC_MP_MASK
0589 | UVD_CGC_GATE__WCB_MASK
0590 | UVD_CGC_GATE__VCPU_MASK
0591 | UVD_CGC_GATE__MMSCH_MASK);
0592
0593 WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);
0594
0595 SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);
0596
0597 data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
0598 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
0599 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
0600 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
0601 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
0602 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
0603 | UVD_CGC_CTRL__SYS_MODE_MASK
0604 | UVD_CGC_CTRL__UDEC_MODE_MASK
0605 | UVD_CGC_CTRL__MPEG2_MODE_MASK
0606 | UVD_CGC_CTRL__REGS_MODE_MASK
0607 | UVD_CGC_CTRL__RBC_MODE_MASK
0608 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
0609 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
0610 | UVD_CGC_CTRL__IDCT_MODE_MASK
0611 | UVD_CGC_CTRL__MPRD_MODE_MASK
0612 | UVD_CGC_CTRL__MPC_MODE_MASK
0613 | UVD_CGC_CTRL__LBSI_MODE_MASK
0614 | UVD_CGC_CTRL__LRBBM_MODE_MASK
0615 | UVD_CGC_CTRL__WCB_MODE_MASK
0616 | UVD_CGC_CTRL__VCPU_MODE_MASK
0617 | UVD_CGC_CTRL__MMSCH_MODE_MASK);
0618 WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
0619
		/* turn on SUVD clocks */
		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
0622 data |= (UVD_SUVD_CGC_GATE__SRE_MASK
0623 | UVD_SUVD_CGC_GATE__SIT_MASK
0624 | UVD_SUVD_CGC_GATE__SMP_MASK
0625 | UVD_SUVD_CGC_GATE__SCM_MASK
0626 | UVD_SUVD_CGC_GATE__SDB_MASK
0627 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
0628 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
0629 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
0630 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
0631 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
0632 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
0633 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
0634 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
0635 | UVD_SUVD_CGC_GATE__SCLR_MASK
0636 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
0637 | UVD_SUVD_CGC_GATE__ENT_MASK
0638 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
0639 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
0640 | UVD_SUVD_CGC_GATE__SITE_MASK
0641 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
0642 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
0643 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
0644 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
0645 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
0646 WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);
0647
0648 data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
0649 data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
0650 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
0651 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
0652 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
0653 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
0654 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
0655 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
0656 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
0657 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
0658 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
0659 WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
0660 }
0661 }
0662
0663 static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
0664 uint8_t sram_sel, int inst_idx, uint8_t indirect)
0665 {
0666 uint32_t reg_data = 0;
0667
	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
0670 reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
0671 else
0672 reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
0673 reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
0674 reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
0675 reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
0676 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
0677 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
0678 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
0679 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
0680 UVD_CGC_CTRL__SYS_MODE_MASK |
0681 UVD_CGC_CTRL__UDEC_MODE_MASK |
0682 UVD_CGC_CTRL__MPEG2_MODE_MASK |
0683 UVD_CGC_CTRL__REGS_MODE_MASK |
0684 UVD_CGC_CTRL__RBC_MODE_MASK |
0685 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
0686 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
0687 UVD_CGC_CTRL__IDCT_MODE_MASK |
0688 UVD_CGC_CTRL__MPRD_MODE_MASK |
0689 UVD_CGC_CTRL__MPC_MODE_MASK |
0690 UVD_CGC_CTRL__LBSI_MODE_MASK |
0691 UVD_CGC_CTRL__LRBBM_MODE_MASK |
0692 UVD_CGC_CTRL__WCB_MODE_MASK |
0693 UVD_CGC_CTRL__VCPU_MODE_MASK |
0694 UVD_CGC_CTRL__MMSCH_MODE_MASK);
0695 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0696 VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
0697
	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
0701
	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
0705
	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
0709 }
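/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */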
0718 static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
0719 {
0720 uint32_t data = 0;
0721 int i;
0722
0723 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
0724 if (adev->vcn.harvest_config & (1 << i))
0725 continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
0728 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
0729 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
0730 else
0731 data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
0732 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
0733 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
0734 WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
0735
0736 data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
0737 data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
0738 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
0739 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
0740 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
0741 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
0742 | UVD_CGC_CTRL__SYS_MODE_MASK
0743 | UVD_CGC_CTRL__UDEC_MODE_MASK
0744 | UVD_CGC_CTRL__MPEG2_MODE_MASK
0745 | UVD_CGC_CTRL__REGS_MODE_MASK
0746 | UVD_CGC_CTRL__RBC_MODE_MASK
0747 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
0748 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
0749 | UVD_CGC_CTRL__IDCT_MODE_MASK
0750 | UVD_CGC_CTRL__MPRD_MODE_MASK
0751 | UVD_CGC_CTRL__MPC_MODE_MASK
0752 | UVD_CGC_CTRL__LBSI_MODE_MASK
0753 | UVD_CGC_CTRL__LRBBM_MODE_MASK
0754 | UVD_CGC_CTRL__WCB_MODE_MASK
0755 | UVD_CGC_CTRL__VCPU_MODE_MASK);
0756 WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
0757
0758 data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
0759 data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
0760 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
0761 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
0762 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
0763 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
0764 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
0765 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
0766 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
0767 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
0768 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
0769 WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
0770 }
0771 }
0772
0773 static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
0774 {
0775 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
0776 struct amdgpu_ring *ring;
0777 uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
0787
0788 if (indirect)
0789 adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
0806 tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
0807 UVD_LMI_CTRL__REQ_MODE_MASK |
0808 UVD_LMI_CTRL__CRC_RESET_MASK |
0809 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
0810 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
0811 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
0812 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
0813 0x00100000L);
0814 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0815 VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
0816
0817 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0818 VCN, 0, mmUVD_MPC_CNTL),
0819 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
0820
0821 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0822 VCN, 0, mmUVD_MPC_SET_MUXA0),
0823 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
0824 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
0825 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
0826 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
0827
0828 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0829 VCN, 0, mmUVD_MPC_SET_MUXB0),
0830 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
0831 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
0832 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
0833 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
0834
0835 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0836 VCN, 0, mmUVD_MPC_SET_MUX),
0837 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
0838 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
0839 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
0840
0841 vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
0842
0843 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0844 VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
0845 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0846 VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
0855
0856 tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
0857 tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
0858 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
0859 VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
0865
0866 if (indirect)
0867 psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
0868 (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
0869 (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
0870
0871 ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
0874 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
0875 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
0876 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
0877 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
0878 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
0879 WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
0913
0914 return 0;
0915 }
0916
0917 static int vcn_v2_5_start(struct amdgpu_device *adev)
0918 {
0919 struct amdgpu_ring *ring;
0920 uint32_t rb_bufsz, tmp;
0921 int i, j, k, r;
0922
0923 if (adev->pm.dpm_enabled)
0924 amdgpu_dpm_enable_uvd(adev, true);
0925
0926 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
0927 if (adev->vcn.harvest_config & (1 << i))
0928 continue;
0929 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
0930 r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
0931 continue;
0932 }

		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
0941 }
0942
0943 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
0944 return 0;
0945
	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);
0948
0949 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
0950 if (adev->vcn.harvest_config & (1 << i))
0951 continue;

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		tmp &= ~0xff;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
0994 }
0995
0996 vcn_v2_5_mc_resume(adev);
0997
0998 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
0999 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
1000 if (adev->vcn.harvest_config & (1 << i))
1001 continue;

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU from block reset */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);
1018
1019 for (k = 0; k < 10; ++k) {
1020 uint32_t status;
1021
1022 for (j = 0; j < 100; ++j) {
1023 status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
1024 if (status & 2)
1025 break;
1026 if (amdgpu_emu_mode == 1)
1027 msleep(500);
1028 else
1029 mdelay(10);
1030 }
1031 r = 0;
1032 if (status & 2)
1033 break;
1034
1035 DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
1036 WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
1037 UVD_VCPU_CNTL__BLK_RST_MASK,
1038 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1039 mdelay(10);
1040 WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
1041 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1042
1043 mdelay(10);
1044 r = -1;
1045 }
1046
1047 if (r) {
1048 DRM_ERROR("VCN decode not responding, giving up!!!\n");
1049 return r;
1050 }
1051
		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1060
1061 WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
1062
1063 ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
1065 rb_bufsz = order_base_2(ring->ring_size);
1066 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1067 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1068 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1069 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1070 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1071 WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
1072
1073 fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
1082
1083 ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
1084 WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
1085 lower_32_bits(ring->wptr));
1086 fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
1087
1088 fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
1089 ring = &adev->vcn.inst[i].ring_enc[0];
1090 WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1091 WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1092 WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
1093 WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1094 WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
1095 fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
1096
1097 fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
1098 ring = &adev->vcn.inst[i].ring_enc[1];
1099 WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1100 WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1101 WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1102 WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1103 WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
1104 fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
1105 }
1106
1107 return 0;
1108 }
1109
1110 static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
1111 struct amdgpu_mm_table *table)
1112 {
1113 uint32_t data = 0, loop = 0, size = 0;
1114 uint64_t addr = table->gpu_addr;
1115 struct mmsch_v1_1_init_header *header = NULL;
1116
1117 header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
1118 size = header->total_size;
1119
	/*
	 * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
	 * of memory descriptor location
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/*
	 * 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
1145
1146 data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
1147 loop = 10;
1148 while ((data & 0x10000002) != 0x10000002) {
1149 udelay(100);
1150 data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
1151 loop--;
1152 if (!loop)
1153 break;
1154 }
1155
1156 if (!loop) {
1157 dev_err(adev->dev,
1158 "failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
1159 data);
1160 return -EBUSY;
1161 }
1162
1163 return 0;
1164 }
1165
1166 static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
1167 {
1168 struct amdgpu_ring *ring;
1169 uint32_t offset, size, tmp, i, rb_bufsz;
1170 uint32_t table_size = 0;
1171 struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
1172 struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
1173 struct mmsch_v1_0_cmd_end end = { { 0 } };
1174 uint32_t *init_table = adev->virt.mm_table.cpu_addr;
1175 struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
1176
1177 direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
1178 direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
1179 end.cmd_header.command_type = MMSCH_COMMAND__END;
1180
1181 header->version = MMSCH_VERSION;
1182 header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
1183 init_table += header->total_size;
1184
1185 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1186 header->eng[i].table_offset = header->total_size;
1187 header->eng[i].init_status = 0;
1188 header->eng[i].table_size = 0;
1189
1190 table_size = 0;
1191
1192 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
1193 SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
1194 ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
1195
1196 size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
		/* mc resume */
1198 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1199 MMSCH_V1_0_INSERT_DIRECT_WT(
1200 SOC15_REG_OFFSET(VCN, i,
1201 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1202 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
1203 MMSCH_V1_0_INSERT_DIRECT_WT(
1204 SOC15_REG_OFFSET(VCN, i,
1205 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1206 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
1207 offset = 0;
1208 MMSCH_V1_0_INSERT_DIRECT_WT(
1209 SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
1210 } else {
1211 MMSCH_V1_0_INSERT_DIRECT_WT(
1212 SOC15_REG_OFFSET(VCN, i,
1213 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1214 lower_32_bits(adev->vcn.inst[i].gpu_addr));
1215 MMSCH_V1_0_INSERT_DIRECT_WT(
1216 SOC15_REG_OFFSET(VCN, i,
1217 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1218 upper_32_bits(adev->vcn.inst[i].gpu_addr));
1219 offset = size;
1220 MMSCH_V1_0_INSERT_DIRECT_WT(
1221 SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
1222 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
1223 }
1224
1225 MMSCH_V1_0_INSERT_DIRECT_WT(
1226 SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
1227 size);
1228 MMSCH_V1_0_INSERT_DIRECT_WT(
1229 SOC15_REG_OFFSET(VCN, i,
1230 mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
1231 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
1232 MMSCH_V1_0_INSERT_DIRECT_WT(
1233 SOC15_REG_OFFSET(VCN, i,
1234 mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
1235 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
1236 MMSCH_V1_0_INSERT_DIRECT_WT(
1237 SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
1238 0);
1239 MMSCH_V1_0_INSERT_DIRECT_WT(
1240 SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
1241 AMDGPU_VCN_STACK_SIZE);
1242 MMSCH_V1_0_INSERT_DIRECT_WT(
1243 SOC15_REG_OFFSET(VCN, i,
1244 mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
1245 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
1246 AMDGPU_VCN_STACK_SIZE));
1247 MMSCH_V1_0_INSERT_DIRECT_WT(
1248 SOC15_REG_OFFSET(VCN, i,
1249 mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
1250 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
1251 AMDGPU_VCN_STACK_SIZE));
1252 MMSCH_V1_0_INSERT_DIRECT_WT(
1253 SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
1254 0);
1255 MMSCH_V1_0_INSERT_DIRECT_WT(
1256 SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
1257 AMDGPU_VCN_CONTEXT_SIZE);
1258
1259 ring = &adev->vcn.inst[i].ring_enc[0];
1260 ring->wptr = 0;
1261
1262 MMSCH_V1_0_INSERT_DIRECT_WT(
1263 SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
1264 lower_32_bits(ring->gpu_addr));
1265 MMSCH_V1_0_INSERT_DIRECT_WT(
1266 SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
1267 upper_32_bits(ring->gpu_addr));
1268 MMSCH_V1_0_INSERT_DIRECT_WT(
1269 SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
1270 ring->ring_size / 4);
1271
1272 ring = &adev->vcn.inst[i].ring_dec;
1273 ring->wptr = 0;
1274 MMSCH_V1_0_INSERT_DIRECT_WT(
1275 SOC15_REG_OFFSET(VCN, i,
1276 mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
1277 lower_32_bits(ring->gpu_addr));
1278 MMSCH_V1_0_INSERT_DIRECT_WT(
1279 SOC15_REG_OFFSET(VCN, i,
1280 mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
1281 upper_32_bits(ring->gpu_addr));
1282
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
1285 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1286 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1287 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1288 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1289 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1290 MMSCH_V1_0_INSERT_DIRECT_WT(
1291 SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);
1292
		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
1295 table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
1296 init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;
1297
		/* refine header */
		header->eng[i].table_size = table_size;
1300 header->total_size += table_size;
1301 }
1302
1303 return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
1304 }
1305
1306 static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
1307 {
1308 uint32_t tmp;
1309
	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1313
	/* wait for read ptr to be equal to write ptr */
1315 tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
1316 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
1317
1318 tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
1319 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
1320
1321 tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
1322 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
1323
1324 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
1325 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1326
	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1330
1331 return 0;
1332 }
1333
1334 static int vcn_v2_5_stop(struct amdgpu_device *adev)
1335 {
1336 uint32_t tmp;
1337 int i, r = 0;
1338
1339 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1340 if (adev->vcn.harvest_config & (1 << i))
1341 continue;
1342 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1343 r = vcn_v2_5_stop_dpg_mode(adev, i);
1344 continue;
1345 }
1346
		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
1349 if (r)
1350 return r;
1351
1352 tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1353 UVD_LMI_STATUS__READ_CLEAN_MASK |
1354 UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1355 UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1356 r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
1357 if (r)
1358 return r;
1359
		/* block LMI and UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
1362 tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
1363 WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
1364
1365 tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
1366 UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1367 r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
1368 if (r)
1369 return r;
1370
		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1375
		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);
1380
		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));
1384
		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
1387
1388 vcn_v2_5_enable_clock_gating(adev);
1389
		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1394 }
1395
1396 if (adev->pm.dpm_enabled)
1397 amdgpu_dpm_enable_uvd(adev, false);
1398
1399 return 0;
1400 }
1401
1402 static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
1403 int inst_idx, struct dpg_pause_state *new_state)
1404 {
1405 struct amdgpu_ring *ring;
1406 uint32_t reg_data = 0;
1407 int ret_code = 0;
1408
	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1411 DRM_DEBUG("dpg pause state changed %d -> %d",
1412 adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
1413 reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
1414 (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1415
1416 if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1417 ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
1418 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1419
1420 if (!ret_code) {
1421 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
1422
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1426
				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1431
				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1436
				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
1439 ring = &adev->vcn.inst[inst_idx].ring_enc[0];
1440 ring->wptr = 0;
1441 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
1442 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1443 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
1444 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1445 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1446 fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
1447
1448 fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
1449 ring = &adev->vcn.inst[inst_idx].ring_enc[1];
1450 ring->wptr = 0;
1451 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1452 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1453 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
1454 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1455 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1456 fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
1457
				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1461
1462 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
1463 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1464 }
1465 } else {
1466 reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1467 WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1468 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
1469 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1470 }
1471 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1472 }
1473
1474 return 0;
1475 }
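/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */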
1484 static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
1485 {
1486 struct amdgpu_device *adev = ring->adev;
1487
1488 return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
1489 }
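/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */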
1498 static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
1499 {
1500 struct amdgpu_device *adev = ring->adev;
1501
1502 if (ring->use_doorbell)
1503 return *ring->wptr_cpu_addr;
1504 else
1505 return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
1506 }
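/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */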
1515 static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
1516 {
1517 struct amdgpu_device *adev = ring->adev;
1518
1519 if (ring->use_doorbell) {
1520 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1521 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1522 } else {
1523 WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1524 }
1525 }
1526
1527 static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
1528 .type = AMDGPU_RING_TYPE_VCN_DEC,
1529 .align_mask = 0xf,
1530 .secure_submission_supported = true,
1531 .vmhub = AMDGPU_MMHUB_1,
1532 .get_rptr = vcn_v2_5_dec_ring_get_rptr,
1533 .get_wptr = vcn_v2_5_dec_ring_get_wptr,
1534 .set_wptr = vcn_v2_5_dec_ring_set_wptr,
1535 .emit_frame_size =
1536 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1537 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
1542 .emit_ib = vcn_v2_0_dec_ring_emit_ib,
1543 .emit_fence = vcn_v2_0_dec_ring_emit_fence,
1544 .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
1545 .test_ring = vcn_v2_0_dec_ring_test_ring,
1546 .test_ib = amdgpu_vcn_dec_ring_test_ib,
1547 .insert_nop = vcn_v2_0_dec_ring_insert_nop,
1548 .insert_start = vcn_v2_0_dec_ring_insert_start,
1549 .insert_end = vcn_v2_0_dec_ring_insert_end,
1550 .pad_ib = amdgpu_ring_generic_pad_ib,
1551 .begin_use = amdgpu_vcn_ring_begin_use,
1552 .end_use = amdgpu_vcn_ring_end_use,
1553 .emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
1554 .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
1555 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1556 };
1557
1558 static const struct amdgpu_ring_funcs vcn_v2_6_dec_ring_vm_funcs = {
1559 .type = AMDGPU_RING_TYPE_VCN_DEC,
1560 .align_mask = 0xf,
1561 .secure_submission_supported = true,
1562 .vmhub = AMDGPU_MMHUB_0,
1563 .get_rptr = vcn_v2_5_dec_ring_get_rptr,
1564 .get_wptr = vcn_v2_5_dec_ring_get_wptr,
1565 .set_wptr = vcn_v2_5_dec_ring_set_wptr,
1566 .emit_frame_size =
1567 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1568 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
1573 .emit_ib = vcn_v2_0_dec_ring_emit_ib,
1574 .emit_fence = vcn_v2_0_dec_ring_emit_fence,
1575 .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
1576 .test_ring = vcn_v2_0_dec_ring_test_ring,
1577 .test_ib = amdgpu_vcn_dec_ring_test_ib,
1578 .insert_nop = vcn_v2_0_dec_ring_insert_nop,
1579 .insert_start = vcn_v2_0_dec_ring_insert_start,
1580 .insert_end = vcn_v2_0_dec_ring_insert_end,
1581 .pad_ib = amdgpu_ring_generic_pad_ib,
1582 .begin_use = amdgpu_vcn_ring_begin_use,
1583 .end_use = amdgpu_vcn_ring_end_use,
1584 .emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
1585 .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
1586 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1587 };
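/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */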
1596 static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
1597 {
1598 struct amdgpu_device *adev = ring->adev;
1599
1600 if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
1601 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
1602 else
1603 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
1604 }
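/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */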
1613 static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
1614 {
1615 struct amdgpu_device *adev = ring->adev;
1616
1617 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1618 if (ring->use_doorbell)
1619 return *ring->wptr_cpu_addr;
1620 else
1621 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
1622 } else {
1623 if (ring->use_doorbell)
1624 return *ring->wptr_cpu_addr;
1625 else
1626 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
1627 }
1628 }
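/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */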
1637 static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
1638 {
1639 struct amdgpu_device *adev = ring->adev;
1640
1641 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1642 if (ring->use_doorbell) {
1643 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1644 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1645 } else {
1646 WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1647 }
1648 } else {
1649 if (ring->use_doorbell) {
1650 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1651 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1652 } else {
1653 WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1654 }
1655 }
1656 }
1657
1658 static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
1659 .type = AMDGPU_RING_TYPE_VCN_ENC,
1660 .align_mask = 0x3f,
1661 .nop = VCN_ENC_CMD_NO_OP,
1662 .vmhub = AMDGPU_MMHUB_1,
1663 .get_rptr = vcn_v2_5_enc_ring_get_rptr,
1664 .get_wptr = vcn_v2_5_enc_ring_get_wptr,
1665 .set_wptr = vcn_v2_5_enc_ring_set_wptr,
1666 .emit_frame_size =
1667 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1668 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1673 .emit_ib = vcn_v2_0_enc_ring_emit_ib,
1674 .emit_fence = vcn_v2_0_enc_ring_emit_fence,
1675 .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
1676 .test_ring = amdgpu_vcn_enc_ring_test_ring,
1677 .test_ib = amdgpu_vcn_enc_ring_test_ib,
1678 .insert_nop = amdgpu_ring_insert_nop,
1679 .insert_end = vcn_v2_0_enc_ring_insert_end,
1680 .pad_ib = amdgpu_ring_generic_pad_ib,
1681 .begin_use = amdgpu_vcn_ring_begin_use,
1682 .end_use = amdgpu_vcn_ring_end_use,
1683 .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
1684 .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
1685 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1686 };
1687
1688 static const struct amdgpu_ring_funcs vcn_v2_6_enc_ring_vm_funcs = {
1689 .type = AMDGPU_RING_TYPE_VCN_ENC,
1690 .align_mask = 0x3f,
1691 .nop = VCN_ENC_CMD_NO_OP,
1692 .vmhub = AMDGPU_MMHUB_0,
1693 .get_rptr = vcn_v2_5_enc_ring_get_rptr,
1694 .get_wptr = vcn_v2_5_enc_ring_get_wptr,
1695 .set_wptr = vcn_v2_5_enc_ring_set_wptr,
1696 .emit_frame_size =
1697 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1698 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1703 .emit_ib = vcn_v2_0_enc_ring_emit_ib,
1704 .emit_fence = vcn_v2_0_enc_ring_emit_fence,
1705 .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
1706 .test_ring = amdgpu_vcn_enc_ring_test_ring,
1707 .test_ib = amdgpu_vcn_enc_ring_test_ib,
1708 .insert_nop = amdgpu_ring_insert_nop,
1709 .insert_end = vcn_v2_0_enc_ring_insert_end,
1710 .pad_ib = amdgpu_ring_generic_pad_ib,
1711 .begin_use = amdgpu_vcn_ring_begin_use,
1712 .end_use = amdgpu_vcn_ring_end_use,
1713 .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
1714 .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
1715 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1716 };
1717
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		else /* CHIP_ALDEBARAN */
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
				adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			else /* CHIP_ALDEBARAN */
				adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}

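/*
 * The block is idle only when every unharvested instance reports
 * UVD_STATUS__IDLE in its status register.
 */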
static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

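/*
 * Clock gating is only engaged once the block is idle; ungating is
 * unconditional. Under SR-IOV the VF treats this as a no-op, since the
 * host side manages gating.
 */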
static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

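/*
 * Power gating maps to a full engine stop/start. The current state is
 * cached in adev->vcn.cur_state so redundant transitions return early.
 */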
static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

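/*
 * There is no per-source interrupt masking to program on this IP, so the
 * set handler is a stub that always succeeds.
 */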
static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

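/*
 * Route an IH entry to the ring that raised it: the client id selects the
 * VCN instance, the source id selects the dec ring, one of the enc rings,
 * or the RAS poison path.
 */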
static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	case VCN_2_6__SRCID_UVD_POISON:
		amdgpu_vcn_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* one interrupt type per enc ring plus one for the dec ring */
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
	}
}

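/*
 * IP-block dispatch tables: the v2.5 and v2.6 blocks share every callback
 * and differ only in the name they report.
 */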
static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
	.name = "vcn_v2_6",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 5,
	.rev = 0,
	.funcs = &vcn_v2_5_ip_funcs,
};

const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 6,
	.rev = 0,
	.funcs = &vcn_v2_6_ip_funcs,
};

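/*
 * RAS poison query: read the per-instance VCPU/VCODEC status register and
 * report whether its POISONED_PF field is set.
 */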
static uint32_t vcn_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
		uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_VCN_V2_6_VCPU_VCODEC:
		reg_value = RREG32_SOC15(VCN, instance, mmUVD_RAS_VCPU_VCODEC_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
			 instance, sub_block);

	return poison_stat;
}

static bool vcn_v2_6_query_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst, sub;
	uint32_t poison_stat = 0;

	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
		for (sub = 0; sub < AMDGPU_VCN_V2_6_MAX_SUB_BLOCK; sub++)
			poison_stat +=
				vcn_v2_6_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
	.query_poison_status = vcn_v2_6_query_poison_status,
};

static struct amdgpu_vcn_ras vcn_v2_6_ras = {
	.ras_block = {
		.hw_ops = &vcn_v2_6_ras_hw_ops,
	},
};

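/*
 * Only VCN 2.6 has poison handling wired up; earlier 2.5.x parts leave
 * adev->vcn.ras NULL and skip RAS registration entirely.
 */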
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[VCN_HWIP][0]) {
	case IP_VERSION(2, 6, 0):
		adev->vcn.ras = &vcn_v2_6_ras;
		break;
	default:
		break;
	}

	if (adev->vcn.ras) {
		amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);

		strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
		adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
		adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
		adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;

		/* if no special ras_late_init is defined, use the default one */
		if (!adev->vcn.ras->ras_block.ras_late_init)
			adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
	}
}