0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #include "amdgpu.h"
0025 #include "amdgpu_jpeg.h"
0026 #include "soc15.h"
0027 #include "soc15d.h"
0028 #include "jpeg_v2_0.h"
0029 #include "jpeg_v2_5.h"
0030
0031 #include "vcn/vcn_2_5_offset.h"
0032 #include "vcn/vcn_2_5_sh_mask.h"
0033 #include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
0034
0035 #define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
0036
0037 #define JPEG25_MAX_HW_INSTANCES_ARCTURUS 2
0038
0039 static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
0040 static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
0041 static int jpeg_v2_5_set_powergating_state(void *handle,
0042 enum amd_powergating_state state);
0043 static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev);
0044
/* IH client id for each JPEG instance: instance 0 -> VCN, instance 1 -> VCN1. */
static int amdgpu_ih_clientid_jpeg[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};
0049
0050
0051
0052
0053
0054
0055
0056
0057 static int jpeg_v2_5_early_init(void *handle)
0058 {
0059 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0060 u32 harvest;
0061 int i;
0062
0063 adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
0064 for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
0065 harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
0066 if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
0067 adev->jpeg.harvest_config |= 1 << i;
0068 }
0069 if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
0070 AMDGPU_JPEG_HARVEST_JPEG1))
0071 return -ENOENT;
0072
0073 jpeg_v2_5_set_dec_ring_funcs(adev);
0074 jpeg_v2_5_set_irq_funcs(adev);
0075 jpeg_v2_5_set_ras_funcs(adev);
0076
0077 return 0;
0078 }
0079
0080
0081
0082
0083
0084
0085
0086
/**
 * jpeg_v2_5_sw_init - software init for JPEG 2.5
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Registers the decode-trap and poison interrupt sources for every
 * non-harvested instance, initializes common JPEG state, and creates
 * one doorbell-driven decode ring per instance.
 *
 * Returns 0 on success or a negative error code.
 */
static int jpeg_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		/* JPEG decode trap interrupt */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
				VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq);
		if (r)
			return r;

		/* JPEG DJPEG poison event (RAS) */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
				VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq);
		if (r)
			return r;

		/* JPEG EJPEG poison event (RAS) */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
				VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq);
		if (r)
			return r;
	}

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = &adev->jpeg.inst[i].ring_dec;
		ring->use_doorbell = true;
		/* doorbell slot: one past VCN ring0/1, stride 8 per instance —
		 * must stay in sync with the range programmed in hw_init */
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
		sprintf(ring->name, "jpeg_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;

		/* internal (doorbell-relative) and MMIO offsets of the pitch register */
		adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
		adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
	}

	return 0;
}
0143
0144
0145
0146
0147
0148
0149
0150
/**
 * jpeg_v2_5_sw_fini - software teardown for JPEG 2.5
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Suspends the JPEG block, then frees its software state.
 *
 * Returns 0 on success or a negative error code.
 */
static int jpeg_v2_5_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* Suspend must succeed before software state is torn down. */
	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	return amdgpu_jpeg_sw_fini(adev);
}
0164
0165
0166
0167
0168
0169
0170
0171 static int jpeg_v2_5_hw_init(void *handle)
0172 {
0173 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0174 struct amdgpu_ring *ring;
0175 int i, r;
0176
0177 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
0178 if (adev->jpeg.harvest_config & (1 << i))
0179 continue;
0180
0181 ring = &adev->jpeg.inst[i].ring_dec;
0182 adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
0183 (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);
0184
0185 r = amdgpu_ring_test_helper(ring);
0186 if (r)
0187 return r;
0188 }
0189
0190 DRM_INFO("JPEG decode initialized successfully.\n");
0191
0192 return 0;
0193 }
0194
0195
0196
0197
0198
0199
0200
0201
/**
 * jpeg_v2_5_hw_fini - stop the hardware
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Cancels the idle worker and power-gates any instance that is still
 * ungated and busy. Always returns 0.
 */
static int jpeg_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	/* NOTE(review): this cancels the VCN idle worker — JPEG appears to
	 * share VCN's delayed work on this generation; confirm. */
	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		/* Gate only when not already gated and the ring still reports
		 * activity (JRBC_STATUS read is skipped otherwise). */
		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
		      RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
			jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}

	return 0;
}
0220
0221
0222
0223
0224
0225
0226
0227
/**
 * jpeg_v2_5_suspend - suspend the JPEG block
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Quiesces the hardware first, then saves the software state.
 *
 * Returns 0 on success or a negative error code.
 */
static int jpeg_v2_5_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = jpeg_v2_5_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_jpeg_suspend(adev);
}
0241
0242
0243
0244
0245
0246
0247
0248
/**
 * jpeg_v2_5_resume - resume the JPEG block
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Restores software state, then re-initializes the hardware.
 *
 * Returns 0 on success or a negative error code.
 */
static int jpeg_v2_5_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	return jpeg_v2_5_hw_init(adev);
}
0262
0263 static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
0264 {
0265 uint32_t data;
0266
0267 data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
0268 if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
0269 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
0270 else
0271 data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
0272
0273 data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
0274 data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
0275 WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
0276
0277 data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
0278 data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
0279 | JPEG_CGC_GATE__JPEG2_DEC_MASK
0280 | JPEG_CGC_GATE__JMCIF_MASK
0281 | JPEG_CGC_GATE__JRBBM_MASK);
0282 WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
0283
0284 data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
0285 data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
0286 | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
0287 | JPEG_CGC_CTRL__JMCIF_MODE_MASK
0288 | JPEG_CGC_CTRL__JRBBM_MODE_MASK);
0289 WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
0290 }
0291
/**
 * jpeg_v2_5_enable_clock_gating - gate JPEG clocks for one instance
 * @adev: amdgpu_device pointer
 * @inst: JPEG instance index
 *
 * Gates all JPEG sub-block clocks (decoders, encoder, memory interface,
 * ring-buffer block).
 * NOTE(review): JPEG_ENC is gated here but never ungated in
 * jpeg_v2_5_disable_clock_gating() — verify this asymmetry is intended.
 */
static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		|JPEG_CGC_GATE__JPEG2_DEC_MASK
		|JPEG_CGC_GATE__JPEG_ENC_MASK
		|JPEG_CGC_GATE__JMCIF_MASK
		|JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
}
0304
0305
0306
0307
0308
0309
0310
0311
/**
 * jpeg_v2_5_start - power up and program every active JPEG instance
 * @adev: amdgpu_device pointer
 *
 * Releases power gating, ungates clocks, lifts the JMI soft reset,
 * enables the JRBC interrupt and programs the decode ring registers.
 * Always returns 0.
 */
static int jpeg_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = &adev->jpeg.inst[i].ring_dec;

		/* disable power gating / anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

		/* disable clock gating while the engine runs */
		jpeg_v2_5_disable_clock_gating(adev, i);

		/* MJPEG global tiling registers */
		WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* release the JMI (memory interface) soft reset */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		/* enable the system interrupt for the decode ring buffer */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN),
			JPEG_SYS_INT_EN__DJRBC_MASK,
			~JPEG_SYS_INT_EN__DJRBC_MASK);

		/* program the ring buffer: VMID 0, then base/size with bits
		 * 0x1|0x2 set (presumably no-fetch + rptr-write-enable —
		 * TODO confirm against UVD_JRBC_RB_CNTL field definitions),
		 * finally leave only bit 0x2 set to let the engine fetch */
		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0);
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0);
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0);
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
		/* resync the software wptr with the hardware */
		ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR);
	}

	return 0;
}
0359
0360
0361
0362
0363
0364
0365
0366
/**
 * jpeg_v2_5_stop - halt every active JPEG instance
 * @adev: amdgpu_device pointer
 *
 * Soft-resets the memory interface, re-enables clock gating and
 * re-engages power gating. Always returns 0.
 */
static int jpeg_v2_5_stop(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		/* hold the JMI (memory interface) in soft reset */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL),
			UVD_JMI_CNTL__SOFT_RESET_MASK,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		/* re-enable clock gating now that the engine is idle */
		jpeg_v2_5_enable_clock_gating(adev, i);

		/* re-enable power gating / anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS),
			UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
	}

	return 0;
}
0390
0391
0392
0393
0394
0395
0396
0397
/**
 * jpeg_v2_5_dec_ring_get_rptr - get read pointer
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer of this instance's
 * decode ring buffer.
 */
static uint64_t jpeg_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_RPTR);
}
0404
0405
0406
0407
0408
0409
0410
0411
0412 static uint64_t jpeg_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
0413 {
0414 struct amdgpu_device *adev = ring->adev;
0415
0416 if (ring->use_doorbell)
0417 return *ring->wptr_cpu_addr;
0418 else
0419 return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR);
0420 }
0421
0422
0423
0424
0425
0426
0427
0428
/**
 * jpeg_v2_5_dec_ring_set_wptr - set write pointer
 * @ring: amdgpu_ring pointer
 *
 * Publishes the new write pointer, via doorbell when enabled or by
 * writing the ring register directly.
 */
static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* update the memory copy first, then ring the doorbell */
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}
0440
0441
0442
0443
0444
0445
0446
0447
/**
 * jpeg_v2_6_dec_ring_insert_start - emit start-of-submission commands
 * @ring: amdgpu_ring pointer
 *
 * Performs an external register write through the JRBC indirection
 * (first packet selects the address, second writes the value).
 */
static void jpeg_v2_6_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	/* target register offset — presumably PCTL0_MMHUB_DEEPSLEEP_IB; TODO confirm */
	amdgpu_ring_write(ring, 0x6aa04);

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	/* bit 31 marks "start"; bit (me*2+14) selects this engine instance */
	amdgpu_ring_write(ring, 0x80000000 | (1 << (ring->me * 2 + 14)));
}
0458
0459
0460
0461
0462
0463
0464
0465
/**
 * jpeg_v2_6_dec_ring_insert_end - emit end-of-submission commands
 * @ring: amdgpu_ring pointer
 *
 * Mirrors insert_start but without bit 31, signalling the end of the
 * submission for this engine instance.
 */
static void jpeg_v2_6_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	/* same target register as insert_start — TODO confirm its identity */
	amdgpu_ring_write(ring, 0x6aa04);

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (1 << (ring->me * 2 + 14)));
}
0476
0477 static bool jpeg_v2_5_is_idle(void *handle)
0478 {
0479 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0480 int i, ret = 1;
0481
0482 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
0483 if (adev->jpeg.harvest_config & (1 << i))
0484 continue;
0485
0486 ret &= (((RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS) &
0487 UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
0488 UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
0489 }
0490
0491 return ret;
0492 }
0493
0494 static int jpeg_v2_5_wait_for_idle(void *handle)
0495 {
0496 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0497 int i, ret;
0498
0499 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
0500 if (adev->jpeg.harvest_config & (1 << i))
0501 continue;
0502
0503 ret = SOC15_WAIT_ON_RREG(JPEG, i, mmUVD_JRBC_STATUS,
0504 UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
0505 UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
0506 if (ret)
0507 return ret;
0508 }
0509
0510 return 0;
0511 }
0512
0513 static int jpeg_v2_5_set_clockgating_state(void *handle,
0514 enum amd_clockgating_state state)
0515 {
0516 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0517 bool enable = (state == AMD_CG_STATE_GATE);
0518 int i;
0519
0520 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
0521 if (adev->jpeg.harvest_config & (1 << i))
0522 continue;
0523
0524 if (enable) {
0525 if (!jpeg_v2_5_is_idle(handle))
0526 return -EBUSY;
0527 jpeg_v2_5_enable_clock_gating(adev, i);
0528 } else {
0529 jpeg_v2_5_disable_clock_gating(adev, i);
0530 }
0531 }
0532
0533 return 0;
0534 }
0535
0536 static int jpeg_v2_5_set_powergating_state(void *handle,
0537 enum amd_powergating_state state)
0538 {
0539 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0540 int ret;
0541
0542 if(state == adev->jpeg.cur_state)
0543 return 0;
0544
0545 if (state == AMD_PG_STATE_GATE)
0546 ret = jpeg_v2_5_stop(adev);
0547 else
0548 ret = jpeg_v2_5_start(adev);
0549
0550 if(!ret)
0551 adev->jpeg.cur_state = state;
0552
0553 return ret;
0554 }
0555
/* Interrupt enable/disable is a no-op here: the JRBC interrupt is
 * enabled unconditionally in jpeg_v2_5_start(), so there is nothing
 * to toggle per-source.
 */
static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
0563
/**
 * jpeg_v2_5_process_interrupt - dispatch a JPEG interrupt
 * @adev: amdgpu_device pointer
 * @source: interrupt source descriptor
 * @entry: decoded IH ring entry
 *
 * Maps the IH client id to a JPEG instance, then processes fence
 * completions for decode traps and forwards poison events to the
 * common RAS handler. Always returns 0.
 */
static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	/* client id identifies which VCN/JPEG instance raised the IRQ */
	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: JPEG TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__JPEG_DECODE:
		/* decode job finished — signal the ring's fences */
		amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
		break;
	case VCN_2_6__SRCID_DJPEG0_POISON:
	case VCN_2_6__SRCID_EJPEG0_POISON:
		/* RAS poison consumption event */
		amdgpu_jpeg_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
0600
/* IP-block callbacks for JPEG 2.5 (Arcturus). */
static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
	.name = "jpeg_v2_5",
	.early_init = jpeg_v2_5_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v2_5_sw_init,
	.sw_fini = jpeg_v2_5_sw_fini,
	.hw_init = jpeg_v2_5_hw_init,
	.hw_fini = jpeg_v2_5_hw_fini,
	.suspend = jpeg_v2_5_suspend,
	.resume = jpeg_v2_5_resume,
	.is_idle = jpeg_v2_5_is_idle,
	.wait_for_idle = jpeg_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
	.set_powergating_state = jpeg_v2_5_set_powergating_state,
};
0620
/* IP-block callbacks for JPEG 2.6 — identical to 2.5 except the name;
 * the versions differ only in their ring function tables below. */
static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
	.name = "jpeg_v2_6",
	.early_init = jpeg_v2_5_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v2_5_sw_init,
	.sw_fini = jpeg_v2_5_sw_fini,
	.hw_init = jpeg_v2_5_hw_init,
	.hw_fini = jpeg_v2_5_hw_fini,
	.suspend = jpeg_v2_5_suspend,
	.resume = jpeg_v2_5_resume,
	.is_idle = jpeg_v2_5_is_idle,
	.wait_for_idle = jpeg_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
	.set_powergating_state = jpeg_v2_5_set_powergating_state,
};
0640
/* Decode-ring callbacks for JPEG 2.5; uses MMHUB 1 and the shared
 * v2.0 packet emitters. */
static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
	.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
	.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_0_dec_ring_insert_start,
	.insert_end = jpeg_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
0670
/* Decode-ring callbacks for JPEG 2.6; differs from 2.5 in the VM hub
 * (MMHUB 0) and its own insert_start/insert_end packets. */
static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
	.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
	.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_6_dec_ring_insert_start,
	.insert_end = jpeg_v2_6_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
0700
0701 static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
0702 {
0703 int i;
0704
0705 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
0706 if (adev->jpeg.harvest_config & (1 << i))
0707 continue;
0708 if (adev->asic_type == CHIP_ARCTURUS)
0709 adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs;
0710 else
0711 adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_6_dec_ring_vm_funcs;
0712 adev->jpeg.inst[i].ring_dec.me = i;
0713 DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i);
0714 }
0715 }
0716
/* Interrupt source callbacks shared by all JPEG 2.5/2.6 instances. */
static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
	.set = jpeg_v2_5_set_interrupt_state,
	.process = jpeg_v2_5_process_interrupt,
};
0721
0722 static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
0723 {
0724 int i;
0725
0726 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
0727 if (adev->jpeg.harvest_config & (1 << i))
0728 continue;
0729
0730 adev->jpeg.inst[i].irq.num_types = 1;
0731 adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;
0732 }
0733 }
0734
/* Exported IP-block descriptor for JPEG 2.5. */
const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_JPEG,
		.major = 2,
		.minor = 5,
		.rev = 0,
		.funcs = &jpeg_v2_5_ip_funcs,
};
0743
/* Exported IP-block descriptor for JPEG 2.6. */
const struct amdgpu_ip_block_version jpeg_v2_6_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_JPEG,
		.major = 2,
		.minor = 6,
		.rev = 0,
		.funcs = &jpeg_v2_6_ip_funcs,
};
0752
/**
 * jpeg_v2_6_query_poison_by_instance - read poison status of one sub-block
 * @adev: amdgpu_device pointer
 * @instance: JPEG instance index
 * @sub_block: which decoder sub-block to query
 *
 * Reads the POISONED_PF field of the matching RAS status register.
 * Unknown sub-blocks report 0 (not poisoned).
 *
 * Returns non-zero when poison was detected.
 */
static uint32_t jpeg_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
		uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_JPEG_V2_6_JPEG0:
		reg_value = RREG32_SOC15(JPEG, instance, mmUVD_RAS_JPEG0_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
		break;
	case AMDGPU_JPEG_V2_6_JPEG1:
		reg_value = RREG32_SOC15(JPEG, instance, mmUVD_RAS_JPEG1_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
			instance, sub_block);

	return poison_stat;
}
0777
0778 static bool jpeg_v2_6_query_ras_poison_status(struct amdgpu_device *adev)
0779 {
0780 uint32_t inst = 0, sub = 0, poison_stat = 0;
0781
0782 for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
0783 for (sub = 0; sub < AMDGPU_JPEG_V2_6_MAX_SUB_BLOCK; sub++)
0784 poison_stat +=
0785 jpeg_v2_6_query_poison_by_instance(adev, inst, sub);
0786
0787 return !!poison_stat;
0788 }
0789
/* RAS hardware ops: only poison query is implemented for JPEG 2.6. */
const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = {
	.query_poison_status = jpeg_v2_6_query_ras_poison_status,
};
0793
/* RAS block instance registered for JPEG 2.6 in set_ras_funcs(). */
static struct amdgpu_jpeg_ras jpeg_v2_6_ras = {
	.ras_block = {
		.hw_ops = &jpeg_v2_6_ras_hw_ops,
	},
};
0799
/**
 * jpeg_v2_5_set_ras_funcs - hook up RAS support when available
 * @adev: amdgpu_device pointer
 *
 * Only JPEG IP version 2.6.0 has a RAS block; other versions leave
 * adev->jpeg.ras NULL and skip registration.
 */
static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[JPEG_HWIP][0]) {
	case IP_VERSION(2, 6, 0):
		adev->jpeg.ras = &jpeg_v2_6_ras;
		break;
	default:
		break;
	}

	if (adev->jpeg.ras) {
		amdgpu_ras_register_ras_block(adev, &adev->jpeg.ras->ras_block);

		strcpy(adev->jpeg.ras->ras_block.ras_comm.name, "jpeg");
		adev->jpeg.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
		adev->jpeg.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
		adev->jpeg.ras_if = &adev->jpeg.ras->ras_block.ras_comm;

		/* use the common late-init unless the block supplied its own */
		if (!adev->jpeg.ras->ras_block.ras_late_init)
			adev->jpeg.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
	}
}