/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Sonny Jiang <sonny.jiang@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "sid.h"

#include "uvd/uvd_3_1_d.h"
#include "uvd/uvd_3_1_sh_mask.h"

#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
/**
 * uvd_v3_1_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v3_1_ring_get_rptr(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;

    return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v3_1_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v3_1_ring_get_wptr(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;

    return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v3_1_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v3_1_ring_set_wptr(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;

    WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
/**
 * uvd_v3_1_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job associated with the indirect buffer
 * @ib: indirect buffer to execute
 * @flags: flags associated with the indirect buffer
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v3_1_ring_emit_ib(struct amdgpu_ring *ring,
                  struct amdgpu_job *job,
                  struct amdgpu_ib *ib,
                  uint32_t flags)
{
    amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
    amdgpu_ring_write(ring, ib->gpu_addr);
    amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
    amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v3_1_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v3_1_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                 unsigned flags)
{
    WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

    amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
    amdgpu_ring_write(ring, seq);
    amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
    amdgpu_ring_write(ring, addr & 0xffffffff);
    amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
    amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
    amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
    amdgpu_ring_write(ring, 0);

    amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
    amdgpu_ring_write(ring, 0);
    amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
    amdgpu_ring_write(ring, 0);
    amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
    amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v3_1_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v3_1_ring_test_ring(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;
    uint32_t tmp = 0;
    unsigned i;
    int r;

    WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
    r = amdgpu_ring_alloc(ring, 3);
    if (r)
        return r;

    amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
    amdgpu_ring_write(ring, 0xDEADBEEF);
    amdgpu_ring_commit(ring);
    for (i = 0; i < adev->usec_timeout; i++) {
        tmp = RREG32(mmUVD_CONTEXT_ID);
        if (tmp == 0xDEADBEEF)
            break;
        udelay(1);
    }

    if (i >= adev->usec_timeout)
        r = -ETIMEDOUT;

    return r;
}

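/**
 * uvd_v3_1_ring_insert_nop - insert NOP commands
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords to pad the ring with
 *
 * Pad the ring with UVD_NO_OP packets; each packet takes two dwords,
 * so @count is expected to be even.
 */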
static void uvd_v3_1_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
    int i;

    WARN_ON(ring->wptr % 2 || count % 2);

    for (i = 0; i < count / 2; i++) {
        amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
        amdgpu_ring_write(ring, 0);
    }
}

static const struct amdgpu_ring_funcs uvd_v3_1_ring_funcs = {
    .type = AMDGPU_RING_TYPE_UVD,
    .align_mask = 0xf,
    .support_64bit_ptrs = false,
    .no_user_fence = true,
    .get_rptr = uvd_v3_1_ring_get_rptr,
    .get_wptr = uvd_v3_1_ring_get_wptr,
    .set_wptr = uvd_v3_1_ring_set_wptr,
    .parse_cs = amdgpu_uvd_ring_parse_cs,
    .emit_frame_size =
        14, /* uvd_v3_1_ring_emit_fence  x1 no user fence */
    .emit_ib_size = 4, /* uvd_v3_1_ring_emit_ib */
    .emit_ib = uvd_v3_1_ring_emit_ib,
    .emit_fence = uvd_v3_1_ring_emit_fence,
    .test_ring = uvd_v3_1_ring_test_ring,
    .test_ib = amdgpu_uvd_ring_test_ib,
    .insert_nop = uvd_v3_1_ring_insert_nop,
    .pad_ib = amdgpu_ring_generic_pad_ib,
    .begin_use = amdgpu_uvd_ring_begin_use,
    .end_use = amdgpu_uvd_ring_end_use,
};

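/**
 * uvd_v3_1_set_ring_funcs - attach the ring callbacks
 *
 * @adev: amdgpu_device pointer
 *
 * Point the UVD instance's ring at the v3.1 ring function table.
 */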
static void uvd_v3_1_set_ring_funcs(struct amdgpu_device *adev)
{
    adev->uvd.inst->ring.funcs = &uvd_v3_1_ring_funcs;
}

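/**
 * uvd_v3_1_set_dcm - toggle the UVD dynamic clock mode
 *
 * @adev: amdgpu_device pointer
 * @sw_mode: true to select software-controlled dynamic clocking
 *
 * Program UVD_CGC_CTRL and UVD_CGC_CTRL2 to switch the dynamic clock
 * gating between software and hardware control.
 */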
static void uvd_v3_1_set_dcm(struct amdgpu_device *adev,
                             bool sw_mode)
{
    u32 tmp, tmp2;

    WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

    tmp = RREG32(mmUVD_CGC_CTRL);
    tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
    tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
        (1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
        (4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

    if (sw_mode) {
        tmp &= ~0x7ffff800;
        tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
            UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
            (7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
    } else {
        tmp |= 0x7ffff800;
        tmp2 = 0;
    }

    WREG32(mmUVD_CGC_CTRL, tmp);
    WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

/**
 * uvd_v3_1_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
{
    uint64_t addr;
    uint32_t size;

    /* program the VCPU memory controller bits 0-27 */
    addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
    size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
    WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
    WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

    addr += size;
    size = AMDGPU_UVD_HEAP_SIZE >> 3;
    WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
    WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

    addr += size;
    size = (AMDGPU_UVD_STACK_SIZE +
        (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
    WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
    WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

    /* bits 28-31 */
    addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
    WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

    /* bits 32-39 */
    addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
    WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

    WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
    WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
    WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v3_1_fw_validate - FW validation operation
 *
 * @adev: amdgpu_device pointer
 *
 * Initiate UVD firmware validation and check the result.
 */
static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
{
    int i;
    uint32_t keysel = adev->uvd.keyselect;

    WREG32(mmUVD_FW_START, keysel);

    for (i = 0; i < 10; ++i) {
        mdelay(10);
        if (RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__DONE_MASK)
            break;
    }

    if (i == 10)
        return -ETIMEDOUT;

    if (!(RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__PASS_MASK))
        return -EINVAL;

    for (i = 0; i < 10; ++i) {
        mdelay(10);
        if (!(RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__BUSY_MASK))
            break;
    }

    if (i == 10)
        return -ETIMEDOUT;

    return 0;
}

/**
 * uvd_v3_1_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Set up and start the UVD block
 */
static int uvd_v3_1_start(struct amdgpu_device *adev)
{
    struct amdgpu_ring *ring = &adev->uvd.inst->ring;
    uint32_t rb_bufsz;
    int i, j, r;
    u32 tmp;
    /* disable byte swapping */
    u32 lmi_swap_cntl = 0;
    u32 mp_swap_cntl = 0;

    /* set uvd busy */
    WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));

    uvd_v3_1_set_dcm(adev, true);
    WREG32(mmUVD_CGC_GATE, 0);

    /* take UVD block out of reset */
    WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
    mdelay(5);

    /* enable VCPU clock */
    WREG32(mmUVD_VCPU_CNTL,  1 << 9);

    /* disable interrupt */
    WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
    /* swap (8 in 32) RB and IB */
    lmi_swap_cntl = 0xa;
    mp_swap_cntl = 0;
#endif
    WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
    WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

    /* initialize UVD memory controller */
    WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
        (1 << 21) | (1 << 9) | (1 << 20));

    tmp = RREG32(mmUVD_MPC_CNTL);
    WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

    WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
    WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
    WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
    WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
    WREG32(mmUVD_MPC_SET_ALU, 0);
    WREG32(mmUVD_MPC_SET_MUX, 0x88);

    tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
    WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

    /* enable UMC */
    WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

    WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

    WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

    WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

    mdelay(10);

    for (i = 0; i < 10; ++i) {
        uint32_t status;
        for (j = 0; j < 100; ++j) {
            status = RREG32(mmUVD_STATUS);
            if (status & 2)
                break;
            mdelay(10);
        }
        r = 0;
        if (status & 2)
            break;

        DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
        WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(10);
        WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(10);
        r = -1;
    }

    if (r) {
        DRM_ERROR("UVD not responding, giving up!!!\n");
        return r;
    }

    /* enable interrupt */
    WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));

    WREG32_P(mmUVD_STATUS, 0, ~(1<<2));

    /* force RBC into idle state */
    WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

    /* Set the write pointer delay */
    WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

    /* program the 4GB memory segment for rptr and ring buffer */
    WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
           (0x7 << 16) | (0x1 << 31));

    /* Initialize the ring buffer's read and write pointers */
    WREG32(mmUVD_RBC_RB_RPTR, 0x0);

    ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
    WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

    /* set the ring address */
    WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

    /* Set ring buffer size */
    rb_bufsz = order_base_2(ring->ring_size);
    rb_bufsz = (0x1 << 8) | rb_bufsz;
    WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

    return 0;
}

/**
 * uvd_v3_1_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v3_1_stop(struct amdgpu_device *adev)
{
    uint32_t i, j;
    uint32_t status;

    WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

    for (i = 0; i < 10; ++i) {
        for (j = 0; j < 100; ++j) {
            status = RREG32(mmUVD_STATUS);
            if (status & 2)
                break;
            mdelay(1);
        }
        if (status & 2)
            break;
    }

    for (i = 0; i < 10; ++i) {
        for (j = 0; j < 100; ++j) {
            status = RREG32(mmUVD_LMI_STATUS);
            if (status & 0xf)
                break;
            mdelay(1);
        }
        if (status & 0xf)
            break;
    }

    /* Stall UMC and register bus before resetting VCPU */
    WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

    for (i = 0; i < 10; ++i) {
        for (j = 0; j < 100; ++j) {
            status = RREG32(mmUVD_LMI_STATUS);
            if (status & 0x240)
                break;
            mdelay(1);
        }
        if (status & 0x240)
            break;
    }

    WREG32_P(0x3D49, 0, ~(1 << 2));

    WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

    /* put LMI, VCPU, RBC etc... into reset */
    WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
        UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
        UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

    WREG32(mmUVD_STATUS, 0);

    uvd_v3_1_set_dcm(adev, false);
}

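/**
 * uvd_v3_1_set_interrupt_state - set UVD interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * No-op: the UVD interrupt enables are programmed directly in
 * uvd_v3_1_start().
 */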
static int uvd_v3_1_set_interrupt_state(struct amdgpu_device *adev,
                    struct amdgpu_irq_src *source,
                    unsigned type,
                    enum amdgpu_interrupt_state state)
{
    return 0;
}

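/**
 * uvd_v3_1_process_interrupt - process a UVD interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Handle a UVD trap by processing the fences on the UVD ring.
 */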
static int uvd_v3_1_process_interrupt(struct amdgpu_device *adev,
                      struct amdgpu_irq_src *source,
                      struct amdgpu_iv_entry *entry)
{
    DRM_DEBUG("IH: UVD TRAP\n");
    amdgpu_fence_process(&adev->uvd.inst->ring);
    return 0;
}

static const struct amdgpu_irq_src_funcs uvd_v3_1_irq_funcs = {
    .set = uvd_v3_1_set_interrupt_state,
    .process = uvd_v3_1_process_interrupt,
};

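/**
 * uvd_v3_1_set_irq_funcs - attach the interrupt callbacks
 *
 * @adev: amdgpu_device pointer
 *
 * Register the v3.1 interrupt handlers for the UVD instance.
 */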
static void uvd_v3_1_set_irq_funcs(struct amdgpu_device *adev)
{
    adev->uvd.inst->irq.num_types = 1;
    adev->uvd.inst->irq.funcs = &uvd_v3_1_irq_funcs;
}

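/**
 * uvd_v3_1_early_init - early hardware init
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Set the number of UVD instances and hook up the ring and interrupt
 * callbacks.
 */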
static int uvd_v3_1_early_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    adev->uvd.num_uvd_inst = 1;

    uvd_v3_1_set_ring_funcs(adev);
    uvd_v3_1_set_irq_funcs(adev);

    return 0;
}

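/**
 * uvd_v3_1_sw_init - software init
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Register the UVD trap interrupt, initialize the ring and the UVD
 * software state, and read the key select value used later by
 * uvd_v3_1_fw_validate().
 */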
static int uvd_v3_1_sw_init(void *handle)
{
    struct amdgpu_ring *ring;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int r;
    void *ptr;
    uint32_t ucode_len;

    /* UVD TRAP */
    r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
    if (r)
        return r;

    r = amdgpu_uvd_sw_init(adev);
    if (r)
        return r;

    ring = &adev->uvd.inst->ring;
    sprintf(ring->name, "uvd");
    r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
             AMDGPU_RING_PRIO_DEFAULT, NULL);
    if (r)
        return r;

    r = amdgpu_uvd_resume(adev);
    if (r)
        return r;

    /* Retrieve the firmware validation key */
    ptr = adev->uvd.inst[0].cpu_addr;
    ptr += 192 + 16;
    memcpy(&ucode_len, ptr, 4);
    ptr += ucode_len;
    memcpy(&adev->uvd.keyselect, ptr, 4);

    r = amdgpu_uvd_entity_init(adev);

    return r;
}

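/**
 * uvd_v3_1_sw_fini - software teardown
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Suspend the UVD block and free its software state.
 */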
static int uvd_v3_1_sw_fini(void *handle)
{
    int r;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    r = amdgpu_uvd_suspend(adev);
    if (r)
        return r;

    return amdgpu_uvd_sw_fini(adev);
}

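/**
 * uvd_v3_1_enable_mgcg - enable/disable medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable MGCG (when supported), disable otherwise
 *
 * Program the CGC memory control and dynamic clock mode bits for
 * medium grain clock gating.
 */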
static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
                 bool enable)
{
    u32 orig, data;

    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
        data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
        data |= 0x3fff;
        WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

        orig = data = RREG32(mmUVD_CGC_CTRL);
        data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
        if (orig != data)
            WREG32(mmUVD_CGC_CTRL, data);
    } else {
        data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
        data &= ~0x3fff;
        WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

        orig = data = RREG32(mmUVD_CGC_CTRL);
        data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
        if (orig != data)
            WREG32(mmUVD_CGC_CTRL, data);
    }
}

/**
 * uvd_v3_1_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v3_1_hw_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct amdgpu_ring *ring = &adev->uvd.inst->ring;
    uint32_t tmp;
    int r;

    uvd_v3_1_mc_resume(adev);

    r = uvd_v3_1_fw_validate(adev);
    if (r) {
        DRM_ERROR("amdgpu: UVD Firmware validate fail (%d).\n", r);
        return r;
    }

    uvd_v3_1_enable_mgcg(adev, true);
    amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

    uvd_v3_1_start(adev);

    r = amdgpu_ring_test_helper(ring);
    if (r) {
        DRM_ERROR("amdgpu: UVD ring test fail (%d).\n", r);
        goto done;
    }

    r = amdgpu_ring_alloc(ring, 10);
    if (r) {
        DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
        goto done;
    }

    tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
    amdgpu_ring_write(ring, tmp);
    amdgpu_ring_write(ring, 0xFFFFF);

    tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
    amdgpu_ring_write(ring, tmp);
    amdgpu_ring_write(ring, 0xFFFFF);

    tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
    amdgpu_ring_write(ring, tmp);
    amdgpu_ring_write(ring, 0xFFFFF);

    /* Clear timeout status bits */
    amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
    amdgpu_ring_write(ring, 0x8);

    amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
    amdgpu_ring_write(ring, 3);

    amdgpu_ring_commit(ring);

done:
    if (!r)
        DRM_INFO("UVD initialized successfully.\n");

    return r;
}

/**
 * uvd_v3_1_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v3_1_hw_fini(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    cancel_delayed_work_sync(&adev->uvd.idle_work);

    if (RREG32(mmUVD_STATUS) != 0)
        uvd_v3_1_stop(adev);

    return 0;
}

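/**
 * uvd_v3_1_suspend - suspend the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Gate the UVD clocks and power (or disable UVD dpm), stop the
 * hardware and suspend the UVD software state.
 */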
static int uvd_v3_1_suspend(void *handle)
{
    int r;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    /*
     * Proper cleanups before halting the HW engine:
     *   - cancel the delayed idle work
     *   - enable powergating
     *   - enable clockgating
     *   - disable dpm
     *
     * TODO: to align with the VCN implementation, move the
     * jobs for clockgating/powergating/dpm setting to
     * ->set_powergating_state().
     */
    cancel_delayed_work_sync(&adev->uvd.idle_work);

    if (adev->pm.dpm_enabled) {
        amdgpu_dpm_enable_uvd(adev, false);
    } else {
        amdgpu_asic_set_uvd_clocks(adev, 0, 0);
        /* shutdown the UVD block */
        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                               AMD_PG_STATE_GATE);
        amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                               AMD_CG_STATE_GATE);
    }

    r = uvd_v3_1_hw_fini(adev);
    if (r)
        return r;

    return amdgpu_uvd_suspend(adev);
}

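/**
 * uvd_v3_1_resume - resume the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Restore the UVD software state and re-initialize the hardware.
 */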
static int uvd_v3_1_resume(void *handle)
{
    int r;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    r = amdgpu_uvd_resume(adev);
    if (r)
        return r;

    return uvd_v3_1_hw_init(adev);
}

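/**
 * uvd_v3_1_is_idle - check UVD idle status
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Returns true if the UVD block reports idle in SRBM_STATUS.
 */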
static bool uvd_v3_1_is_idle(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

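/**
 * uvd_v3_1_wait_for_idle - wait for UVD to idle
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Poll SRBM_STATUS until the UVD busy bit clears, or return -ETIMEDOUT.
 */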
static int uvd_v3_1_wait_for_idle(void *handle)
{
    unsigned i;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    for (i = 0; i < adev->usec_timeout; i++) {
        if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
            return 0;
    }
    return -ETIMEDOUT;
}

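/**
 * uvd_v3_1_soft_reset - soft reset the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop UVD, toggle the SRBM soft reset bit for the block and start it
 * again.
 */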
static int uvd_v3_1_soft_reset(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    uvd_v3_1_stop(adev);

    WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
             ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
    mdelay(5);

    return uvd_v3_1_start(adev);
}

static int uvd_v3_1_set_clockgating_state(void *handle,
                      enum amd_clockgating_state state)
{
    return 0;
}

static int uvd_v3_1_set_powergating_state(void *handle,
                      enum amd_powergating_state state)
{
    return 0;
}

static const struct amd_ip_funcs uvd_v3_1_ip_funcs = {
    .name = "uvd_v3_1",
    .early_init = uvd_v3_1_early_init,
    .late_init = NULL,
    .sw_init = uvd_v3_1_sw_init,
    .sw_fini = uvd_v3_1_sw_fini,
    .hw_init = uvd_v3_1_hw_init,
    .hw_fini = uvd_v3_1_hw_fini,
    .suspend = uvd_v3_1_suspend,
    .resume = uvd_v3_1_resume,
    .is_idle = uvd_v3_1_is_idle,
    .wait_for_idle = uvd_v3_1_wait_for_idle,
    .soft_reset = uvd_v3_1_soft_reset,
    .set_clockgating_state = uvd_v3_1_set_clockgating_state,
    .set_powergating_state = uvd_v3_1_set_powergating_state,
};

const struct amdgpu_ip_block_version uvd_v3_1_ip_block = {
    .type = AMD_IP_BLOCK_TYPE_UVD,
    .major = 3,
    .minor = 1,
    .rev = 0,
    .funcs = &uvd_v3_1_ip_funcs,
};