0001 /*
0002  * Copyright 2016 Advanced Micro Devices, Inc.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  */
0023 
0024 #include <linux/firmware.h>
0025 
0026 #include "amdgpu.h"
0027 #include "amdgpu_uvd.h"
0028 #include "amdgpu_cs.h"
0029 #include "soc15.h"
0030 #include "soc15d.h"
0031 #include "soc15_common.h"
0032 #include "mmsch_v1_0.h"
0033 
0034 #include "uvd/uvd_7_0_offset.h"
0035 #include "uvd/uvd_7_0_sh_mask.h"
0036 #include "vce/vce_4_0_offset.h"
0037 #include "vce/vce_4_0_default.h"
0038 #include "vce/vce_4_0_sh_mask.h"
0039 #include "nbif/nbif_6_1_offset.h"
0040 #include "mmhub/mmhub_1_0_offset.h"
0041 #include "mmhub/mmhub_1_0_sh_mask.h"
0042 #include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
0043 
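/*
 * Harvesting: on Vega20 parts where a UVD instance has been fused off,
 * bit 1 (UVD_DISABLE) of UVD_PG0_CC_UVD_HARVESTING reads back set.
 * uvd_v7_0_early_init() checks this register per instance to build
 * adev->uvd.harvest_config.
 */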
0044 #define mmUVD_PG0_CC_UVD_HARVESTING                                                                    0x00c7
0045 #define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX                                                           1
0046 //UVD_PG0_CC_UVD_HARVESTING
0047 #define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT                                                         0x1
0048 #define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK                                                           0x00000002L
0049 
0050 #define UVD7_MAX_HW_INSTANCES_VEGA20            2
0051 
0052 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
0053 static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
0054 static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
0055 static int uvd_v7_0_start(struct amdgpu_device *adev);
0056 static void uvd_v7_0_stop(struct amdgpu_device *adev);
0057 static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
0058 
0059 static int amdgpu_ih_clientid_uvds[] = {
0060     SOC15_IH_CLIENTID_UVD,
0061     SOC15_IH_CLIENTID_UVD1
0062 };
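/*
 * Each UVD instance delivers its interrupts through its own IH client;
 * uvd_v7_0_sw_init() indexes this array by instance when registering the
 * system-message and ENC interrupt sources.
 */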
0063 
0064 /**
0065  * uvd_v7_0_ring_get_rptr - get read pointer
0066  *
0067  * @ring: amdgpu_ring pointer
0068  *
0069  * Returns the current hardware read pointer
0070  */
0071 static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
0072 {
0073     struct amdgpu_device *adev = ring->adev;
0074 
0075     return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
0076 }
0077 
0078 /**
0079  * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
0080  *
0081  * @ring: amdgpu_ring pointer
0082  *
0083  * Returns the current hardware enc read pointer
0084  */
0085 static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
0086 {
0087     struct amdgpu_device *adev = ring->adev;
0088 
0089     if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
0090         return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
0091     else
0092         return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
0093 }
0094 
0095 /**
0096  * uvd_v7_0_ring_get_wptr - get write pointer
0097  *
0098  * @ring: amdgpu_ring pointer
0099  *
0100  * Returns the current hardware write pointer
0101  */
0102 static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
0103 {
0104     struct amdgpu_device *adev = ring->adev;
0105 
0106     return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
0107 }
0108 
0109 /**
0110  * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
0111  *
0112  * @ring: amdgpu_ring pointer
0113  *
0114  * Returns the current hardware enc write pointer
0115  */
0116 static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
0117 {
0118     struct amdgpu_device *adev = ring->adev;
0119 
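    /*
     * With doorbells enabled (the SR-IOV path configured in sw_init) the
     * write pointer is mirrored at wptr_cpu_addr; otherwise it lives in
     * UVD_RB_WPTR or UVD_RB_WPTR2 depending on which encode ring this is.
     */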
0120     if (ring->use_doorbell)
0121         return *ring->wptr_cpu_addr;
0122 
0123     if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
0124         return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
0125     else
0126         return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
0127 }
0128 
0129 /**
0130  * uvd_v7_0_ring_set_wptr - set write pointer
0131  *
0132  * @ring: amdgpu_ring pointer
0133  *
0134  * Commits the write pointer to the hardware
0135  */
0136 static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
0137 {
0138     struct amdgpu_device *adev = ring->adev;
0139 
0140     WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
0141 }
0142 
0143 /**
0144  * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
0145  *
0146  * @ring: amdgpu_ring pointer
0147  *
0148  * Commits the enc write pointer to the hardware
0149  */
0150 static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
0151 {
0152     struct amdgpu_device *adev = ring->adev;
0153 
0154     if (ring->use_doorbell) {
0155         /* XXX check if swapping is necessary on BE */
0156         *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
0157         WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
0158         return;
0159     }
0160 
0161     if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
0162         WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
0163             lower_32_bits(ring->wptr));
0164     else
0165         WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
0166             lower_32_bits(ring->wptr));
0167 }
0168 
0169 /**
0170  * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
0171  *
0172  * @ring: the engine to test on
0173  *
0174  */
0175 static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
0176 {
0177     struct amdgpu_device *adev = ring->adev;
0178     uint32_t rptr;
0179     unsigned i;
0180     int r;
0181 
0182     if (amdgpu_sriov_vf(adev))
0183         return 0;
0184 
0185     r = amdgpu_ring_alloc(ring, 16);
0186     if (r)
0187         return r;
0188 
0189     rptr = amdgpu_ring_get_rptr(ring);
0190 
0191     amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
0192     amdgpu_ring_commit(ring);
0193 
0194     for (i = 0; i < adev->usec_timeout; i++) {
0195         if (amdgpu_ring_get_rptr(ring) != rptr)
0196             break;
0197         udelay(1);
0198     }
0199 
0200     if (i >= adev->usec_timeout)
0201         r = -ETIMEDOUT;
0202 
0203     return r;
0204 }
0205 
0206 /**
0207  * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
0208  *
0209  * @ring: ring we should submit the msg to
0210  * @handle: session handle to use
0211  * @bo: amdgpu object for which we query the offset
0212  * @fence: optional fence to return
0213  *
0214  * Open up a stream for HW test
0215  */
0216 static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
0217                        struct amdgpu_bo *bo,
0218                        struct dma_fence **fence)
0219 {
0220     const unsigned ib_size_dw = 16;
0221     struct amdgpu_job *job;
0222     struct amdgpu_ib *ib;
0223     struct dma_fence *f = NULL;
0224     uint64_t addr;
0225     int i, r;
0226 
0227     r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
0228                     AMDGPU_IB_POOL_DIRECT, &job);
0229     if (r)
0230         return r;
0231 
0232     ib = &job->ibs[0];
0233     addr = amdgpu_bo_gpu_offset(bo);
0234 
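    /*
     * The IB below is a minimal encoder message stream: each block starts
     * with its size in bytes followed by a command id and payload
     * (0x00000001 = session info carrying the handle and the message
     * buffer address, 0x00000002 = task info, 0x08000001 = op initialize).
     * The remainder of the IB is zero-padded.
     */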
0235     ib->length_dw = 0;
0236     ib->ptr[ib->length_dw++] = 0x00000018;
0237     ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
0238     ib->ptr[ib->length_dw++] = handle;
0239     ib->ptr[ib->length_dw++] = 0x00000000;
0240     ib->ptr[ib->length_dw++] = upper_32_bits(addr);
0241     ib->ptr[ib->length_dw++] = addr;
0242 
0243     ib->ptr[ib->length_dw++] = 0x00000014;
0244     ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
0245     ib->ptr[ib->length_dw++] = 0x0000001c;
0246     ib->ptr[ib->length_dw++] = 0x00000000;
0247     ib->ptr[ib->length_dw++] = 0x00000000;
0248 
0249     ib->ptr[ib->length_dw++] = 0x00000008;
0250     ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
0251 
0252     for (i = ib->length_dw; i < ib_size_dw; ++i)
0253         ib->ptr[i] = 0x0;
0254 
0255     r = amdgpu_job_submit_direct(job, ring, &f);
0256     if (r)
0257         goto err;
0258 
0259     if (fence)
0260         *fence = dma_fence_get(f);
0261     dma_fence_put(f);
0262     return 0;
0263 
0264 err:
0265     amdgpu_job_free(job);
0266     return r;
0267 }
0268 
0269 /**
0270  * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
0271  *
0272  * @ring: ring we should submit the msg to
0273  * @handle: session handle to use
0274  * @bo: amdgpu object for which we query the offset
0275  * @fence: optional fence to return
0276  *
0277  * Close up a stream for HW test or if userspace failed to do so
0278  */
0279 static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
0280                     struct amdgpu_bo *bo,
0281                     struct dma_fence **fence)
0282 {
0283     const unsigned ib_size_dw = 16;
0284     struct amdgpu_job *job;
0285     struct amdgpu_ib *ib;
0286     struct dma_fence *f = NULL;
0287     uint64_t addr;
0288     int i, r;
0289 
0290     r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
0291                     AMDGPU_IB_POOL_DIRECT, &job);
0292     if (r)
0293         return r;
0294 
0295     ib = &job->ibs[0];
0296     addr = amdgpu_bo_gpu_offset(bo);
0297 
0298     ib->length_dw = 0;
0299     ib->ptr[ib->length_dw++] = 0x00000018;
0300     ib->ptr[ib->length_dw++] = 0x00000001;
0301     ib->ptr[ib->length_dw++] = handle;
0302     ib->ptr[ib->length_dw++] = 0x00000000;
0303     ib->ptr[ib->length_dw++] = upper_32_bits(addr);
0304     ib->ptr[ib->length_dw++] = addr;
0305 
0306     ib->ptr[ib->length_dw++] = 0x00000014;
0307     ib->ptr[ib->length_dw++] = 0x00000002;
0308     ib->ptr[ib->length_dw++] = 0x0000001c;
0309     ib->ptr[ib->length_dw++] = 0x00000000;
0310     ib->ptr[ib->length_dw++] = 0x00000000;
0311 
0312     ib->ptr[ib->length_dw++] = 0x00000008;
0313     ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
0314 
0315     for (i = ib->length_dw; i < ib_size_dw; ++i)
0316         ib->ptr[i] = 0x0;
0317 
0318     r = amdgpu_job_submit_direct(job, ring, &f);
0319     if (r)
0320         goto err;
0321 
0322     if (fence)
0323         *fence = dma_fence_get(f);
0324     dma_fence_put(f);
0325     return 0;
0326 
0327 err:
0328     amdgpu_job_free(job);
0329     return r;
0330 }
0331 
0332 /**
0333  * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
0334  *
0335  * @ring: the engine to test on
0336  * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
0337  *
0338  */
0339 static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
0340 {
0341     struct dma_fence *fence = NULL;
0342     struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
0343     long r;
0344 
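    /*
     * The IB test opens and immediately closes an encoder session (create
     * followed by destroy message) and then waits on the fence returned by
     * the destroy submission; a timeout means the ENC firmware never
     * processed the IBs.
     */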
0345     r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
0346     if (r)
0347         goto error;
0348 
0349     r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
0350     if (r)
0351         goto error;
0352 
0353     r = dma_fence_wait_timeout(fence, false, timeout);
0354     if (r == 0)
0355         r = -ETIMEDOUT;
0356     else if (r > 0)
0357         r = 0;
0358 
0359 error:
0360     dma_fence_put(fence);
0361     return r;
0362 }
0363 
0364 static int uvd_v7_0_early_init(void *handle)
0365 {
0366     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0367 
0368     if (adev->asic_type == CHIP_VEGA20) {
0369         u32 harvest;
0370         int i;
0371 
0372         adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
0373         for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
0374             harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
0375             if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
0376                 adev->uvd.harvest_config |= 1 << i;
0377             }
0378         }
0379         if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
0380                          AMDGPU_UVD_HARVEST_UVD1))
0381             /* both instances are harvested, disable the block */
0382             return -ENOENT;
0383     } else {
0384         adev->uvd.num_uvd_inst = 1;
0385     }
0386 
0387     if (amdgpu_sriov_vf(adev))
0388         adev->uvd.num_enc_rings = 1;
0389     else
0390         adev->uvd.num_enc_rings = 2;
0391     uvd_v7_0_set_ring_funcs(adev);
0392     uvd_v7_0_set_enc_ring_funcs(adev);
0393     uvd_v7_0_set_irq_funcs(adev);
0394 
0395     return 0;
0396 }
0397 
0398 static int uvd_v7_0_sw_init(void *handle)
0399 {
0400     struct amdgpu_ring *ring;
0401 
0402     int i, j, r;
0403     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0404 
0405     for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
0406         if (adev->uvd.harvest_config & (1 << j))
0407             continue;
0408         /* UVD TRAP */
0409         r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
0410         if (r)
0411             return r;
0412 
0413         /* UVD ENC TRAP */
0414         for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
0415             r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
0416             if (r)
0417                 return r;
0418         }
0419     }
0420 
0421     r = amdgpu_uvd_sw_init(adev);
0422     if (r)
0423         return r;
0424 
0425     if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
0426         const struct common_firmware_header *hdr;
0427         hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
0428         adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
0429         adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
0430         adev->firmware.fw_size +=
0431             ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
0432 
0433         if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
0434             adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
0435             adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
0436             adev->firmware.fw_size +=
0437                 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
0438         }
0439         DRM_INFO("PSP loading UVD firmware\n");
0440     }
0441 
0442     for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
0443         if (adev->uvd.harvest_config & (1 << j))
0444             continue;
0445         if (!amdgpu_sriov_vf(adev)) {
0446             ring = &adev->uvd.inst[j].ring;
0447             sprintf(ring->name, "uvd_%d", ring->me);
0448             r = amdgpu_ring_init(adev, ring, 512,
0449                          &adev->uvd.inst[j].irq, 0,
0450                          AMDGPU_RING_PRIO_DEFAULT, NULL);
0451             if (r)
0452                 return r;
0453         }
0454 
0455         for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
0456             ring = &adev->uvd.inst[j].ring_enc[i];
0457             sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
0458             if (amdgpu_sriov_vf(adev)) {
0459                 ring->use_doorbell = true;
0460 
0461                 /* currently only the first encoding ring is used under
0462                  * sriov, so assign the other, unused rings a separate doorbell slot.
0463                  */
0464                 if (i == 0)
0465                     ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
0466                 else
0467                     ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
0468             }
0469             r = amdgpu_ring_init(adev, ring, 512,
0470                          &adev->uvd.inst[j].irq, 0,
0471                          AMDGPU_RING_PRIO_DEFAULT, NULL);
0472             if (r)
0473                 return r;
0474         }
0475     }
0476 
0477     r = amdgpu_uvd_resume(adev);
0478     if (r)
0479         return r;
0480 
0481     r = amdgpu_uvd_entity_init(adev);
0482     if (r)
0483         return r;
0484 
0485     r = amdgpu_virt_alloc_mm_table(adev);
0486     if (r)
0487         return r;
0488 
0489     return r;
0490 }
0491 
0492 static int uvd_v7_0_sw_fini(void *handle)
0493 {
0494     int i, j, r;
0495     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0496 
0497     amdgpu_virt_free_mm_table(adev);
0498 
0499     r = amdgpu_uvd_suspend(adev);
0500     if (r)
0501         return r;
0502 
0503     for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
0504         if (adev->uvd.harvest_config & (1 << j))
0505             continue;
0506         for (i = 0; i < adev->uvd.num_enc_rings; ++i)
0507             amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
0508     }
0509     return amdgpu_uvd_sw_fini(adev);
0510 }
0511 
0512 /**
0513  * uvd_v7_0_hw_init - start and test UVD block
0514  *
0515  * @handle: handle used to pass amdgpu_device pointer
0516  *
0517  * Initialize the hardware, boot up the VCPU and do some testing
0518  */
0519 static int uvd_v7_0_hw_init(void *handle)
0520 {
0521     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0522     struct amdgpu_ring *ring;
0523     uint32_t tmp;
0524     int i, j, r;
0525 
0526     if (amdgpu_sriov_vf(adev))
0527         r = uvd_v7_0_sriov_start(adev);
0528     else
0529         r = uvd_v7_0_start(adev);
0530     if (r)
0531         goto done;
0532 
0533     for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
0534         if (adev->uvd.harvest_config & (1 << j))
0535             continue;
0536         ring = &adev->uvd.inst[j].ring;
0537 
0538         if (!amdgpu_sriov_vf(adev)) {
0539             r = amdgpu_ring_test_helper(ring);
0540             if (r)
0541                 goto done;
0542 
0543             r = amdgpu_ring_alloc(ring, 10);
0544             if (r) {
0545                 DRM_ERROR("amdgpu: UVD(%d) failed to lock ring (%d).\n", j, r);
0546                 goto done;
0547             }
0548 
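            /*
             * Program the semaphore engine through the ring itself:
             * initialize the wait/signal timeouts to 0xFFFFF, clear any
             * stale timeout status bits, and enable the unit via
             * UVD_SEMA_CNTL before committing the packets.
             */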
0549             tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
0550                 mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
0551             amdgpu_ring_write(ring, tmp);
0552             amdgpu_ring_write(ring, 0xFFFFF);
0553 
0554             tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
0555                 mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
0556             amdgpu_ring_write(ring, tmp);
0557             amdgpu_ring_write(ring, 0xFFFFF);
0558 
0559             tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
0560                 mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
0561             amdgpu_ring_write(ring, tmp);
0562             amdgpu_ring_write(ring, 0xFFFFF);
0563 
0564             /* Clear timeout status bits */
0565             amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
0566                 mmUVD_SEMA_TIMEOUT_STATUS), 0));
0567             amdgpu_ring_write(ring, 0x8);
0568 
0569             amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
0570                 mmUVD_SEMA_CNTL), 0));
0571             amdgpu_ring_write(ring, 3);
0572 
0573             amdgpu_ring_commit(ring);
0574         }
0575 
0576         for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
0577             ring = &adev->uvd.inst[j].ring_enc[i];
0578             r = amdgpu_ring_test_helper(ring);
0579             if (r)
0580                 goto done;
0581         }
0582     }
0583 done:
0584     if (!r)
0585         DRM_INFO("UVD and UVD ENC initialized successfully.\n");
0586 
0587     return r;
0588 }
0589 
0590 /**
0591  * uvd_v7_0_hw_fini - stop the hardware block
0592  *
0593  * @handle: handle used to pass amdgpu_device pointer
0594  *
0595  * Stop the UVD block, mark ring as not ready any more
0596  */
0597 static int uvd_v7_0_hw_fini(void *handle)
0598 {
0599     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0600 
0601     cancel_delayed_work_sync(&adev->uvd.idle_work);
0602 
0603     if (!amdgpu_sriov_vf(adev))
0604         uvd_v7_0_stop(adev);
0605     else {
0606         /* full access mode, so don't touch any UVD register */
0607         DRM_DEBUG("Under SR-IOV, UVD registers are left untouched.\n");
0608     }
0609 
0610     return 0;
0611 }
0612 
0613 static int uvd_v7_0_suspend(void *handle)
0614 {
0615     int r;
0616     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0617 
0618     /*
0619      * Proper cleanups before halting the HW engine:
0620      *   - cancel the delayed idle work
0621      *   - enable powergating
0622      *   - enable clockgating
0623      *   - disable dpm
0624      *
0625      * TODO: to align with the VCN implementation, move the
0626      * jobs for clockgating/powergating/dpm setting to
0627      * ->set_powergating_state().
0628      */
0629     cancel_delayed_work_sync(&adev->uvd.idle_work);
0630 
0631     if (adev->pm.dpm_enabled) {
0632         amdgpu_dpm_enable_uvd(adev, false);
0633     } else {
0634         amdgpu_asic_set_uvd_clocks(adev, 0, 0);
0635         /* shutdown the UVD block */
0636         amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
0637                                AMD_PG_STATE_GATE);
0638         amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
0639                                AMD_CG_STATE_GATE);
0640     }
0641 
0642     r = uvd_v7_0_hw_fini(adev);
0643     if (r)
0644         return r;
0645 
0646     return amdgpu_uvd_suspend(adev);
0647 }
0648 
0649 static int uvd_v7_0_resume(void *handle)
0650 {
0651     int r;
0652     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
0653 
0654     r = amdgpu_uvd_resume(adev);
0655     if (r)
0656         return r;
0657 
0658     return uvd_v7_0_hw_init(adev);
0659 }
0660 
0661 /**
0662  * uvd_v7_0_mc_resume - memory controller programming
0663  *
0664  * @adev: amdgpu_device pointer
0665  *
0666  * Let the UVD memory controller know its offsets
0667  */
0668 static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
0669 {
0670     uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
0671     uint32_t offset;
0672     int i;
0673 
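    /*
     * Three VCPU cache windows are programmed per instance: CACHE0 maps the
     * firmware image (the PSP-loaded TMR region or the BO at gpu_addr),
     * CACHE1 the heap (AMDGPU_UVD_HEAP_SIZE) and CACHE2 the stack plus the
     * per-session contexts (AMDGPU_UVD_STACK_SIZE + 40 * AMDGPU_UVD_SESSION_SIZE).
     */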
0674     for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
0675         if (adev->uvd.harvest_config & (1 << i))
0676             continue;
0677         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
0678             WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
0679                 i == 0 ?
0680                 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo:
0681                 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
0682             WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
0683                 i == 0 ?
0684                 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi:
0685                 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
0686             WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
0687             offset = 0;
0688         } else {
0689             WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
0690                 lower_32_bits(adev->uvd.inst[i].gpu_addr));
0691             WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
0692                 upper_32_bits(adev->uvd.inst[i].gpu_addr));
0693             offset = size;
0694             WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
0695                     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
0696         }
0697 
0698         WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
0699 
0700         WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
0701                 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
0702         WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
0703                 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
0704         WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
0705         WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
0706 
0707         WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
0708                 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
0709         WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
0710                 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
0711         WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
0712         WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
0713                 AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
0714 
0715         WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
0716                 adev->gfx.config.gb_addr_config);
0717         WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
0718                 adev->gfx.config.gb_addr_config);
0719         WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
0720                 adev->gfx.config.gb_addr_config);
0721 
0722         WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
0723     }
0724 }
0725 
0726 static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
0727                 struct amdgpu_mm_table *table)
0728 {
0729     uint32_t data = 0, loop;
0730     uint64_t addr = table->gpu_addr;
0731     struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
0732     uint32_t size;
0733     int i;
0734 
0735     size = header->header_size + header->vce_table_size + header->uvd_table_size;
0736 
0737     /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
0738     WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
0739     WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
0740 
0741     /* 2, update vmid of descriptor */
0742     data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
0743     data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
0744     data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
0745     WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);
0746 
0747     /* 3, notify mmsch about the size of this descriptor */
0748     WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);
0749 
0750     /* 4, set resp to zero */
0751     WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
0752 
0753     for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
0754         if (adev->uvd.harvest_config & (1 << i))
0755             continue;
0756         WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
0757         *adev->uvd.inst[i].ring_enc[0].wptr_cpu_addr = 0;
0758         adev->uvd.inst[i].ring_enc[0].wptr = 0;
0759         adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
0760     }
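    /*
     * Handshake with the MMSCH firmware: writing 0x10000001 to the host
     * mailbox asks it to process the descriptor table; the response
     * register is then polled (up to 1000 * 10us) until it reads back
     * 0x10000002, which the code treats as "init complete".
     */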
0761     /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
0762     WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
0763 
0764     data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
0765     loop = 1000;
0766     while ((data & 0x10000002) != 0x10000002) {
0767         udelay(10);
0768         data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
0769         loop--;
0770         if (!loop)
0771             break;
0772     }
0773 
0774     if (!loop) {
0775         dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
0776         return -EBUSY;
0777     }
0778 
0779     return 0;
0780 }
0781 
0782 static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
0783 {
0784     struct amdgpu_ring *ring;
0785     uint32_t offset, size, tmp;
0786     uint32_t table_size = 0;
0787     struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
0788     struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
0789     struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
0790     struct mmsch_v1_0_cmd_end end = { {0} };
0791     uint32_t *init_table = adev->virt.mm_table.cpu_addr;
0792     struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
0793     uint8_t i = 0;
0794 
0795     direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
0796     direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
0797     direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
0798     end.cmd_header.command_type = MMSCH_COMMAND__END;
0799 
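    /*
     * Under SR-IOV the VF must not program UVD registers directly, so the
     * whole start sequence is recorded into the shared mm_table as direct
     * write / read-modify-write / poll commands and handed to the MMSCH in
     * uvd_v7_0_mmsch_start(). The UVD table is appended after the VCE
     * table if one has already been written.
     */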
0800     if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
0801         header->version = MMSCH_VERSION;
0802         header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
0803 
0804         if (header->vce_table_offset == 0 && header->vce_table_size == 0)
0805             header->uvd_table_offset = header->header_size;
0806         else
0807             header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
0808 
0809         init_table += header->uvd_table_offset;
0810 
0811         for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
0812             if (adev->uvd.harvest_config & (1 << i))
0813                 continue;
0814             ring = &adev->uvd.inst[i].ring;
0815             ring->wptr = 0;
0816             size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
0817 
0818             MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
0819                                0xFFFFFFFF, 0x00000004);
0820             /* mc resume */
0821             if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
0822                 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
0823                             mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
0824                             adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
0825                 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
0826                             mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
0827                             adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
0828                 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
0829                 offset = 0;
0830             } else {
0831                 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
0832                                 lower_32_bits(adev->uvd.inst[i].gpu_addr));
0833                 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
0834                                 upper_32_bits(adev->uvd.inst[i].gpu_addr));
0835                 offset = size;
0836                 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
0837                             AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
0838 
0839             }
0840 
0841             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
0842 
0843             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
0844                             lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
0845             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
0846                             upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
0847             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
0848             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
0849 
0850             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
0851                             lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
0852             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
0853                             upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
0854             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
0855             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
0856                             AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
0857 
0858             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
0859             /* mc resume end */
0860 
0861             /* disable clock gating */
0862             MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
0863                                ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
0864 
0865             /* disable interrupt */
0866             MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
0867                                ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
0868 
0869             /* stall UMC and register bus before resetting VCPU */
0870             MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
0871                                ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
0872                                UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
0873 
0874             /* put LMI, VCPU, RBC etc... into reset */
0875             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
0876                             (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
0877                                    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
0878                                    UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
0879                                    UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
0880                                    UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
0881                                    UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
0882                                    UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
0883                                    UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
0884 
0885             /* initialize UVD memory controller */
0886             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
0887                             (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
0888                                    UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
0889                                    UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
0890                                    UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
0891                                    UVD_LMI_CTRL__REQ_MODE_MASK |
0892                                    0x00100000L));
0893 
0894             /* take all subblocks out of reset, except VCPU */
0895             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
0896                             UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
0897 
0898             /* enable VCPU clock */
0899             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
0900                             UVD_VCPU_CNTL__CLK_EN_MASK);
0901 
0902             /* enable master interrupt */
0903             MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
0904                                ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
0905                                (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
0906 
0907             /* clear bit 4 of UVD_STATUS */
0908             MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
0909                                ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
0910 
0911             /* force RBC into idle state */
0912             size = order_base_2(ring->ring_size);
0913             tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
0914             tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
0915             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
0916 
0917             ring = &adev->uvd.inst[i].ring_enc[0];
0918             ring->wptr = 0;
0919             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
0920             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
0921             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);
0922 
0923             /* boot up the VCPU */
0924             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);
0925 
0926             /* enable UMC */
0927             MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
0928                                                ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
0929 
0930             MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
0931         }
0932         /* add end packet */
0933         memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
0934         table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
0935         header->uvd_table_size = table_size;
0936 
0937     }
0938     return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
0939 }
0940 
0941 /**
0942  * uvd_v7_0_start - start UVD block
0943  *
0944  * @adev: amdgpu_device pointer
0945  *
0946  * Setup and start the UVD block
0947  */
0948 static int uvd_v7_0_start(struct amdgpu_device *adev)
0949 {
0950     struct amdgpu_ring *ring;
0951     uint32_t rb_bufsz, tmp;
0952     uint32_t lmi_swap_cntl;
0953     uint32_t mp_swap_cntl;
0954     int i, j, k, r;
0955 
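    /*
     * Bring-up order: disable DPG on every instance, program the memory
     * controller windows (uvd_v7_0_mc_resume), then per instance disable
     * clock gating and interrupts, put LMI/VCPU/RBC into soft reset while
     * the LMI and MPC are configured, release everything but the VCPU,
     * enable the VCPU clock, clear soft reset and poll UVD_STATUS until
     * the VCPU reports ready, and finally set up the decode ring and the
     * two encode rings.
     */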
0956     for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
0957         if (adev->uvd.harvest_config & (1 << k))
0958             continue;
0959         /* disable DPG */
0960         WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
0961                 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
0962     }
0963 
0964     /* disable byte swapping */
0965     lmi_swap_cntl = 0;
0966     mp_swap_cntl = 0;
0967 
0968     uvd_v7_0_mc_resume(adev);
0969 
0970     for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
0971         if (adev->uvd.harvest_config & (1 << k))
0972             continue;
0973         ring = &adev->uvd.inst[k].ring;
0974         /* disable clock gating */
0975         WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
0976                 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
0977 
0978         /* disable interrupt */
0979         WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
0980                 ~UVD_MASTINT_EN__VCPU_EN_MASK);
0981 
0982         /* stall UMC and register bus before resetting VCPU */
0983         WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
0984                 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
0985                 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
0986         mdelay(1);
0987 
0988         /* put LMI, VCPU, RBC etc... into reset */
0989         WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
0990             UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
0991             UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
0992             UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
0993             UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
0994             UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
0995             UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
0996             UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
0997             UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
0998         mdelay(5);
0999 
1000         /* initialize UVD memory controller */
1001         WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
1002             (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
1003             UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1004             UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1005             UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
1006             UVD_LMI_CTRL__REQ_MODE_MASK |
1007             0x00100000L);
1008 
1009 #ifdef __BIG_ENDIAN
1010         /* swap (8 in 32) RB and IB */
1011         lmi_swap_cntl = 0xa;
1012         mp_swap_cntl = 0;
1013 #endif
1014         WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
1015         WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
1016 
1017         WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
1018         WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
1019         WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
1020         WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
1021         WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
1022         WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);
1023 
1024         /* take all subblocks out of reset, except VCPU */
1025         WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
1026                 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1027         mdelay(5);
1028 
1029         /* enable VCPU clock */
1030         WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
1031                 UVD_VCPU_CNTL__CLK_EN_MASK);
1032 
1033         /* enable UMC */
1034         WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
1035                 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1036 
1037         /* boot up the VCPU */
1038         WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
1039         mdelay(10);
1040 
1041         for (i = 0; i < 10; ++i) {
1042             uint32_t status;
1043 
1044             for (j = 0; j < 100; ++j) {
1045                 status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
1046                 if (status & 2)
1047                     break;
1048                 mdelay(10);
1049             }
1050             r = 0;
1051             if (status & 2)
1052                 break;
1053 
1054             DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
1055             WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
1056                     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1057                     ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1058             mdelay(10);
1059             WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
1060                     ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1061             mdelay(10);
1062             r = -1;
1063         }
1064 
1065         if (r) {
1066             DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
1067             return r;
1068         }
1069         /* enable master interrupt */
1070         WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
1071             (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
1072             ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
1073 
1074         /* clear bit 4 of UVD_STATUS */
1075         WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
1076                 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1077 
1078         /* force RBC into idle state */
1079         rb_bufsz = order_base_2(ring->ring_size);
1080         tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1081         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1082         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1083         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
1084         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1085         tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1086         WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);
1087 
1088         /* set the write pointer delay */
1089         WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);
1090 
1091         /* set the wb address */
1092         WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
1093                 (upper_32_bits(ring->gpu_addr) >> 2));
1094 
1095         /* program the RB_BASE for ring buffer */
1096         WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1097                 lower_32_bits(ring->gpu_addr));
1098         WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1099                 upper_32_bits(ring->gpu_addr));
1100 
1101         /* Initialize the ring buffer's read and write pointers */
1102         WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);
1103 
1104         ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
1105         WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
1106                 lower_32_bits(ring->wptr));
1107 
1108         WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
1109                 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1110 
1111         ring = &adev->uvd.inst[k].ring_enc[0];
1112         WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1113         WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1114         WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
1115         WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1116         WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
1117 
1118         ring = &adev->uvd.inst[k].ring_enc[1];
1119         WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1120         WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1121         WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1122         WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1123         WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
1124     }
1125     return 0;
1126 }
1127 
1128 /**
1129  * uvd_v7_0_stop - stop UVD block
1130  *
1131  * @adev: amdgpu_device pointer
1132  *
1133  * stop the UVD block
1134  */
1135 static void uvd_v7_0_stop(struct amdgpu_device *adev)
1136 {
1137     uint8_t i = 0;
1138 
1139     for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
1140         if (adev->uvd.harvest_config & (1 << i))
1141             continue;
1142         /* force RBC into idle state */
1143         WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
1144 
1145         /* Stall UMC and register bus before resetting VCPU */
1146         WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
1147                 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
1148                 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1149         mdelay(1);
1150 
1151         /* put VCPU into reset */
1152         WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
1153                 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1154         mdelay(5);
1155 
1156         /* disable VCPU clock */
1157         WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);
1158 
1159         /* Unstall UMC and register bus */
1160         WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
1161                 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1162     }
1163 }
1164 
1165 /**
1166  * uvd_v7_0_ring_emit_fence - emit a fence & trap command
1167  *
1168  * @ring: amdgpu_ring pointer
1169  * @addr: address
1170  * @seq: sequence number
1171  * @flags: fence related flags
1172  *
1173  * Write a fence and a trap command to the ring.
1174  */
1175 static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1176                      unsigned flags)
1177 {
1178     struct amdgpu_device *adev = ring->adev;
1179 
1180     WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1181 
1182     amdgpu_ring_write(ring,
1183         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1184     amdgpu_ring_write(ring, seq);
1185     amdgpu_ring_write(ring,
1186         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1187     amdgpu_ring_write(ring, addr & 0xffffffff);
1188     amdgpu_ring_write(ring,
1189         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1190     amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1191     amdgpu_ring_write(ring,
1192         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1193     amdgpu_ring_write(ring, 0);
1194 
1195     amdgpu_ring_write(ring,
1196         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1197     amdgpu_ring_write(ring, 0);
1198     amdgpu_ring_write(ring,
1199         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1200     amdgpu_ring_write(ring, 0);
1201     amdgpu_ring_write(ring,
1202         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1203     amdgpu_ring_write(ring, 2);
1204 }
1205 
1206 /**
1207  * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
1208  *
1209  * @ring: amdgpu_ring pointer
1210  * @addr: address
1211  * @seq: sequence number
1212  * @flags: fence related flags
1213  *
1214  * Write an enc fence and a trap command to the ring.
1215  */
1216 static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1217             u64 seq, unsigned flags)
1218 {
1219 
1220     WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1221 
1222     amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
1223     amdgpu_ring_write(ring, addr);
1224     amdgpu_ring_write(ring, upper_32_bits(addr));
1225     amdgpu_ring_write(ring, seq);
1226     amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
1227 }
1228 
1229 /**
1230  * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
1231  *
1232  * @ring: amdgpu_ring pointer
1233  */
1234 static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1235 {
1236     /* The firmware doesn't seem to like touching registers at this point. */
1237 }
1238 
1239 /**
1240  * uvd_v7_0_ring_test_ring - register write test
1241  *
1242  * @ring: amdgpu_ring pointer
1243  *
1244  * Test if we can successfully write to the context register
1245  */
1246 static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
1247 {
1248     struct amdgpu_device *adev = ring->adev;
1249     uint32_t tmp = 0;
1250     unsigned i;
1251     int r;
1252 
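    /*
     * Write a known value (0xCAFEDEAD) into UVD_CONTEXT_ID directly, then
     * ask the VCPU to overwrite it with 0xDEADBEEF via a ring packet and
     * poll for up to usec_timeout microseconds until the new value lands.
     */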
1253     WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
1254     r = amdgpu_ring_alloc(ring, 3);
1255     if (r)
1256         return r;
1257 
1258     amdgpu_ring_write(ring,
1259         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1260     amdgpu_ring_write(ring, 0xDEADBEEF);
1261     amdgpu_ring_commit(ring);
1262     for (i = 0; i < adev->usec_timeout; i++) {
1263         tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
1264         if (tmp == 0xDEADBEEF)
1265             break;
1266         udelay(1);
1267     }
1268 
1269     if (i >= adev->usec_timeout)
1270         r = -ETIMEDOUT;
1271 
1272     return r;
1273 }
1274 
1275 /**
1276  * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
1277  *
1278  * @p: the CS parser with the IBs
1279  * @job: which job this ib is in
1280  * @ib: which IB to patch
1281  *
1282  */
1283 static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
1284                        struct amdgpu_job *job,
1285                        struct amdgpu_ib *ib)
1286 {
1287     struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
1288     unsigned i;
1289 
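    /*
     * Userspace encodes register offsets against UVD instance 0; when the
     * job runs on the second instance, every register offset in the IB is
     * rebased below from the instance-0 to the instance-1 register space
     * using adev->reg_offset[UVD_HWIP].
     */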
1290     /* No patching necessary for the first instance */
1291     if (!ring->me)
1292         return 0;
1293 
1294     for (i = 0; i < ib->length_dw; i += 2) {
1295         uint32_t reg = amdgpu_ib_get_value(ib, i);
1296 
1297         reg -= p->adev->reg_offset[UVD_HWIP][0][1];
1298         reg += p->adev->reg_offset[UVD_HWIP][1][1];
1299 
1300         amdgpu_ib_set_value(ib, i, reg);
1301     }
1302     return 0;
1303 }
1304 
1305 /**
1306  * uvd_v7_0_ring_emit_ib - execute indirect buffer
1307  *
1308  * @ring: amdgpu_ring pointer
1309  * @job: job to retrieve vmid from
1310  * @ib: indirect buffer to execute
1311  * @flags: unused
1312  *
1313  * Write ring commands to execute the indirect buffer
1314  */
1315 static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
1316                   struct amdgpu_job *job,
1317                   struct amdgpu_ib *ib,
1318                   uint32_t flags)
1319 {
1320     struct amdgpu_device *adev = ring->adev;
1321     unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1322 
1323     amdgpu_ring_write(ring,
1324         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
1325     amdgpu_ring_write(ring, vmid);
1326 
1327     amdgpu_ring_write(ring,
1328         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1329     amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1330     amdgpu_ring_write(ring,
1331         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1332     amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1333     amdgpu_ring_write(ring,
1334         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
1335     amdgpu_ring_write(ring, ib->length_dw);
1336 }
1337 
1338 /**
1339  * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
1340  *
1341  * @ring: amdgpu_ring pointer
1342  * @job: job to retrieve vmid from
1343  * @ib: indirect buffer to execute
1344  * @flags: unused
1345  *
1346  * Write enc ring commands to execute the indirect buffer
1347  */
1348 static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1349                     struct amdgpu_job *job,
1350                     struct amdgpu_ib *ib,
1351                     uint32_t flags)
1352 {
1353     unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1354 
1355     amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
1356     amdgpu_ring_write(ring, vmid);
1357     amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1358     amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1359     amdgpu_ring_write(ring, ib->length_dw);
1360 }
1361 
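/*
 * Register writes and waits on the decode ring go through the GPCOM
 * mailbox: DATA0 carries the register byte offset (hence the << 2), DATA1
 * the value, GP_SCRATCH8 the mask for waits, and the value written to
 * GPCOM_VCPU_CMD selects the operation (8 for a write, 12 for a wait).
 */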
1362 static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
1363                     uint32_t reg, uint32_t val)
1364 {
1365     struct amdgpu_device *adev = ring->adev;
1366 
1367     amdgpu_ring_write(ring,
1368         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1369     amdgpu_ring_write(ring, reg << 2);
1370     amdgpu_ring_write(ring,
1371         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1372     amdgpu_ring_write(ring, val);
1373     amdgpu_ring_write(ring,
1374         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1375     amdgpu_ring_write(ring, 8);
1376 }
1377 
1378 static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1379                     uint32_t val, uint32_t mask)
1380 {
1381     struct amdgpu_device *adev = ring->adev;
1382 
1383     amdgpu_ring_write(ring,
1384         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1385     amdgpu_ring_write(ring, reg << 2);
1386     amdgpu_ring_write(ring,
1387         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1388     amdgpu_ring_write(ring, val);
1389     amdgpu_ring_write(ring,
1390         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
1391     amdgpu_ring_write(ring, mask);
1392     amdgpu_ring_write(ring,
1393         PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1394     amdgpu_ring_write(ring, 12);
1395 }
1396 
1397 static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1398                     unsigned vmid, uint64_t pd_addr)
1399 {
1400     struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1401     uint32_t data0, data1, mask;
1402 
1403     pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1404 
1405     /* wait for reg writes */
1406     data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
1407     data1 = lower_32_bits(pd_addr);
1408     mask = 0xffffffff;
1409     uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
1410 }
1411 
1412 static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1413 {
1414     struct amdgpu_device *adev = ring->adev;
1415     int i;
1416 
1417     WARN_ON(ring->wptr % 2 || count % 2);
1418 
1419     for (i = 0; i < count / 2; i++) {
1420         amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
1421         amdgpu_ring_write(ring, 0);
1422     }
1423 }
1424 
1425 static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1426 {
1427     amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1428 }
1429 
1430 static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1431                         uint32_t reg, uint32_t val,
1432                         uint32_t mask)
1433 {
1434     amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1435     amdgpu_ring_write(ring, reg << 2);
1436     amdgpu_ring_write(ring, mask);
1437     amdgpu_ring_write(ring, val);
1438 }
1439 
1440 static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1441                         unsigned int vmid, uint64_t pd_addr)
1442 {
1443     struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1444 
1445     pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1446 
1447     /* wait for reg writes */
1448     uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
1449                     vmid * hub->ctx_addr_distance,
1450                     lower_32_bits(pd_addr), 0xffffffff);
1451 }
1452 
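/**
 * uvd_v7_0_enc_ring_emit_wreg - emit a register write on the encode ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register dword offset
 * @val: value to write
 *
 * Emit HEVC_ENC_CMD_REG_WRITE followed by the register offset and the
 * value to write.
 */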
1453 static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1454                     uint32_t reg, uint32_t val)
1455 {
1456     amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1457     amdgpu_ring_write(ring, reg << 2);
1458     amdgpu_ring_write(ring, val);
1459 }
1460 
1461 #if 0
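/*
 * Idle check and soft reset handlers, currently compiled out.  Note
 * that, as written, they only act on the first UVD instance (instance
 * 0), since no ring context is available in these ip_funcs callbacks.
 */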
1462 static bool uvd_v7_0_is_idle(void *handle)
1463 {
1464     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1465 
1466     return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1467 }
1468 
1469 static int uvd_v7_0_wait_for_idle(void *handle)
1470 {
1471     unsigned i;
1472     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1473 
1474     for (i = 0; i < adev->usec_timeout; i++) {
1475         if (uvd_v7_0_is_idle(handle))
1476             return 0;
1477     }
1478     return -ETIMEDOUT;
1479 }
1480 
1481 #define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
1482 static bool uvd_v7_0_check_soft_reset(void *handle)
1483 {
1484     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1485     u32 srbm_soft_reset = 0;
1486     u32 tmp = RREG32(mmSRBM_STATUS);
1487 
1488     if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1489         REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1490         (RREG32_SOC15(UVD, 0, mmUVD_STATUS) &
1491             AMDGPU_UVD_STATUS_BUSY_MASK))
1492         srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1493                 SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1494 
1495     if (srbm_soft_reset) {
1496         adev->uvd.inst[0].srbm_soft_reset = srbm_soft_reset;
1497         return true;
1498     } else {
1499         adev->uvd.inst[0].srbm_soft_reset = 0;
1500         return false;
1501     }
1502 }
1503 
1504 static int uvd_v7_0_pre_soft_reset(void *handle)
1505 {
1506     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1507 
1508     if (!adev->uvd.inst[0].srbm_soft_reset)
1509         return 0;
1510 
1511     uvd_v7_0_stop(adev);
1512     return 0;
1513 }
1514 
1515 static int uvd_v7_0_soft_reset(void *handle)
1516 {
1517     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1518     u32 srbm_soft_reset;
1519 
1520     if (!adev->uvd.inst[0].srbm_soft_reset)
1521         return 0;
1522     srbm_soft_reset = adev->uvd.inst[0].srbm_soft_reset;
1523 
1524     if (srbm_soft_reset) {
1525         u32 tmp;
1526 
1527         tmp = RREG32(mmSRBM_SOFT_RESET);
1528         tmp |= srbm_soft_reset;
1529         dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1530         WREG32(mmSRBM_SOFT_RESET, tmp);
1531         tmp = RREG32(mmSRBM_SOFT_RESET);
1532 
1533         udelay(50);
1534 
1535         tmp &= ~srbm_soft_reset;
1536         WREG32(mmSRBM_SOFT_RESET, tmp);
1537         tmp = RREG32(mmSRBM_SOFT_RESET);
1538 
1539         /* Wait a little for things to settle down */
1540         udelay(50);
1541     }
1542 
1543     return 0;
1544 }
1545 
1546 static int uvd_v7_0_post_soft_reset(void *handle)
1547 {
1548     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1549 
1550     if (!adev->uvd.inst[0].srbm_soft_reset)
1551         return 0;
1552 
1553     mdelay(5);
1554 
1555     return uvd_v7_0_start(adev);
1556 }
1557 #endif
1558 
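/**
 * uvd_v7_0_set_interrupt_state - toggle the UVD interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Not implemented yet; the UVD interrupt sources are left as they are.
 */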
1559 static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
1560                     struct amdgpu_irq_src *source,
1561                     unsigned type,
1562                     enum amdgpu_interrupt_state state)
1563 {
1564     // TODO
1565     return 0;
1566 }
1567 
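/**
 * uvd_v7_0_process_interrupt - process a UVD interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Map the client id to a UVD instance and run fence processing for the
 * ring that raised the interrupt.
 */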
1568 static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1569                       struct amdgpu_irq_src *source,
1570                       struct amdgpu_iv_entry *entry)
1571 {
1572     uint32_t ip_instance;
1573 
1574     switch (entry->client_id) {
1575     case SOC15_IH_CLIENTID_UVD:
1576         ip_instance = 0;
1577         break;
1578     case SOC15_IH_CLIENTID_UVD1:
1579         ip_instance = 1;
1580         break;
1581     default:
1582         DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1583         return 0;
1584     }
1585 
1586     DRM_DEBUG("IH: UVD TRAP\n");
1587 
1588     switch (entry->src_id) {
1589     case 124:   /* decode ring */
1590         amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
1591         break;
1592     case 119:   /* first encode ring */
1593         amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
1594         break;
1595     case 120:   /* second encode ring */
1596         if (!amdgpu_sriov_vf(adev))
1597             amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
1598         break;
1599     default:
1600         DRM_ERROR("Unhandled interrupt: %d %d\n",
1601               entry->src_id, entry->src_data[0]);
1602         break;
1603     }
1604 
1605     return 0;
1606 }
1607 
1608 #if 0
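/*
 * Clock and power gating handlers, currently compiled out.  As written
 * they only program the first UVD instance (instance 0).
 */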
1609 static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1610 {
1611     uint32_t data, data1, data2, suvd_flags;
1612 
1613     data = RREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL);
1614     data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
1615     data2 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL);
1616 
1617     data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1618           UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1619 
1620     suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1621              UVD_SUVD_CGC_GATE__SIT_MASK |
1622              UVD_SUVD_CGC_GATE__SMP_MASK |
1623              UVD_SUVD_CGC_GATE__SCM_MASK |
1624              UVD_SUVD_CGC_GATE__SDB_MASK;
1625 
1626     data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1627         (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1628         (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1629 
1630     data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1631             UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1632             UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1633             UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1634             UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1635             UVD_CGC_CTRL__SYS_MODE_MASK |
1636             UVD_CGC_CTRL__UDEC_MODE_MASK |
1637             UVD_CGC_CTRL__MPEG2_MODE_MASK |
1638             UVD_CGC_CTRL__REGS_MODE_MASK |
1639             UVD_CGC_CTRL__RBC_MODE_MASK |
1640             UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1641             UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1642             UVD_CGC_CTRL__IDCT_MODE_MASK |
1643             UVD_CGC_CTRL__MPRD_MODE_MASK |
1644             UVD_CGC_CTRL__MPC_MODE_MASK |
1645             UVD_CGC_CTRL__LBSI_MODE_MASK |
1646             UVD_CGC_CTRL__LRBBM_MODE_MASK |
1647             UVD_CGC_CTRL__WCB_MODE_MASK |
1648             UVD_CGC_CTRL__VCPU_MODE_MASK |
1649             UVD_CGC_CTRL__JPEG_MODE_MASK |
1650             UVD_CGC_CTRL__JPEG2_MODE_MASK |
1651             UVD_CGC_CTRL__SCPU_MODE_MASK);
1652     data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1653             UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1654             UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1655             UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1656             UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1657     data1 |= suvd_flags;
1658 
1659     WREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL, data);
1660     WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, 0);
1661     WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
1662     WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL, data2);
1663 }
1664 
1665 static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1666 {
1667     uint32_t data, data1, cgc_flags, suvd_flags;
1668 
1669     data = RREG32_SOC15(UVD, 0, mmUVD_CGC_GATE);
1670     data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
1671 
1672     cgc_flags = UVD_CGC_GATE__SYS_MASK |
1673         UVD_CGC_GATE__UDEC_MASK |
1674         UVD_CGC_GATE__MPEG2_MASK |
1675         UVD_CGC_GATE__RBC_MASK |
1676         UVD_CGC_GATE__LMI_MC_MASK |
1677         UVD_CGC_GATE__IDCT_MASK |
1678         UVD_CGC_GATE__MPRD_MASK |
1679         UVD_CGC_GATE__MPC_MASK |
1680         UVD_CGC_GATE__LBSI_MASK |
1681         UVD_CGC_GATE__LRBBM_MASK |
1682         UVD_CGC_GATE__UDEC_RE_MASK |
1683         UVD_CGC_GATE__UDEC_CM_MASK |
1684         UVD_CGC_GATE__UDEC_IT_MASK |
1685         UVD_CGC_GATE__UDEC_DB_MASK |
1686         UVD_CGC_GATE__UDEC_MP_MASK |
1687         UVD_CGC_GATE__WCB_MASK |
1688         UVD_CGC_GATE__VCPU_MASK |
1689         UVD_CGC_GATE__SCPU_MASK |
1690         UVD_CGC_GATE__JPEG_MASK |
1691         UVD_CGC_GATE__JPEG2_MASK;
1692 
1693     suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1694                 UVD_SUVD_CGC_GATE__SIT_MASK |
1695                 UVD_SUVD_CGC_GATE__SMP_MASK |
1696                 UVD_SUVD_CGC_GATE__SCM_MASK |
1697                 UVD_SUVD_CGC_GATE__SDB_MASK;
1698 
1699     data |= cgc_flags;
1700     data1 |= suvd_flags;
1701 
1702     WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, data);
1703     WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
1704 }
1705 
1706 static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
1707 {
1708     u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
1709 
1710     if (enable)
1711         tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1712             GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1713     else
1714         tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1715              GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1716 
1717     WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
1718 }
1719 
1720 
1721 static int uvd_v7_0_set_clockgating_state(void *handle,
1722                       enum amd_clockgating_state state)
1723 {
1724     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1725     bool enable = (state == AMD_CG_STATE_GATE);
1726 
1727     uvd_v7_0_set_bypass_mode(adev, enable);
1728 
1729     if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1730         return 0;
1731 
1732     if (enable) {
1733         /* disable HW gating and enable SW gating */
1734         uvd_v7_0_set_sw_clock_gating(adev);
1735     } else {
1736         /* wait for STATUS to clear */
1737         if (uvd_v7_0_wait_for_idle(handle))
1738             return -EBUSY;
1739 
1740         /* enable HW gates because UVD is idle */
1741         /* uvd_v7_0_set_hw_clock_gating(adev); */
1742     }
1743 
1744     return 0;
1745 }
1746 
1747 static int uvd_v7_0_set_powergating_state(void *handle,
1748                       enum amd_powergating_state state)
1749 {
1750     /* This doesn't actually powergate the UVD block.
1751      * That's done in the dpm code via the SMC.  This
1752      * just re-inits the block as necessary.  The actual
1753      * gating still happens in the dpm code.  We should
1754      * revisit this when there is a cleaner line between
1755      * the SMC and the hw blocks.
1756      */
1757     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1758 
1759     if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1760         return 0;
1761 
1762     WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
1763 
1764     if (state == AMD_PG_STATE_GATE) {
1765         uvd_v7_0_stop(adev);
1766         return 0;
1767     } else {
1768         return uvd_v7_0_start(adev);
1769     }
1770 }
1771 #endif
1772 
1773 static int uvd_v7_0_set_clockgating_state(void *handle,
1774                       enum amd_clockgating_state state)
1775 {
1776     /* needed for driver unload */
1777     return 0;
1778 }
1779 
1780 const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1781     .name = "uvd_v7_0",
1782     .early_init = uvd_v7_0_early_init,
1783     .late_init = NULL,
1784     .sw_init = uvd_v7_0_sw_init,
1785     .sw_fini = uvd_v7_0_sw_fini,
1786     .hw_init = uvd_v7_0_hw_init,
1787     .hw_fini = uvd_v7_0_hw_fini,
1788     .suspend = uvd_v7_0_suspend,
1789     .resume = uvd_v7_0_resume,
1790     .is_idle = NULL /* uvd_v7_0_is_idle */,
1791     .wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1792     .check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1793     .pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1794     .soft_reset = NULL /* uvd_v7_0_soft_reset */,
1795     .post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1796     .set_clockgating_state = uvd_v7_0_set_clockgating_state,
1797     .set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1798 };
1799 
1800 static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1801     .type = AMDGPU_RING_TYPE_UVD,
1802     .align_mask = 0xf,
1803     .support_64bit_ptrs = false,
1804     .no_user_fence = true,
1805     .vmhub = AMDGPU_MMHUB_0,
1806     .get_rptr = uvd_v7_0_ring_get_rptr,
1807     .get_wptr = uvd_v7_0_ring_get_wptr,
1808     .set_wptr = uvd_v7_0_ring_set_wptr,
1809     .patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
1810     .emit_frame_size =
1811         6 + /* hdp invalidate */
1812         SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1813         SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1814         8 + /* uvd_v7_0_ring_emit_vm_flush */
1815         14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1816     .emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1817     .emit_ib = uvd_v7_0_ring_emit_ib,
1818     .emit_fence = uvd_v7_0_ring_emit_fence,
1819     .emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1820     .emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1821     .test_ring = uvd_v7_0_ring_test_ring,
1822     .test_ib = amdgpu_uvd_ring_test_ib,
1823     .insert_nop = uvd_v7_0_ring_insert_nop,
1824     .pad_ib = amdgpu_ring_generic_pad_ib,
1825     .begin_use = amdgpu_uvd_ring_begin_use,
1826     .end_use = amdgpu_uvd_ring_end_use,
1827     .emit_wreg = uvd_v7_0_ring_emit_wreg,
1828     .emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
1829     .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1830 };
1831 
1832 static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1833     .type = AMDGPU_RING_TYPE_UVD_ENC,
1834     .align_mask = 0x3f,
1835     .nop = HEVC_ENC_CMD_NO_OP,
1836     .support_64bit_ptrs = false,
1837     .no_user_fence = true,
1838     .vmhub = AMDGPU_MMHUB_0,
1839     .get_rptr = uvd_v7_0_enc_ring_get_rptr,
1840     .get_wptr = uvd_v7_0_enc_ring_get_wptr,
1841     .set_wptr = uvd_v7_0_enc_ring_set_wptr,
1842     .emit_frame_size =
1843         3 + 3 + /* hdp flush / invalidate */
1844         SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1845         SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1846         4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
1847         5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1848         1, /* uvd_v7_0_enc_ring_insert_end */
1849     .emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
1850     .emit_ib = uvd_v7_0_enc_ring_emit_ib,
1851     .emit_fence = uvd_v7_0_enc_ring_emit_fence,
1852     .emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
1853     .test_ring = uvd_v7_0_enc_ring_test_ring,
1854     .test_ib = uvd_v7_0_enc_ring_test_ib,
1855     .insert_nop = amdgpu_ring_insert_nop,
1856     .insert_end = uvd_v7_0_enc_ring_insert_end,
1857     .pad_ib = amdgpu_ring_generic_pad_ib,
1858     .begin_use = amdgpu_uvd_ring_begin_use,
1859     .end_use = amdgpu_uvd_ring_end_use,
1860     .emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
1861     .emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
1862     .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1863 };
1864 
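/**
 * uvd_v7_0_set_ring_funcs - set the decode ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hook up the VM decode ring functions for every non-harvested UVD
 * instance.
 */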
1865 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1866 {
1867     int i;
1868 
1869     for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1870         if (adev->uvd.harvest_config & (1 << i))
1871             continue;
1872         adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
1873         adev->uvd.inst[i].ring.me = i;
1874         DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
1875     }
1876 }
1877 
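/**
 * uvd_v7_0_set_enc_ring_funcs - set the encode ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hook up the VM encode ring functions for every non-harvested UVD
 * instance.
 */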
1878 static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1879 {
1880     int i, j;
1881 
1882     for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
1883         if (adev->uvd.harvest_config & (1 << j))
1884             continue;
1885         for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
1886             adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
1887             adev->uvd.inst[j].ring_enc[i].me = j;
1888         }
1889 
1890         DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
1891     }
1892 }
1893 
1894 static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
1895     .set = uvd_v7_0_set_interrupt_state,
1896     .process = uvd_v7_0_process_interrupt,
1897 };
1898 
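/**
 * uvd_v7_0_set_irq_funcs - set the interrupt functions
 *
 * @adev: amdgpu_device pointer
 *
 * Register the interrupt handlers and the number of interrupt types
 * (one per encode ring plus one for decode) for every non-harvested
 * UVD instance.
 */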
1899 static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1900 {
1901     int i;
1902 
1903     for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1904         if (adev->uvd.harvest_config & (1 << i))
1905             continue;
1906         adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
1907         adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
1908     }
1909 }
1910 
1911 const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
1912 {
1913     .type = AMD_IP_BLOCK_TYPE_UVD,
1914     .major = 7,
1915     .minor = 0,
1916     .rev = 0,
1917     .funcs = &uvd_v7_0_ip_funcs,
1918 };