0001 /*
0002  * Copyright 2013 Advanced Micro Devices, Inc.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  * Authors: Alex Deucher
0023  */
0024 #include <linux/firmware.h>
0025 
0026 #include "radeon.h"
0027 #include "radeon_ucode.h"
0028 #include "radeon_asic.h"
0029 #include "radeon_trace.h"
0030 #include "cik.h"
0031 #include "cikd.h"
0032 
0033 /* sdma */
0034 #define CIK_SDMA_UCODE_SIZE 1050
0035 #define CIK_SDMA_UCODE_VERSION 64
0036 
0037 /*
0038  * sDMA - System DMA
0039  * Starting with CIK, the GPU has new asynchronous
0040  * DMA engines.  These engines are used for compute
0041  * and gfx.  There are two DMA engines (SDMA0, SDMA1)
0042  * and each one supports 1 ring buffer used for gfx
0043  * and 2 queues used for compute.
0044  *
0045  * The programming model is very similar to the CP
0046  * (ring buffer, IBs, etc.), but sDMA has its own
0047  * packet format that is different from the PM4 format
0048  * used by the CP. sDMA supports copying data, writing
0049  * embedded data, solid fills, and a number of other
0050  * things.  It also has support for tiling/detiling of
0051  * buffers.
0052  */
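
/*
 * Illustrative note (not part of the original file): every command the
 * functions below emit starts with a 32-bit header built by the
 * SDMA_PACKET() macro from cikd.h.  Assuming the usual CIK layout, the
 * opcode sits in bits 7:0, the sub-opcode in bits 15:8 and the
 * packet-specific "extra" bits in 31:16, so for example
 *
 *     SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)
 *
 * encodes a linear write, a NOP header is simply 0, and the indirect
 * buffer packet in cik_sdma_ring_ib_execute() carries the VM id in its
 * extra bits.
 */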
0053 
0054 /**
0055  * cik_sdma_get_rptr - get the current read pointer
0056  *
0057  * @rdev: radeon_device pointer
0058  * @ring: radeon ring pointer
0059  *
0060  * Get the current rptr from the hardware (CIK+).
0061  */
0062 uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
0063                struct radeon_ring *ring)
0064 {
0065     u32 rptr, reg;
0066 
0067     if (rdev->wb.enabled) {
0068         rptr = rdev->wb.wb[ring->rptr_offs/4];
0069     } else {
0070         if (ring->idx == R600_RING_TYPE_DMA_INDEX)
0071             reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
0072         else
0073             reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;
0074 
0075         rptr = RREG32(reg);
0076     }
0077 
0078     return (rptr & 0x3fffc) >> 2;
0079 }
0080 
0081 /**
0082  * cik_sdma_get_wptr - get the current write pointer
0083  *
0084  * @rdev: radeon_device pointer
0085  * @ring: radeon ring pointer
0086  *
0087  * Get the current wptr from the hardware (CIK+).
0088  */
0089 uint32_t cik_sdma_get_wptr(struct radeon_device *rdev,
0090                struct radeon_ring *ring)
0091 {
0092     u32 reg;
0093 
0094     if (ring->idx == R600_RING_TYPE_DMA_INDEX)
0095         reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
0096     else
0097         reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
0098 
0099     return (RREG32(reg) & 0x3fffc) >> 2;
0100 }
0101 
0102 /**
0103  * cik_sdma_set_wptr - commit the write pointer
0104  *
0105  * @rdev: radeon_device pointer
0106  * @ring: radeon ring pointer
0107  *
0108  * Write the wptr back to the hardware (CIK+).
0109  */
0110 void cik_sdma_set_wptr(struct radeon_device *rdev,
0111                struct radeon_ring *ring)
0112 {
0113     u32 reg;
0114 
0115     if (ring->idx == R600_RING_TYPE_DMA_INDEX)
0116         reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
0117     else
0118         reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
0119 
0120     WREG32(reg, (ring->wptr << 2) & 0x3fffc);
0121     (void)RREG32(reg);
0122 }
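
/*
 * Illustrative note: the RB_RPTR/RB_WPTR registers hold byte offsets
 * into the ring while radeon_ring tracks pointers in dwords, so the
 * helpers above mask with 0x3fffc (dword aligned, within a 256KB
 * window) and shift by 2 to convert.  A software wptr of 0x40 dwords,
 * for example, is written out as byte offset 0x100.  The trailing
 * RREG32() in cik_sdma_set_wptr() reads the register back, flushing
 * the posted write out to the engine before the caller continues.
 */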
0123 
0124 /**
0125  * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
0126  *
0127  * @rdev: radeon_device pointer
0128  * @ib: IB object to schedule
0129  *
0130  * Schedule an IB in the DMA ring (CIK).
0131  */
0132 void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
0133                   struct radeon_ib *ib)
0134 {
0135     struct radeon_ring *ring = &rdev->ring[ib->ring];
0136     u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;
0137 
0138     if (rdev->wb.enabled) {
0139         u32 next_rptr = ring->wptr + 5;
0140         while ((next_rptr & 7) != 4)
0141             next_rptr++;
0142         next_rptr += 4;
0143         radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
0144         radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
0145         radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
0146         radeon_ring_write(ring, 1); /* number of DWs to follow */
0147         radeon_ring_write(ring, next_rptr);
0148     }
0149 
0150     /* IB packet must end on an 8 DW boundary */
0151     while ((ring->wptr & 7) != 4)
0152         radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
0153     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
0154     radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
0155     radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
0156     radeon_ring_write(ring, ib->length_dw);
0157 
0158 }
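
/*
 * Illustrative note: the indirect buffer packet above is 4 dwords
 * (header, base low, base high, length) and has to end on an 8 dword
 * boundary, which is why NOPs are emitted until (wptr & 7) == 4.  With
 * wptr == 2, for instance, two NOPs advance it to 4 and the 4 dword IB
 * packet then ends right at the 8 dword boundary.  The next_rptr
 * write-back at the top of the function applies the same rounding, so
 * the value it stores points just past the IB packet.
 */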
0159 
0160 /**
0161  * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
0162  *
0163  * @rdev: radeon_device pointer
0164  * @ridx: radeon ring index
0165  *
0166  * Emit an hdp flush packet on the requested DMA ring.
0167  */
0168 static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
0169                      int ridx)
0170 {
0171     struct radeon_ring *ring = &rdev->ring[ridx];
0172     u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
0173               SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
0174     u32 ref_and_mask;
0175 
0176     if (ridx == R600_RING_TYPE_DMA_INDEX)
0177         ref_and_mask = SDMA0;
0178     else
0179         ref_and_mask = SDMA1;
0180 
0181     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
0182     radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
0183     radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
0184     radeon_ring_write(ring, ref_and_mask); /* reference */
0185     radeon_ring_write(ring, ref_and_mask); /* mask */
0186     radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
0187 }
0188 
0189 /**
0190  * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
0191  *
0192  * @rdev: radeon_device pointer
0193  * @fence: radeon fence object
0194  *
0195  * Add a DMA fence packet to the ring to write
0196  * the fence seq number, and a DMA trap packet to generate
0197  * an interrupt if needed (CIK).
0198  */
0199 void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
0200                   struct radeon_fence *fence)
0201 {
0202     struct radeon_ring *ring = &rdev->ring[fence->ring];
0203     u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
0204 
0205     /* write the fence */
0206     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
0207     radeon_ring_write(ring, lower_32_bits(addr));
0208     radeon_ring_write(ring, upper_32_bits(addr));
0209     radeon_ring_write(ring, fence->seq);
0210     /* generate an interrupt */
0211     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
0212     /* flush HDP */
0213     cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
0214 }
0215 
0216 /**
0217  * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
0218  *
0219  * @rdev: radeon_device pointer
0220  * @ring: radeon_ring structure holding ring information
0221  * @semaphore: radeon semaphore object
0222  * @emit_wait: wait or signal semaphore
0223  *
0224  * Add a DMA semaphore packet to the ring to wait on or signal
0225  * other rings (CIK).
0226  */
0227 bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
0228                   struct radeon_ring *ring,
0229                   struct radeon_semaphore *semaphore,
0230                   bool emit_wait)
0231 {
0232     u64 addr = semaphore->gpu_addr;
0233     u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
0234 
0235     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
0236     radeon_ring_write(ring, addr & 0xfffffff8);
0237     radeon_ring_write(ring, upper_32_bits(addr));
0238 
0239     return true;
0240 }
0241 
0242 /**
0243  * cik_sdma_gfx_stop - stop the gfx async dma engines
0244  *
0245  * @rdev: radeon_device pointer
0246  *
0247  * Stop the gfx async dma ring buffers (CIK).
0248  */
0249 static void cik_sdma_gfx_stop(struct radeon_device *rdev)
0250 {
0251     u32 rb_cntl, reg_offset;
0252     int i;
0253 
0254     if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
0255         (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
0256         radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
0257 
0258     for (i = 0; i < 2; i++) {
0259         if (i == 0)
0260             reg_offset = SDMA0_REGISTER_OFFSET;
0261         else
0262             reg_offset = SDMA1_REGISTER_OFFSET;
0263         rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
0264         rb_cntl &= ~SDMA_RB_ENABLE;
0265         WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
0266         WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
0267     }
0268     rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
0269     rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
0270 
0271     /* FIXME: use something less drastic than this big hammer.  After several
0272      * days of trying we could not find a better combination, so reset the SDMA
0273      * blocks here since they do not seem to shut down properly otherwise.
0274      * This fixes hibernation and does not affect suspend to RAM.
0275      */
0276     WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
0277     (void)RREG32(SRBM_SOFT_RESET);
0278     udelay(50);
0279     WREG32(SRBM_SOFT_RESET, 0);
0280     (void)RREG32(SRBM_SOFT_RESET);
0281 }
0282 
0283 /**
0284  * cik_sdma_rlc_stop - stop the compute async dma engines
0285  *
0286  * @rdev: radeon_device pointer
0287  *
0288  * Stop the compute async dma queues (CIK).
0289  */
0290 static void cik_sdma_rlc_stop(struct radeon_device *rdev)
0291 {
0292     /* XXX todo */
0293 }
0294 
0295 /**
0296  * cik_sdma_ctx_switch_enable - enable/disable sdma engine preemption
0297  *
0298  * @rdev: radeon_device pointer
0299  * @enable: enable/disable preemption.
0300  *
0301  * Enable or disable automatic context switching on the async dma engines (CIK).
0302  */
0303 static void cik_sdma_ctx_switch_enable(struct radeon_device *rdev, bool enable)
0304 {
0305     uint32_t reg_offset, value;
0306     int i;
0307 
0308     for (i = 0; i < 2; i++) {
0309         if (i == 0)
0310             reg_offset = SDMA0_REGISTER_OFFSET;
0311         else
0312             reg_offset = SDMA1_REGISTER_OFFSET;
0313         value = RREG32(SDMA0_CNTL + reg_offset);
0314         if (enable)
0315             value |= AUTO_CTXSW_ENABLE;
0316         else
0317             value &= ~AUTO_CTXSW_ENABLE;
0318         WREG32(SDMA0_CNTL + reg_offset, value);
0319     }
0320 }
0321 
0322 /**
0323  * cik_sdma_enable - halt or unhalt the async dma engines
0324  *
0325  * @rdev: radeon_device pointer
0326  * @enable: enable/disable the DMA MEs.
0327  *
0328  * Halt or unhalt the async dma engines (CIK).
0329  */
0330 void cik_sdma_enable(struct radeon_device *rdev, bool enable)
0331 {
0332     u32 me_cntl, reg_offset;
0333     int i;
0334 
0335     if (!enable) {
0336         cik_sdma_gfx_stop(rdev);
0337         cik_sdma_rlc_stop(rdev);
0338     }
0339 
0340     for (i = 0; i < 2; i++) {
0341         if (i == 0)
0342             reg_offset = SDMA0_REGISTER_OFFSET;
0343         else
0344             reg_offset = SDMA1_REGISTER_OFFSET;
0345         me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
0346         if (enable)
0347             me_cntl &= ~SDMA_HALT;
0348         else
0349             me_cntl |= SDMA_HALT;
0350         WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
0351     }
0352 
0353     cik_sdma_ctx_switch_enable(rdev, enable);
0354 }
0355 
0356 /**
0357  * cik_sdma_gfx_resume - setup and start the async dma engines
0358  *
0359  * @rdev: radeon_device pointer
0360  *
0361  * Set up the gfx DMA ring buffers and enable them (CIK).
0362  * Returns 0 for success, error for failure.
0363  */
0364 static int cik_sdma_gfx_resume(struct radeon_device *rdev)
0365 {
0366     struct radeon_ring *ring;
0367     u32 rb_cntl, ib_cntl;
0368     u32 rb_bufsz;
0369     u32 reg_offset, wb_offset;
0370     int i, r;
0371 
0372     for (i = 0; i < 2; i++) {
0373         if (i == 0) {
0374             ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
0375             reg_offset = SDMA0_REGISTER_OFFSET;
0376             wb_offset = R600_WB_DMA_RPTR_OFFSET;
0377         } else {
0378             ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
0379             reg_offset = SDMA1_REGISTER_OFFSET;
0380             wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
0381         }
0382 
0383         WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
0384         WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
0385 
0386         /* Set ring buffer size in dwords */
0387         rb_bufsz = order_base_2(ring->ring_size / 4);
0388         rb_cntl = rb_bufsz << 1;
0389 #ifdef __BIG_ENDIAN
0390         rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
0391 #endif
0392         WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
0393 
0394         /* Initialize the ring buffer's read and write pointers */
0395         WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
0396         WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
0397 
0398         /* set the wb address whether it's enabled or not */
0399         WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
0400                upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
0401         WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
0402                ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
0403 
0404         if (rdev->wb.enabled)
0405             rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
0406 
0407         WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
0408         WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
0409 
0410         ring->wptr = 0;
0411         WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
0412 
0413         /* enable DMA RB */
0414         WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
0415 
0416         ib_cntl = SDMA_IB_ENABLE;
0417 #ifdef __BIG_ENDIAN
0418         ib_cntl |= SDMA_IB_SWAP_ENABLE;
0419 #endif
0420         /* enable DMA IBs */
0421         WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
0422 
0423         ring->ready = true;
0424 
0425         r = radeon_ring_test(rdev, ring->idx, ring);
0426         if (r) {
0427             ring->ready = false;
0428             return r;
0429         }
0430     }
0431 
0432     if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
0433         (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
0434         radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
0435 
0436     return 0;
0437 }
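
/*
 * Illustrative note: RB_CNTL takes the ring size as a log2 value in
 * dwords starting at bit 1.  A 256KB ring, for example, is 65536
 * dwords, order_base_2(65536) == 16, so rb_cntl starts out as
 * 16 << 1 == 0x20 before the optional endian-swap, write-back and
 * RB_ENABLE bits are ORed in.
 */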
0438 
0439 /**
0440  * cik_sdma_rlc_resume - setup and start the async dma engines
0441  *
0442  * @rdev: radeon_device pointer
0443  *
0444  * Set up the compute DMA queues and enable them (CIK).
0445  * Returns 0 for success, error for failure.
0446  */
0447 static int cik_sdma_rlc_resume(struct radeon_device *rdev)
0448 {
0449     /* XXX todo */
0450     return 0;
0451 }
0452 
0453 /**
0454  * cik_sdma_load_microcode - load the sDMA ME ucode
0455  *
0456  * @rdev: radeon_device pointer
0457  *
0458  * Loads the sDMA0/1 ucode.
0459  * Returns 0 for success, -EINVAL if the ucode is not available.
0460  */
0461 static int cik_sdma_load_microcode(struct radeon_device *rdev)
0462 {
0463     int i;
0464 
0465     if (!rdev->sdma_fw)
0466         return -EINVAL;
0467 
0468     /* halt the MEs */
0469     cik_sdma_enable(rdev, false);
0470 
0471     if (rdev->new_fw) {
0472         const struct sdma_firmware_header_v1_0 *hdr =
0473             (const struct sdma_firmware_header_v1_0 *)rdev->sdma_fw->data;
0474         const __le32 *fw_data;
0475         u32 fw_size;
0476 
0477         radeon_ucode_print_sdma_hdr(&hdr->header);
0478 
0479         /* sdma0 */
0480         fw_data = (const __le32 *)
0481             (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
0482         fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
0483         WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
0484         for (i = 0; i < fw_size; i++)
0485             WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, le32_to_cpup(fw_data++));
0486         WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
0487 
0488         /* sdma1 */
0489         fw_data = (const __le32 *)
0490             (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
0491         fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
0492         WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
0493         for (i = 0; i < fw_size; i++)
0494             WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, le32_to_cpup(fw_data++));
0495         WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
0496     } else {
0497         const __be32 *fw_data;
0498 
0499         /* sdma0 */
0500         fw_data = (const __be32 *)rdev->sdma_fw->data;
0501         WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
0502         for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
0503             WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
0504         WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
0505 
0506         /* sdma1 */
0507         fw_data = (const __be32 *)rdev->sdma_fw->data;
0508         WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
0509         for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
0510             WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
0511         WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
0512     }
0513 
0514     WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
0515     WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
0516     return 0;
0517 }
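
/*
 * Illustrative note: both engines are fed the same firmware image.  For
 * each engine, UCODE_ADDR is reset to 0, the image is streamed one
 * dword at a time through UCODE_DATA, and CIK_SDMA_UCODE_VERSION is
 * written as the final data word; legacy (non-new_fw) images are
 * assumed to be exactly CIK_SDMA_UCODE_SIZE (1050) dwords.
 */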
0518 
0519 /**
0520  * cik_sdma_resume - setup and start the async dma engines
0521  *
0522  * @rdev: radeon_device pointer
0523  *
0524  * Set up the DMA engines and enable them (CIK).
0525  * Returns 0 for success, error for failure.
0526  */
0527 int cik_sdma_resume(struct radeon_device *rdev)
0528 {
0529     int r;
0530 
0531     r = cik_sdma_load_microcode(rdev);
0532     if (r)
0533         return r;
0534 
0535     /* unhalt the MEs */
0536     cik_sdma_enable(rdev, true);
0537 
0538     /* start the gfx rings and rlc compute queues */
0539     r = cik_sdma_gfx_resume(rdev);
0540     if (r)
0541         return r;
0542     r = cik_sdma_rlc_resume(rdev);
0543     if (r)
0544         return r;
0545 
0546     return 0;
0547 }
0548 
0549 /**
0550  * cik_sdma_fini - tear down the async dma engines
0551  *
0552  * @rdev: radeon_device pointer
0553  *
0554  * Stop the async dma engines and free the rings (CIK).
0555  */
0556 void cik_sdma_fini(struct radeon_device *rdev)
0557 {
0558     /* halt the MEs */
0559     cik_sdma_enable(rdev, false);
0560     radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
0561     radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
0562     /* XXX - compute dma queue tear down */
0563 }
0564 
0565 /**
0566  * cik_copy_dma - copy pages using the DMA engine
0567  *
0568  * @rdev: radeon_device pointer
0569  * @src_offset: src GPU address
0570  * @dst_offset: dst GPU address
0571  * @num_gpu_pages: number of GPU pages to xfer
0572  * @resv: reservation object to sync to
0573  *
0574  * Copy GPU pages using the DMA engine (CIK).
0575  * Used by the radeon ttm implementation to move pages if
0576  * registered as the asic copy callback.
0577  */
0578 struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
0579                   uint64_t src_offset, uint64_t dst_offset,
0580                   unsigned num_gpu_pages,
0581                   struct dma_resv *resv)
0582 {
0583     struct radeon_fence *fence;
0584     struct radeon_sync sync;
0585     int ring_index = rdev->asic->copy.dma_ring_index;
0586     struct radeon_ring *ring = &rdev->ring[ring_index];
0587     u32 size_in_bytes, cur_size_in_bytes;
0588     int i, num_loops;
0589     int r = 0;
0590 
0591     radeon_sync_create(&sync);
0592 
0593     size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
0594     num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
0595     r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
0596     if (r) {
0597         DRM_ERROR("radeon: moving bo (%d).\n", r);
0598         radeon_sync_free(rdev, &sync, NULL);
0599         return ERR_PTR(r);
0600     }
0601 
0602     radeon_sync_resv(rdev, &sync, resv, false);
0603     radeon_sync_rings(rdev, &sync, ring->idx);
0604 
0605     for (i = 0; i < num_loops; i++) {
0606         cur_size_in_bytes = size_in_bytes;
0607         if (cur_size_in_bytes > 0x1fffff)
0608             cur_size_in_bytes = 0x1fffff;
0609         size_in_bytes -= cur_size_in_bytes;
0610         radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
0611         radeon_ring_write(ring, cur_size_in_bytes);
0612         radeon_ring_write(ring, 0); /* src/dst endian swap */
0613         radeon_ring_write(ring, lower_32_bits(src_offset));
0614         radeon_ring_write(ring, upper_32_bits(src_offset));
0615         radeon_ring_write(ring, lower_32_bits(dst_offset));
0616         radeon_ring_write(ring, upper_32_bits(dst_offset));
0617         src_offset += cur_size_in_bytes;
0618         dst_offset += cur_size_in_bytes;
0619     }
0620 
0621     r = radeon_fence_emit(rdev, &fence, ring->idx);
0622     if (r) {
0623         radeon_ring_unlock_undo(rdev, ring);
0624         radeon_sync_free(rdev, &sync, NULL);
0625         return ERR_PTR(r);
0626     }
0627 
0628     radeon_ring_unlock_commit(rdev, ring, false);
0629     radeon_sync_free(rdev, &sync, fence);
0630 
0631     return fence;
0632 }
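
/*
 * Illustrative note: each linear COPY packet above is 7 dwords and
 * moves at most 0x1fffff bytes (just under 2MB), so the ring is locked
 * for 7 dwords per chunk plus 14 dwords of headroom for the semaphore
 * syncs and the fence/HDP flush emitted afterwards.  Copying 8MB worth
 * of GPU pages, for example, needs DIV_ROUND_UP(0x800000, 0x1fffff)
 * == 5 packets.
 */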
0633 
0634 /**
0635  * cik_sdma_ring_test - simple async dma engine test
0636  *
0637  * @rdev: radeon_device pointer
0638  * @ring: radeon_ring structure holding ring information
0639  *
0640  * Test the DMA engine by using it to write a
0641  * value to memory (CIK).
0642  * Returns 0 for success, error for failure.
0643  */
0644 int cik_sdma_ring_test(struct radeon_device *rdev,
0645                struct radeon_ring *ring)
0646 {
0647     unsigned i;
0648     int r;
0649     unsigned index;
0650     u32 tmp;
0651     u64 gpu_addr;
0652 
0653     if (ring->idx == R600_RING_TYPE_DMA_INDEX)
0654         index = R600_WB_DMA_RING_TEST_OFFSET;
0655     else
0656         index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
0657 
0658     gpu_addr = rdev->wb.gpu_addr + index;
0659 
0660     tmp = 0xCAFEDEAD;
0661     rdev->wb.wb[index/4] = cpu_to_le32(tmp);
0662 
0663     r = radeon_ring_lock(rdev, ring, 5);
0664     if (r) {
0665         DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
0666         return r;
0667     }
0668     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
0669     radeon_ring_write(ring, lower_32_bits(gpu_addr));
0670     radeon_ring_write(ring, upper_32_bits(gpu_addr));
0671     radeon_ring_write(ring, 1); /* number of DWs to follow */
0672     radeon_ring_write(ring, 0xDEADBEEF);
0673     radeon_ring_unlock_commit(rdev, ring, false);
0674 
0675     for (i = 0; i < rdev->usec_timeout; i++) {
0676         tmp = le32_to_cpu(rdev->wb.wb[index/4]);
0677         if (tmp == 0xDEADBEEF)
0678             break;
0679         udelay(1);
0680     }
0681 
0682     if (i < rdev->usec_timeout) {
0683         DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
0684     } else {
0685         DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
0686               ring->idx, tmp);
0687         r = -EINVAL;
0688     }
0689     return r;
0690 }
0691 
0692 /**
0693  * cik_sdma_ib_test - test an IB on the DMA engine
0694  *
0695  * @rdev: radeon_device pointer
0696  * @ring: radeon_ring structure holding ring information
0697  *
0698  * Test a simple IB in the DMA ring (CIK).
0699  * Returns 0 on success, error on failure.
0700  */
0701 int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
0702 {
0703     struct radeon_ib ib;
0704     unsigned i;
0705     unsigned index;
0706     int r;
0707     u32 tmp = 0;
0708     u64 gpu_addr;
0709 
0710     if (ring->idx == R600_RING_TYPE_DMA_INDEX)
0711         index = R600_WB_DMA_RING_TEST_OFFSET;
0712     else
0713         index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
0714 
0715     gpu_addr = rdev->wb.gpu_addr + index;
0716 
0717     tmp = 0xCAFEDEAD;
0718     rdev->wb.wb[index/4] = cpu_to_le32(tmp);
0719 
0720     r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
0721     if (r) {
0722         DRM_ERROR("radeon: failed to get ib (%d).\n", r);
0723         return r;
0724     }
0725 
0726     ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
0727     ib.ptr[1] = lower_32_bits(gpu_addr);
0728     ib.ptr[2] = upper_32_bits(gpu_addr);
0729     ib.ptr[3] = 1;
0730     ib.ptr[4] = 0xDEADBEEF;
0731     ib.length_dw = 5;
0732 
0733     r = radeon_ib_schedule(rdev, &ib, NULL, false);
0734     if (r) {
0735         radeon_ib_free(rdev, &ib);
0736         DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
0737         return r;
0738     }
0739     r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
0740         RADEON_USEC_IB_TEST_TIMEOUT));
0741     if (r < 0) {
0742         DRM_ERROR("radeon: fence wait failed (%d).\n", r);
0743         return r;
0744     } else if (r == 0) {
0745         DRM_ERROR("radeon: fence wait timed out.\n");
0746         return -ETIMEDOUT;
0747     }
0748     r = 0;
0749     for (i = 0; i < rdev->usec_timeout; i++) {
0750         tmp = le32_to_cpu(rdev->wb.wb[index/4]);
0751         if (tmp == 0xDEADBEEF)
0752             break;
0753         udelay(1);
0754     }
0755     if (i < rdev->usec_timeout) {
0756         DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
0757     } else {
0758         DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
0759         r = -EINVAL;
0760     }
0761     radeon_ib_free(rdev, &ib);
0762     return r;
0763 }
0764 
0765 /**
0766  * cik_sdma_is_lockup - Check if the DMA engine is locked up
0767  *
0768  * @rdev: radeon_device pointer
0769  * @ring: radeon_ring structure holding ring information
0770  *
0771  * Check if the async DMA engine is locked up (CIK).
0772  * Returns true if the engine appears to be locked up, false if not.
0773  */
0774 bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
0775 {
0776     u32 reset_mask = cik_gpu_check_soft_reset(rdev);
0777     u32 mask;
0778 
0779     if (ring->idx == R600_RING_TYPE_DMA_INDEX)
0780         mask = RADEON_RESET_DMA;
0781     else
0782         mask = RADEON_RESET_DMA1;
0783 
0784     if (!(reset_mask & mask)) {
0785         radeon_ring_lockup_update(rdev, ring);
0786         return false;
0787     }
0788     return radeon_ring_test_lockup(rdev, ring);
0789 }
0790 
0791 /**
0792  * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
0793  *
0794  * @rdev: radeon_device pointer
0795  * @ib: indirect buffer to fill with commands
0796  * @pe: addr of the page entry
0797  * @src: src addr to copy from
0798  * @count: number of page entries to update
0799  *
0800  * Update PTEs by copying them from the GART using sDMA (CIK).
0801  */
0802 void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
0803                 struct radeon_ib *ib,
0804                 uint64_t pe, uint64_t src,
0805                 unsigned count)
0806 {
0807     while (count) {
0808         unsigned bytes = count * 8;
0809         if (bytes > 0x1FFFF8)
0810             bytes = 0x1FFFF8;
0811 
0812         ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
0813             SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
0814         ib->ptr[ib->length_dw++] = bytes;
0815         ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
0816         ib->ptr[ib->length_dw++] = lower_32_bits(src);
0817         ib->ptr[ib->length_dw++] = upper_32_bits(src);
0818         ib->ptr[ib->length_dw++] = lower_32_bits(pe);
0819         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
0820 
0821         pe += bytes;
0822         src += bytes;
0823         count -= bytes / 8;
0824     }
0825 }
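
/*
 * Illustrative note: page table entries are 8 bytes, so the per-packet
 * limit of 0x1FFFF8 bytes is simply the 0x1fffff byte copy limit
 * rounded down to a multiple of 8, i.e. at most 0x3FFFF PTEs are
 * copied from the GART per COPY packet.
 */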
0826 
0827 /**
0828  * cik_sdma_vm_write_pages - update PTEs by writing them manually
0829  *
0830  * @rdev: radeon_device pointer
0831  * @ib: indirect buffer to fill with commands
0832  * @pe: addr of the page entry
0833  * @addr: dst addr to write into pe
0834  * @count: number of page entries to update
0835  * @incr: increase next addr by incr bytes
0836  * @flags: access flags
0837  *
0838  * Update PTEs by writing them manually using sDMA (CIK).
0839  */
0840 void cik_sdma_vm_write_pages(struct radeon_device *rdev,
0841                  struct radeon_ib *ib,
0842                  uint64_t pe,
0843                  uint64_t addr, unsigned count,
0844                  uint32_t incr, uint32_t flags)
0845 {
0846     uint64_t value;
0847     unsigned ndw;
0848 
0849     while (count) {
0850         ndw = count * 2;
0851         if (ndw > 0xFFFFE)
0852             ndw = 0xFFFFE;
0853 
0854         /* for non-physically contiguous pages (system) */
0855         ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
0856             SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
0857         ib->ptr[ib->length_dw++] = pe;
0858         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
0859         ib->ptr[ib->length_dw++] = ndw;
0860         for (; ndw > 0; ndw -= 2, --count, pe += 8) {
0861             if (flags & R600_PTE_SYSTEM) {
0862                 value = radeon_vm_map_gart(rdev, addr);
0863             } else if (flags & R600_PTE_VALID) {
0864                 value = addr;
0865             } else {
0866                 value = 0;
0867             }
0868             addr += incr;
0869             value |= flags;
0870             ib->ptr[ib->length_dw++] = value;
0871             ib->ptr[ib->length_dw++] = upper_32_bits(value);
0872         }
0873     }
0874 }
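
/*
 * Illustrative note: the WRITE LINEAR packet used here is 4 header
 * dwords (header, destination low/high, dword count) followed by the
 * payload, two dwords per PTE.  The count field is capped at 0xFFFFE
 * dwords, so at most 0x7FFFF entries are written per packet.
 */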
0875 
0876 /**
0877  * cik_sdma_vm_set_pages - update the page tables using sDMA
0878  *
0879  * @rdev: radeon_device pointer
0880  * @ib: indirect buffer to fill with commands
0881  * @pe: addr of the page entry
0882  * @addr: dst addr to write into pe
0883  * @count: number of page entries to update
0884  * @incr: increase next addr by incr bytes
0885  * @flags: access flags
0886  *
0887  * Update the page tables using sDMA (CIK).
0888  */
0889 void cik_sdma_vm_set_pages(struct radeon_device *rdev,
0890                struct radeon_ib *ib,
0891                uint64_t pe,
0892                uint64_t addr, unsigned count,
0893                uint32_t incr, uint32_t flags)
0894 {
0895     uint64_t value;
0896     unsigned ndw;
0897 
0898     while (count) {
0899         ndw = count;
0900         if (ndw > 0x7FFFF)
0901             ndw = 0x7FFFF;
0902 
0903         if (flags & R600_PTE_VALID)
0904             value = addr;
0905         else
0906             value = 0;
0907 
0908         /* for physically contiguous pages (vram) */
0909         ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
0910         ib->ptr[ib->length_dw++] = pe; /* dst addr */
0911         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
0912         ib->ptr[ib->length_dw++] = flags; /* mask */
0913         ib->ptr[ib->length_dw++] = 0;
0914         ib->ptr[ib->length_dw++] = value; /* value */
0915         ib->ptr[ib->length_dw++] = upper_32_bits(value);
0916         ib->ptr[ib->length_dw++] = incr; /* increment size */
0917         ib->ptr[ib->length_dw++] = 0;
0918         ib->ptr[ib->length_dw++] = ndw; /* number of entries */
0919 
0920         pe += ndw * 8;
0921         addr += ndw * incr;
0922         count -= ndw;
0923     }
0924 }
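
/*
 * Illustrative note: GENERATE_PTE_PDE lets the engine synthesize a run
 * of contiguous entries by itself from a destination address, the
 * access flags, an initial 64-bit value, a per-entry increment and an
 * entry count.  Filling 0x7FFFF VRAM PTEs this way costs a single 10
 * dword packet instead of two payload dwords per entry.
 */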
0925 
0926 /**
0927  * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
0928  *
0929  * @ib: indirect buffer to fill with padding
0930  * Pad the IB with NOP packets so its length is a multiple of 8 dwords (CIK).
0931  */
0932 void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
0933 {
0934     while (ib->length_dw & 0x7)
0935         ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
0936 }
0937 
0938 /*
0939  * cik_dma_vm_flush - cik vm flush using sDMA
0940  *
0941  * Update the page table base and flush the VM TLB
0942  * using sDMA (CIK).
0943  */
0944 void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
0945               unsigned vm_id, uint64_t pd_addr)
0946 {
0947     u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
0948               SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
0949 
0950     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
0951     if (vm_id < 8) {
0952         radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
0953     } else {
0954         radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
0955     }
0956     radeon_ring_write(ring, pd_addr >> 12);
0957 
0958     /* update SH_MEM_* regs */
0959     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
0960     radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
0961     radeon_ring_write(ring, VMID(vm_id));
0962 
0963     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
0964     radeon_ring_write(ring, SH_MEM_BASES >> 2);
0965     radeon_ring_write(ring, 0);
0966 
0967     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
0968     radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
0969     radeon_ring_write(ring, 0);
0970 
0971     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
0972     radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
0973     radeon_ring_write(ring, 1);
0974 
0975     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
0976     radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
0977     radeon_ring_write(ring, 0);
0978 
0979     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
0980     radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
0981     radeon_ring_write(ring, VMID(0));
0982 
0983     /* flush HDP */
0984     cik_sdma_hdp_flush_ring_emit(rdev, ring->idx);
0985 
0986     /* flush TLB */
0987     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
0988     radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
0989     radeon_ring_write(ring, 1 << vm_id);
0990 
0991     radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
0992     radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
0993     radeon_ring_write(ring, 0);
0994     radeon_ring_write(ring, 0); /* reference */
0995     radeon_ring_write(ring, 0); /* mask */
0996     radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
0997 }
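
/*
 * Illustrative note: the SRBM_WRITE packets above appear to carry a
 * byte-enable (the 0xf000 extra bits) in the header, followed by the
 * register offset in dwords and the value to write.  The closing
 * POLL_REG_MEM uses compare function 0 ("always"), so it seems to
 * complete immediately and mainly acts as an ordering point behind the
 * VM_INVALIDATE_REQUEST write.
 */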
0998