/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "si.h"
#include "sid.h"

/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
    u32 reset_mask = si_gpu_check_soft_reset(rdev);
    u32 mask;

    if (ring->idx == R600_RING_TYPE_DMA_INDEX)
        mask = RADEON_RESET_DMA;
    else
        mask = RADEON_RESET_DMA1;

    if (!(reset_mask & mask)) {
        radeon_ring_lockup_update(rdev, ring);
        return false;
    }
    return radeon_ring_test_lockup(rdev, ring);
}
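
/*
 * Hedged usage sketch (not part of this file): si_dma_is_lockup() is
 * reached through the per-ring asic callback table in radeon_asic.c,
 * roughly:
 *
 *     static struct radeon_asic_ring si_dma_ring = {
 *         ...
 *         .is_lockup = &si_dma_is_lockup,
 *         ...
 *     };
 *
 * so the generic fence timeout path can ask whether a ring is stuck
 * without knowing the SI-specific soft-reset registers.
 */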

/**
 * si_dma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr where to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using the DMA (SI).
 */
void si_dma_vm_copy_pages(struct radeon_device *rdev,
              struct radeon_ib *ib,
              uint64_t pe, uint64_t src,
              unsigned count)
{
    while (count) {
        unsigned bytes = count * 8;
        if (bytes > 0xFFFF8)
            bytes = 0xFFFF8;

        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
                              1, 0, 0, bytes);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = lower_32_bits(src);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
        ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

        pe += bytes;
        src += bytes;
        count -= bytes / 8;
    }
}
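
/*
 * A minimal sketch, not part of the driver: the dword budget of
 * si_dma_vm_copy_pages() for a given PTE count. Each COPY packet
 * above is 5 dwords and moves at most 0xFFFF8 bytes (0x1FFFF
 * 8-byte PTEs), which keeps the 20-bit byte count 8-byte aligned.
 */
static unsigned si_dma_copy_pages_ndw(unsigned count)
{
    return DIV_ROUND_UP(count, 0xFFFF8 / 8) * 5;
}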

/**
 * si_dma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using the DMA (SI).
 */
void si_dma_vm_write_pages(struct radeon_device *rdev,
               struct radeon_ib *ib,
               uint64_t pe,
               uint64_t addr, unsigned count,
               uint32_t incr, uint32_t flags)
{
    uint64_t value;
    unsigned ndw;

    while (count) {
        ndw = count * 2;
        if (ndw > 0xFFFFE)
            ndw = 0xFFFFE;

        /* for non-physically contiguous pages (system) */
        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
        ib->ptr[ib->length_dw++] = pe;
        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
            if (flags & R600_PTE_SYSTEM) {
                value = radeon_vm_map_gart(rdev, addr);
            } else if (flags & R600_PTE_VALID) {
                value = addr;
            } else {
                value = 0;
            }
            addr += incr;
            value |= flags;
            ib->ptr[ib->length_dw++] = value;
            ib->ptr[ib->length_dw++] = upper_32_bits(value);
        }
    }
}
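
/*
 * A minimal sketch, not part of the driver: an upper bound on the
 * dwords si_dma_vm_write_pages() emits. Every PTE costs two payload
 * dwords, and the 3-dword WRITE header is repeated once per 0xFFFFE
 * payload dwords (0x7FFFF PTEs).
 */
static unsigned si_dma_write_pages_ndw(unsigned count)
{
    return DIV_ROUND_UP(count, 0xFFFFE / 2) * 3 + count * 2;
}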

/**
 * si_dma_vm_set_pages - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (SI).
 */
void si_dma_vm_set_pages(struct radeon_device *rdev,
             struct radeon_ib *ib,
             uint64_t pe,
             uint64_t addr, unsigned count,
             uint32_t incr, uint32_t flags)
{
    uint64_t value;
    unsigned ndw;

    while (count) {
        ndw = count * 2;
        if (ndw > 0xFFFFE)
            ndw = 0xFFFFE;

        if (flags & R600_PTE_VALID)
            value = addr;
        else
            value = 0;

        /* for physically contiguous pages (vram) */
        ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
        ib->ptr[ib->length_dw++] = pe; /* dst addr */
        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
        ib->ptr[ib->length_dw++] = flags; /* mask */
        ib->ptr[ib->length_dw++] = 0;
        ib->ptr[ib->length_dw++] = value; /* value */
        ib->ptr[ib->length_dw++] = upper_32_bits(value);
        ib->ptr[ib->length_dw++] = incr; /* increment size */
        ib->ptr[ib->length_dw++] = 0;
        pe += ndw * 4;
        addr += (ndw / 2) * incr;
        count -= ndw / 2;
    }
}
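
/*
 * A minimal sketch, not part of the driver: the dword cost of
 * si_dma_vm_set_pages(). One 9-dword PTE_PDE packet programs up to
 * 0x7FFFF entries (ndw = count * 2 is clamped to 0xFFFFE), so the
 * cost grows in 9-dword steps rather than per entry.
 */
static unsigned si_dma_set_pages_ndw(unsigned count)
{
    return DIV_ROUND_UP(count, 0xFFFFE / 2) * 9;
}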

/**
 * si_dma_vm_flush - flush the VM TLB using the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @vm_id: VM context to flush (0-15)
 * @pd_addr: physical base address of the page directory
 *
 * Point the requested VM context at its page directory, flush the
 * HDP cache, and invalidate the context's TLB using the DMA (SI).
 */
void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
             unsigned vm_id, uint64_t pd_addr)
{
    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
    if (vm_id < 8) {
        radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
    } else {
        radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
    }
    radeon_ring_write(ring, pd_addr >> 12);

    /* flush hdp cache */
    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
    radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
    radeon_ring_write(ring, 1);

    /* bits 0-7 are the VM contexts 0-7 */
    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
    radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
    radeon_ring_write(ring, 1 << vm_id);

    /* wait for invalidate to complete */
    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
    radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
    radeon_ring_write(ring, 0xff << 16); /* retry */
    radeon_ring_write(ring, 1 << vm_id); /* mask */
    radeon_ring_write(ring, 0); /* value */
    radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}
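
/*
 * A minimal sketch, not part of the driver: the address dword of each
 * SRBM_WRITE packet above carries a byte-enable mask in bits 19:16
 * and the dword-aligned register offset in the low bits, which is
 * what the (0xf << 16) | (reg >> 2) expressions construct.
 */
static u32 si_dma_srbm_write_addr(u32 reg)
{
    return (0xf << 16) | (reg >> 2);
}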

/**
 * si_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (SI).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
                 uint64_t src_offset, uint64_t dst_offset,
                 unsigned num_gpu_pages,
                 struct dma_resv *resv)
{
    struct radeon_fence *fence;
    struct radeon_sync sync;
    int ring_index = rdev->asic->copy.dma_ring_index;
    struct radeon_ring *ring = &rdev->ring[ring_index];
    u32 size_in_bytes, cur_size_in_bytes;
    int i, num_loops;
    int r = 0;

    radeon_sync_create(&sync);

    size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
    num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
    r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
    if (r) {
        DRM_ERROR("radeon: moving bo (%d).\n", r);
        radeon_sync_free(rdev, &sync, NULL);
        return ERR_PTR(r);
    }

    radeon_sync_resv(rdev, &sync, resv, false);
    radeon_sync_rings(rdev, &sync, ring->idx);

    for (i = 0; i < num_loops; i++) {
        cur_size_in_bytes = size_in_bytes;
        if (cur_size_in_bytes > 0xFFFFF)
            cur_size_in_bytes = 0xFFFFF;
        size_in_bytes -= cur_size_in_bytes;
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
        radeon_ring_write(ring, lower_32_bits(dst_offset));
        radeon_ring_write(ring, lower_32_bits(src_offset));
        radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
        radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
        src_offset += cur_size_in_bytes;
        dst_offset += cur_size_in_bytes;
    }

    r = radeon_fence_emit(rdev, &fence, ring->idx);
    if (r) {
        radeon_ring_unlock_undo(rdev, ring);
        radeon_sync_free(rdev, &sync, NULL);
        return ERR_PTR(r);
    }

    radeon_ring_unlock_commit(rdev, ring, false);
    radeon_sync_free(rdev, &sync, fence);

    return fence;
}
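
/*
 * Hedged usage sketch (illustrative names, not driver code): radeon
 * registers si_copy_dma() as the asic copy callback, so a TTM buffer
 * move reduces to roughly:
 *
 *     struct radeon_fence *fence;
 *
 *     fence = si_copy_dma(rdev, src_gpu_addr, dst_gpu_addr,
 *                 num_pages, bo->tbo.base.resv);
 *     if (IS_ERR(fence))
 *         return PTR_ERR(fence);
 *     r = radeon_fence_wait(fence, false);
 *     radeon_fence_unref(&fence);
 */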