#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "si.h"
#include "sid.h"
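
/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */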
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}
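
/**
 * si_dma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr where to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using the DMA (SI).
 */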
void si_dma_vm_copy_pages(struct radeon_device *rdev,
			  struct radeon_ib *ib,
			  uint64_t pe, uint64_t src,
			  unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
		if (bytes > 0xFFFF8)
			bytes = 0xFFFF8;

		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
						      1, 0, 0, bytes);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}
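
/**
 * si_dma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using the DMA (SI).
 */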
void si_dma_vm_write_pages(struct radeon_device *rdev,
			   struct radeon_ib *ib,
			   uint64_t pe,
			   uint64_t addr, unsigned count,
			   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & R600_PTE_SYSTEM) {
				value = radeon_vm_map_gart(rdev, addr);
			} else if (flags & R600_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}
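
/**
 * si_dma_vm_set_pages - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (SI).
 */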
void si_dma_vm_set_pages(struct radeon_device *rdev,
			 struct radeon_ib *ib,
			 uint64_t pe,
			 uint64_t addr, unsigned count,
			 uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}
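
/**
 * si_dma_vm_flush - flush the TLB for a given VM via the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @vm_id: VM context id to flush
 * @pd_addr: physical address of the page directory
 *
 * Update the page table base address for the VM context, flush the HDP
 * cache, request a TLB invalidation for the VM and wait for the
 * invalidation to complete (SI).
 */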
void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		     unsigned vm_id, uint64_t pd_addr)
{
	/* update the page table base address for this VM context */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	if (vm_id < 8) {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
	} else {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
	}
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* bits 0-7 are the VM contexts0-7 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for the invalidate to complete */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
	radeon_ring_write(ring, 0xff << 16); /* retry */
	radeon_ring_write(ring, 1 << vm_id); /* mask */
	radeon_ring_write(ring, 0); /* value */
	radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}
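
/**
 * si_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU paging using the DMA engine (SI).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */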
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
				 uint64_t src_offset, uint64_t dst_offset,
				 unsigned num_gpu_pages,
				 struct dma_resv *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	/* each DMA copy packet can transfer at most 0xFFFFF bytes */
	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0xFFFFF)
			cur_size_in_bytes = 0xFFFFF;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}