0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031 #include "radeon.h"
0032 #include "radeon_trace.h"
0033
0034
0035
0036
0037
0038
0039
0040
0041 void radeon_sync_create(struct radeon_sync *sync)
0042 {
0043 unsigned i;
0044
0045 for (i = 0; i < RADEON_NUM_SYNCS; ++i)
0046 sync->semaphores[i] = NULL;
0047
0048 for (i = 0; i < RADEON_NUM_RINGS; ++i)
0049 sync->sync_to[i] = NULL;
0050
0051 sync->last_vm_update = NULL;
0052 }
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062 void radeon_sync_fence(struct radeon_sync *sync,
0063 struct radeon_fence *fence)
0064 {
0065 struct radeon_fence *other;
0066
0067 if (!fence)
0068 return;
0069
0070 other = sync->sync_to[fence->ring];
0071 sync->sync_to[fence->ring] = radeon_fence_later(fence, other);
0072
0073 if (fence->is_vm_update) {
0074 other = sync->last_vm_update;
0075 sync->last_vm_update = radeon_fence_later(fence, other);
0076 }
0077 }
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089 int radeon_sync_resv(struct radeon_device *rdev,
0090 struct radeon_sync *sync,
0091 struct dma_resv *resv,
0092 bool shared)
0093 {
0094 struct dma_resv_iter cursor;
0095 struct radeon_fence *fence;
0096 struct dma_fence *f;
0097 int r = 0;
0098
0099 dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(!shared), f) {
0100 fence = to_radeon_fence(f);
0101 if (fence && fence->rdev == rdev)
0102 radeon_sync_fence(sync, fence);
0103 else
0104 r = dma_fence_wait(f, true);
0105 if (r)
0106 break;
0107 }
0108 return r;
0109 }
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
/**
 * radeon_sync_rings - sync ring to all registered fences
 *
 * @rdev: radeon_device pointer
 * @sync: sync object to use
 * @ring: ring that needs sync
 *
 * Ensure that all fences recorded in @sync are signaled before work
 * submitted to @ring runs.  Where possible this is done with hardware
 * semaphores (signal on the fence's ring, wait on @ring); when a
 * semaphore cannot be used (no free slot, emit failure) it falls back
 * to a blocking CPU wait on the fence.
 * Returns 0 on success, negative error code on failure.
 */
int radeon_sync_rings(struct radeon_device *rdev,
		      struct radeon_sync *sync,
		      int ring)
{
	unsigned count = 0;
	int i, r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_fence *fence = sync->sync_to[i];
		struct radeon_semaphore *semaphore;

		/* just syncing between IBs is fine */
		if (!radeon_fence_need_sync(fence, ring))
			continue;

		/* prevent GPU deadlocks */
		if (!rdev->ring[i].ready) {
			dev_err(rdev->dev, "Syncing to a disabled ring!");
			return -EINVAL;
		}

		if (count >= RADEON_NUM_SYNCS) {
			/* not enough room, wait manually */
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}
		r = radeon_semaphore_create(rdev, &semaphore);
		if (r)
			return r;

		sync->semaphores[count++] = semaphore;

		/* allocate enough space for sync command */
		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
		if (r)
			return r;

		/* emit the signal semaphore on the fence's ring */
		if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
			/* signaling wasn't successful wait manually */
			radeon_ring_undo(&rdev->ring[i]);
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		/* we assume caller has already allocated space on waiters ring */
		if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
			/* waiting wasn't successful wait manually */
			radeon_ring_undo(&rdev->ring[i]);
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		radeon_ring_commit(rdev, &rdev->ring[i], false);
		radeon_fence_note_sync(fence, ring);
	}

	return 0;
}
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196 void radeon_sync_free(struct radeon_device *rdev,
0197 struct radeon_sync *sync,
0198 struct radeon_fence *fence)
0199 {
0200 unsigned i;
0201
0202 for (i = 0; i < RADEON_NUM_SYNCS; ++i)
0203 radeon_semaphore_free(rdev, &sync->semaphores[i], fence);
0204 }