#include <drm/drm_file.h>

#include "radeon.h"
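
/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */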
static void radeon_debugfs_sa_init(struct radeon_device *rdev);
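
/**
 * radeon_ib_get - request an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the IB is associated with
 * @ib: IB object returned
 * @vm: requested vm
 * @size: requested IB size
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */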
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, struct radeon_vm *vm,
		  unsigned size)
{
	int r;

	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
	if (r) {
		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}

	radeon_sync_create(&ib->sync);

	ib->ring = ring;
	ib->fence = NULL;
	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
	ib->vm = vm;
	if (vm) {
		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
		 * space and soffset is the offset inside the pool bo
		 */
		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
	} else {
		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
	}
	ib->is_const_ib = false;

	return 0;
}
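
/**
 * radeon_ib_free - free an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to free
 *
 * Free an IB (all asics).
 */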
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	radeon_sync_free(rdev, &ib->sync, ib->fence);
	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
	radeon_fence_unref(&ib->fence);
}
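
/**
 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 * @const_ib: Const IB to schedule (SI only)
 * @hdp_flush: Whether or not to perform an HDP cache flush
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will already be in cache when the draw is
 * processed.  To accomplish this, an IB is generated for each submission
 * where the draw packets are placed in the actual IB and the resource
 * descriptor writes and dispatch of the priming are placed in the const IB.
 * If there is no const IB, it is treated like a legacy submission.
 */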
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
		       struct radeon_ib *const_ib, bool hdp_flush)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* empty IB or the ring is not usable, nothing to schedule */
		dev_err(rdev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
	if (r) {
		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	/* grab a vm id if necessary */
	if (ib->vm) {
		struct radeon_fence *vm_id_fence;

		vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
		radeon_sync_fence(&ib->sync, vm_id_fence);
	}

	/* sync with other rings */
	r = radeon_sync_rings(rdev, &ib->sync, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	if (ib->vm)
		radeon_vm_flush(rdev, ib->vm, ib->ring,
				ib->sync.last_vm_update);

	if (const_ib) {
		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
		radeon_sync_free(rdev, &const_ib->sync, NULL);
	}
	radeon_ring_ib_execute(rdev, ib->ring, ib);
	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}
	if (const_ib) {
		const_ib->fence = radeon_fence_ref(ib->fence);
	}

	if (ib->vm)
		radeon_vm_fence(rdev, ib->vm, ib->fence);

	radeon_ring_unlock_commit(rdev, ring, hdp_flush);
	return 0;
}
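
/**
 * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */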
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->ib_pool_ready) {
		return 0;
	}

	if (rdev->family >= CHIP_BONAIRE) {
		r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
					      RADEON_IB_POOL_SIZE*64*1024,
					      RADEON_GPU_PAGE_SIZE,
					      RADEON_GEM_DOMAIN_GTT,
					      RADEON_GEM_GTT_WC);
	} else {
		/* Before CIK, it's better to stick to cacheable GTT due
		 * to the command stream checking
		 */
		r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
					      RADEON_IB_POOL_SIZE*64*1024,
					      RADEON_GPU_PAGE_SIZE,
					      RADEON_GEM_DOMAIN_GTT, 0);
	}
	if (r) {
		return r;
	}

	r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
	if (r) {
		return r;
	}

	rdev->ib_pool_ready = true;
	radeon_debugfs_sa_init(rdev);
	return 0;
}
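
/**
 * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */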
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	if (rdev->ib_pool_ready) {
		radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
		rdev->ib_pool_ready = false;
	}
}
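
/**
 * radeon_ib_ring_tests - test IBs on the rings
 *
 * @rdev: radeon_device pointer
 *
 * Test an IB (Indirect Buffer) on each ready ring.
 * If the test fails, the ring is disabled; a failure on the
 * GFX ring additionally disables acceleration.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */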
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			radeon_fence_driver_force_completion(rdev, i);
			ring->ready = false;
			rdev->needs_reset = false;

			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* a GFX ring failure means acceleration is broken */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;

			} else {
				/* other rings can be disabled and we keep going */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}
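
/*
 * Debugfs info
 */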
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_sa_info_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = (struct radeon_device *)m->private;

	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_sa_info);

#endif

static void radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("radeon_sa_info", 0444, root, rdev,
			    &radeon_debugfs_sa_info_fops);
#endif
}