#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
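
/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * fills with commands and then directs the GPU to process
 * the commands.
 *
 * A typical direct submission looks roughly like the sketch below
 * (illustrative only; real callers also pick the proper ring, hold the
 * required locks and emit packets in the format of the IP block they
 * drive):
 *
 *	r = amdgpu_ring_alloc(ring, ndw);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, ...);	packets, up to ndw dwords
 *	amdgpu_ring_commit(ring);	pad, memory barrier, update wptr
 *
 * amdgpu_ring_undo() drops everything written since the matching
 * amdgpu_ring_alloc() if the submission has to be aborted.
 */

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */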
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Align requested size with padding so amdgpu_ring_commit() can
	 * pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}
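
/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu ring pointer
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */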
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);
}
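
/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu ring pointer
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */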
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->funcs->align_mask)
		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}
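
/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */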
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count %= ring->funcs->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}
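
/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */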
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

#define amdgpu_ring_get_gpu_addr(ring, offset)				\
	(ring->is_mes_queue ?						\
	 (ring->mes_ctx->meta_data_gpu_addr + offset) :			\
	 (ring->adev->wb.gpu_addr + offset * 4))

#define amdgpu_ring_get_cpu_addr(ring, offset)				\
	(ring->is_mes_queue ?						\
	 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
	 (&ring->adev->wb.wb[offset]))
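
/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @hw_prio: ring priority (NORMAL/HIGH)
 * @sched_score: optional score atomic shared with other schedulers
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */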
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score)
{
	int r;
	int sched_hw_submission = amdgpu_sched_hw_submission;
	u32 *num_sched;
	u32 hw_ip;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu scheduler anyway;
	 * KIQ tasks get submitted directly to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);
	else if (ring == &adev->sdma.instance[0].page)
		sched_hw_submission = 256;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->num_hw_submission = sched_hw_submission;
		ring->sched_score = sched_score;
		ring->vmid_wait = dma_fence_get_stub();

		if (!ring->is_mes_queue) {
			ring->idx = adev->num_rings++;
			adev->rings[ring->idx] = ring;
		}

		r = amdgpu_fence_driver_init_ring(ring);
		if (r)
			return r;
	}

	if (ring->is_mes_queue) {
		ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_RPTR_OFFS);
		ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_WPTR_OFFS);
		ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_FENCE_OFFS);
		ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_TRAIL_FENCE_OFFS);
		ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_COND_EXE_OFFS);
	} else {
		r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->fence_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
			return r;
		}
	}

	ring->fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->fence_offs);
	ring->fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->fence_offs);

	ring->rptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->rptr_offs);
	ring->rptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->rptr_offs);

	ring->wptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->wptr_offs);
	ring->wptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->wptr_offs);

	ring->trail_fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->trail_fence_offs);
	ring->trail_fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->trail_fence_offs);

	ring->cond_exe_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->cond_exe_offs);
	ring->cond_exe_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->cond_exe_offs);

	/* Always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;

	/* Allocate ring buffer */
	if (ring->is_mes_queue) {
		int offset = 0;

		BUG_ON(ring->ring_size > PAGE_SIZE*4);

		offset = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_RING_OFFS);
		ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
		amdgpu_ring_clear_ring(ring);

	} else if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->hw_prio = hw_prio;

	if (!ring->no_scheduler) {
		hw_ip = ring->funcs->type;
		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
			&ring->sched;
	}

	return 0;
}
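
/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */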
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{

	/* Nothing to tear down for a ring that was never initialized */
	if (!(ring->adev) ||
	    (!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
		return;

	ring->sched.ready = false;

	if (!ring->is_mes_queue) {
		amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
		amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

		amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
		amdgpu_device_wb_free(ring->adev, ring->fence_offs);

		amdgpu_bo_free_kernel(&ring->ring_obj,
				      &ring->gpu_addr,
				      (void **)&ring->ring);
	}

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;
	ring->me = 0;

	if (!ring->is_mes_queue)
		ring->adev->rings[ring->idx] = NULL;
}
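
/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to write to
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 *
 * Helper for rings that don't support write and wait in a
 * single oneshot packet.
 */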
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
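
/**
 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 *
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: timedout fence
 *
 * Tries to get a ring proceeding again when it is stuck.
 */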
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence)
{
	ktime_t deadline = ktime_add_us(ktime_get(), 10000);

	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
		return false;

	atomic_inc(&ring->adev->gpu_reset_counter);
	while (!dma_fence_is_signaled(fence) &&
	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
		ring->funcs->soft_recovery(ring, vmid);

	return dma_fence_is_signaled(fence);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
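
/* Layout of the file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n dwords of the ring contents
 */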
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	int r, i;
	uint32_t value, result, early[3];

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	while (size) {
		if (*pos >= (ring->ring_size + 12))
			return result;

		value = ring->ring[(*pos - 12)/4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	return result;
}

static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};

#endif

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);
	debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring,
				 &amdgpu_debugfs_ring_fops,
				 ring->ring_size + 12);

#endif
}
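
/**
 * amdgpu_ring_test_helper - test a ring and update scheduler readiness
 *
 * @ring: ring to test
 *
 * Runs the ring test and marks the ring's GPU scheduler as ready
 * only if the test passes.
 * Returns 0 on success, error on failure.
 */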
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);
	else
		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
			      ring->name);

	ring->sched.ready = !r;
	return r;
}

static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
				    struct amdgpu_mqd_prop *prop)
{
	struct amdgpu_device *adev = ring->adev;

	memset(prop, 0, sizeof(*prop));

	prop->mqd_gpu_addr = ring->mqd_gpu_addr;
	prop->hqd_base_gpu_addr = ring->gpu_addr;
	prop->rptr_gpu_addr = ring->rptr_gpu_addr;
	prop->wptr_gpu_addr = ring->wptr_gpu_addr;
	prop->queue_size = ring->ring_size;
	prop->eop_gpu_addr = ring->eop_gpu_addr;
	prop->use_doorbell = ring->use_doorbell;
	prop->doorbell_index = ring->doorbell_index;

	/* The map_queues packet doesn't need to activate the queue,
	 * so only KIQ rings need this field set.
	 */
	prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;

	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
	     amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) ||
	    (ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
	     amdgpu_gfx_is_high_priority_graphics_queue(adev, ring))) {
		prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
		prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
	}
}

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_mqd *mqd_mgr;
	struct amdgpu_mqd_prop prop;

	amdgpu_ring_to_mqd_prop(ring, &prop);

	ring->wptr = 0;

	/* KIQ is a compute queue, so it reuses the compute MQD manager */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd_mgr = &adev->mqds[AMDGPU_HW_IP_COMPUTE];
	else
		mqd_mgr = &adev->mqds[ring->funcs->type];

	return mqd_mgr->init_mqd(adev, ring->mqd_ptr, &prop);
}