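/*
 * GEM (graphics execution manager) support for amdgpu: buffer object
 * creation and mmap, userptr import, wait-idle, metadata and GPU virtual
 * address management exposed through the DRM ioctl interface.
 */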
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
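
/*
 * amdgpu_gem_fault - CPU page fault handler for amdgpu GEM objects
 *
 * Reserves the BO, lets the driver react to the fault (e.g. migrate the
 * buffer for CPU access) and then inserts the pages into the faulting VMA
 * while the device is still accessible; once the device is unplugged a
 * dummy page is mapped instead.
 */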
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (drm_dev_enter(ddev, &idx)) {
		ret = amdgpu_bo_fault_reserve_notify(bo);
		if (ret) {
			drm_dev_exit(idx);
			goto unlock;
		}

		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT);

		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct amdgpu_gem_vm_ops = {
	.fault = amdgpu_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}
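
/*
 * amdgpu_gem_object_create - allocate a new BO and wrap it in a GEM object
 *
 * Fills in an amdgpu_bo_param from the arguments, creates a user BO and
 * returns its embedded GEM object with the amdgpu GEM callbacks installed.
 */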
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
	bp.flags = flags;
	bp.domain = initial_domain;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r)
		return r;

	bo = &ubo->bo;
	*obj = &bo->tbo.base;
	(*obj)->funcs = &amdgpu_gem_object_funcs;

	return 0;
}
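
/*
 * amdgpu_gem_force_release - drop all GEM handles on device teardown
 *
 * Walks every open DRM file and releases any GEM objects user space still
 * holds; hitting this path means clients are still active, so warn.
 */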
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}
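
/*
 * amdgpu_gem_object_open - called when a GEM handle is created or opened
 *
 * Adds (or references) a bo_va mapping entry for the opening client's VM.
 * Userptr BOs may only be opened by the process that owns the backing
 * memory, and per-VM BOs only by the VM that shares their reservation.
 */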
static int amdgpu_gem_object_open(struct drm_gem_object *obj,
				  struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}
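
/*
 * amdgpu_gem_object_close - called when a GEM handle is dropped
 *
 * Removes the bo_va for this client's VM once the last handle reference is
 * gone and fences the clearing of the freed mappings against the BO.
 */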
static void amdgpu_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct dma_fence *fence = NULL;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	long r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 2;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because we fail to reserve bo (%ld)\n",
			r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (!bo_va || --bo_va->ref_count)
		goto out_unlock;

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev,
			"failed to clear page tables on GEM object close (%ld)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);
}

static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;
	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;
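
	/* PROT_NONE private mappings of the BO must never become writable:
	 * clearing VM_MAYWRITE prevents mprotect() from later turning them
	 * into writable COW mappings.
	 */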
	if (is_cow_mapping(vma->vm_flags) &&
	    !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
		vma->vm_flags &= ~VM_MAYWRITE;

	return drm_gem_ttm_mmap(obj, vma);
}

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
	.free = amdgpu_gem_object_free,
	.open = amdgpu_gem_object_open,
	.close = amdgpu_gem_object_close,
	.export = amdgpu_gem_prime_export,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = amdgpu_gem_object_mmap,
	.vm_ops = &amdgpu_gem_vm_ops,
};
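
/*
 * GEM ioctls.
 */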
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle, initial_domain;
	int r;
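
	/* reject invalid gem flags */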
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
		      AMDGPU_GEM_CREATE_ENCRYPTED |
		      AMDGPU_GEM_CREATE_DISCARDABLE))
		return -EINVAL;
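
	/* reject invalid gem domains */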
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
		return -EINVAL;
	}
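
	/* GDS, GWS and OA buffers live in special hardware blocks: they are
	 * never CPU accessible and must not be created as per-VM (always
	 * valid) BOs.
	 */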
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.bo, false);
		if (r)
			return r;

		resv = vm->root.bo->tbo.base.resv;
	}

	initial_domain = (u32)(0xffffffff & args->in.domains);
retry:
	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     initial_domain,
				     flags, ttm_bo_type_device, resv, &gobj);
	if (r && r != -ERESTARTSYS) {
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			goto retry;
		}

		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
			goto retry;
		}
		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
			  size, initial_domain, args->in.alignment, r);
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.bo);
		}
		amdgpu_bo_unreserve(vm->root.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
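	/* drop the reference from object creation - the handle holds it now */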
	drm_gem_object_put(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;
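
	/* reject unknown flag values */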
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
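		/* writable userptr mappings need an MMU notifier registered
		 * so the pages can be invalidated safely
		 */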
		return -EACCES;
	}
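
	/* create a gem object to contain the user pages */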
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
	drm_gem_object_put(gobj);

	return r;
}

int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}
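
/**
 * amdgpu_gem_timeout - convert an absolute timeout in ns to jiffies
 * @timeout_ns: absolute timeout from user space, in nanoseconds
 *
 * Negative values wait forever, values already in the past return 0, and
 * the result is clamped so it still fits into a signed long for
 * schedule_timeout().
 */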
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;
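
	/* timeouts with the sign bit set are treated as infinite */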
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
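	/* clamp the timeout to avoid an unsigned -> signed overflow */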
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, timeout);
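
	/* ret == 0 means not signaled,
	 * ret > 0 means signaled,
	 * ret < 0 means interrupted before timeout
	 */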
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put(gobj);
	return r;
}

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}
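
/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * considered fatal.
 */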
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
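
/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the GEM UAPI flags mapped into the PTE flags used by the hardware.
 */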
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;
	if (flags & AMDGPU_VM_PAGE_NOALLOC)
		pte_flag |= AMDGPU_PTE_NOALLOC;

	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}

int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
		AMDGPU_VM_PAGE_NOALLOC;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	uint64_t vm_size;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(dev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(dev->dev,
			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
	vm_size -= AMDGPU_VA_RESERVED_SIZE;
	if (args->va_address + args->map_size > vm_size) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in top reserved area 0x%llx\n",
			args->va_address + args->map_size, vm_size);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(dev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			tv.num_shared = 1;
		else
			tv.num_shared = 0;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put(gobj);
	return r;
}

int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->tbo.base.import_attach &&
		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
						  amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}
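
		/* only VRAM, GTT and CPU placements may be requested */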
		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put(gobj);
	return r;
}
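
/*
 * amdgpu_gem_align_pitch - round a scanout width up to the hardware pitch
 * alignment for the given bytes per pixel and return the pitch in bytes.
 */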
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
				  int width,
				  int cpp,
				  bool tiled)
{
	int aligned = width;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = 255;
		break;
	case 2:
		pitch_mask = 127;
		break;
	case 3:
	case 4:
		pitch_mask = 63;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	u32 domain;
	int r;
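
	/*
	 * The buffer returned from this function should be cleared, but
	 * that can only be done when the buffer move functions are enabled;
	 * otherwise requesting a cleared buffer would make the allocation
	 * fail.
	 */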
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_gem_align_pitch(adev, args->width,
					     DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);

	drm_gem_object_put(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;
		struct drm_gem_object *gobj;
		int id;
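
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */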
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, id) {
			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

			amdgpu_bo_print_info(id, bo, m);
		}
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);

#endif

void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
			    &amdgpu_debugfs_gem_info_fops);
#endif
}