/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

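/*
 * amdgpu_gem_object_free - GEM object free callback
 *
 * Tears down any PRIME import attachment, unregisters the MMU notifier
 * and drops the final reference on the underlying amdgpu_bo.
 */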
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
    struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

    if (robj) {
        if (robj->gem_base.import_attach)
            drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
        amdgpu_mn_unregister(robj);
        amdgpu_bo_unref(&robj);
    }
}

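/*
 * amdgpu_gem_object_create - allocate a BO and wrap it in a GEM object
 *
 * Allocates an amdgpu_bo of @size bytes (at least page aligned) in
 * @initial_domain and returns it through @obj. A failed VRAM-only
 * allocation is retried with GTT allowed as a fallback domain.
 */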
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                int alignment, u32 initial_domain,
                u64 flags, bool kernel,
                struct drm_gem_object **obj)
{
    struct amdgpu_bo *robj;
    unsigned long max_size;
    int r;

    *obj = NULL;
    /* At least align on page size */
    if (alignment < PAGE_SIZE)
        alignment = PAGE_SIZE;

    if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS |
                AMDGPU_GEM_DOMAIN_OA))) {
        /* Maximum bo size is the unpinned gtt size since we use the gtt to
         * handle vram to system pool migrations.
         */
        max_size = adev->mc.gtt_size - adev->gart_pin_size;
        if (size > max_size) {
            DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
                  size >> 20, max_size >> 20);
            return -ENOMEM;
        }
    }
retry:
    r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
                 flags, NULL, NULL, &robj);
    if (r) {
        if (r != -ERESTARTSYS) {
            if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                goto retry;
            }
            DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                  size, initial_domain, alignment, r);
        }
        return r;
    }
    *obj = &robj->gem_base;

    return 0;
}

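/*
 * amdgpu_gem_force_release - drop all GEM objects still held by clients
 *
 * Called on driver teardown; warns if user space clients or their
 * allocations are still around and releases them forcefully.
 */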
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
    struct drm_device *ddev = adev->ddev;
    struct drm_file *file;

    mutex_lock(&ddev->filelist_mutex);

    list_for_each_entry(file, &ddev->filelist, lhead) {
        struct drm_gem_object *gobj;
        int handle;

        WARN_ONCE(1, "Still active user space clients!\n");
        spin_lock(&file->table_lock);
        idr_for_each_entry(&file->object_idr, gobj, handle) {
            WARN_ONCE(1, "And also active allocations!\n");
            drm_gem_object_unreference_unlocked(gobj);
        }
        idr_destroy(&file->object_idr);
        spin_unlock(&file->table_lock);
    }

    mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the GEM create
 * and the GEM open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
               struct drm_file *file_priv)
{
    struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
    struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
    struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
    struct amdgpu_vm *vm = &fpriv->vm;
    struct amdgpu_bo_va *bo_va;
    int r;

    r = amdgpu_bo_reserve(abo, false);
    if (r)
        return r;

    bo_va = amdgpu_vm_bo_find(vm, abo);
    if (!bo_va) {
        bo_va = amdgpu_vm_bo_add(adev, vm, abo);
    } else {
        ++bo_va->ref_count;
    }
    amdgpu_bo_unreserve(abo);
    return 0;
}

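/*
 * amdgpu_gem_object_close - GEM close callback
 *
 * Drops one reference on the bo_va for this file's VM and removes the
 * mapping entirely once the count reaches zero.
 */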
void amdgpu_gem_object_close(struct drm_gem_object *obj,
                 struct drm_file *file_priv)
{
    struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
    struct amdgpu_vm *vm = &fpriv->vm;
    struct amdgpu_bo_list_entry vm_pd;
    struct list_head list, duplicates;
    struct ttm_validate_buffer tv;
    struct ww_acquire_ctx ticket;
    struct amdgpu_bo_va *bo_va;
    int r;

    INIT_LIST_HEAD(&list);
    INIT_LIST_HEAD(&duplicates);

    tv.bo = &bo->tbo;
    tv.shared = true;
    list_add(&tv.head, &list);

    amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

    r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
    if (r) {
        dev_err(adev->dev,
            "leaking bo va because we failed to reserve the bo (%d)\n", r);
        return;
    }
    bo_va = amdgpu_vm_bo_find(vm, bo);
    if (bo_va) {
        if (--bo_va->ref_count == 0) {
            amdgpu_vm_bo_rmv(adev, bo_va);
        }
    }
    ttm_eu_backoff_reservation(&ticket, &list);
}

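/*
 * amdgpu_gem_handle_lockup - recover from a detected GPU lockup
 *
 * A wait that returned -EDEADLK indicates a hung GPU; reset it and
 * translate the error into -EAGAIN so user space retries the call.
 */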
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
    if (r == -EDEADLK) {
        r = amdgpu_gpu_reset(adev);
        if (!r)
            r = -EAGAIN;
    }
    return r;
}

/*
 * GEM ioctls.
 */
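/*
 * amdgpu_gem_create_ioctl - allocate a new BO and return a handle for it
 *
 * GDS, GWS and OA requests are kernel allocations whose sizes are given in
 * hardware blocks, so they are scaled to bytes first; all sizes are rounded
 * up to a multiple of the page size.
 */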
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                struct drm_file *filp)
{
    struct amdgpu_device *adev = dev->dev_private;
    union drm_amdgpu_gem_create *args = data;
    uint64_t size = args->in.bo_size;
    struct drm_gem_object *gobj;
    uint32_t handle;
    bool kernel = false;
    int r;

    /* create a gem object to contain this object in */
    if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
        AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
        kernel = true;
        if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
            size = size << AMDGPU_GDS_SHIFT;
        else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
            size = size << AMDGPU_GWS_SHIFT;
        else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
            size = size << AMDGPU_OA_SHIFT;
        else {
            r = -EINVAL;
            goto error_unlock;
        }
    }
    size = roundup(size, PAGE_SIZE);

    r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                     (u32)(0xffffffff & args->in.domains),
                     args->in.domain_flags,
                     kernel, &gobj);
    if (r)
        goto error_unlock;

    r = drm_gem_handle_create(filp, gobj, &handle);
    /* drop reference from allocate - handle holds it now */
    drm_gem_object_unreference_unlocked(gobj);
    if (r)
        goto error_unlock;

    memset(args, 0, sizeof(*args));
    args->out.handle = handle;
    return 0;

error_unlock:
    r = amdgpu_gem_handle_lockup(adev, r);
    return r;
}

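/*
 * amdgpu_gem_userptr_ioctl - create a BO backed by user space memory
 *
 * Wraps an existing, page-aligned user address range in a GTT BO.
 * Writable mappings must register an MMU notifier, and with
 * AMDGPU_GEM_USERPTR_VALIDATE set the user pages are fetched and the BO
 * is validated into GTT immediately instead of at first use.
 */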
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                 struct drm_file *filp)
{
    struct amdgpu_device *adev = dev->dev_private;
    struct drm_amdgpu_gem_userptr *args = data;
    struct drm_gem_object *gobj;
    struct amdgpu_bo *bo;
    uint32_t handle;
    int r;

    if (offset_in_page(args->addr | args->size))
        return -EINVAL;

    /* reject unknown flag values */
    if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
        AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
        AMDGPU_GEM_USERPTR_REGISTER))
        return -EINVAL;

    if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
         !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
        /* if we want to write to it we must install a MMU notifier */
        return -EACCES;
    }

    /* create a gem object to contain this object in */
    r = amdgpu_gem_object_create(adev, args->size, 0,
                     AMDGPU_GEM_DOMAIN_CPU, 0,
                     0, &gobj);
    if (r)
        goto handle_lockup;

    bo = gem_to_amdgpu_bo(gobj);
    bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
    bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
    r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
    if (r)
        goto release_object;

    if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
        r = amdgpu_mn_register(bo, args->addr);
        if (r)
            goto release_object;
    }

    if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
        down_read(&current->mm->mmap_sem);

        r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
                         bo->tbo.ttm->pages);
        if (r)
            goto unlock_mmap_sem;

        r = amdgpu_bo_reserve(bo, true);
        if (r)
            goto free_pages;

        amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
        amdgpu_bo_unreserve(bo);
        if (r)
            goto free_pages;

        up_read(&current->mm->mmap_sem);
    }

    r = drm_gem_handle_create(filp, gobj, &handle);
    /* drop reference from allocate - handle holds it now */
    drm_gem_object_unreference_unlocked(gobj);
    if (r)
        goto handle_lockup;

    args->handle = handle;
    return 0;

free_pages:
    release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
    up_read(&current->mm->mmap_sem);

release_object:
    drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
    r = amdgpu_gem_handle_lockup(adev, r);

    return r;
}

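/*
 * amdgpu_mode_dumb_mmap - return the fake mmap offset for a BO handle
 *
 * User space mmap()s this offset on the DRM device node to map the BO.
 * Userptr BOs and BOs created without CPU access are rejected.
 */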
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
              struct drm_device *dev,
              uint32_t handle, uint64_t *offset_p)
{
    struct drm_gem_object *gobj;
    struct amdgpu_bo *robj;

    gobj = drm_gem_object_lookup(filp, handle);
    if (gobj == NULL)
        return -ENOENT;
    robj = gem_to_amdgpu_bo(gobj);
    if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
        (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
        drm_gem_object_unreference_unlocked(gobj);
        return -EPERM;
    }
    *offset_p = amdgpu_bo_mmap_offset(robj);
    drm_gem_object_unreference_unlocked(gobj);
    return 0;
}

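/*
 * amdgpu_gem_mmap_ioctl - thin ioctl wrapper around amdgpu_mode_dumb_mmap
 */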
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
              struct drm_file *filp)
{
    union drm_amdgpu_gem_mmap *args = data;
    uint32_t handle = args->in.handle;

    memset(args, 0, sizeof(*args));
    return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
    unsigned long timeout_jiffies;
    ktime_t timeout;

    /* clamp timeout if it's too large */
    if (((int64_t)timeout_ns) < 0)
        return MAX_SCHEDULE_TIMEOUT;

    timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
    if (ktime_to_ns(timeout) < 0)
        return 0;

    timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
    /* clamp timeout to avoid unsigned -> signed overflow */
    if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
        return MAX_SCHEDULE_TIMEOUT - 1;

    return timeout_jiffies;
}

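/*
 * amdgpu_gem_wait_idle_ioctl - wait for a BO to become idle
 *
 * Waits on all fences attached to the BO's reservation object for at most
 * the requested timeout and reports back whether the BO is still busy.
 */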
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                  struct drm_file *filp)
{
    struct amdgpu_device *adev = dev->dev_private;
    union drm_amdgpu_gem_wait_idle *args = data;
    struct drm_gem_object *gobj;
    struct amdgpu_bo *robj;
    uint32_t handle = args->in.handle;
    unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
    int r = 0;
    long ret;

    gobj = drm_gem_object_lookup(filp, handle);
    if (gobj == NULL)
        return -ENOENT;
    robj = gem_to_amdgpu_bo(gobj);
    ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
                          timeout);

    /* ret == 0 means not signaled,
     * ret > 0 means signaled
     * ret < 0 means interrupted before timeout
     */
    if (ret >= 0) {
        memset(args, 0, sizeof(*args));
        args->out.status = (ret == 0);
    } else
        r = ret;

    drm_gem_object_unreference_unlocked(gobj);
    r = amdgpu_gem_handle_lockup(adev, r);
    return r;
}

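/*
 * amdgpu_gem_metadata_ioctl - get or set tiling flags and opaque metadata
 *
 * GET returns the current tiling flags and metadata blob; SET validates the
 * supplied blob size and stores both on the BO.
 */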
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
                struct drm_file *filp)
{
    struct drm_amdgpu_gem_metadata *args = data;
    struct drm_gem_object *gobj;
    struct amdgpu_bo *robj;
    int r = -1;

    DRM_DEBUG("%d\n", args->handle);
    gobj = drm_gem_object_lookup(filp, args->handle);
    if (gobj == NULL)
        return -ENOENT;
    robj = gem_to_amdgpu_bo(gobj);

    r = amdgpu_bo_reserve(robj, false);
    if (unlikely(r != 0))
        goto out;

    if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
        amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
        r = amdgpu_bo_get_metadata(robj, args->data.data,
                       sizeof(args->data.data),
                       &args->data.data_size_bytes,
                       &args->data.flags);
    } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
        if (args->data.data_size_bytes > sizeof(args->data.data)) {
            r = -EINVAL;
            goto unreserve;
        }
        r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
        if (!r)
            r = amdgpu_bo_set_metadata(robj, args->data.data,
                           args->data.data_size_bytes,
                           args->data.flags);
    }

unreserve:
    amdgpu_bo_unreserve(robj);
out:
    drm_gem_object_unreference_unlocked(gobj);
    return r;
}

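/*
 * amdgpu_gem_va_check - per-BO callback for page table validation
 *
 * If a page table BO has been evicted to system memory, bail out with
 * -ERESTARTSYS and let the next command submission move it back in.
 */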
static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
{
    unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

    /* if anything is swapped out don't swap it in here,
     * just abort and wait for the next CS
     */
    return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @operation: map or unmap operation being performed
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                    struct amdgpu_bo_va *bo_va,
                    uint32_t operation)
{
    struct ttm_validate_buffer tv, *entry;
    struct amdgpu_bo_list_entry vm_pd;
    struct ww_acquire_ctx ticket;
    struct list_head list, duplicates;
    unsigned domain;
    int r;

    INIT_LIST_HEAD(&list);
    INIT_LIST_HEAD(&duplicates);

    tv.bo = &bo_va->bo->tbo;
    tv.shared = true;
    list_add(&tv.head, &list);

    amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

    /* Provide duplicates to avoid -EALREADY */
    r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
    if (r)
        goto error_print;

    list_for_each_entry(entry, &list, head) {
        domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
        /* if anything is swapped out don't swap it in here,
         * just abort and wait for the next CS
         */
        if (domain == AMDGPU_GEM_DOMAIN_CPU)
            goto error_unreserve;
    }
    r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
                      NULL);
    if (r)
        goto error_unreserve;

    r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
    if (r)
        goto error_unreserve;

    r = amdgpu_vm_clear_freed(adev, bo_va->vm);
    if (r)
        goto error_unreserve;

    if (operation == AMDGPU_VA_OP_MAP)
        r = amdgpu_vm_bo_update(adev, bo_va, false);

error_unreserve:
    ttm_eu_backoff_reservation(&ticket, &list);

error_print:
    if (r && r != -ERESTARTSYS)
        DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

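/*
 * amdgpu_gem_va_ioctl - map or unmap a BO in the GPU virtual address space
 *
 * Validates the requested address and flags, then creates or removes the
 * mapping and, unless delayed, updates the page tables right away.
 */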
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
              struct drm_file *filp)
{
    struct drm_amdgpu_gem_va *args = data;
    struct drm_gem_object *gobj;
    struct amdgpu_device *adev = dev->dev_private;
    struct amdgpu_fpriv *fpriv = filp->driver_priv;
    struct amdgpu_bo *abo;
    struct amdgpu_bo_va *bo_va;
    struct amdgpu_bo_list_entry vm_pd;
    struct ttm_validate_buffer tv;
    struct ww_acquire_ctx ticket;
    struct list_head list, duplicates;
    uint32_t invalid_flags, va_flags = 0;
    int r = 0;

    if (!adev->vm_manager.enabled)
        return -ENOTTY;

    if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
        dev_err(&dev->pdev->dev,
            "va_address 0x%lX is in reserved area 0x%X\n",
            (unsigned long)args->va_address,
            AMDGPU_VA_RESERVED_SIZE);
        return -EINVAL;
    }

    invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
            AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
    if ((args->flags & invalid_flags)) {
        dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
            args->flags, invalid_flags);
        return -EINVAL;
    }

    switch (args->operation) {
    case AMDGPU_VA_OP_MAP:
    case AMDGPU_VA_OP_UNMAP:
        break;
    default:
        dev_err(&dev->pdev->dev, "unsupported operation %d\n",
            args->operation);
        return -EINVAL;
    }

    gobj = drm_gem_object_lookup(filp, args->handle);
    if (gobj == NULL)
        return -ENOENT;
    abo = gem_to_amdgpu_bo(gobj);
    INIT_LIST_HEAD(&list);
    INIT_LIST_HEAD(&duplicates);
    tv.bo = &abo->tbo;
    tv.shared = true;
    list_add(&tv.head, &list);

    amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

    r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
    if (r) {
        drm_gem_object_unreference_unlocked(gobj);
        return r;
    }

    bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
    if (!bo_va) {
        ttm_eu_backoff_reservation(&ticket, &list);
        drm_gem_object_unreference_unlocked(gobj);
        return -ENOENT;
    }

    switch (args->operation) {
    case AMDGPU_VA_OP_MAP:
        if (args->flags & AMDGPU_VM_PAGE_READABLE)
            va_flags |= AMDGPU_PTE_READABLE;
        if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
            va_flags |= AMDGPU_PTE_WRITEABLE;
        if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
            va_flags |= AMDGPU_PTE_EXECUTABLE;
        r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                     args->offset_in_bo, args->map_size,
                     va_flags);
        break;
    case AMDGPU_VA_OP_UNMAP:
        r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
        break;
    default:
        break;
    }
    ttm_eu_backoff_reservation(&ticket, &list);
    if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
        !amdgpu_vm_debug)
        amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

    drm_gem_object_unreference_unlocked(gobj);
    return r;
}

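/*
 * amdgpu_gem_op_ioctl - query creation info or change the placement of a BO
 *
 * GET_GEM_CREATE_INFO copies size, alignment, domains and flags back to
 * user space; SET_PLACEMENT updates the preferred domains (not allowed for
 * userptr BOs).
 */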
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
            struct drm_file *filp)
{
    struct drm_amdgpu_gem_op *args = data;
    struct drm_gem_object *gobj;
    struct amdgpu_bo *robj;
    int r;

    gobj = drm_gem_object_lookup(filp, args->handle);
    if (gobj == NULL)
        return -ENOENT;
    robj = gem_to_amdgpu_bo(gobj);

    r = amdgpu_bo_reserve(robj, false);
    if (unlikely(r))
        goto out;

    switch (args->op) {
    case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
        struct drm_amdgpu_gem_create_in info;
        void __user *out = (void __user *)(long)args->value;

        info.bo_size = robj->gem_base.size;
        info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
        info.domains = robj->prefered_domains;
        info.domain_flags = robj->flags;
        amdgpu_bo_unreserve(robj);
        if (copy_to_user(out, &info, sizeof(info)))
            r = -EFAULT;
        break;
    }
    case AMDGPU_GEM_OP_SET_PLACEMENT:
        if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
            r = -EPERM;
            amdgpu_bo_unreserve(robj);
            break;
        }
        robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                            AMDGPU_GEM_DOMAIN_GTT |
                            AMDGPU_GEM_DOMAIN_CPU);
        robj->allowed_domains = robj->prefered_domains;
        if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
            robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

        amdgpu_bo_unreserve(robj);
        break;
    default:
        amdgpu_bo_unreserve(robj);
        r = -EINVAL;
    }

out:
    drm_gem_object_unreference_unlocked(gobj);
    return r;
}

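/*
 * amdgpu_mode_dumb_create - dumb buffer allocation for generic KMS clients
 *
 * Allocates a CPU-accessible VRAM BO sized for the requested width, height
 * and bpp and returns a handle to it.
 */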
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                struct drm_device *dev,
                struct drm_mode_create_dumb *args)
{
    struct amdgpu_device *adev = dev->dev_private;
    struct drm_gem_object *gobj;
    uint32_t handle;
    int r;

    args->pitch = amdgpu_align_pitch(adev, args->width,
                     DIV_ROUND_UP(args->bpp, 8), 0);
    args->size = (u64)args->pitch * args->height;
    args->size = ALIGN(args->size, PAGE_SIZE);

    r = amdgpu_gem_object_create(adev, args->size, 0,
                     AMDGPU_GEM_DOMAIN_VRAM,
                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                     ttm_bo_type_device,
                     &gobj);
    if (r)
        return -ENOMEM;

    r = drm_gem_handle_create(file_priv, gobj, &handle);
    /* drop reference from allocate - handle holds it now */
    drm_gem_object_unreference_unlocked(gobj);
    if (r)
        return r;

    args->handle = handle;
    return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
    struct drm_gem_object *gobj = ptr;
    struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
    struct seq_file *m = data;
    unsigned domain;
    const char *placement;
    unsigned pin_count;

    domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
    switch (domain) {
    case AMDGPU_GEM_DOMAIN_VRAM:
        placement = "VRAM";
        break;
    case AMDGPU_GEM_DOMAIN_GTT:
        placement = " GTT";
        break;
    case AMDGPU_GEM_DOMAIN_CPU:
    default:
        placement = " CPU";
        break;
    }
    seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
           id, amdgpu_bo_size(bo), placement,
           amdgpu_bo_gpu_offset(bo));

    pin_count = ACCESS_ONCE(bo->pin_count);
    if (pin_count)
        seq_printf(m, " pin count %d", pin_count);
    seq_printf(m, "\n");

    return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *)m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_file *file;
    int r;

    r = mutex_lock_interruptible(&dev->filelist_mutex);
    if (r)
        return r;

    list_for_each_entry(file, &dev->filelist, lhead) {
        struct task_struct *task;

        /*
         * Although we have a valid reference on file->pid, that does
         * not guarantee that the task_struct who called get_pid() is
         * still alive (e.g. get_pid(current) => fork() => exit()).
         * Therefore, we need to protect this ->comm access using RCU.
         */
        rcu_read_lock();
        task = pid_task(file->pid, PIDTYPE_PID);
        seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
               task ? task->comm : "<unknown>");
        rcu_read_unlock();

        spin_lock(&file->table_lock);
        idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
        spin_unlock(&file->table_lock);
    }

    mutex_unlock(&dev->filelist_mutex);
    return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
    {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
    return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
    return 0;
}