/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/iosys-map.h>
#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_prime.h"

struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
                    int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

const struct drm_gem_object_funcs radeon_gem_object_funcs;

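/*
 * CPU page fault handler for TTM-backed GEM objects. The read side of
 * pm.mclk_lock is held across the fault so the memory clock cannot be
 * changed while the BO's pages are being faulted in.
 */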
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
    struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
    struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
    vm_fault_t ret;

    down_read(&rdev->pm.mclk_lock);

    ret = ttm_bo_vm_reserve(bo, vmf);
    if (ret)
        goto unlock_mclk;

    ret = radeon_bo_fault_reserve_notify(bo);
    if (ret)
        goto unlock_resv;

    ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                       TTM_BO_VM_NUM_PREFAULT);
    if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
        goto unlock_mclk;

unlock_resv:
    dma_resv_unlock(bo->base.resv);

unlock_mclk:
    up_read(&rdev->pm.mclk_lock);
    return ret;
}

static const struct vm_operations_struct radeon_gem_vm_ops = {
    .fault = radeon_gem_fault,
    .open = ttm_bo_vm_open,
    .close = ttm_bo_vm_close,
    .access = ttm_bo_vm_access
};

static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
    struct radeon_bo *robj = gem_to_radeon_bo(gobj);

    if (robj) {
        radeon_mn_unregister(robj);
        radeon_bo_unref(&robj);
    }
}

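/*
 * Allocate a new GEM object: the alignment is rounded up to at least a
 * page, the size is capped at the unpinned GTT size (the GTT backs
 * VRAM-to-system migrations), and a failed VRAM allocation is retried
 * with GTT added as a fallback domain.
 */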
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
                int alignment, int initial_domain,
                u32 flags, bool kernel,
                struct drm_gem_object **obj)
{
    struct radeon_bo *robj;
    unsigned long max_size;
    int r;

    *obj = NULL;
    /* At least align on page size */
    if (alignment < PAGE_SIZE) {
        alignment = PAGE_SIZE;
    }

    /* Maximum bo size is the unpinned gtt size since we use the gtt to
     * handle vram to system pool migrations.
     */
    max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
    if (size > max_size) {
        DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
              size >> 20, max_size >> 20);
        return -ENOMEM;
    }

retry:
    r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
                 flags, NULL, NULL, &robj);
    if (r) {
        if (r != -ERESTARTSYS) {
            if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
                initial_domain |= RADEON_GEM_DOMAIN_GTT;
                goto retry;
            }
            DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                  size, initial_domain, alignment, r);
        }
        return r;
    }
    *obj = &robj->tbo.base;
    (*obj)->funcs = &radeon_gem_object_funcs;
    robj->pid = task_pid_nr(current);

    mutex_lock(&rdev->gem.mutex);
    list_add_tail(&robj->list, &rdev->gem.objects);
    mutex_unlock(&rdev->gem.mutex);

    return 0;
}

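/*
 * Validate a set_domain request: a CPU domain request waits (up to 30
 * seconds) for the BO to go idle, and migrating a prime-shared BO to
 * VRAM is rejected because its pages are exported through a dma-buf.
 */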
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
              uint32_t rdomain, uint32_t wdomain)
{
    struct radeon_bo *robj;
    uint32_t domain;
    long r;

    /* FIXME: reimplement */
    robj = gem_to_radeon_bo(gobj);
    /* work out where to validate the buffer to */
    domain = wdomain;
    if (!domain) {
        domain = rdomain;
    }
    if (!domain) {
        /* Do nothing */
        pr_warn("Set domain without domain!\n");
        return 0;
    }
    if (domain == RADEON_GEM_DOMAIN_CPU) {
        /* Asking for cpu access, wait for the object to become idle */
        r = dma_resv_wait_timeout(robj->tbo.base.resv,
                      DMA_RESV_USAGE_BOOKKEEP,
                      true, 30 * HZ);
        if (!r)
            r = -EBUSY;

        if (r < 0 && r != -EINTR) {
            pr_err("Failed to wait for object: %li\n", r);
            return r;
        }
    }
    if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
        /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
        return -EINVAL;
    }
    return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
    INIT_LIST_HEAD(&rdev->gem.objects);
    return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
    radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
    struct radeon_bo *rbo = gem_to_radeon_bo(obj);
    struct radeon_device *rdev = rbo->rdev;
    struct radeon_fpriv *fpriv = file_priv->driver_priv;
    struct radeon_vm *vm = &fpriv->vm;
    struct radeon_bo_va *bo_va;
    int r;

    if ((rdev->family < CHIP_CAYMAN) ||
        (!rdev->accel_working)) {
        return 0;
    }

    r = radeon_bo_reserve(rbo, false);
    if (r) {
        return r;
    }

    bo_va = radeon_vm_bo_find(vm, rbo);
    if (!bo_va) {
        bo_va = radeon_vm_bo_add(rdev, vm, rbo);
    } else {
        ++bo_va->ref_count;
    }
    radeon_bo_unreserve(rbo);

    return 0;
}

static void radeon_gem_object_close(struct drm_gem_object *obj,
                    struct drm_file *file_priv)
{
    struct radeon_bo *rbo = gem_to_radeon_bo(obj);
    struct radeon_device *rdev = rbo->rdev;
    struct radeon_fpriv *fpriv = file_priv->driver_priv;
    struct radeon_vm *vm = &fpriv->vm;
    struct radeon_bo_va *bo_va;
    int r;

    if ((rdev->family < CHIP_CAYMAN) ||
        (!rdev->accel_working)) {
        return;
    }

    r = radeon_bo_reserve(rbo, true);
    if (r) {
        dev_err(rdev->dev, "leaking bo va because we failed to reserve the bo (%d)\n",
            r);
        return;
    }
    bo_va = radeon_vm_bo_find(vm, rbo);
    if (bo_va) {
        if (--bo_va->ref_count == 0) {
            radeon_vm_bo_rmv(rdev, bo_va);
        }
    }
    radeon_bo_unreserve(rbo);
}

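/*
 * A wait that returns -EDEADLK indicates a GPU lockup; attempt a reset
 * and return -EAGAIN so the caller can retry the operation.
 */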
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
    if (r == -EDEADLK) {
        r = radeon_gpu_reset(rdev);
        if (!r)
            r = -EAGAIN;
    }
    return r;
}

static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
    struct radeon_bo *bo = gem_to_radeon_bo(obj);
    struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

    if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
        return -EPERM;

    return drm_gem_ttm_mmap(obj, vma);
}

const struct drm_gem_object_funcs radeon_gem_object_funcs = {
    .free = radeon_gem_object_free,
    .open = radeon_gem_object_open,
    .close = radeon_gem_object_close,
    .export = radeon_gem_prime_export,
    .pin = radeon_gem_prime_pin,
    .unpin = radeon_gem_prime_unpin,
    .get_sg_table = radeon_gem_prime_get_sg_table,
    .vmap = drm_gem_ttm_vmap,
    .vunmap = drm_gem_ttm_vunmap,
    .mmap = radeon_gem_object_mmap,
    .vm_ops = &radeon_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
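/*
 * Report total VRAM, CPU-visible VRAM and GTT sizes to userspace;
 * pinned allocations are subtracted from the visible and GTT numbers.
 */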
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
              struct drm_file *filp)
{
    struct radeon_device *rdev = dev->dev_private;
    struct drm_radeon_gem_info *args = data;
    struct ttm_resource_manager *man;

    man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

    args->vram_size = (u64)man->size << PAGE_SHIFT;
    args->vram_visible = rdev->mc.visible_vram_size;
    args->vram_visible -= rdev->vram_pin_size;
    args->gart_size = rdev->mc.gtt_size;
    args->gart_size -= rdev->gart_pin_size;

    return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
               struct drm_file *filp)
{
    /* TODO: implement */
    DRM_ERROR("unimplemented %s\n", __func__);
    return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                struct drm_file *filp)
{
    /* TODO: implement */
    DRM_ERROR("unimplemented %s\n", __func__);
    return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                struct drm_file *filp)
{
    struct radeon_device *rdev = dev->dev_private;
    struct drm_radeon_gem_create *args = data;
    struct drm_gem_object *gobj;
    uint32_t handle;
    int r;

    down_read(&rdev->exclusive_lock);
    /* create a gem object for the requested allocation */
    args->size = roundup(args->size, PAGE_SIZE);
    r = radeon_gem_object_create(rdev, args->size, args->alignment,
                     args->initial_domain, args->flags,
                     false, &gobj);
    if (r) {
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
    }
    r = drm_gem_handle_create(filp, gobj, &handle);
    /* drop reference from allocate - handle holds it now */
    drm_gem_object_put(gobj);
    if (r) {
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
    }
    args->handle = handle;
    up_read(&rdev->exclusive_lock);
    return 0;
}

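/*
 * Wrap a range of user memory in a GEM object. Read-only mappings
 * require R600 or newer hardware; writable mappings must combine
 * ANONONLY with REGISTER so the pages are backed by anonymous memory
 * and tracked by an MMU notifier.
 */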
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
                 struct drm_file *filp)
{
    struct ttm_operation_ctx ctx = { true, false };
    struct radeon_device *rdev = dev->dev_private;
    struct drm_radeon_gem_userptr *args = data;
    struct drm_gem_object *gobj;
    struct radeon_bo *bo;
    uint32_t handle;
    int r;

    args->addr = untagged_addr(args->addr);

    if (offset_in_page(args->addr | args->size))
        return -EINVAL;

    /* reject unknown flag values */
    if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
        RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
        RADEON_GEM_USERPTR_REGISTER))
        return -EINVAL;

    if (args->flags & RADEON_GEM_USERPTR_READONLY) {
        /* readonly pages not tested on older hardware */
        if (rdev->family < CHIP_R600)
            return -EINVAL;

    } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
           !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

        /* if we want to write to it we must require anonymous
           memory and install an MMU notifier */
        return -EACCES;
    }

    down_read(&rdev->exclusive_lock);

    /* create a gem object for the user pages */
    r = radeon_gem_object_create(rdev, args->size, 0,
                     RADEON_GEM_DOMAIN_CPU, 0,
                     false, &gobj);
    if (r)
        goto handle_lockup;

    bo = gem_to_radeon_bo(gobj);
    r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
    if (r)
        goto release_object;

    if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
        r = radeon_mn_register(bo, args->addr);
        if (r)
            goto release_object;
    }

    if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
        mmap_read_lock(current->mm);
        r = radeon_bo_reserve(bo, true);
        if (r) {
            mmap_read_unlock(current->mm);
            goto release_object;
        }

        radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        radeon_bo_unreserve(bo);
        mmap_read_unlock(current->mm);
        if (r)
            goto release_object;
    }

    r = drm_gem_handle_create(filp, gobj, &handle);
    /* drop reference from allocate - handle holds it now */
    drm_gem_object_put(gobj);
    if (r)
        goto handle_lockup;

    args->handle = handle;
    up_read(&rdev->exclusive_lock);
    return 0;

release_object:
    drm_gem_object_put(gobj);

handle_lockup:
    up_read(&rdev->exclusive_lock);
    r = radeon_gem_handle_lockup(rdev, r);

    return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                struct drm_file *filp)
{
    /* transition the BO to a domain -
     * just validate the BO into a certain domain */
    struct radeon_device *rdev = dev->dev_private;
    struct drm_radeon_gem_set_domain *args = data;
    struct drm_gem_object *gobj;
    struct radeon_bo *robj;
    int r;

    /* for now if someone requests domain CPU -
     * just make sure the buffer is finished with */
    down_read(&rdev->exclusive_lock);

    /* just do a BO wait for now */
    gobj = drm_gem_object_lookup(filp, args->handle);
    if (gobj == NULL) {
        up_read(&rdev->exclusive_lock);
        return -ENOENT;
    }
    robj = gem_to_radeon_bo(gobj);

    r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

    drm_gem_object_put(gobj);
    up_read(&rdev->exclusive_lock);
    r = radeon_gem_handle_lockup(robj->rdev, r);
    return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
              struct drm_device *dev,
              uint32_t handle, uint64_t *offset_p)
{
    struct drm_gem_object *gobj;
    struct radeon_bo *robj;

    gobj = drm_gem_object_lookup(filp, handle);
    if (gobj == NULL) {
        return -ENOENT;
    }
    robj = gem_to_radeon_bo(gobj);
    if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
        drm_gem_object_put(gobj);
        return -EPERM;
    }
    *offset_p = radeon_bo_mmap_offset(robj);
    drm_gem_object_put(gobj);
    return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
              struct drm_file *filp)
{
    struct drm_radeon_gem_mmap *args = data;

    return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

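/*
 * Non-blocking busy query: test the reservation object's fences instead
 * of waiting, and report the BO's current domain back to userspace.
 */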
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
              struct drm_file *filp)
{
    struct drm_radeon_gem_busy *args = data;
    struct drm_gem_object *gobj;
    struct radeon_bo *robj;
    int r;
    uint32_t cur_placement = 0;

    gobj = drm_gem_object_lookup(filp, args->handle);
    if (gobj == NULL) {
        return -ENOENT;
    }
    robj = gem_to_radeon_bo(gobj);

    r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
    if (r == 0)
        r = -EBUSY;
    else
        r = 0;

    cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
    args->domain = radeon_mem_type_to_domain(cur_placement);
    drm_gem_object_put(gobj);
    return r;
}

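/*
 * Block (for up to 30 seconds) until the BO is idle, then flush the HDP
 * cache through MMIO if the BO lives in VRAM so CPU reads see coherent
 * data.
 */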
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                  struct drm_file *filp)
{
    struct radeon_device *rdev = dev->dev_private;
    struct drm_radeon_gem_wait_idle *args = data;
    struct drm_gem_object *gobj;
    struct radeon_bo *robj;
    int r = 0;
    uint32_t cur_placement = 0;
    long ret;

    gobj = drm_gem_object_lookup(filp, args->handle);
    if (gobj == NULL) {
        return -ENOENT;
    }
    robj = gem_to_radeon_bo(gobj);

    ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
                    true, 30 * HZ);
    if (ret == 0)
        r = -EBUSY;
    else if (ret < 0)
        r = ret;

    /* Flush HDP cache via MMIO if necessary */
    cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
    if (rdev->asic->mmio_hdp_flush &&
        radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
        robj->rdev->asic->mmio_hdp_flush(rdev);
    drm_gem_object_put(gobj);
    r = radeon_gem_handle_lockup(rdev, r);
    return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                struct drm_file *filp)
{
    struct drm_radeon_gem_set_tiling *args = data;
    struct drm_gem_object *gobj;
    struct radeon_bo *robj;
    int r = 0;

    DRM_DEBUG("%d\n", args->handle);
    gobj = drm_gem_object_lookup(filp, args->handle);
    if (gobj == NULL)
        return -ENOENT;
    robj = gem_to_radeon_bo(gobj);
    r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
    drm_gem_object_put(gobj);
    return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                struct drm_file *filp)
{
    struct drm_radeon_gem_get_tiling *args = data;
    struct drm_gem_object *gobj;
    struct radeon_bo *rbo;
    int r = 0;

    DRM_DEBUG("\n");
    gobj = drm_gem_object_lookup(filp, args->handle);
    if (gobj == NULL)
        return -ENOENT;
    rbo = gem_to_radeon_bo(gobj);
    r = radeon_bo_reserve(rbo, false);
    if (unlikely(r != 0))
        goto out;
    radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
    radeon_bo_unreserve(rbo);
out:
    drm_gem_object_put(gobj);
    return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
                    struct radeon_bo_va *bo_va)
{
    struct ttm_validate_buffer tv, *entry;
    struct radeon_bo_list *vm_bos;
    struct ww_acquire_ctx ticket;
    struct list_head list;
    unsigned domain;
    int r;

    INIT_LIST_HEAD(&list);

    tv.bo = &bo_va->bo->tbo;
    tv.num_shared = 1;
    list_add(&tv.head, &list);

    vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
    if (!vm_bos)
        return;

    r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
    if (r)
        goto error_free;

    list_for_each_entry(entry, &list, head) {
        domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
        /* if anything is swapped out don't swap it in here,
           just abort and wait for the next CS */
        if (domain == RADEON_GEM_DOMAIN_CPU)
            goto error_unreserve;
    }

    mutex_lock(&bo_va->vm->mutex);
    r = radeon_vm_clear_freed(rdev, bo_va->vm);
    if (r)
        goto error_unlock;

    if (bo_va->it.start)
        r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

error_unlock:
    mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
    ttm_eu_backoff_reservation(&ticket, &list);

error_free:
    kvfree(vm_bos);

    if (r && r != -ERESTARTSYS)
        DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

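/*
 * Map or unmap a BO in the per-file virtual memory context. The flags
 * and the reserved VA range are validated up front; on success the page
 * tables are updated immediately via radeon_gem_va_update_vm().
 */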
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
              struct drm_file *filp)
{
    struct drm_radeon_gem_va *args = data;
    struct drm_gem_object *gobj;
    struct radeon_device *rdev = dev->dev_private;
    struct radeon_fpriv *fpriv = filp->driver_priv;
    struct radeon_bo *rbo;
    struct radeon_bo_va *bo_va;
    u32 invalid_flags;
    int r = 0;

    if (!rdev->vm_manager.enabled) {
        args->operation = RADEON_VA_RESULT_ERROR;
        return -ENOTTY;
    }

    /* !! DON'T REMOVE !!
     * We don't support vm_id yet. To be sure we don't have broken
     * userspace, reject anyone trying to use a non-zero value; that way
     * we can use these fields later without breaking existing userspace.
     */
    if (args->vm_id) {
        args->operation = RADEON_VA_RESULT_ERROR;
        return -EINVAL;
    }

    if (args->offset < RADEON_VA_RESERVED_SIZE) {
        dev_err(dev->dev,
            "offset 0x%lX is in reserved area 0x%X\n",
            (unsigned long)args->offset,
            RADEON_VA_RESERVED_SIZE);
        args->operation = RADEON_VA_RESULT_ERROR;
        return -EINVAL;
    }

    /* don't remove: we need to force userspace to set the snooped flag,
     * otherwise we will end up with broken userspace and we won't be
     * able to enable this feature without adding a new interface
     */
    invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
    if ((args->flags & invalid_flags)) {
        dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
            args->flags, invalid_flags);
        args->operation = RADEON_VA_RESULT_ERROR;
        return -EINVAL;
    }

    switch (args->operation) {
    case RADEON_VA_MAP:
    case RADEON_VA_UNMAP:
        break;
    default:
        dev_err(dev->dev, "unsupported operation %d\n",
            args->operation);
        args->operation = RADEON_VA_RESULT_ERROR;
        return -EINVAL;
    }

    gobj = drm_gem_object_lookup(filp, args->handle);
    if (gobj == NULL) {
        args->operation = RADEON_VA_RESULT_ERROR;
        return -ENOENT;
    }
    rbo = gem_to_radeon_bo(gobj);
    r = radeon_bo_reserve(rbo, false);
    if (r) {
        args->operation = RADEON_VA_RESULT_ERROR;
        drm_gem_object_put(gobj);
        return r;
    }
    bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
    if (!bo_va) {
        args->operation = RADEON_VA_RESULT_ERROR;
        radeon_bo_unreserve(rbo);
        drm_gem_object_put(gobj);
        return -ENOENT;
    }

    switch (args->operation) {
    case RADEON_VA_MAP:
        if (bo_va->it.start) {
            args->operation = RADEON_VA_RESULT_VA_EXIST;
            args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
            radeon_bo_unreserve(rbo);
            goto out;
        }
        r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
        break;
    case RADEON_VA_UNMAP:
        r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
        break;
    default:
        break;
    }
    if (!r)
        radeon_gem_va_update_vm(rdev, bo_va);
    args->operation = RADEON_VA_RESULT_OK;
    if (r) {
        args->operation = RADEON_VA_RESULT_ERROR;
    }
out:
    drm_gem_object_put(gobj);
    return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
            struct drm_file *filp)
{
    struct drm_radeon_gem_op *args = data;
    struct drm_gem_object *gobj;
    struct radeon_bo *robj;
    int r;

    gobj = drm_gem_object_lookup(filp, args->handle);
    if (gobj == NULL) {
        return -ENOENT;
    }
    robj = gem_to_radeon_bo(gobj);

    r = -EPERM;
    if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
        goto out;

    r = radeon_bo_reserve(robj, false);
    if (unlikely(r))
        goto out;

    switch (args->op) {
    case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
        args->value = robj->initial_domain;
        break;
    case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
        robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
                              RADEON_GEM_DOMAIN_GTT |
                              RADEON_GEM_DOMAIN_CPU);
        break;
    default:
        r = -EINVAL;
    }

    radeon_bo_unreserve(robj);
out:
    drm_gem_object_put(gobj);
    return r;
}

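/*
 * Dumb buffers are simple VRAM allocations intended for scanout:
 * compute an aligned pitch and a page-aligned size, then create the
 * GEM object and return a handle for it.
 */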
int radeon_mode_dumb_create(struct drm_file *file_priv,
                struct drm_device *dev,
                struct drm_mode_create_dumb *args)
{
    struct radeon_device *rdev = dev->dev_private;
    struct drm_gem_object *gobj;
    uint32_t handle;
    int r;

    args->pitch = radeon_align_pitch(rdev, args->width,
                     DIV_ROUND_UP(args->bpp, 8), 0);
    args->size = (u64)args->pitch * args->height;
    args->size = ALIGN(args->size, PAGE_SIZE);

    r = radeon_gem_object_create(rdev, args->size, 0,
                     RADEON_GEM_DOMAIN_VRAM, 0,
                     false, &gobj);
    if (r)
        return -ENOMEM;

    r = drm_gem_handle_create(file_priv, gobj, &handle);
    /* drop reference from allocate - handle holds it now */
    drm_gem_object_put(gobj);
    if (r) {
        return r;
    }
    args->handle = handle;
    return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
    struct radeon_device *rdev = (struct radeon_device *)m->private;
    struct radeon_bo *rbo;
    unsigned i = 0;

    mutex_lock(&rdev->gem.mutex);
    list_for_each_entry(rbo, &rdev->gem.objects, list) {
        unsigned domain;
        const char *placement;

        domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
        switch (domain) {
        case RADEON_GEM_DOMAIN_VRAM:
            placement = "VRAM";
            break;
        case RADEON_GEM_DOMAIN_GTT:
            placement = " GTT";
            break;
        case RADEON_GEM_DOMAIN_CPU:
        default:
            placement = " CPU";
            break;
        }
        seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
               i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
               placement, (unsigned long)rbo->pid);
        i++;
    }
    mutex_unlock(&rdev->gem.mutex);
    return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
#endif

void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
    struct dentry *root = rdev->ddev->primary->debugfs_root;

    debugfs_create_file("radeon_gem_info", 0444, root, rdev,
                &radeon_debugfs_gem_info_fops);

#endif
}