/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_gem_ttm_helper.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/push206e.h>

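/* Fault handler for mmap()ed GEM objects.  This follows the generic TTM
 * fault sequence: reserve the BO against concurrent eviction, give the
 * driver a chance to migrate the buffer somewhere mappable
 * (nouveau_ttm_fault_reserve_notify), then have TTM insert the PTEs,
 * prefaulting up to TTM_BO_VM_NUM_PREFAULT pages.  The BO is taken off
 * the io-reserve LRU for the duration, presumably so its I/O mapping
 * cannot be reclaimed while the fault is being serviced.
 */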
static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
{
    struct vm_area_struct *vma = vmf->vma;
    struct ttm_buffer_object *bo = vma->vm_private_data;
    pgprot_t prot;
    vm_fault_t ret;

    ret = ttm_bo_vm_reserve(bo, vmf);
    if (ret)
        return ret;

    ret = nouveau_ttm_fault_reserve_notify(bo);
    if (ret)
        goto error_unlock;

    nouveau_bo_del_io_reserve_lru(bo);
    prot = vm_get_page_prot(vma->vm_flags);
    ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
    nouveau_bo_add_io_reserve_lru(bo);
    if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
        return ret;

error_unlock:
    dma_resv_unlock(bo->base.resv);
    return ret;
}

static const struct vm_operations_struct nouveau_ttm_vm_ops = {
    .fault = nouveau_ttm_fault,
    .open = ttm_bo_vm_open,
    .close = ttm_bo_vm_close,
    .access = ttm_bo_vm_access
};

void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
    struct nouveau_bo *nvbo = nouveau_gem_object(gem);
    struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
    struct device *dev = drm->dev->dev;
    int ret;

    ret = pm_runtime_get_sync(dev);
    if (WARN_ON(ret < 0 && ret != -EACCES)) {
        pm_runtime_put_autosuspend(dev);
        return;
    }

    if (gem->import_attach)
        drm_prime_gem_destroy(gem, nvbo->bo.sg);

    ttm_bo_put(&nvbo->bo);

    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);
}

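/* Called whenever a client (drm_file) obtains a handle to this object.
 * On NV50-and-later VMMs this creates, or takes another reference on,
 * a per-client virtual-memory mapping of the buffer; earlier chips
 * address buffers physically, so there is nothing to do.  The
 * runtime-PM get/put brackets the map because it may need to touch the
 * hardware page tables.
 */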
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
    struct nouveau_cli *cli = nouveau_cli(file_priv);
    struct nouveau_bo *nvbo = nouveau_gem_object(gem);
    struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
    struct device *dev = drm->dev->dev;
    struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
    struct nouveau_vma *vma;
    int ret;

    if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
        return 0;

    ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
    if (ret)
        return ret;

    ret = pm_runtime_get_sync(dev);
    if (ret < 0 && ret != -EACCES) {
        pm_runtime_put_autosuspend(dev);
        goto out;
    }

    ret = nouveau_vma_new(nvbo, vmm, &vma);
    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);
out:
    ttm_bo_unreserve(&nvbo->bo);
    return ret;
}

struct nouveau_gem_object_unmap {
    struct nouveau_cli_work work;
    struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
    nouveau_fence_unref(&vma->fence);
    nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
    struct nouveau_gem_object_unmap *work =
        container_of(w, typeof(*work), work);
    nouveau_gem_object_delete(work->vma);
    kfree(work);
}

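/* Tear down a per-client mapping.  If the VMA still has a fence
 * attached, unmapping immediately could pull pages out from under the
 * GPU, so deletion is deferred behind the fence via nouveau_cli_work.
 * When the work item cannot be allocated, fall back to a bounded
 * synchronous wait on the fence and delete in place.
 */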
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
    struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
    struct nouveau_gem_object_unmap *work;

    list_del_init(&vma->head);

    if (!fence) {
        nouveau_gem_object_delete(vma);
        return;
    }

    if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
        WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
        nouveau_gem_object_delete(vma);
        return;
    }

    work->work.func = nouveau_gem_object_delete_work;
    work->vma = vma;
    nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}

void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
    struct nouveau_cli *cli = nouveau_cli(file_priv);
    struct nouveau_bo *nvbo = nouveau_gem_object(gem);
    struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
    struct device *dev = drm->dev->dev;
    struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
    struct nouveau_vma *vma;
    int ret;

    if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
        return;

    ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
    if (ret)
        return;

    vma = nouveau_vma_find(nvbo, vmm);
    if (vma) {
        if (--vma->refs == 0) {
            ret = pm_runtime_get_sync(dev);
            if (!WARN_ON(ret < 0 && ret != -EACCES)) {
                nouveau_gem_object_unmap(nvbo, vma);
                pm_runtime_mark_last_busy(dev);
            }
            pm_runtime_put_autosuspend(dev);
        }
    }
    ttm_bo_unreserve(&nvbo->bo);
}

const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
    .free = nouveau_gem_object_del,
    .open = nouveau_gem_object_open,
    .close = nouveau_gem_object_close,
    .pin = nouveau_gem_prime_pin,
    .unpin = nouveau_gem_prime_unpin,
    .get_sg_table = nouveau_gem_prime_get_sg_table,
    .vmap = drm_gem_ttm_vmap,
    .vunmap = drm_gem_ttm_vunmap,
    .mmap = drm_gem_ttm_mmap,
    .vm_ops = &nouveau_ttm_vm_ops,
};

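/* Allocate a buffer object with an embedded GEM object.  A request for
 * neither VRAM nor GART is treated as a plain system-memory (CPU
 * domain) buffer.  The single reference handed back is the GEM
 * reference; the underlying TTM object is released through
 * nouveau_gem_object_del() when it drops.
 */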
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
        uint32_t tile_mode, uint32_t tile_flags,
        struct nouveau_bo **pnvbo)
{
    struct nouveau_drm *drm = cli->drm;
    struct nouveau_bo *nvbo;
    int ret;

    if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
        domain |= NOUVEAU_GEM_DOMAIN_CPU;

    nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
                tile_flags);
    if (IS_ERR(nvbo))
        return PTR_ERR(nvbo);

    nvbo->bo.base.funcs = &nouveau_gem_object_funcs;

    /* Initialize the embedded gem-object. We return a single gem-reference
     * to the caller, instead of a normal nouveau_bo ttm reference. */
    ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
    if (ret) {
        drm_gem_object_release(&nvbo->bo.base);
        kfree(nvbo);
        return ret;
    }

    ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
    if (ret)
        return ret;

    /* we restrict allowed domains on nv50+ to only the types
     * that were requested at creation time.  not possible on
     * earlier chips without busting the ABI.
     */
    nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                  NOUVEAU_GEM_DOMAIN_GART;
    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
        nvbo->valid_domains &= domain;

    *pnvbo = nvbo;
    return 0;
}

static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
         struct drm_nouveau_gem_info *rep)
{
    struct nouveau_cli *cli = nouveau_cli(file_priv);
    struct nouveau_bo *nvbo = nouveau_gem_object(gem);
    struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
    struct nouveau_vma *vma;

    if (is_power_of_2(nvbo->valid_domains))
        rep->domain = nvbo->valid_domains;
    else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
        rep->domain = NOUVEAU_GEM_DOMAIN_GART;
    else
        rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
    rep->offset = nvbo->offset;
    if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
        vma = nouveau_vma_find(nvbo, vmm);
        if (!vma)
            return -EINVAL;

        rep->offset = vma->addr;
    }

    rep->size = nvbo->bo.base.size;
    rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
    rep->tile_mode = nvbo->mode;
    rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
    if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
        rep->tile_flags |= nvbo->kind << 8;
    else
    if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
        rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
    else
        rep->tile_flags |= nvbo->zeta;
    return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
              struct drm_file *file_priv)
{
    struct nouveau_cli *cli = nouveau_cli(file_priv);
    struct drm_nouveau_gem_new *req = data;
    struct nouveau_bo *nvbo = NULL;
    int ret = 0;

    ret = nouveau_gem_new(cli, req->info.size, req->align,
                  req->info.domain, req->info.tile_mode,
                  req->info.tile_flags, &nvbo);
    if (ret)
        return ret;

    ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
                    &req->info.handle);
    if (ret == 0) {
        ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
        if (ret)
            drm_gem_handle_delete(file_priv, req->info.handle);
    }

    /* drop reference from allocate - handle holds it now */
    drm_gem_object_put(&nvbo->bo.base);
    return ret;
}

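/* Choose a preferred placement for validation.  The domains requested
 * for this submission must intersect the domains the buffer was
 * created with; within that set, prefer wherever the buffer already
 * resides to avoid a needless migration, then VRAM over GART.
 */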
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
               uint32_t write_domains, uint32_t valid_domains)
{
    struct nouveau_bo *nvbo = nouveau_gem_object(gem);
    struct ttm_buffer_object *bo = &nvbo->bo;
    uint32_t domains = valid_domains & nvbo->valid_domains &
        (write_domains ? write_domains : read_domains);
    uint32_t pref_domains = 0;

    if (!domains)
        return -EINVAL;

    valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);

    if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
        bo->resource->mem_type == TTM_PL_VRAM)
        pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

    else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
         bo->resource->mem_type == TTM_PL_TT)
        pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

    else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
        pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

    else
        pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

    nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);

    return 0;
}

struct validate_op {
    struct list_head list;
    struct ww_acquire_ctx ticket;
};

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
            struct nouveau_fence *fence,
            struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
    struct nouveau_bo *nvbo;
    struct drm_nouveau_gem_pushbuf_bo *b;

    while (!list_empty(&op->list)) {
        nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
        b = &pbbo[nvbo->pbbo_index];

        if (likely(fence)) {
            nouveau_bo_fence(nvbo, fence, !!b->write_domains);

            if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
                struct nouveau_vma *vma =
                    (void *)(unsigned long)b->user_priv;
                nouveau_fence_unref(&vma->fence);
                dma_fence_get(&fence->base);
                vma->fence = fence;
            }
        }

        if (unlikely(nvbo->validate_mapped)) {
            ttm_bo_kunmap(&nvbo->kmap);
            nvbo->validate_mapped = false;
        }

        list_del(&nvbo->entry);
        nvbo->reserved_by = NULL;
        ttm_bo_unreserve(&nvbo->bo);
        drm_gem_object_put(&nvbo->bo.base);
    }
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
          struct nouveau_fence *fence,
          struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
    validate_fini_no_ticket(op, chan, fence, pbbo);
    ww_acquire_fini(&op->ticket);
}

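/* Reserve every buffer on the submission under one ww_acquire ticket.
 * If a reservation hits -EDEADLK, everything reserved so far is
 * unwound, the contended BO is re-acquired via the slowpath (res_bo),
 * and the loop restarts: the standard wound/wait dance for locking an
 * arbitrary set of reservation objects without deadlocking against
 * other submitters.  Buffers are bucketed by valid domain and spliced
 * back in vram, gart, both order, presumably so the most constrained
 * placements are validated first.
 */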
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
          struct drm_nouveau_gem_pushbuf_bo *pbbo,
          int nr_buffers, struct validate_op *op)
{
    struct nouveau_cli *cli = nouveau_cli(file_priv);
    int trycnt = 0;
    int ret = -EINVAL, i;
    struct nouveau_bo *res_bo = NULL;
    LIST_HEAD(gart_list);
    LIST_HEAD(vram_list);
    LIST_HEAD(both_list);

    ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
    if (++trycnt > 100000) {
        NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
        return -EINVAL;
    }

    for (i = 0; i < nr_buffers; i++) {
        struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;

        gem = drm_gem_object_lookup(file_priv, b->handle);
        if (!gem) {
            NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
            ret = -ENOENT;
            break;
        }
        nvbo = nouveau_gem_object(gem);
        if (nvbo == res_bo) {
            res_bo = NULL;
            drm_gem_object_put(gem);
            continue;
        }

        if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
            NV_PRINTK(err, cli, "multiple instances of buffer %d on "
                      "validation list\n", b->handle);
            drm_gem_object_put(gem);
            ret = -EINVAL;
            break;
        }

        ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
        if (ret) {
            list_splice_tail_init(&vram_list, &op->list);
            list_splice_tail_init(&gart_list, &op->list);
            list_splice_tail_init(&both_list, &op->list);
            validate_fini_no_ticket(op, chan, NULL, NULL);
            if (unlikely(ret == -EDEADLK)) {
                ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
                                  &op->ticket);
                if (!ret)
                    res_bo = nvbo;
            }
            if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                    NV_PRINTK(err, cli, "fail reserve\n");
                break;
            }
        }

        if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
            struct nouveau_vmm *vmm = chan->vmm;
            struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
            if (!vma) {
                NV_PRINTK(err, cli, "vma not found!\n");
                ret = -EINVAL;
                break;
            }

            b->user_priv = (uint64_t)(unsigned long)vma;
        } else {
            b->user_priv = (uint64_t)(unsigned long)nvbo;
        }

        nvbo->reserved_by = file_priv;
        nvbo->pbbo_index = i;
        if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
            list_add_tail(&nvbo->entry, &both_list);
        else
        if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
            list_add_tail(&nvbo->entry, &vram_list);
        else
        if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
            list_add_tail(&nvbo->entry, &gart_list);
        else {
            NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
                 b->valid_domains);
            list_add_tail(&nvbo->entry, &both_list);
            ret = -EINVAL;
            break;
        }
        if (nvbo == res_bo)
            goto retry;
    }

    ww_acquire_done(&op->ticket);
    list_splice_tail(&vram_list, &op->list);
    list_splice_tail(&gart_list, &op->list);
    list_splice_tail(&both_list, &op->list);
    if (ret)
        validate_fini(op, chan, NULL, NULL);
    return ret;
}

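/* Validate each reserved buffer: choose a placement, let TTM migrate
 * the buffer if necessary, then synchronise the channel with any
 * fences on it.  On pre-Tesla chips userspace embeds presumed offsets
 * in the push buffer, so a placement change invalidates the presumed
 * data and bumps the reloc count returned to the caller.
 */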
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
          struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
    struct nouveau_drm *drm = chan->drm;
    struct nouveau_bo *nvbo;
    int ret, relocs = 0;

    list_for_each_entry(nvbo, list, entry) {
        struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

        ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
                         b->write_domains,
                         b->valid_domains);
        if (unlikely(ret)) {
            NV_PRINTK(err, cli, "fail set_domain\n");
            return ret;
        }

        ret = nouveau_bo_validate(nvbo, true, false);
        if (unlikely(ret)) {
            if (ret != -ERESTARTSYS)
                NV_PRINTK(err, cli, "fail ttm_validate\n");
            return ret;
        }

        ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
        if (unlikely(ret)) {
            if (ret != -ERESTARTSYS)
                NV_PRINTK(err, cli, "fail post-validate sync\n");
            return ret;
        }

        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
            if (nvbo->offset == b->presumed.offset &&
                ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
                  b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                 (nvbo->bo.resource->mem_type == TTM_PL_TT &&
                  b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                continue;

            if (nvbo->bo.resource->mem_type == TTM_PL_TT)
                b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
            else
                b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
            b->presumed.offset = nvbo->offset;
            b->presumed.valid = 0;
            relocs++;
        }
    }

    return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                 struct drm_file *file_priv,
                 struct drm_nouveau_gem_pushbuf_bo *pbbo,
                 int nr_buffers,
                 struct validate_op *op, bool *apply_relocs)
{
    struct nouveau_cli *cli = nouveau_cli(file_priv);
    int ret;

    INIT_LIST_HEAD(&op->list);

    if (nr_buffers == 0)
        return 0;

    ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
    if (unlikely(ret)) {
        if (ret != -ERESTARTSYS)
            NV_PRINTK(err, cli, "validate_init\n");
        return ret;
    }

    ret = validate_list(chan, cli, &op->list, pbbo);
    if (unlikely(ret < 0)) {
        if (ret != -ERESTARTSYS)
            NV_PRINTK(err, cli, "validating bo list\n");
        validate_fini(op, chan, NULL, NULL);
        return ret;
    } else if (ret > 0) {
        *apply_relocs = true;
    }

    return 0;
}

static inline void
u_free(void *addr)
{
    kvfree(addr);
}

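/* Copy an array in from userspace.  The nmemb * size multiplication is
 * not overflow-checked here; callers are expected to have bounded the
 * element counts first, as nouveau_gem_ioctl_pushbuf() does against
 * the NOUVEAU_GEM_MAX_* limits before calling in.
 */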
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
    void *mem;
    void __user *userptr = (void __force __user *)(uintptr_t)user;

    size *= nmemb;

    mem = kvmalloc(size, GFP_KERNEL);
    if (!mem)
        return ERR_PTR(-ENOMEM);

    if (copy_from_user(mem, userptr, size)) {
        u_free(mem);
        return ERR_PTR(-EFAULT);
    }

    return mem;
}

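/* Patch relocations on the CPU.  Each entry rewrites one 32-bit word
 * of a kmapped target buffer with the low or high half of a presumed
 * address, optionally OR-ing in a placement-dependent value (tor for
 * GART, vor for VRAM).  The wait-for-idle before every write makes
 * this path expensive; it only runs when the presumed offsets supplied
 * by userspace turned out to be stale.
 */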
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                struct drm_nouveau_gem_pushbuf *req,
                struct drm_nouveau_gem_pushbuf_reloc *reloc,
                struct drm_nouveau_gem_pushbuf_bo *bo)
{
    int ret = 0;
    unsigned i;

    for (i = 0; i < req->nr_relocs; i++) {
        struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
        struct drm_nouveau_gem_pushbuf_bo *b;
        struct nouveau_bo *nvbo;
        uint32_t data;

        if (unlikely(r->bo_index >= req->nr_buffers)) {
            NV_PRINTK(err, cli, "reloc bo index invalid\n");
            ret = -EINVAL;
            break;
        }

        b = &bo[r->bo_index];
        if (b->presumed.valid)
            continue;

        if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
            NV_PRINTK(err, cli, "reloc container bo index invalid\n");
            ret = -EINVAL;
            break;
        }
        nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

        if (unlikely(r->reloc_bo_offset + 4 >
                 nvbo->bo.base.size)) {
            NV_PRINTK(err, cli, "reloc outside of bo\n");
            ret = -EINVAL;
            break;
        }

        if (!nvbo->kmap.virtual) {
            ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
                      &nvbo->kmap);
            if (ret) {
                NV_PRINTK(err, cli, "failed kmap for reloc\n");
                break;
            }
            nvbo->validate_mapped = true;
        }

        if (r->flags & NOUVEAU_GEM_RELOC_LOW)
            data = b->presumed.offset + r->data;
        else
        if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
            data = (b->presumed.offset + r->data) >> 32;
        else
            data = r->data;

        if (r->flags & NOUVEAU_GEM_RELOC_OR) {
            if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
                data |= r->tor;
            else
                data |= r->vor;
        }

        ret = ttm_bo_wait(&nvbo->bo, false, false);
        if (ret) {
            NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
            break;
        }

        nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
    }

    return ret;
}

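/* The main command-submission ioctl.  Once the push/buffer/reloc
 * arrays are copied in and the buffer list is validated, the pushes
 * are queued through one of three mechanisms, from newest to oldest
 * hardware:
 *
 *  - chan->dma.ib_max != 0: an indirect-buffer ring; each push is
 *    referenced by its GPU virtual address.
 *  - chipset >= 0x25: the buffer is entered via a CALL, two words per
 *    push.
 *  - otherwise: a JUMP into the user buffer, with a JUMP back to the
 *    main ring patched into the end of each push (the suffix0
 *    handshake with userspace below).
 *
 * A fence is then emitted to track completion, and any refreshed
 * presumed offsets are copied back to userspace.
 */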
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
              struct drm_file *file_priv)
{
    struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
    struct nouveau_cli *cli = nouveau_cli(file_priv);
    struct nouveau_abi16_chan *temp;
    struct nouveau_drm *drm = nouveau_drm(dev);
    struct drm_nouveau_gem_pushbuf *req = data;
    struct drm_nouveau_gem_pushbuf_push *push;
    struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
    struct drm_nouveau_gem_pushbuf_bo *bo;
    struct nouveau_channel *chan = NULL;
    struct validate_op op;
    struct nouveau_fence *fence = NULL;
    int i, j, ret = 0;
    bool do_reloc = false, sync = false;

    if (unlikely(!abi16))
        return -ENOMEM;

    list_for_each_entry(temp, &abi16->channels, head) {
        if (temp->chan->chid == req->channel) {
            chan = temp->chan;
            break;
        }
    }

    if (!chan)
        return nouveau_abi16_put(abi16, -ENOENT);
    if (unlikely(atomic_read(&chan->killed)))
        return nouveau_abi16_put(abi16, -ENODEV);

    sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

    req->vram_available = drm->gem.vram_available;
    req->gart_available = drm->gem.gart_available;
    if (unlikely(req->nr_push == 0))
        goto out_next;

    if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
        NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
             req->nr_push, NOUVEAU_GEM_MAX_PUSH);
        return nouveau_abi16_put(abi16, -EINVAL);
    }

    if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
        NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
             req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
        return nouveau_abi16_put(abi16, -EINVAL);
    }

    if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
        NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
             req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
        return nouveau_abi16_put(abi16, -EINVAL);
    }

    push = u_memcpya(req->push, req->nr_push, sizeof(*push));
    if (IS_ERR(push))
        return nouveau_abi16_put(abi16, PTR_ERR(push));

    bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
    if (IS_ERR(bo)) {
        u_free(push);
        return nouveau_abi16_put(abi16, PTR_ERR(bo));
    }

    /* Ensure all push buffers are on validate list */
    for (i = 0; i < req->nr_push; i++) {
        if (push[i].bo_index >= req->nr_buffers) {
            NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
            ret = -EINVAL;
            goto out_prevalid;
        }
    }

    /* Validate buffer list */
revalidate:
    ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
                       req->nr_buffers, &op, &do_reloc);
    if (ret) {
        if (ret != -ERESTARTSYS)
            NV_PRINTK(err, cli, "validate: %d\n", ret);
        goto out_prevalid;
    }

    /* Apply any relocations that are required */
    if (do_reloc) {
        if (!reloc) {
            validate_fini(&op, chan, NULL, bo);
            reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
            if (IS_ERR(reloc)) {
                ret = PTR_ERR(reloc);
                goto out_prevalid;
            }

            goto revalidate;
        }

        ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
        if (ret) {
            NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
            goto out;
        }
    }

    if (chan->dma.ib_max) {
        ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
        if (ret) {
            NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
            goto out;
        }

        for (i = 0; i < req->nr_push; i++) {
            struct nouveau_vma *vma = (void *)(unsigned long)
                bo[push[i].bo_index].user_priv;

            nv50_dma_push(chan, vma->addr + push[i].offset,
                      push[i].length);
        }
    } else
    if (drm->client.device.info.chipset >= 0x25) {
        ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
        if (ret) {
            NV_PRINTK(err, cli, "cal_space: %d\n", ret);
            goto out;
        }

        for (i = 0; i < req->nr_push; i++) {
            struct nouveau_bo *nvbo = (void *)(unsigned long)
                bo[push[i].bo_index].user_priv;

            PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
            PUSH_DATA(chan->chan.push, 0);
        }
    } else {
        ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
        if (ret) {
            NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
            goto out;
        }

        for (i = 0; i < req->nr_push; i++) {
            struct nouveau_bo *nvbo = (void *)(unsigned long)
                bo[push[i].bo_index].user_priv;
            uint32_t cmd;

            cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
            cmd |= 0x20000000;
            if (unlikely(cmd != req->suffix0)) {
                if (!nvbo->kmap.virtual) {
                    ret = ttm_bo_kmap(&nvbo->bo, 0,
                              nvbo->bo.resource->
                              num_pages,
                              &nvbo->kmap);
                    if (ret) {
                        WIND_RING(chan);
                        goto out;
                    }
                    nvbo->validate_mapped = true;
                }

                nouveau_bo_wr32(nvbo, (push[i].offset +
                        push[i].length - 8) / 4, cmd);
            }

            PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
            PUSH_DATA(chan->chan.push, 0);
            for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
                PUSH_DATA(chan->chan.push, 0);
        }
    }

    ret = nouveau_fence_new(chan, false, &fence);
    if (ret) {
        NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
        WIND_RING(chan);
        goto out;
    }

    if (sync) {
        if (!(ret = nouveau_fence_wait(fence, false, false))) {
            if ((ret = dma_fence_get_status(&fence->base)) == 1)
                ret = 0;
        }
    }

out:
    validate_fini(&op, chan, fence, bo);
    nouveau_fence_unref(&fence);

    if (do_reloc) {
        struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
            u64_to_user_ptr(req->buffers);

        for (i = 0; i < req->nr_buffers; i++) {
            if (bo[i].presumed.valid)
                continue;

            if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
                     sizeof(bo[i].presumed))) {
                ret = -EFAULT;
                break;
            }
        }
    }
out_prevalid:
    if (!IS_ERR(reloc))
        u_free(reloc);
    u_free(bo);
    u_free(push);

out_next:
    if (chan->dma.ib_max) {
        req->suffix0 = 0x00000000;
        req->suffix1 = 0x00000000;
    } else
    if (drm->client.device.info.chipset >= 0x25) {
        req->suffix0 = 0x00020000;
        req->suffix1 = 0x00000000;
    } else {
        req->suffix0 = 0x20000000 |
                  (chan->push.addr + ((chan->dma.cur + 2) << 2));
        req->suffix1 = 0x00000000;
    }

    return nouveau_abi16_put(abi16, ret);
}

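/* CPU-access synchronisation.  cpu_prep waits (up to 30 seconds, or
 * not at all with NOWAIT) for the fences on the buffer's reservation
 * object and then performs any cache maintenance needed for CPU
 * access; cpu_fini below is the mirror image, syncing the buffer back
 * for device access.
 */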
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
               struct drm_file *file_priv)
{
    struct drm_nouveau_gem_cpu_prep *req = data;
    struct drm_gem_object *gem;
    struct nouveau_bo *nvbo;
    bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
    bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
    long lret;
    int ret;

    gem = drm_gem_object_lookup(file_priv, req->handle);
    if (!gem)
        return -ENOENT;
    nvbo = nouveau_gem_object(gem);

    lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
                     dma_resv_usage_rw(write), true,
                     no_wait ? 0 : 30 * HZ);
    if (!lret)
        ret = -EBUSY;
    else if (lret > 0)
        ret = 0;
    else
        ret = lret;

    nouveau_bo_sync_for_cpu(nvbo);
    drm_gem_object_put(gem);

    return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
               struct drm_file *file_priv)
{
    struct drm_nouveau_gem_cpu_fini *req = data;
    struct drm_gem_object *gem;
    struct nouveau_bo *nvbo;

    gem = drm_gem_object_lookup(file_priv, req->handle);
    if (!gem)
        return -ENOENT;
    nvbo = nouveau_gem_object(gem);

    nouveau_bo_sync_for_device(nvbo);
    drm_gem_object_put(gem);
    return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
               struct drm_file *file_priv)
{
    struct drm_nouveau_gem_info *req = data;
    struct drm_gem_object *gem;
    int ret;

    gem = drm_gem_object_lookup(file_priv, req->handle);
    if (!gem)
        return -ENOENT;

    ret = nouveau_gem_info(file_priv, gem, req);
    drm_gem_object_put(gem);
    return ret;
}