// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS(DMA_BUF);

static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
	dma_addr_t next = ~(dma_addr_t)0;
	unsigned int count = 0, i;
	struct scatterlist *s;

	for_each_sg(sgl, s, nents, i) {
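		/* sg_dma_address() is only valid for entries that have a non-zero sg_dma_len() */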
		if (!sg_dma_len(s))
			continue;

		if (sg_dma_address(s) != next) {
			next = sg_dma_address(s) + sg_dma_len(s);
			count++;
		}
	}

	return count;
}

static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
					      enum dma_data_direction direction)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_gem_object *gem = &obj->gem;
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

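	/*
	 * Imported buffers need special treatment to satisfy the semantics
	 * of DMA-BUF.
	 */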
	if (gem->import_attach) {
		struct dma_buf *buf = gem->import_attach->dmabuf;

		map->attach = dma_buf_attach(buf, dev);
		if (IS_ERR(map->attach)) {
			err = PTR_ERR(map->attach);
			goto free;
		}

		map->sgt = dma_buf_map_attachment(map->attach, direction);
		if (IS_ERR(map->sgt)) {
			dma_buf_detach(buf, map->attach);
			err = PTR_ERR(map->sgt);
			map->sgt = NULL;
			goto free;
		}

		err = sgt_dma_count_chunks(map->sgt);
		map->size = gem->size;

		goto out;
	}

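	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping itself via the DMA API.
	 */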
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	if (obj->pages) {
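		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API, construct an SG table from the pages.
		 */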
		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
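		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */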
		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
		if (err < 0)
			goto free;
	}

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

out:
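	/*
	 * If we've manually mapped the buffer object through the IOMMU,
	 * make sure to return the IOVA address of our mapping.
	 */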
	if (!obj->mm) {
		map->phys = sg_dma_address(map->sgt->sgl);
		map->chunks = err;
	} else {
		map->phys = obj->iova;
		map->chunks = 1;
	}

	map->size = gem->size;

	return map;

free_sgt:
	sg_free_table(map->sgt);
free:
	kfree(map->sgt);
	kfree(map);
	return ERR_PTR(err);
}

static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (map->attach) {
		dma_buf_unmap_attachment(map->attach, map->sgt, map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
	} else {
		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
		sg_free_table(map->sgt);
		kfree(map->sgt);
	}

	host1x_bo_put(map->bo);
	kfree(map);
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map;
	int ret;

	if (obj->vaddr) {
		return obj->vaddr;
	} else if (obj->gem.import_attach) {
		ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
		return ret ? NULL : map.vaddr;
	} else {
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
	}
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

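	/* drop the allocation reference; the userspace handle now keeps the BO alive */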
	drm_gem_object_put(&bo->gem);

	return bo;
}

static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

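	/* hold a reference to the DMA-BUF for as long as the import exists */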
	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct host1x_bo_mapping *mapping, *tmp;
	struct tegra_bo *bo = to_tegra_bo(gem);

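	/* remove all mappings of this buffer object from any caches */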
	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
		if (mapping->cache)
			host1x_bo_unpin(mapping);
		else
			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
				dev_name(mapping->dev));
	}

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

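	/*
	 * Only buffers backed by individual pages can be faulted in;
	 * contiguous buffers are fully mapped up front in __tegra_gem_mmap().
	 */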
	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

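		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */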
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	void *vaddr;

	vaddr = tegra_bo_mmap(&bo->base);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	tegra_bo_munmap(&bo->base, map->vaddr);
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

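	/* if this is one of our own buffers, simply take another reference */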
	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}