// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

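/*
 * Return the backing pages of the object, allocating them and building and
 * DMA-mapping the scatter/gather table on first use.  Callers must hold
 * etnaviv_obj->lock (see the lockdep assert); returns an ERR_PTR on failure.
 */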
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

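/*
 * Back a userspace mapping of the object, choosing the page protection
 * that matches the BO's cache mode.
 */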
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, etnaviv_obj->base.filp);

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
}

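/*
 * Fault handler for userspace mappings: look up (allocating if necessary)
 * the backing page for the faulting address and insert it into the VMA.
 */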
static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}

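/* Allocate the fake mmap offset for this object and report it to userspace. */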
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

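/*
 * Find an existing mapping of this object in the given MMU context, if any.
 * Callers are expected to hold etnaviv_obj->lock so that vram_list is
 * stable while it is walked.
 */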
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}

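/*
 * Look up or create a GPU VA mapping of the object in the given MMU
 * context.  On success the mapping's use count is raised and a reference
 * is taken on the object; returns an ERR_PTR on failure.
 */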
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

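/*
 * Return a kernel virtual mapping of the object, creating it on first use.
 * The unlocked fast path relies on vaddr never being torn down before the
 * object itself is released.
 */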
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

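/*
 * Translate the userspace prep flags into a DMA direction: CPU reads need
 * a sync from the device, CPU writes a sync to the device, and combined
 * access gets the conservative bidirectional treatment.
 */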
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

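/*
 * Prepare the object for CPU access: wait for (or, with ETNA_PREP_NOSYNC,
 * merely poll) outstanding GPU work on the reservation object, then sync
 * the pages for the CPU if the BO is cacheable.
 */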
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled(obj->resv,
					    dma_resv_usage_rw(write)))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
					    true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

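/*
 * End a CPU access window: hand cacheable pages back to the device, using
 * the direction recorded by the matching cpu_prep call.
 */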
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	unsigned long off = drm_vma_node_start(&obj->vma_node);
	int r;

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	r = dma_resv_lock(robj, NULL);
	if (r)
		return;

	dma_resv_describe(robj, m);
	dma_resv_unlock(robj);
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, " ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context)
			etnaviv_iommu_unmap_gem(context, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
	.free = etnaviv_gem_free_object,
	.pin = etnaviv_gem_prime_pin,
	.unpin = etnaviv_gem_prime_unpin,
	.get_sg_table = etnaviv_gem_prime_get_sg_table,
	.vmap = etnaviv_gem_prime_vmap,
	.mmap = etnaviv_gem_mmap,
	.vm_ops = &vm_ops,
};

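/*
 * Common allocation path: validate the requested cache mode and set up the
 * etnaviv GEM object.  Initialising the underlying GEM object (shmem or
 * private) is left to the callers.
 */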
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;
	(*obj)->funcs = &etnaviv_gem_object_funcs;

	return 0;
}


/* convenience method to construct a GEM buffer object,
 * and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the
	 * MOVABLE zone is a really bad idea, and conflicts with CMA.
	 * See comments above new_inode() why this is required _and_
	 * expected if you're going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

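/*
 * Pin the userspace pages backing a userptr object.  Only the mm that
 * created the object may do this, and the pages stay pinned (FOLL_LONGTERM)
 * until the object is released.
 */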
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages,
					  FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM,
					  pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

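/* userptr BOs have no shmem backing, so a direct mmap is not supported */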
static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

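/*
 * Create a cached userptr BO for the given user address range and return a
 * handle to it; the backing pages are pinned lazily on first use.
 */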
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);
	return ret;
}