0001
0002
0003
0004
0005
0006 #include <linux/dma-buf.h>
0007 #include <linux/export.h>
0008 #include <linux/module.h>
0009 #include <linux/mutex.h>
0010 #include <linux/shmem_fs.h>
0011 #include <linux/slab.h>
0012 #include <linux/vmalloc.h>
0013 #include <linux/module.h>
0014
0015 #ifdef CONFIG_X86
0016 #include <asm/set_memory.h>
0017 #endif
0018
0019 #include <drm/drm.h>
0020 #include <drm/drm_device.h>
0021 #include <drm/drm_drv.h>
0022 #include <drm/drm_gem_shmem_helper.h>
0023 #include <drm/drm_prime.h>
0024 #include <drm/drm_print.h>
0025
0026 MODULE_IMPORT_NS(DMA_BUF);
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
/*
 * Default &drm_gem_object_funcs for shmem-backed objects. Installed by
 * __drm_gem_shmem_create() for objects whose driver did not supply its own
 * funcs via &drm_driver.gem_create_object.
 */
static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};
0051
/*
 * Common allocation path for native and imported (private) shmem objects.
 *
 * @dev: DRM device
 * @size: requested size in bytes (page-aligned here)
 * @private: true for dma-buf imports, which have no shmem backing file
 *
 * Returns the new object or an ERR_PTR()-encoded negative error code.
 */
static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	/* Let the driver allocate its own (possibly larger) wrapper object. */
	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	/* Fall back to the shmem defaults if the driver set no funcs. */
	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		/* Imported buffers: no shmem file, exporter owns the memory
		 * and its caching attributes, so no write-combining here. */
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false;
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a waste of memory.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	/* NOTE(review): freeing via obj assumes &shmem->base is at offset 0
	 * of any driver-allocated wrapper — confirm against the drivers'
	 * gem_create_object implementations. */
	kfree(obj);

	return ERR_PTR(ret);
}
0114
/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object backed by anonymous, swappable
 * memory.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
0130
/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	/* A live kernel mapping at free time indicates a driver bug. */
	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		/* Imported buffer: the exporter owns the memory; release our
		 * attachment and (optional) cached sg table. */
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			/* Undo drm_gem_shmem_get_pages_sgt(). */
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	/* All page references should be gone by now. */
	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
0165
0166 static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
0167 {
0168 struct drm_gem_object *obj = &shmem->base;
0169 struct page **pages;
0170
0171 if (shmem->pages_use_count++ > 0)
0172 return 0;
0173
0174 pages = drm_gem_get_pages(obj);
0175 if (IS_ERR(pages)) {
0176 DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
0177 shmem->pages_use_count = 0;
0178 return PTR_ERR(pages);
0179 }
0180
0181
0182
0183
0184
0185
0186 #ifdef CONFIG_X86
0187 if (shmem->map_wc)
0188 set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
0189 #endif
0190
0191 shmem->pages = pages;
0192
0193 return 0;
0194 }
0195
/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exists for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	/* Imported buffers have no shmem pages of their own. */
	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);
0221
0222 static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
0223 {
0224 struct drm_gem_object *obj = &shmem->base;
0225
0226 if (WARN_ON_ONCE(!shmem->pages_use_count))
0227 return;
0228
0229 if (--shmem->pages_use_count > 0)
0230 return;
0231
0232 #ifdef CONFIG_X86
0233 if (shmem->map_wc)
0234 set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
0235 #endif
0236
0237 drm_gem_put_pages(obj, shmem->pages,
0238 shmem->pages_mark_dirty_on_put,
0239 shmem->pages_mark_accessed_on_put);
0240 shmem->pages = NULL;
0241 }
0242
/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when use drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
0256
/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	/* A page use-count reference is what keeps the pages resident. */
	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);
0274
/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
0289
/* Establish (or reuse) the kernel virtual mapping; caller must hold
 * shmem->vmap_lock. */
static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
				     struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	/* Reuse an existing mapping; just hand out the cached vaddr. */
	if (shmem->vmap_use_count++ > 0) {
		iosys_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		/* Imported buffers are mapped by their exporter. */
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				/* This helper only supports system memory. */
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	/* Only the native path above took a page reference. */
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}
0343
/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the backing memory of the shmem GEM object. It hides the
 * differences between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
0373
0374 static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
0375 struct iosys_map *map)
0376 {
0377 struct drm_gem_object *obj = &shmem->base;
0378
0379 if (WARN_ON_ONCE(!shmem->vmap_use_count))
0380 return;
0381
0382 if (--shmem->vmap_use_count > 0)
0383 return;
0384
0385 if (obj->import_attach) {
0386 dma_buf_vunmap(obj->import_attach->dmabuf, map);
0387 } else {
0388 vunmap(shmem->vaddr);
0389 drm_gem_shmem_put_pages(shmem);
0390 }
0391
0392 shmem->vaddr = NULL;
0393 }
0394
/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops
 * to zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
0415
0416 static struct drm_gem_shmem_object *
0417 drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
0418 struct drm_device *dev, size_t size,
0419 uint32_t *handle)
0420 {
0421 struct drm_gem_shmem_object *shmem;
0422 int ret;
0423
0424 shmem = drm_gem_shmem_create(dev, size);
0425 if (IS_ERR(shmem))
0426 return shmem;
0427
0428
0429
0430
0431
0432 ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
0433
0434 drm_gem_object_put(&shmem->base);
0435 if (ret)
0436 return ERR_PTR(ret);
0437
0438 return shmem;
0439 }
0440
0441
0442
0443
0444 int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
0445 {
0446 mutex_lock(&shmem->pages_lock);
0447
0448 if (shmem->madv >= 0)
0449 shmem->madv = madv;
0450
0451 madv = shmem->madv;
0452
0453 mutex_unlock(&shmem->pages_lock);
0454
0455 return (madv >= 0);
0456 }
0457 EXPORT_SYMBOL(drm_gem_shmem_madvise);
0458
/* Release all backing storage of a purgeable object; caller must hold
 * shmem->pages_lock and the object must be purgeable (see
 * drm_gem_shmem_is_purgeable()). */
void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	/* Mark as purged; drm_gem_shmem_madvise() keeps this sticky. */
	shmem->madv = -1;

	/* Tear down any userspace mappings and the fake mmap offset. */
	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
0488
/* Non-blocking purge: returns false if pages_lock is contended, true once
 * the object has been purged. The trylock avoids blocking callers running
 * under memory pressure (e.g. a driver's shrinker). */
bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
0499
0500
0501
0502
0503
0504
0505
0506
0507
0508
0509
0510
0511
0512
0513
0514
0515
0516
0517 int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
0518 struct drm_mode_create_dumb *args)
0519 {
0520 u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
0521 struct drm_gem_shmem_object *shmem;
0522
0523 if (!args->pitch || !args->size) {
0524 args->pitch = min_pitch;
0525 args->size = PAGE_ALIGN(args->pitch * args->height);
0526 } else {
0527
0528 if (args->pitch < min_pitch)
0529 args->pitch = min_pitch;
0530 if (args->size < args->pitch * args->height)
0531 args->size = PAGE_ALIGN(args->pitch * args->height);
0532 }
0533
0534 shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
0535
0536 return PTR_ERR_OR_ZERO(shmem);
0537 }
0538 EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
0539
/* Page-fault handler for mmap'd shmem objects: inserts the backing page for
 * the faulting address, or SIGBUS if the object was purged. */
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	/* Re-check everything under the lock: the pages may have been
	 * released or the object purged (madv < 0) concurrently, and the
	 * offset must be validated before indexing the page array. */
	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}
0569
0570 static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
0571 {
0572 struct drm_gem_object *obj = vma->vm_private_data;
0573 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
0574 int ret;
0575
0576 WARN_ON(shmem->base.import_attach);
0577
0578 ret = drm_gem_shmem_get_pages(shmem);
0579 WARN_ON_ONCE(ret != 0);
0580
0581 drm_gem_vm_open(vma);
0582 }
0583
/* VMA close hook: drop the page reference held for this VMA. */
static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}
0592
/* VM operations installed for mappings set up by drm_gem_shmem_mmap(). */
const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
0599
0600
0601
0602
0603
0604
0605
0606
0607
0608
0609
0610
0611 int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
0612 {
0613 struct drm_gem_object *obj = &shmem->base;
0614 int ret;
0615
0616 if (obj->import_attach) {
0617
0618 drm_gem_object_put(obj);
0619 vma->vm_private_data = NULL;
0620
0621 return dma_buf_mmap(obj->dma_buf, vma, 0);
0622 }
0623
0624 ret = drm_gem_shmem_get_pages(shmem);
0625 if (ret) {
0626 drm_gem_vm_close(vma);
0627 return ret;
0628 }
0629
0630 vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
0631 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
0632 if (shmem->map_wc)
0633 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
0634
0635 return 0;
0636 }
0637 EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
0638
/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);
0653
/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers who need to acquire an scatter/gather table for objects need to call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
0677
/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it
 * hides the differences between dma-buf imported and natively allocated
 * objects. drm_gem_shmem_get_sg_table() should not be directly called by
 * drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	/* Return the cached table; it lives until drm_gem_shmem_free(). */
	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}

	/* Map the pages for device access. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
0731
/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	/* private=true: no shmem file is created for imported buffers. */
	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	/* The exporter owns @sgt; drm_prime_gem_destroy() releases it. */
	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
0766
0767 MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
0768 MODULE_IMPORT_NS(DMA_BUF);
0769 MODULE_LICENSE("GPL v2");