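// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * (The SPDX tag above is inferred from the MODULE_LICENSE("GPL")
 * declaration at the bottom of this file, which the kernel defines as
 * "GPL v2 or later".)
 */
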
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>
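
/**
 * DOC: cma helpers
 *
 * The Contiguous Memory Allocator reserves a pool of memory at early boot
 * that is used to service requests for large blocks of contiguous memory.
 *
 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
 * objects that are physically contiguous in memory. This is useful for
 * display drivers that are unable to map scattered buffers via an IOMMU.
 *
 * The &drm_gem_object_funcs callbacks are wired up through
 * &drm_gem_cma_default_funcs below; the like-named functions with an
 * _object_ infix (e.g. drm_gem_cma_object_vmap() wrapping
 * drm_gem_cma_vmap()) come from <drm/drm_gem_cma_helper.h> and perform the
 * conversion from &drm_gem_object to &drm_gem_cma_object.
 *
 * A minimal sketch of driver-side usage, assuming the
 * DEFINE_DRM_GEM_CMA_FOPS() and DRM_GEM_CMA_DRIVER_OPS macros from
 * <drm/drm_gem_cma_helper.h> and a hypothetical driver "foo" (the exact
 * feature flags depend on the driver):
 *
 * .. code-block:: c
 *
 *     DEFINE_DRM_GEM_CMA_FOPS(foo_fops);
 *
 *     static const struct drm_driver foo_driver = {
 *             .driver_features = DRIVER_GEM | DRIVER_MODESET,
 *             .fops = &foo_fops,
 *             DRM_GEM_CMA_DRIVER_OPS,
 *     };
 */
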
static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
	.free = drm_gem_cma_object_free,
	.print_info = drm_gem_cma_object_print_info,
	.get_sg_table = drm_gem_cma_object_get_sg_table,
	.vmap = drm_gem_cma_object_vmap,
	.mmap = drm_gem_cma_object_mmap,
	.vm_ops = &drm_gem_cma_vm_ops,
};
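
/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 * @private: true if this buffer object will be backed by an imported dma-buf
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */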
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret = 0;

	if (drm->driver->gem_create_object) {
		gem_obj = drm->driver->gem_create_object(drm, size);
		if (IS_ERR(gem_obj))
			return ERR_CAST(gem_obj);
		cma_obj = to_drm_gem_cma_obj(gem_obj);
	} else {
		cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
		if (!cma_obj)
			return ERR_PTR(-ENOMEM);
		gem_obj = &cma_obj->base;
	}

	if (!gem_obj->funcs)
		gem_obj->funcs = &drm_gem_cma_default_funcs;

	if (private) {
		drm_gem_private_object_init(drm, gem_obj, size);

		/* Always use writecombine for dma-buf mappings */
		cma_obj->map_noncoherent = false;
	} else {
		ret = drm_gem_object_init(drm, gem_obj, size);
	}
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}
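
/**
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a CMA GEM object and allocates a contiguous chunk of
 * memory as backing store. The backing memory is mapped writecombine by
 * default; if the driver set &drm_gem_cma_object.map_noncoherent from its
 * &drm_driver.gem_create_object hook, a non-coherent cached mapping is used
 * instead.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */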
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
					      size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	cma_obj = __drm_gem_cma_create(drm, size, false);
	if (IS_ERR(cma_obj))
		return cma_obj;

	if (cma_obj->map_noncoherent) {
		cma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
						       &cma_obj->paddr,
						       DMA_TO_DEVICE,
						       GFP_KERNEL | __GFP_NOWARN);
	} else {
		cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
					      GFP_KERNEL | __GFP_NOWARN);
	}
	if (!cma_obj->vaddr) {
		drm_dbg(drm, "failed to allocate buffer with size %zu\n",
			size);
		ret = -ENOMEM;
		goto error;
	}

	return cma_obj;

error:
	drm_gem_object_put(&cma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
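
/**
 * drm_gem_cma_create_with_handle - allocate an object with the given size
 *     and return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a chunk of memory as
 * backing store. The object is then added to the handle table of @file_priv
 * and a handle to it is returned in @handle.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */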
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
			       struct drm_device *drm, size_t size,
			       uint32_t *handle)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	gem_obj = &cma_obj->base;

	/*
	 * Allocate an id in the idr table where the object is registered;
	 * the handle holds the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	/* Drop the reference from allocation - the handle holds it now. */
	drm_gem_object_put(gem_obj);
	if (ret)
		return ERR_PTR(ret);

	return cma_obj;
}
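
/**
 * drm_gem_cma_free - free resources associated with a CMA GEM object
 * @cma_obj: CMA GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up
 * the GEM object state and frees the memory used to store the object itself.
 * If the buffer is imported and the virtual address is set, it is released.
 */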
void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj)
{
	struct drm_gem_object *gem_obj = &cma_obj->base;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(cma_obj->vaddr);

	if (gem_obj->import_attach) {
		if (cma_obj->vaddr)
			dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
	} else if (cma_obj->vaddr) {
		if (cma_obj->map_noncoherent)
			dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size,
					     cma_obj->vaddr, cma_obj->paddr,
					     DMA_TO_DEVICE);
		else
			dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
				    cma_obj->vaddr, cma_obj->paddr);
	}

	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free);
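
/**
 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This raises the pitch and size arguments to the minimum required. It is an
 * internal helper that drivers can wrap to account for hardware with more
 * specific alignment requirements; it should not be used directly as the
 * &drm_driver.dumb_create callback.
 *
 * A hedged sketch of such a wrapper, for a hypothetical driver "foo" whose
 * scanout hardware requires the pitch to be a multiple of 64 bytes:
 *
 * .. code-block:: c
 *
 *     static int foo_dumb_create(struct drm_file *file_priv,
 *                                struct drm_device *drm,
 *                                struct drm_mode_create_dumb *args)
 *     {
 *             // Pre-align the pitch; the helper only ever raises it.
 *             args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
 *
 *             return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
 *     }
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */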
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
				     struct drm_device *drm,
				     struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_cma_object *cma_obj;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
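
/**
 * drm_gem_cma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can use this function directly as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_cma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */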
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
			    struct drm_device *drm,
			    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *cma_obj;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);

const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);

#ifndef CONFIG_MMU
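/**
 * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used on noMMU platforms to propose an address mapping for
 * a given buffer. It's intended to be used as a direct handler for the
 * &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * mapping address on success or a negative error code on failure.
 */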
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object hits a refcount of zero it proceeds to tear
		 * itself down; in the process it removes its VMA offset, which
		 * takes this same lookup lock. So if we find an object here
		 * whose refcount is already zero, it is being destroyed and
		 * will be freed as soon as we release the lock, and we must
		 * treat it as invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	cma_obj = to_drm_gem_cma_obj(obj);

	drm_gem_object_put(obj);

	return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
#endif
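
/**
 * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
 * @cma_obj: CMA GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 *
 * This function prints paddr and vaddr for use in e.g. debugfs output.
 */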
void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
			    struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
	drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
}
EXPORT_SYMBOL(drm_gem_cma_print_info);
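
/**
 * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a CMA GEM object
 * @cma_obj: CMA GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an
 * ERR_PTR()-encoded negative error code on failure.
 */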
struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj)
{
	struct drm_gem_object *obj = &cma_obj->base;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
			      cma_obj->paddr, obj->size);
	if (ret < 0)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);
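
/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory,
 * i.e. the scatter/gather table must describe a single contiguous range that
 * covers the whole buffer.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */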
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size, true);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);

	return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
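
/**
 * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
 *     address space
 * @cma_obj: CMA GEM object
 * @map: Returns the kernel virtual address of the CMA GEM object's backing
 *     store.
 *
 * Since CMA buffers are already mapped into the kernel virtual address space
 * at allocation time, this simply returns the cached virtual address.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */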
int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj,
		     struct iosys_map *map)
{
	iosys_map_set_vaddr(map, cma_obj->vaddr);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
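
/**
 * drm_gem_cma_mmap - memory-map an exported CMA GEM object
 * @cma_obj: CMA GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps the buffer into a userspace process's address space.
 * Unlike the fault-based GEM mmap path, the whole buffer is mapped up front
 * rather than faulted in on demand.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */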
int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &cma_obj->base;
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_DONTEXPAND;

	if (cma_obj->map_noncoherent) {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

		ret = dma_mmap_pages(cma_obj->base.dev->dev,
				     vma, vma->vm_end - vma->vm_start,
				     virt_to_page(cma_obj->vaddr));
	} else {
		ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
				  cma_obj->paddr, vma->vm_end - vma->vm_start);
	}
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
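
/**
 * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
 *     scatter/gather table and get the virtual address of the buffer
 * @dev: DRM device
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table using
 * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the
 * kernel virtual address. This ensures that a CMA GEM object always has its
 * virtual address set; the address is released when the object is freed.
 *
 * This function can be used as the &drm_driver.gem_prime_import_sg_table
 * callback for drivers that need the virtual address of imported buffers.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */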
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
				       struct dma_buf_attachment *attach,
				       struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj;
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap(attach->dmabuf, &map);
	if (ret) {
		DRM_ERROR("Failed to vmap PRIME buffer\n");
		return ERR_PTR(ret);
	}

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		dma_buf_vunmap(attach->dmabuf, &map);
		return obj;
	}

	cma_obj = to_drm_gem_cma_obj(obj);
	cma_obj->vaddr = map.vaddr;

	return obj;
}
EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);

MODULE_DESCRIPTION("DRM CMA memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL");