// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

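/*
 * Reserve an I/O virtual address range from the driver-wide drm_mm
 * allocator and map the object's scatter-gather table into the shared
 * IOMMU domain at that address.
 */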
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
				prot);
	/*
	 * Cast to ssize_t: iommu_map_sgtable() returns a negative errno on
	 * failure, which an unsigned comparison against base.size would miss.
	 */
	if (ret < (ssize_t)rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

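/* Undo rockchip_gem_iommu_map(): unmap the buffer and free its IOVA range. */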
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}

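/*
 * Allocate shmem-backed pages for the object and build a scatter-gather
 * table describing them.
 */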
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
					    rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sgtable_for_device() can be
	 * used to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * under arm64.
	 */
	for_each_sgtable_sg(rk_obj->sgt, s, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

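/*
 * Allocate backing pages and map them into the IOMMU so the buffer appears
 * contiguous to the device; optionally create a write-combined kernel
 * mapping as well.
 */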
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

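/*
 * Allocate a physically contiguous buffer through the DMA mapping API;
 * used when no IOMMU is available to remap scattered pages.
 */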
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer\n", obj->size);
		return -ENOMEM;
	}

	return 0;
}

static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}

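/* Map the object's individual shmem pages into userspace. */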
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);

	if (user_count == 0)
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, count);
}

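/* Map a buffer allocated by the DMA mapping API into userspace. */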
static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	/*
	 * We allocated a struct page table for rk_obj, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_flags &= ~VM_PFNMAP;

	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
	.free = rockchip_gem_free_object,
	.get_sg_table = rockchip_gem_prime_get_sg_table,
	.vmap = rockchip_gem_prime_vmap,
	.vunmap = rockchip_gem_prime_vunmap,
	.mmap = rockchip_drm_gem_object_mmap,
	.vm_ops = &drm_gem_cma_vm_ops,
};

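/* Allocate and initialize the GEM object itself, without backing storage. */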
static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	obj->funcs = &rockchip_gem_object_funcs;

	/* drm_gem_object_init() can fail to allocate the backing shmem file. */
	ret = drm_gem_object_init(drm, obj, size);
	if (ret) {
		kfree(rk_obj);
		return ERR_PTR(ret);
	}

	return rk_obj;
}

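/*
 * Allocate a GEM object together with its backing memory. With alloc_kmap,
 * the buffer also gets a kernel virtual mapping.
 */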
struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_gem_object_funcs)->free callback
 * function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sgtable(drm->dev, rk_obj->sgt,
					  DMA_BIDIRECTIONAL, 0);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * returns a struct rockchip_gem_object* on success or ERR_PTR values
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an id in the idr table, where the object is registered;
	 * the handle is what userspace uses to refer to the object.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required and
 * allocates a GEM object, returning a handle to it in args->handle.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * align to 64 bytes since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 *       the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages,
					     rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

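/* Adopt an imported sg table and map it through the IOMMU. */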
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

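/*
 * Map an imported sg table for DMA. Without an IOMMU the buffer must be
 * contiguous in DMA address space.
 */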
static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);

	if (err)
		return err;

	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

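/*
 * Import a dma-buf: wrap the attachment's sg table in a new GEM object,
 * mapping it through the IOMMU when one is present.
 */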
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

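/*
 * Create a kernel virtual mapping for the buffer: vmap() the pages when the
 * object is backed by them, otherwise reuse the kernel address returned by
 * the DMA allocation.
 */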
int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		void *vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				   pgprot_writecombine(PAGE_KERNEL));
		if (!vaddr)
			return -ENOMEM;
		iosys_map_set_vaddr(map, vaddr);
		return 0;
	}

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return -ENOMEM;
	iosys_map_set_vaddr(map, rk_obj->kvaddr);

	return 0;
}

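/* Tear down a mapping created by rockchip_gem_prime_vmap(). */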
void rockchip_gem_prime_vunmap(struct drm_gem_object *obj,
			       struct iosys_map *map)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(map->vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}