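/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 */
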
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device *dev;
	void *vaddr;
	unsigned long size;
	void *cookie;
	dma_addr_t dma_addr;
	unsigned long attrs;
	enum dma_data_direction dma_dir;
	struct sg_table *dma_sgt;
	struct frame_vector *vec;

	/* MMAP related */
	struct vb2_vmarea_handler handler;
	refcount_t refcount;
	struct sg_table *sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment *db_attach;

	struct vb2_buffer *vb;
	bool non_coherent_mem;
};
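
/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

/*
 * Return the number of bytes, counted from the first entry of the DMA-mapped
 * scatterlist, that form a single contiguous range in DMA address space.
 */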
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sgtable_dma_sg(sgt, s, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected += sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}
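
/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/
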
static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}
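
/*
 * vb2_dc_vaddr() may return NULL when no kernel mapping is available:
 * when dma_buf_vmap() fails for an attached DMABUF, when
 * dma_vmap_noncontiguous() fails for a non-coherent buffer, or when the
 * queue uses DMA_ATTR_NO_KERNEL_MAPPING for coherent buffers (in which
 * case no vaddr is ever set at allocation time).
 */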
static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (buf->vaddr)
		return buf->vaddr;

	if (buf->db_attach) {
		struct iosys_map map;

		if (!dma_buf_vmap(buf->db_attach->dmabuf, &map))
			buf->vaddr = map.vaddr;

		return buf->vaddr;
	}

	if (buf->non_coherent_mem)
		buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
						    buf->dma_sgt);
	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* This takes care of the DMABUF and user-enforced cache sync hints */
	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* Non-coherent MMAP only */
	if (buf->vaddr)
		flush_kernel_vmap_range(buf->vaddr, buf->size);

	/* For both USERPTR and non-coherent MMAP */
	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* This takes care of the DMABUF and user-enforced cache sync hints */
	if (buf->vb->skip_cache_sync_on_finish)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* Non-coherent MMAP only */
	if (buf->vaddr)
		invalidate_kernel_vmap_range(buf->vaddr, buf->size);

	/* For both USERPTR and non-coherent MMAP */
	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

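/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/
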
static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->non_coherent_mem) {
		if (buf->vaddr)
			dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
		dma_free_noncontiguous(buf->dev, buf->size,
				       buf->dma_sgt, buf->dma_dir);
	} else {
		if (buf->sgt_base) {
			sg_free_table(buf->sgt_base);
			kfree(buf->sgt_base);
		}
		dma_free_attrs(buf->dev, buf->size, buf->cookie,
			       buf->dma_addr, buf->attrs);
	}
	put_device(buf->dev);
	kfree(buf);
}

static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->cookie = dma_alloc_attrs(buf->dev,
				      buf->size,
				      &buf->dma_addr,
				      GFP_KERNEL | q->gfp_flags,
				      buf->attrs);
	if (!buf->cookie)
		return -ENOMEM;

	if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return 0;

	buf->vaddr = buf->cookie;
	return 0;
}

static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
					       buf->size,
					       buf->dma_dir,
					       GFP_KERNEL | q->gfp_flags,
					       buf->attrs);
	if (!buf->dma_sgt)
		return -ENOMEM;

	buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);

	/*
	 * For non-coherent buffers the kernel mapping is created on demand
	 * in vb2_dc_vaddr().
	 */
	return 0;
}

static void *vb2_dc_alloc(struct vb2_buffer *vb,
			  struct device *dev,
			  unsigned long size)
{
	struct vb2_dc_buf *buf;
	int ret;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->attrs = vb->vb2_queue->dma_attrs;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;
	buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;

	buf->size = size;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	if (buf->non_coherent_mem)
		ret = vb2_dc_alloc_non_coherent(buf);
	else
		ret = vb2_dc_alloc_coherent(buf);

	if (ret) {
		dev_err(dev, "dma alloc of size %lu failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	if (buf->non_coherent_mem)
		ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
					     buf->dma_sgt);
	else
		ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
				     buf->size, buf->attrs);
	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

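/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/
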
struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
				    struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/*
	 * Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
				     struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* Release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		/*
		 * Cache sync can be skipped here, as the vb2_dc memory is
		 * allocated from device coherent memory, which means the
		 * memory locations do not require any explicit cache
		 * maintenance prior or after being used by the device.
		 */
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		attach->dma_dir = DMA_NONE;
	}

	/*
	 * Map the scatterlist for the client with the new direction. No cache
	 * sync is needed, see the comment in vb2_dc_dmabuf_ops_detach().
	 */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
				    struct sg_table *sgt,
				    enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static int
vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				   enum dma_data_direction direction)
{
	return 0;
}

static int
vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				 enum dma_data_direction direction)
{
	return 0;
}

static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map)
{
	struct vb2_dc_buf *buf;
	void *vaddr;

	buf = dbuf->priv;
	vaddr = vb2_dc_vaddr(buf->vb, buf);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
				  struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	if (buf->non_coherent_mem)
		return buf->dma_sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
					 void *buf_priv,
					 unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

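/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/
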
static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				unsigned long vaddr, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert the frame vector to pages. Check whether
		 * the memory is physically contiguous and use a direct
		 * resource mapping instead.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
					offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
		       contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->non_coherent_mem = 1;

out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

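/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
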
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				  struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->vb = vb;

	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

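/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/
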
const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc = vb2_dc_alloc,
	.put = vb2_dc_put,
	.get_dmabuf = vb2_dc_get_dmabuf,
	.cookie = vb2_dc_cookie,
	.vaddr = vb2_dc_vaddr,
	.mmap = vb2_dc_mmap,
	.get_userptr = vb2_dc_get_userptr,
	.put_userptr = vb2_dc_put_userptr,
	.prepare = vb2_dc_prepare,
	.finish = vb2_dc_finish,
	.map_dmabuf = vb2_dc_map_dmabuf,
	.unmap_dmabuf = vb2_dc_unmap_dmabuf,
	.attach_dmabuf = vb2_dc_attach_dmabuf,
	.detach_dmabuf = vb2_dc_detach_dmabuf,
	.num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

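/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping all the pages of a buffer into a single DMA-contiguous
 * chunk, the device must have its DMA max segment size set to a value
 * larger than the buffer size; otherwise the DMA-mapping subsystem will
 * split the mapping into max-segment-sized chunks. This helper raises the
 * device's DMA max segment size if it is currently smaller than @size.
 *
 * The device is expected to have its dma_parms structure allocated
 * already; -ENODEV is returned if it does not.
 */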
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
		return -ENODEV;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);