/*
 * videobuf2-dma-sg.c - DMA scatter/gather memory allocator for videobuf2
 *
 * Author: Andrzej Pietrasiewicz
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

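/*
 * Per-buffer state for the dma-sg allocator. A buffer is described by an
 * array of pages plus an sg_table; how the pages were obtained depends on
 * the memory model (MMAP, USERPTR or DMABUF).
 */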
struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	struct frame_vector		*vec;
	int				offset;
	enum dma_data_direction	dma_dir;
	struct sg_table			sg_table;
	/*
	 * This points to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;

	struct dma_buf_attachment	*db_attach;

	struct vb2_buffer		*vb;
};

static void vb2_dma_sg_put(void *buf_priv);

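/*
 * Allocate the buf->num_pages pages backing an MMAP buffer. The highest order
 * that still fits the remaining size is tried first, falling back to smaller
 * orders (down to single pages) when higher-order allocations fail, so the
 * resulting scatterlist is as compact as possible.
 */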
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	unsigned long size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				/* Out of memory: undo what was allocated so far */
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

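/*
 * MMAP memory model: allocate the pages, build an sg_table covering them and
 * map it for DMA. The returned pointer is the allocator-private buffer state.
 */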
static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
			      unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;

	if (WARN_ON(!dev) || WARN_ON(!size))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	/*
	 * NOTE: dma-sg allocates memory using the page allocator directly, so
	 * there is no memory consistency guarantee, hence dma-sg ignores DMA
	 * attributes passed from the upper layer.
	 */
	buf->pages = kvcalloc(buf->num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;
	buf->vb = vb;

	refcount_set(&buf->refcount, 1);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kvfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

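/*
 * Drop one reference to an MMAP buffer; once the last user is gone, release
 * the DMA mapping, the kernel mapping, the sg_table, the pages and the
 * device reference.
 */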
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (refcount_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kvfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

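/*
 * Cache synchronisation: prepare() hands the buffer to the device before DMA,
 * finish() gives it back to the CPU afterwards. Both are skipped when the
 * buffer asked for it via the skip_cache_sync_on_* flags.
 */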
static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_finish)
		return;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

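/*
 * USERPTR memory model: pin the userspace pages with a frame vector, build an
 * sg_table for them (honouring the sub-page offset) and map it for DMA.
 */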
static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
				    unsigned long vaddr, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	buf->vb = vb;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	/* pages the device may have written to must be marked dirty */
	if (buf->dma_dir == DMA_FROM_DEVICE ||
	    buf->dma_dir == DMA_BIDIRECTIONAL)
		while (--i >= 0)
			set_page_dirty_lock(buf->pages[i]);
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

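/*
 * Return a kernel virtual address for the buffer, creating the mapping on
 * first use: vmap the dma-buf for DMABUF buffers, vm_map_ram() the pages
 * otherwise. The USERPTR sub-page offset is added to the returned address.
 */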
static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct iosys_map map;
	int ret;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach) {
			ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
			buf->vaddr = ret ? NULL : map.vaddr;
		} else {
			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
		}
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

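/*
 * Map the buffer's pages into a userspace VMA and hook up the common vb2 vm
 * ops so the mapping holds a reference on the buffer for its lifetime.
 */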
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int err;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	err = vm_map_pages(vma, buf->pages, buf->num_pages);
	if (err) {
		printk(KERN_ERR "Remapping memory, error: %d\n", err);
		return err;
	}

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

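/*
 * Attach an importer: each attachment gets its own copy of the exporter's
 * scatterlist so it can be mapped independently of other importers.
 */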
static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/*
	 * Copy the buf->dma_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

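/*
 * Map the attachment's copy of the scatterlist for the importing device. An
 * existing mapping is reused when the requested direction matches; otherwise
 * it is torn down and the scatterlist is remapped with the new direction.
 */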
static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static int
vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				       enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int
vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				     enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf,
				      struct iosys_map *map)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	iosys_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

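/*
 * Export an MMAP buffer as a dma-buf. The exported dma-buf holds an extra
 * reference on the vb2 buffer, dropped again from the dma-buf release op.
 */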
static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb,
					     void *buf_priv,
					     unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

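/*
 * DMABUF memory model: map_dmabuf/unmap_dmabuf pin and unpin the imported
 * scatterlist around buffer use; attach_dmabuf/detach_dmabuf manage the
 * dma-buf attachment itself.
 */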
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

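/*
 * Attach to an externally allocated dma-buf. The scatterlist is not mapped
 * here; that happens later in vb2_dma_sg_map_dmabuf().
 */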
static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				      struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;

	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;
	buf->vb = vb;

	return buf;
}

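/*
 * The "cookie" of a dma-sg buffer is its sg_table, so drivers can obtain it
 * through vb2_plane_cookie() (wrapped by vb2_dma_sg_plane_desc()).
 */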
static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.prepare	= vb2_dma_sg_prepare,
	.finish		= vb2_dma_sg_finish,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);