/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

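/*
 * Per-buffer state. A single instance backs each plane, whether the
 * memory was vmalloc()ed here (MMAP), pinned from userspace (USERPTR)
 * or imported from another exporter (DMABUF).
 */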
struct vb2_vmalloc_buf {
    void                *vaddr;
    struct frame_vector     *vec;
    enum dma_data_direction     dma_dir;
    unsigned long           size;
    refcount_t          refcount;
    struct vb2_vmarea_handler   handler;
    struct dma_buf          *dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

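/*
 * MMAP mode: allocate @size bytes of zeroed, user-mappable memory with
 * vmalloc_user() and take the initial reference on the buffer.
 */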
static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
                   unsigned long size)
{
    struct vb2_vmalloc_buf *buf;

    buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
    if (!buf)
        return ERR_PTR(-ENOMEM);

    buf->size = size;
    buf->vaddr = vmalloc_user(buf->size);
    if (!buf->vaddr) {
        pr_debug("vmalloc of size %lu failed\n", buf->size);
        kfree(buf);
        return ERR_PTR(-ENOMEM);
    }

    buf->dma_dir = vb->vb2_queue->dma_dir;
    buf->handler.refcount = &buf->refcount;
    buf->handler.put = vb2_vmalloc_put;
    buf->handler.arg = buf;

    refcount_set(&buf->refcount, 1);
    return buf;
}

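/* Drop one reference; free the buffer once the last user is gone. */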
static void vb2_vmalloc_put(void *buf_priv)
{
    struct vb2_vmalloc_buf *buf = buf_priv;

    if (refcount_dec_and_test(&buf->refcount)) {
        vfree(buf->vaddr);
        kfree(buf);
    }
}

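/*
 * USERPTR mode: pin the user pages backing @vaddr. When page structs
 * are available, map them into the kernel with vm_map_ram(); otherwise
 * the PFN range must be physically contiguous and is ioremap()ed instead.
 */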
static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
                     unsigned long vaddr, unsigned long size)
{
    struct vb2_vmalloc_buf *buf;
    struct frame_vector *vec;
    int n_pages, offset, i;
    int ret = -ENOMEM;

    buf = kzalloc(sizeof(*buf), GFP_KERNEL);
    if (!buf)
        return ERR_PTR(-ENOMEM);

    buf->dma_dir = vb->vb2_queue->dma_dir;
    offset = vaddr & ~PAGE_MASK;
    buf->size = size;
    vec = vb2_create_framevec(vaddr, size);
    if (IS_ERR(vec)) {
        ret = PTR_ERR(vec);
        goto fail_pfnvec_create;
    }
    buf->vec = vec;
    n_pages = frame_vector_count(vec);
    if (frame_vector_to_pages(vec) < 0) {
        unsigned long *nums = frame_vector_pfns(vec);

        /*
         * We cannot get page pointers for these pfns. Check memory is
         * physically contiguous and use direct mapping.
         */
        for (i = 1; i < n_pages; i++)
            if (nums[i-1] + 1 != nums[i])
                goto fail_map;
        buf->vaddr = (__force void *)
            ioremap(__pfn_to_phys(nums[0]), size + offset);
    } else {
        buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
    }

    if (!buf->vaddr)
        goto fail_map;
    buf->vaddr += offset;
    return buf;

fail_map:
    vb2_destroy_framevec(vec);
fail_pfnvec_create:
    kfree(buf);

    return ERR_PTR(ret);
}

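/*
 * Undo vb2_vmalloc_get_userptr(): drop the kernel mapping, mark the
 * pages dirty if the device may have written to them, and unpin them.
 */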
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
    struct vb2_vmalloc_buf *buf = buf_priv;
    unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
    unsigned int i;
    struct page **pages;
    unsigned int n_pages;

    if (!buf->vec->is_pfns) {
        n_pages = frame_vector_count(buf->vec);
        pages = frame_vector_pages(buf->vec);
        if (vaddr)
            vm_unmap_ram((void *)vaddr, n_pages);
        if (buf->dma_dir == DMA_FROM_DEVICE ||
            buf->dma_dir == DMA_BIDIRECTIONAL)
            for (i = 0; i < n_pages; i++)
                set_page_dirty_lock(pages[i]);
    } else {
        iounmap((__force void __iomem *)buf->vaddr);
    }
    vb2_destroy_framevec(buf->vec);
    kfree(buf);
}

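/* Return the kernel mapping of the plane, if one exists. */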
static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
    struct vb2_vmalloc_buf *buf = buf_priv;

    if (!buf->vaddr) {
        pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
        return NULL;
    }

    return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
    struct vb2_vmalloc_buf *buf = buf_priv;

    return refcount_read(&buf->refcount);
}

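/*
 * MMAP mode: remap the vmalloc area into the calling process and hook
 * up vb2_common_vm_ops so the VMA holds a reference on the buffer for
 * as long as it is mapped.
 */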
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
    struct vb2_vmalloc_buf *buf = buf_priv;
    int ret;

    if (!buf) {
        pr_err("No memory to map\n");
        return -EINVAL;
    }

    ret = remap_vmalloc_range(vma, buf->vaddr, 0);
    if (ret) {
        pr_err("Remapping vmalloc memory, error: %d\n", ret);
        return ret;
    }

    /*
     * Make sure that vm_areas for 2 buffers won't be merged together
     */
    vma->vm_flags       |= VM_DONTEXPAND;

    /*
     * Use common vm_area operations to track buffer refcount.
     */
    vma->vm_private_data    = &buf->handler;
    vma->vm_ops     = &vb2_common_vm_ops;

    vma->vm_ops->open(vma);

    return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

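/*
 * Per-attachment state: a scatterlist describing the vmalloc pages and
 * the direction it is currently mapped in (DMA_NONE while unmapped).
 */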
struct vb2_vmalloc_attachment {
    struct sg_table sgt;
    enum dma_data_direction dma_dir;
};

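/*
 * Build the attachment's scatterlist, one entry per page, translating
 * each vmalloc address back to its struct page.
 */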
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
    struct dma_buf_attachment *dbuf_attach)
{
    struct vb2_vmalloc_attachment *attach;
    struct vb2_vmalloc_buf *buf = dbuf->priv;
    int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
    struct sg_table *sgt;
    struct scatterlist *sg;
    void *vaddr = buf->vaddr;
    int ret;
    int i;

    attach = kzalloc(sizeof(*attach), GFP_KERNEL);
    if (!attach)
        return -ENOMEM;

    sgt = &attach->sgt;
    ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
    if (ret) {
        kfree(attach);
        return ret;
    }
    for_each_sgtable_sg(sgt, sg, i) {
        struct page *page = vmalloc_to_page(vaddr);

        if (!page) {
            sg_free_table(sgt);
            kfree(attach);
            return -ENOMEM;
        }
        sg_set_page(sg, page, PAGE_SIZE, 0);
        vaddr += PAGE_SIZE;
    }

    attach->dma_dir = DMA_NONE;
    dbuf_attach->priv = attach;
    return 0;
}

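/* Tear down an attachment: unmap it if still mapped, free the table. */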
static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
    struct dma_buf_attachment *db_attach)
{
    struct vb2_vmalloc_attachment *attach = db_attach->priv;
    struct sg_table *sgt;

    if (!attach)
        return;

    sgt = &attach->sgt;

    /* release the scatterlist cache */
    if (attach->dma_dir != DMA_NONE)
        dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
    sg_free_table(sgt);
    kfree(attach);
    db_attach->priv = NULL;
}

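/*
 * Map the attachment's scatterlist for the importing device. The result
 * is cached: a request in the direction that is already mapped returns
 * the existing table without going through the DMA API again.
 */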
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
    struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
    struct vb2_vmalloc_attachment *attach = db_attach->priv;
    /* stealing dmabuf mutex to serialize map/unmap operations */
    struct mutex *lock = &db_attach->dmabuf->lock;
    struct sg_table *sgt;

    mutex_lock(lock);

    sgt = &attach->sgt;
    /* return previously mapped sg table */
    if (attach->dma_dir == dma_dir) {
        mutex_unlock(lock);
        return sgt;
    }

    /* release any previous cache */
    if (attach->dma_dir != DMA_NONE) {
        dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
        attach->dma_dir = DMA_NONE;
    }

    /* mapping to the client with new direction */
    if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
        pr_err("failed to map scatterlist\n");
        mutex_unlock(lock);
        return ERR_PTR(-EIO);
    }

    attach->dma_dir = dma_dir;

    mutex_unlock(lock);

    return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
    struct sg_table *sgt, enum dma_data_direction dma_dir)
{
    /* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
    /* drop reference obtained in vb2_vmalloc_get_dmabuf */
    vb2_vmalloc_put(dbuf->priv);
}

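/* Expose the buffer's existing kernel mapping to the importer. */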
static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf,
                       struct iosys_map *map)
{
    struct vb2_vmalloc_buf *buf = dbuf->priv;

    iosys_map_set_vaddr(map, buf->vaddr);

    return 0;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
    struct vm_area_struct *vma)
{
    return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
    .attach = vb2_vmalloc_dmabuf_ops_attach,
    .detach = vb2_vmalloc_dmabuf_ops_detach,
    .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
    .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
    .vmap = vb2_vmalloc_dmabuf_ops_vmap,
    .mmap = vb2_vmalloc_dmabuf_ops_mmap,
    .release = vb2_vmalloc_dmabuf_ops_release,
};

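/*
 * Export the buffer as a dma-buf. The exported object holds its own
 * reference on the vb2 buffer, dropped in vb2_vmalloc_dmabuf_ops_release().
 */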
static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb,
                          void *buf_priv,
                          unsigned long flags)
{
    struct vb2_vmalloc_buf *buf = buf_priv;
    struct dma_buf *dbuf;
    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

    exp_info.ops = &vb2_vmalloc_dmabuf_ops;
    exp_info.size = buf->size;
    exp_info.flags = flags;
    exp_info.priv = buf;

    if (WARN_ON(!buf->vaddr))
        return NULL;

    dbuf = dma_buf_export(&exp_info);
    if (IS_ERR(dbuf))
        return NULL;

    /* dmabuf keeps reference to vb2 buffer */
    refcount_inc(&buf->refcount);

    return dbuf;
}
#endif /* CONFIG_HAS_DMA */


/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

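/*
 * DMABUF mode: create a kernel mapping of the imported buffer with
 * dma_buf_vmap() so that vb2_vmalloc_vaddr() also works for it.
 */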
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
    struct vb2_vmalloc_buf *buf = mem_priv;
    struct iosys_map map;
    int ret;

    ret = dma_buf_vmap(buf->dbuf, &map);
    if (ret)
        return -EFAULT;
    buf->vaddr = map.vaddr;

    return 0;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
    struct vb2_vmalloc_buf *buf = mem_priv;
    struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

    dma_buf_vunmap(buf->dbuf, &map);
    buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
    struct vb2_vmalloc_buf *buf = mem_priv;
    struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

    if (buf->vaddr)
        dma_buf_vunmap(buf->dbuf, &map);

    kfree(buf);
}

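/*
 * DMABUF mode: wrap an imported dma-buf. No kernel mapping is created
 * here; that is deferred until vb2_vmalloc_map_dmabuf().
 */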
static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
                       struct device *dev,
                       struct dma_buf *dbuf,
                       unsigned long size)
{
    struct vb2_vmalloc_buf *buf;

    if (dbuf->size < size)
        return ERR_PTR(-EFAULT);

    buf = kzalloc(sizeof(*buf), GFP_KERNEL);
    if (!buf)
        return ERR_PTR(-ENOMEM);

    buf->dbuf = dbuf;
    buf->dma_dir = vb->vb2_queue->dma_dir;
    buf->size = size;

    return buf;
}


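/*
 * The allocator's entry points, consumed by the vb2 core. A driver opts
 * in by pointing its queue at this table before calling vb2_queue_init();
 * a minimal sketch (q being the driver's struct vb2_queue):
 *
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	ret = vb2_queue_init(q);
 */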
const struct vb2_mem_ops vb2_vmalloc_memops = {
    .alloc      = vb2_vmalloc_alloc,
    .put        = vb2_vmalloc_put,
    .get_userptr    = vb2_vmalloc_get_userptr,
    .put_userptr    = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
    .get_dmabuf = vb2_vmalloc_get_dmabuf,
#endif
    .map_dmabuf = vb2_vmalloc_map_dmabuf,
    .unmap_dmabuf   = vb2_vmalloc_unmap_dmabuf,
    .attach_dmabuf  = vb2_vmalloc_attach_dmabuf,
    .detach_dmabuf  = vb2_vmalloc_detach_dmabuf,
    .vaddr      = vb2_vmalloc_vaddr,
    .mmap       = vb2_vmalloc_mmap,
    .num_users  = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);