// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 */
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

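/* One dma_heap device backed by a single CMA area. */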
struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

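/*
 * Per-buffer state: the contiguous CMA allocation, a page array used for
 * mmap/vmap, and bookkeeping for attachments and kernel mappings.
 */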
struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};

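/* Per-attachment state: an sg_table for this device plus its mapped state. */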
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

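/*
 * Build a per-attachment sg_table from the buffer's page array and track the
 * attachment so the CPU-access callbacks can sync it later.
 */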
static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

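/* DMA-map/unmap the attachment's sg_table on behalf of the importing device. */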
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

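/*
 * CPU access bracketing: sync every currently mapped attachment (and any
 * kernel vmap of the buffer) around CPU reads and writes.
 */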
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

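/* mmap fault handler: return the backing page for the faulting offset. */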
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buffer->pages[vmf->pgoff];
	get_page(vmf->page);

	return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

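/*
 * Kernel mappings: vmap the page array on first use and refcount further
 * requests so the mapping is torn down only when the last user is gone.
 */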
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}
	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

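/* Release: drop any leaked kernel mapping, then return the pages to CMA. */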
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

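/*
 * Allocate a physically contiguous buffer from the CMA area, zero it, build
 * the page array used by the dma-buf ops above, and export it as a dma-buf.
 */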
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by a fatal signal.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

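/* Register one dma_heap for the given CMA area, named after that area. */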
static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

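/* Expose the system's default CMA area (if any) as a dma-buf heap at init. */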
static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");