// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct dma_heap *sys_heap;

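/*
 * Per-buffer bookkeeping: the backing pages live in sg_table, attachments
 * holds one dma_heap_attachment per device attached to the dma-buf, and
 * vmap_cnt/vaddr refcount a shared kernel mapping of the buffer.
 */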
struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
};

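/*
 * Per-attachment private data: a device-private copy of the buffer's
 * scatter list, plus a flag tracking whether it is currently DMA-mapped.
 */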
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool mapped;
};

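/*
 * GFP flags for each allocation order, largest first. The highest order
 * avoids reclaim and failure warnings entirely, since we can always fall
 * back to smaller pages; the middle order only suppresses warnings; the
 * order-0 fallback may block and warn as usual.
 */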
#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)

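/*
 * Duplicate the buffer's scatter list so that each attachment gets its own
 * table to DMA-map; the underlying pages are shared, not copied.
 */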
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

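/* Give each new attachment its own copy of the scatter list. */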
static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(&buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}

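/*
 * Map the attachment's private scatter list into the device's DMA address
 * space, remembering that it is mapped so the CPU-access syncs cover it.
 */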
static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

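/*
 * CPU-access bracketing: sync every currently mapped attachment (and any
 * kernel vmap of the buffer) so CPU reads see device writes and vice versa.
 */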
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

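/*
 * Map the buffer into userspace one PAGE_SIZE pfn at a time, honouring
 * vma->vm_pgoff as a page offset into the buffer.
 */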
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	struct sg_page_iter piter;
	int ret;

	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
		struct page *page = sg_page_iter_page(&piter);

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

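/*
 * Build a flat array of the buffer's pages and vmap() them into one
 * virtually contiguous kernel mapping.
 */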
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

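/*
 * All fds and attachments are gone: hand the pages back to the page
 * allocator. Each sg entry holds one (possibly compound) allocation.
 */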
static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = &buffer->sg_table;
	for_each_sgtable_sg(table, sg, i) {
		struct page *page = sg_page(sg);

		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
	kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};

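/*
 * Allocate the largest chunk that is both no larger than the remaining
 * size and no larger than max_order, walking orders[] from largest to
 * smallest so big buffers are backed by as few allocations as possible.
 */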
static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_pages(order_flags[i], orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

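/*
 * Allocate a buffer of @len bytes for @heap and wrap it in a dma-buf.
 * Pages are gathered largest-order first; once an order fails we never
 * retry it (max_order only shrinks), keeping allocation time bounded.
 */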
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    unsigned long fd_flags,
					    unsigned long heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free_buffer;
		}

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
};

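/*
 * Register the heap; it shows up as /dev/dma_heap/system. A minimal
 * userspace sketch of allocating from it through the dma-heap UAPI
 * (illustrative only, error handling omitted):
 *
 *	struct dma_heap_allocation_data data = {
 *		.len = 4096,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	int heap_fd = open("/dev/dma_heap/system", O_RDWR);
 *
 *	ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *	// data.fd now holds the exported dma-buf file descriptor
 */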
static int system_heap_create(void)
{
	struct dma_heap_export_info exp_info;

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = NULL;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	return 0;
}
module_init(system_heap_create);
MODULE_LICENSE("GPL v2");