// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for DMA ops implementations.  These generally rely on the fact that
 * the allocated memory contains normal pages in the direct kernel mapping.
 */
#include <linux/dma-map-ops.h>

static struct page *dma_common_vaddr_to_page(void *cpu_addr)
{
	if (is_vmalloc_addr(cpu_addr))
		return vmalloc_to_page(cpu_addr);
	return virt_to_page(cpu_addr);
}
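
/*
 * Example (sketch): both kinds of address the helpers below may see are
 * covered here.  A buffer in the kernel direct map satisfies virt_to_page(),
 * while a remapped coherent buffer (for instance one set up via
 * dma_common_contiguous_remap()) is a vmalloc-space address and needs
 * vmalloc_to_page():
 *
 *	struct page *pg = alloc_page(GFP_KERNEL);
 *	void *va = page_address(pg);		// direct-map address
 *	WARN_ON(dma_common_vaddr_to_page(va) != pg);
 */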

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_common_vaddr_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
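
/*
 * Example (sketch): implementations whose coherent allocations stay
 * physically contiguous can point the matching dma_map_ops callbacks
 * straight at these helpers.  "example_dma_ops" is a made-up name for
 * illustration:
 *
 *	static const struct dma_map_ops example_dma_ops = {
 *		.get_sgtable	= dma_common_get_sgtable,
 *		.mmap		= dma_common_mmap,
 *	};
 */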

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	struct page *page = dma_common_vaddr_to_page(cpu_addr);
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(page) + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* CONFIG_MMU */
}
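
/*
 * Worked example for the bounds check in dma_common_mmap() (illustrative
 * numbers): a 16 KiB buffer on 4 KiB pages gives count = 4.  Mapping two
 * pages at vm_pgoff 2 passes (2 < 4 and 2 <= 4 - 2); vm_pgoff 4, or three
 * pages at vm_pgoff 2, trips the check and returns -ENXIO before
 * remap_pfn_range() is reached.
 */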

struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct page *page;

	page = dma_alloc_contiguous(dev, size, gfp);
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));
	if (!page)
		return NULL;

	*dma_handle = ops->map_page(dev, page, 0, size, dir,
			DMA_ATTR_SKIP_CPU_SYNC);
	if (*dma_handle == DMA_MAPPING_ERROR) {
		dma_free_contiguous(dev, page, size);
		return NULL;
	}

	memset(page_address(page), 0, size);
	return page;
}
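
/*
 * Example (sketch): drivers normally reach dma_common_alloc_pages()
 * indirectly through the dma_alloc_pages()/dma_free_pages() entry points
 * rather than calling it directly, roughly:
 *
 *	dma_addr_t dma;
 *	struct page *p = dma_alloc_pages(dev, SZ_16K, &dma, DMA_TO_DEVICE,
 *					 GFP_KERNEL);
 *	if (p)
 *		dma_free_pages(dev, SZ_16K, p, dma, DMA_TO_DEVICE);
 */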

void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->unmap_page)
		ops->unmap_page(dev, dma_handle, size, dir,
				DMA_ATTR_SKIP_CPU_SYNC);
	dma_free_contiguous(dev, page, size);
}
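
/*
 * Usage note (sketch): teardown mirrors dma_common_alloc_pages() above;
 * callers pass back exactly the size, page and dma_handle that the
 * allocation returned, as in the dma_alloc_pages()/dma_free_pages()
 * example above.
 */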