#include <linux/vmalloc.h>

#include "ipu3.h"
#include "ipu3-css-pool.h"
#include "ipu3-mmu.h"
#include "ipu3-dmamap.h"

/*
 * Free a buffer allocated by imgu_dmamap_alloc_buffer().
 */
static void imgu_dmamap_free_buffer(struct page **pages,
				    size_t size)
{
	int count = size >> PAGE_SHIFT;

	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

/*
 * Based on the implementation of __iommu_dma_alloc_pages()
 * defined in drivers/iommu/dma-iommu.c.
 */
static struct page **imgu_dmamap_alloc_buffer(size_t size, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, count = size >> PAGE_SHIFT;
	unsigned int order_mask = 1;
	const gfp_t high_order_gfp = __GFP_NOWARN | __GFP_NORETRY;

	/* Allocate the array of page pointers */
	pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);

	if (!pages)
		return NULL;

	gfp |= __GFP_HIGHMEM | __GFP_ZERO;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Try the highest order allowed by order_mask and the
		 * remaining page count, falling back to lower orders on
		 * failure.  With order_mask fixed to 1, only order-0
		 * pages are attempted.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | high_order_gfp : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			}

			__free_pages(page, order);
		}
		if (!page) {
			imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}

	return pages;
}

/**
 * imgu_dmamap_alloc - allocate and map a buffer into KVA
 * @imgu: struct imgu_device pointer
 * @map: struct to store mapping variables
 * @len: size required
 *
 * Returns:
 *  KVA on success
 *  %NULL on failure
 */
void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
			size_t len)
{
	unsigned long shift = iova_shift(&imgu->iova_domain);
	struct device *dev = &imgu->pci_dev->dev;
	size_t size = PAGE_ALIGN(len);
	int count = size >> PAGE_SHIFT;
	struct page **pages;
	dma_addr_t iovaddr;
	struct iova *iova;
	int i, rval;

	dev_dbg(dev, "%s: allocating %zu\n", __func__, size);

	iova = alloc_iova(&imgu->iova_domain, size >> shift,
			  imgu->mmu->aperture_end >> shift, 0);
	if (!iova)
		return NULL;

	pages = imgu_dmamap_alloc_buffer(size, GFP_KERNEL);
	if (!pages)
		goto out_free_iova;

	/* Call the MMU driver to set up the page table entries */
	iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
	for (i = 0; i < count; ++i) {
		rval = imgu_mmu_map(imgu->mmu, iovaddr,
				    page_to_phys(pages[i]), PAGE_SIZE);
		if (rval)
			goto out_unmap;

		iovaddr += PAGE_SIZE;
	}

	map->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
	if (!map->vaddr)
		goto out_unmap;

	map->pages = pages;
	map->size = size;
	map->daddr = iova_dma_addr(&imgu->iova_domain, iova);

	dev_dbg(dev, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__,
		size, &map->daddr, map->vaddr);

	return map->vaddr;

out_unmap:
	imgu_dmamap_free_buffer(pages, size);
	imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
		       i * PAGE_SIZE);

out_free_iova:
	__free_iova(&imgu->iova_domain, iova);

	return NULL;
}

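/*
 * Unmap the buffer mapped at map->daddr and release its IOVA range.
 * The backing pages themselves are not freed here.
 */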
void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map)
{
	struct iova *iova;

	iova = find_iova(&imgu->iova_domain,
			 iova_pfn(&imgu->iova_domain, map->daddr));
	if (WARN_ON(!iova))
		return;

	imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
		       iova_size(iova) << iova_shift(&imgu->iova_domain));

	__free_iova(&imgu->iova_domain, iova);
}

/*
 * Counterpart of imgu_dmamap_alloc()
 */
void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map)
{
	dev_dbg(&imgu->pci_dev->dev, "%s: freeing %zu @ IOVA %pad @ VA %p\n",
		__func__, map->size, &map->daddr, map->vaddr);

	if (!map->vaddr)
		return;

	imgu_dmamap_unmap(imgu, map);

	vunmap(map->vaddr);
	imgu_dmamap_free_buffer(map->pages, map->size);
	map->vaddr = NULL;
}

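/*
 * Map a scatter-gather list into a contiguous IOVA range.  Every entry
 * must start at offset 0, and every entry except the last must have a
 * page-aligned length.
 */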
int imgu_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
		       int nents, struct imgu_css_map *map)
{
	unsigned long shift = iova_shift(&imgu->iova_domain);
	struct scatterlist *sg;
	struct iova *iova;
	size_t size = 0;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		if (sg->offset)
			return -EINVAL;

		if (i != nents - 1 && !PAGE_ALIGNED(sg->length))
			return -EINVAL;

		size += sg->length;
	}

	size = iova_align(&imgu->iova_domain, size);
	dev_dbg(&imgu->pci_dev->dev, "dmamap: mapping sg %d entries, %zu pages\n",
		nents, size >> shift);

	iova = alloc_iova(&imgu->iova_domain, size >> shift,
			  imgu->mmu->aperture_end >> shift, 0);
	if (!iova)
		return -ENOMEM;

	dev_dbg(&imgu->pci_dev->dev, "dmamap: iova low pfn %lu, high pfn %lu\n",
		iova->pfn_lo, iova->pfn_hi);

	if (imgu_mmu_map_sg(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
			    sglist, nents) < size)
		goto out_fail;

	memset(map, 0, sizeof(*map));
	map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
	map->size = size;

	return 0;

out_fail:
	__free_iova(&imgu->iova_domain, iova);

	return -EFAULT;
}

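/*
 * Initialize the IOVA domain used for IMGU DMA mappings.
 */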
int imgu_dmamap_init(struct imgu_device *imgu)
{
	unsigned long order, base_pfn;
	int ret = iova_cache_get();

	if (ret)
		return ret;

	order = __ffs(IPU3_PAGE_SIZE);
	base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
	init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);

	return 0;
}

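/*
 * Tear down the IOVA domain set up by imgu_dmamap_init().
 */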
void imgu_dmamap_exit(struct imgu_device *imgu)
{
	put_iova_domain(&imgu->iova_domain);
	iova_cache_put();
}