#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>

#include <asm/page.h>
#include <asm/unaligned.h>

#include <uapi/linux/misc/bcm_vk.h>

#include "bcm_vk.h"
#include "bcm_vk_msg.h"
#include "bcm_vk_sg.h"

/* Cap the size of a single SGL chunk at 16M */
#define BCM_VK_MAX_SGL_CHUNK SZ_16M

static int bcm_vk_dma_alloc(struct device *dev,
			    struct bcm_vk_dma *dma,
			    int dir,
			    struct _vk_data *vkdata);
static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma);
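
/* Define BCM_VK_DUMP_SGLIST to dump the generated SG list via dev_dbg() */
/* #define BCM_VK_DUMP_SGLIST */
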
static int bcm_vk_dma_alloc(struct device *dev,
			    struct bcm_vk_dma *dma,
			    int direction,
			    struct _vk_data *vkdata)
{
	dma_addr_t addr, sg_addr;
	int err;
	int i;
	int offset;
	u32 size;
	u32 remaining_size;
	u32 transfer_size;
	u64 data;
	unsigned long first, last;
	struct _vk_data *sgdata;

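	/* Get the 64-bit user address */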
	data = get_unaligned(&vkdata->address);

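	/* Offset into the first page */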
	offset = offset_in_page(data);

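	/* Calculate the number of pages spanned by the user buffer */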
	first = (data & PAGE_MASK) >> PAGE_SHIFT;
	last = ((data + vkdata->size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	dma->nr_pages = last - first + 1;

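	/* Allocate the array of page pointers */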
	dma->pages = kmalloc_array(dma->nr_pages,
				   sizeof(struct page *),
				   GFP_KERNEL);
	if (!dma->pages)
		return -ENOMEM;

	dev_dbg(dev, "Alloc DMA Pages [0x%llx+0x%x => %d pages]\n",
		data, vkdata->size, dma->nr_pages);

	dma->direction = direction;

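	/* Pin the user pages in memory */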
	err = get_user_pages_fast(data & PAGE_MASK,
				  dma->nr_pages,
				  direction == DMA_FROM_DEVICE,
				  dma->pages);
	if (err != dma->nr_pages) {
		dma->nr_pages = (err >= 0) ? err : 0;
		dev_err(dev, "get_user_pages_fast, err=%d [%d]\n",
			err, dma->nr_pages);
		return err < 0 ? err : -EINVAL;
	}

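	/* Max sglist size: one entry per mapped page plus the header words */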
	dma->sglen = (dma->nr_pages * sizeof(*sgdata)) +
		     (sizeof(u32) * SGLIST_VKDATA_START);

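	/* Allocate the sglist from DMA-coherent memory */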
	dma->sglist = dma_alloc_coherent(dev,
					 dma->sglen,
					 &dma->handle,
					 GFP_KERNEL);
	if (!dma->sglist)
		return -ENOMEM;

	dma->sglist[SGLIST_NUM_SG] = 0;
	dma->sglist[SGLIST_TOTALSIZE] = vkdata->size;
	remaining_size = vkdata->size;
	sgdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];

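	/* Map all pages for DMA; the first page may start at a non-zero offset */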
	size = min_t(size_t, PAGE_SIZE - offset, remaining_size);
	remaining_size -= size;
	sg_addr = dma_map_page(dev,
			       dma->pages[0],
			       offset,
			       size,
			       dma->direction);
	transfer_size = size;
	if (unlikely(dma_mapping_error(dev, sg_addr))) {
		__free_page(dma->pages[0]);
		return -EIO;
	}

	for (i = 1; i < dma->nr_pages; i++) {
		size = min_t(size_t, PAGE_SIZE, remaining_size);
		remaining_size -= size;
		addr = dma_map_page(dev,
				    dma->pages[i],
				    0,
				    size,
				    dma->direction);
		if (unlikely(dma_mapping_error(dev, addr))) {
			__free_page(dma->pages[i]);
			return -EIO;
		}

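		/*
		 * Merge into the current SG entry when this page is
		 * contiguous with it and the merged size stays within
		 * BCM_VK_MAX_SGL_CHUNK.
		 */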
		if ((addr == (sg_addr + transfer_size)) &&
		    ((transfer_size + size) <= BCM_VK_MAX_SGL_CHUNK)) {
			/* Pages are contiguous, extend the current sg entry */
			transfer_size += size;
		} else {
			/* Not contiguous, write out the current sg entry */
			sgdata->size = transfer_size;
			put_unaligned(sg_addr, (u64 *)&sgdata->address);
			dma->sglist[SGLIST_NUM_SG]++;

			/* Start a new sg entry */
			sgdata++;
			sg_addr = addr;
			transfer_size = size;
		}
	}

	/* Write the last sg list entry */
	sgdata->size = transfer_size;
	put_unaligned(sg_addr, (u64 *)&sgdata->address);
	dma->sglist[SGLIST_NUM_SG]++;

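	/* Update the caller's descriptor to point at the sglist */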
	put_unaligned((u64)dma->handle, &vkdata->address);
	vkdata->size = (dma->sglist[SGLIST_NUM_SG] * sizeof(*sgdata)) +
		       (sizeof(u32) * SGLIST_VKDATA_START);

#ifdef BCM_VK_DUMP_SGLIST
	dev_dbg(dev,
		"sgl 0x%llx handle 0x%llx, sglen: 0x%x sgsize: 0x%x\n",
		(u64)dma->sglist,
		dma->handle,
		dma->sglen,
		vkdata->size);
	for (i = 0; i < vkdata->size / sizeof(u32); i++)
		dev_dbg(dev, "i:0x%x 0x%x\n", i, dma->sglist[i]);
#endif

	return 0;
}

int bcm_vk_sg_alloc(struct device *dev,
		    struct bcm_vk_dma *dma,
		    int dir,
		    struct _vk_data *vkdata,
		    int num)
{
	int i;
	int rc = -EINVAL;

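	/* Convert each user buffer descriptor into a DMA SG list */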
	for (i = 0; i < num; i++) {
		if (vkdata[i].size && vkdata[i].address) {
			/*
			 * Both size and address are non-zero:
			 * convert this entry to a DMA SG list.
			 */
			rc = bcm_vk_dma_alloc(dev,
					      &dma[i],
					      dir,
					      &vkdata[i]);
		} else if (vkdata[i].size ||
			   vkdata[i].address) {
			/*
			 * Only one of size and address is set:
			 * the descriptor is invalid.
			 */
			dev_err(dev,
				"Invalid vkdata %x 0x%x 0x%llx\n",
				i, vkdata[i].size, vkdata[i].address);
			rc = -EINVAL;
		} else {
			/*
			 * Both size and address are zero: nothing to
			 * convert, treat it as success.
			 */
			rc = 0;
		}

		if (rc)
			goto fail_alloc;
	}
	return rc;

fail_alloc:
	while (i > 0) {
		i--;
		if (dma[i].sglist)
			bcm_vk_dma_free(dev, &dma[i]);
	}
	return rc;
}

static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma)
{
	dma_addr_t addr;
	int i;
	int num_sg;
	u32 size;
	struct _vk_data *vkdata;

	dev_dbg(dev, "free sglist=%p sglen=0x%x\n", dma->sglist, dma->sglen);

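	/* Unmap all pages referenced by the sglist */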
	num_sg = dma->sglist[SGLIST_NUM_SG];
	vkdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];
	for (i = 0; i < num_sg; i++) {
		size = vkdata[i].size;
		addr = get_unaligned(&vkdata[i].address);

		dma_unmap_page(dev, addr, size, dma->direction);
	}

	/* Free the allocated sglist */
	dma_free_coherent(dev, dma->sglen, dma->sglist, dma->handle);

	/* Release the pinned user pages */
	for (i = 0; i < dma->nr_pages; i++)
		put_page(dma->pages[i]);

	/* Free the page pointer array */
	kfree(dma->pages);
	dma->sglist = NULL;

	return 0;
}

int bcm_vk_sg_free(struct device *dev, struct bcm_vk_dma *dma, int num,
		   int *proc_cnt)
{
	int i;

	*proc_cnt = 0;

	for (i = 0; i < num; i++) {
		if (dma[i].sglist) {
			bcm_vk_dma_free(dev, &dma[i]);
			*proc_cnt += 1;
		}
	}

	return 0;
}