#include "ivtv-driver.h"
#include "ivtv-udma.h"

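/*
 * Fill in an ivtv_dma_page_info for the user buffer [first, first + size):
 * the page-aligned start address, the offset into the first page, the
 * number of bytes used in the last page (tail), and the page count.
 *
 * Example, assuming 4 KiB pages: first = 0x12345678, size = 0x100 gives
 * uaddr = 0x12345000 and offset = 0x678; first and last land on the same
 * page (0x12345), so page_count = 1 and tail is trimmed from 0x778 down
 * to the 0x100 bytes actually used.
 */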
void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size)
{
	dma_page->uaddr = first & PAGE_MASK;
	dma_page->offset = first & ~PAGE_MASK;
	dma_page->tail = 1 + ((first + size - 1) & ~PAGE_MASK);
	dma_page->first = (first & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->last = ((first + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->page_count = dma_page->last - dma_page->first + 1;
	if (dma_page->page_count == 1)
		dma_page->tail -= dma_page->offset;
}

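/*
 * Populate dma->SGlist starting at index map_offset, one entry per pinned
 * page. Highmem pages have no permanent kernel mapping, so their contents
 * are copied into GFP_KERNEL bounce pages and the scatterlist entry points
 * at the bounce page instead. Returns the next free index on success, or
 * -1 if a bounce page could not be allocated.
 */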
int ivtv_udma_fill_sg_list(struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
{
	int i, offset;
	unsigned long flags;

	if (map_offset < 0)
		return map_offset;

	offset = dma_page->offset;

	/* Fill SG list with one entry per pinned page; only the first
	   entry keeps a nonzero offset, only the last uses 'tail'. */
	for (i = 0; i < dma_page->page_count; i++) {
		unsigned int len = (i == dma_page->page_count - 1) ?
			dma_page->tail : PAGE_SIZE - offset;

		if (PageHighMem(dma->map[map_offset])) {
			void *src;

			/* Highmem page: copy through a lowmem bounce page */
			if (dma->bouncemap[map_offset] == NULL)
				dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
			if (dma->bouncemap[map_offset] == NULL)
				return -1;
			local_irq_save(flags);
			src = kmap_atomic(dma->map[map_offset]) + offset;
			memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
			kunmap_atomic(src);
			local_irq_restore(flags);
			sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
		} else {
			sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
		}
		offset = 0;
		map_offset++;
	}
	return map_offset;
}

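/*
 * Translate the mapped scatterlist into the little-endian SGarray that the
 * firmware consumes. buffer_offset advances with each chunk; once exactly
 * 'split' bytes have been described, the destination jumps to
 * buffer_offset_2. A split of -1 (all ones in the u32) never reaches zero
 * for realistic transfer sizes, so no jump occurs.
 */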
void ivtv_udma_fill_sg_array(struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(dma->SGlist, sg, dma->SG_length, i) {
		dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
		dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
		dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
		buffer_offset += sg_dma_len(sg);

		split -= sg_dma_len(sg);
		if (split == 0)
			buffer_offset = buffer_offset_2;
	}
}

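/*
 * Map the SGarray itself for DMA once, the first time it is needed;
 * SG_handle then stays valid until ivtv_udma_free().
 */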
void ivtv_udma_alloc(struct ivtv *itv)
{
	if (itv->udma.SG_handle == 0) {
		itv->udma.SG_handle = dma_map_single(&itv->pdev->dev,
						     itv->udma.SGarray,
						     sizeof(itv->udma.SGarray),
						     DMA_TO_DEVICE);
		ivtv_udma_sync_for_cpu(itv);
	}
}

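/*
 * Prepare a user-space buffer for a device-bound DMA transfer: pin the
 * user pages, build and map the scatterlist, and fill the firmware SG
 * array. Returns the number of pinned pages on success or a negative
 * errno on failure.
 */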
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
		    void __user *userbuf, int size_in_bytes)
{
	struct ivtv_dma_page_info user_dma;
	struct ivtv_user_dma *dma = &itv->udma;
	int err;

	IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

	/* Still in use? */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
				dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

	if (user_dma.page_count <= 0) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
				user_dma.page_count, size_in_bytes, user_dma.offset);
		return -EINVAL;
	}

	/* Pin user pages for DMA Xfer */
	err = pin_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
				      dma->map, FOLL_FORCE);

	if (user_dma.page_count != err) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
				err, user_dma.page_count);
		if (err >= 0) {
			unpin_user_pages(dma->map, err);
			return -EINVAL;
		}
		return err;
	}

	dma->page_count = user_dma.page_count;

	/* Fill SG List with new values */
	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
		unpin_user_pages(dma->map, dma->page_count);
		dma->page_count = 0;
		return -ENOMEM;
	}

	/* Map SG List; a zero SG_length would underflow the SGarray
	   index below, so bail out on mapping failure */
	dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
				    dma->page_count, DMA_TO_DEVICE);
	if (!dma->SG_length) {
		IVTV_DEBUG_WARN("%s: DMA map error, SG_length is 0\n", __func__);
		unpin_user_pages(dma->map, dma->page_count);
		dma->page_count = 0;
		return -EINVAL;
	}

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array(dma, ivtv_dest_addr, 0, -1);

	/* Tag last SG Array entry with the interrupt bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return dma->page_count;
}

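/*
 * Undo ivtv_udma_setup(): unmap the scatterlist and release the pinned
 * user pages.
 */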
void ivtv_udma_unmap(struct ivtv *itv)
{
	struct ivtv_user_dma *dma = &itv->udma;

	IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");

	/* Nothing to free */
	if (dma->page_count == 0)
		return;

	/* Unmap Scatterlist */
	if (dma->SG_length) {
		dma_unmap_sg(&itv->pdev->dev, dma->SGlist, dma->page_count,
			     DMA_TO_DEVICE);
		dma->SG_length = 0;
	}

	ivtv_udma_sync_for_cpu(itv);

	unpin_user_pages(dma->map, dma->page_count);
	dma->page_count = 0;
}

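/*
 * Driver teardown: release the SGarray mapping, any scatterlist that is
 * still mapped, and the highmem bounce pages.
 */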
void ivtv_udma_free(struct ivtv *itv)
{
	int i;

	/* Unmap SG Array */
	if (itv->udma.SG_handle) {
		dma_unmap_single(&itv->pdev->dev, itv->udma.SG_handle,
				 sizeof(itv->udma.SGarray), DMA_TO_DEVICE);
	}

	/* Unmap Scatterlist */
	if (itv->udma.SG_length) {
		dma_unmap_sg(&itv->pdev->dev, itv->udma.SGlist,
			     itv->udma.page_count, DMA_TO_DEVICE);
	}

	/* Release any bounce pages allocated for highmem copies */
	for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
		if (itv->udma.bouncemap[i])
			__free_page(itv->udma.bouncemap[i]);
	}
}

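/*
 * Kick off the transfer: point the device at the mapped SG array, set the
 * transfer bit in the DMA register, and record in i_flags that a (user)
 * DMA is now running.
 */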
void ivtv_udma_start(struct ivtv *itv)
{
	IVTV_DEBUG_DMA("start UDMA\n");
	write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	set_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
}

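/*
 * Start the UDMA transfer immediately if the engine is idle, otherwise
 * mark it pending so it is started when the current transfer completes.
 */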
void ivtv_udma_prepare(struct ivtv *itv)
{
	unsigned long flags;

	spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		ivtv_udma_start(itv);
	else
		set_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
	spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
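
/*
 * Typical caller flow, as a sketch (dest_addr, userbuf and count are
 * illustrative names; error handling and the wait for the DMA-complete
 * interrupt are elided):
 *
 *	ivtv_udma_alloc(itv);
 *	rc = ivtv_udma_setup(itv, dest_addr, userbuf, count);
 *	if (rc > 0) {
 *		ivtv_udma_prepare(itv);
 *		... wait for the transfer-complete interrupt, then:
 *		ivtv_udma_unmap(itv);
 *	}
 */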