0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include "ivtv-driver.h"
0011 #include "ivtv-queue.h"
0012
0013 int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf, const char __user *src, int copybytes)
0014 {
0015 if (s->buf_size - buf->bytesused < copybytes)
0016 copybytes = s->buf_size - buf->bytesused;
0017 if (copy_from_user(buf->buf + buf->bytesused, src, copybytes)) {
0018 return -EFAULT;
0019 }
0020 buf->bytesused += copybytes;
0021 return copybytes;
0022 }
0023
0024 void ivtv_buf_swap(struct ivtv_buffer *buf)
0025 {
0026 int i;
0027
0028 for (i = 0; i < buf->bytesused; i += 4)
0029 swab32s((u32 *)(buf->buf + i));
0030 }
0031
0032 void ivtv_queue_init(struct ivtv_queue *q)
0033 {
0034 INIT_LIST_HEAD(&q->list);
0035 q->buffers = 0;
0036 q->length = 0;
0037 q->bytesused = 0;
0038 }
0039
0040 void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
0041 {
0042 unsigned long flags;
0043
0044
0045 if (q == &s->q_free) {
0046 buf->bytesused = 0;
0047 buf->readpos = 0;
0048 buf->b_flags = 0;
0049 buf->dma_xfer_cnt = 0;
0050 }
0051 spin_lock_irqsave(&s->qlock, flags);
0052 list_add_tail(&buf->list, &q->list);
0053 q->buffers++;
0054 q->length += s->buf_size;
0055 q->bytesused += buf->bytesused - buf->readpos;
0056 spin_unlock_irqrestore(&s->qlock, flags);
0057 }
0058
0059 struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
0060 {
0061 struct ivtv_buffer *buf = NULL;
0062 unsigned long flags;
0063
0064 spin_lock_irqsave(&s->qlock, flags);
0065 if (!list_empty(&q->list)) {
0066 buf = list_entry(q->list.next, struct ivtv_buffer, list);
0067 list_del_init(q->list.next);
0068 q->buffers--;
0069 q->length -= s->buf_size;
0070 q->bytesused -= buf->bytesused - buf->readpos;
0071 }
0072 spin_unlock_irqrestore(&s->qlock, flags);
0073 return buf;
0074 }
0075
/* Move the buffer at the head of 'from' to the tail of 'to', updating the
   accounting of both queues.  Caller must hold s->qlock.
   If 'clear' is set, the buffer's bookkeeping (bytesused, readpos, flags,
   dma_xfer_cnt) is reset, so it contributes 0 bytes to to->bytesused. */
static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
		struct ivtv_queue *to, int clear)
{
	struct ivtv_buffer *buf = list_entry(from->list.next, struct ivtv_buffer, list);

	list_move_tail(from->list.next, &to->list);
	from->buffers--;
	from->length -= s->buf_size;
	from->bytesused -= buf->bytesused - buf->readpos;

	/* The clear must happen before the to->bytesused update below:
	   a cleared buffer is accounted as holding no data. */
	if (clear)
		buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
	to->buffers++;
	to->length += s->buf_size;
	to->bytesused += buf->bytesused - buf->readpos;
}
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
   If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
   If 'steal' != NULL, then buffers may also be taken from that queue if
   needed to make up the shortfall.

   The buffer is automatically cleared if it goes to the free queue. It is
   also cleared if buffers need to be taken from the 'steal' queue and
   the 'from' queue is the free queue.

   When 'from' is q_free, 'needed_bytes' is compared against the total
   buffer length available in 'from' (plus what can be stolen); otherwise
   it is compared against the total amount of data in the buffers.

   Returns -ENOMEM if the requested amount is not available, otherwise
   the number of buffers stolen from the 'steal' queue (>= 0). */
int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal,
		    struct ivtv_queue *to, int needed_bytes)
{
	unsigned long flags;
	int rc = 0;
	int from_free = from == &s->q_free;
	int to_free = to == &s->q_free;
	int bytes_available, bytes_steal;

	spin_lock_irqsave(&s->qlock, flags);
	/* needed_bytes == 0 means "move everything": treat 'from' like the
	   free queue and ask for its entire length. */
	if (needed_bytes == 0) {
		from_free = 1;
		needed_bytes = from->length;
	}

	bytes_available = from_free ? from->length : from->bytesused;
	bytes_steal = (from_free && steal) ? steal->length : 0;

	/* Fail up front if even stealing cannot satisfy the request. */
	if (bytes_available + bytes_steal < needed_bytes) {
		spin_unlock_irqrestore(&s->qlock, flags);
		return -ENOMEM;
	}
	while (steal && bytes_available < needed_bytes) {
		struct ivtv_buffer *buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
		u16 dma_xfer_cnt = buf->dma_xfer_cnt;

		/* Move buffers from the tail of the 'steal' queue to the
		   tail of the 'from' queue. Always move all buffers that
		   share the same dma_xfer_cnt value: this ensures we never
		   end up with partial frame data when one frame is spread
		   over multiple buffers. */
		while (dma_xfer_cnt == buf->dma_xfer_cnt) {
			list_move_tail(steal->list.prev, &from->list);
			rc++;
			steal->buffers--;
			steal->length -= s->buf_size;
			steal->bytesused -= buf->bytesused - buf->readpos;
			buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
			from->buffers++;
			from->length += s->buf_size;
			bytes_available += s->buf_size;
			if (list_empty(&steal->list))
				break;
			buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
		}
	}
	if (from_free) {
		u32 old_length = to->length;

		/* Moving free capacity: count progress in buffer length. */
		while (to->length - old_length < needed_bytes) {
			ivtv_queue_move_buf(s, from, to, 1);
		}
	}
	else {
		u32 old_bytesused = to->bytesused;

		/* Moving data: count progress in bytesused; clear the
		   buffers only when the destination is the free queue. */
		while (to->bytesused - old_bytesused < needed_bytes) {
			ivtv_queue_move_buf(s, from, to, to_free);
		}
	}
	spin_unlock_irqrestore(&s->qlock, flags);
	return rc;
}
0172
0173 void ivtv_flush_queues(struct ivtv_stream *s)
0174 {
0175 ivtv_queue_move(s, &s->q_io, NULL, &s->q_free, 0);
0176 ivtv_queue_move(s, &s->q_full, NULL, &s->q_free, 0);
0177 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
0178 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
0179 }
0180
/* Allocate the scatter-gather arrays and the data buffers for a stream.
   On success every buffer ends up on the free queue and 0 is returned.
   On failure everything allocated so far is released and -ENOMEM is
   returned.  A stream configured with zero buffers succeeds trivially. */
int ivtv_stream_alloc(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	int SGsize = sizeof(struct ivtv_sg_host_element) * s->buffers;
	int i;

	if (s->buffers == 0)
		return 0;

	IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n",
		s->dma != DMA_NONE ? "DMA " : "",
		s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);

	/* SG list of pending DMA transfers (one element per buffer). */
	s->sg_pending = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_pending == NULL) {
		IVTV_ERR("Could not allocate sg_pending for %s stream\n", s->name);
		return -ENOMEM;
	}
	s->sg_pending_size = 0;

	/* SG list of transfers currently being processed. */
	s->sg_processing = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_processing == NULL) {
		IVTV_ERR("Could not allocate sg_processing for %s stream\n", s->name);
		kfree(s->sg_pending);
		s->sg_pending = NULL;
		return -ENOMEM;
	}
	s->sg_processing_size = 0;

	/* Single SG element handed to the device for the active transfer. */
	s->sg_dma = kzalloc(sizeof(struct ivtv_sg_element),
			GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_dma == NULL) {
		IVTV_ERR("Could not allocate sg_dma for %s stream\n", s->name);
		kfree(s->sg_pending);
		s->sg_pending = NULL;
		kfree(s->sg_processing);
		s->sg_processing = NULL;
		return -ENOMEM;
	}
	if (ivtv_might_use_dma(s)) {
		s->sg_handle = dma_map_single(&itv->pdev->dev, s->sg_dma,
					      sizeof(struct ivtv_sg_element),
					      DMA_TO_DEVICE);
		ivtv_stream_sync_for_cpu(s);
	}

	/* Allocate the data buffers; each gets DMA-mapped (when applicable)
	   and enqueued on the free queue.  NOTE(review): buffers are sized
	   buf_size + 256 — presumably slack for hardware overruns; confirm
	   against the hardware docs. */
	for (i = 0; i < s->buffers; i++) {
		struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer),
						GFP_KERNEL|__GFP_NOWARN);

		if (buf == NULL)
			break;
		buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL|__GFP_NOWARN);
		if (buf->buf == NULL) {
			kfree(buf);
			break;
		}
		INIT_LIST_HEAD(&buf->list);
		if (ivtv_might_use_dma(s)) {
			buf->dma_handle = dma_map_single(&s->itv->pdev->dev,
				buf->buf, s->buf_size + 256, s->dma);
			ivtv_buf_sync_for_cpu(s, buf);
		}
		ivtv_enqueue(s, buf, &s->q_free);
	}
	if (i == s->buffers)
		return 0;
	/* Partial allocation: release everything, including the SG arrays. */
	IVTV_ERR("Couldn't allocate buffers for %s stream\n", s->name);
	ivtv_stream_free(s);
	return -ENOMEM;
}
0253
/* Release all resources owned by a stream: move every buffer back to the
   free queue, unmap and free each buffer, then tear down the SG arrays.
   Safe to call on a partially-allocated stream (see ivtv_stream_alloc). */
void ivtv_stream_free(struct ivtv_stream *s)
{
	struct ivtv_buffer *buf;

	/* empty all queues onto q_free first */
	ivtv_flush_queues(s);

	/* reclaim and free all buffers */
	while ((buf = ivtv_dequeue(s, &s->q_free))) {
		if (ivtv_might_use_dma(s))
			dma_unmap_single(&s->itv->pdev->dev, buf->dma_handle,
				 s->buf_size + 256, s->dma);
		kfree(buf->buf);
		kfree(buf);
	}

	/* free the SG arrays; unmap the device-visible element first */
	if (s->sg_dma != NULL) {
		if (s->sg_handle != IVTV_DMA_UNMAPPED) {
			dma_unmap_single(&s->itv->pdev->dev, s->sg_handle,
				 sizeof(struct ivtv_sg_element),
				 DMA_TO_DEVICE);
			s->sg_handle = IVTV_DMA_UNMAPPED;
		}
		kfree(s->sg_pending);
		kfree(s->sg_processing);
		kfree(s->sg_dma);
		s->sg_pending = NULL;
		s->sg_processing = NULL;
		s->sg_dma = NULL;
		s->sg_pending_size = 0;
		s->sg_processing_size = 0;
	}
}