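/*
 * USB Video Class driver - buffer queue management.
 *
 * Video buffers are managed through videobuf2; this file implements the
 * vb2 queue operations and the driver-side IRQ queue that holds buffers
 * waiting to be filled with data from the device.
 */
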
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "uvcvideo.h"

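/*
 * Video buffers queue management.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to
 * serialize calls into videobuf2 and a spinlock (irqlock) to protect the
 * IRQ queue that holds the buffers to be processed by the driver.
 */
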
static inline struct uvc_buffer *uvc_vbuf_to_buffer(struct vb2_v4l2_buffer *buf)
{
        return container_of(buf, struct uvc_buffer, buf);
}

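/*
 * Return all buffers on the IRQ queue to videobuf2 in the given state.
 * Must be called with the queue irqlock held.
 */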
static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
                                     enum uvc_buffer_state state)
{
        enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
                                        ? VB2_BUF_STATE_ERROR
                                        : VB2_BUF_STATE_QUEUED;

        while (!list_empty(&queue->irqqueue)) {
                struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
                                                          struct uvc_buffer,
                                                          queue);
                list_del(&buf->queue);
                buf->state = state;
                vb2_buffer_done(&buf->buf.vb2_buf, vb2_state);
        }
}

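/*
 * videobuf2 queue operations
 */
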
static int uvc_queue_setup(struct vb2_queue *vq,
                           unsigned int *nbuffers, unsigned int *nplanes,
                           unsigned int sizes[], struct device *alloc_devs[])
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
        struct uvc_streaming *stream;
        unsigned int size;

        switch (vq->type) {
        case V4L2_BUF_TYPE_META_CAPTURE:
                size = UVC_METADATA_BUF_SIZE;
                break;

        default:
                stream = uvc_queue_to_stream(queue);
                size = stream->ctrl.dwMaxVideoFrameSize;
                break;
        }

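        /*
         * When called with a non-zero plane count, validate the requested
         * layout: the driver supports a single plane only, and buffers must
         * be large enough to hold a complete frame.
         */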
        if (*nplanes)
                return *nplanes != 1 || sizes[0] < size ? -EINVAL : 0;

        *nplanes = 1;
        sizes[0] = size;
        return 0;
}

static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);

        if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
                uvc_dbg(uvc_queue_to_stream(queue)->dev, CAPTURE,
                        "[E] Bytes used out of bounds\n");
                return -EINVAL;
        }

        if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
                return -ENODEV;

        buf->state = UVC_BUF_STATE_QUEUED;
        buf->error = 0;
        buf->mem = vb2_plane_vaddr(vb, 0);
        buf->length = vb2_plane_size(vb, 0);
        if (vb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
                buf->bytesused = 0;
        else
                buf->bytesused = vb2_get_plane_payload(vb, 0);

        return 0;
}

static void uvc_buffer_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
                kref_init(&buf->ref);
                list_add_tail(&buf->queue, &queue->irqqueue);
        } else {
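                /*
                 * If the device is disconnected, return the buffer to
                 * userspace directly: the next QBUF call will fail with
                 * -ENODEV.
                 */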
                buf->state = UVC_BUF_STATE_ERROR;
                vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
        }

        spin_unlock_irqrestore(&queue->irqlock, flags);
}

static void uvc_buffer_finish(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_streaming *stream = uvc_queue_to_stream(queue);
        struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);

        if (vb->state == VB2_BUF_STATE_DONE)
                uvc_video_clock_update(stream, vbuf, buf);
}

static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
        struct uvc_streaming *stream = uvc_queue_to_stream(queue);
        int ret;

        lockdep_assert_irqs_enabled();

        queue->buf_used = 0;

        ret = uvc_video_start_streaming(stream);
        if (ret == 0)
                return 0;

        spin_lock_irq(&queue->irqlock);
        uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
        spin_unlock_irq(&queue->irqlock);

        return ret;
}

static void uvc_stop_streaming(struct vb2_queue *vq)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);

        lockdep_assert_irqs_enabled();

        if (vq->type != V4L2_BUF_TYPE_META_CAPTURE)
                uvc_video_stop_streaming(uvc_queue_to_stream(queue));

        spin_lock_irq(&queue->irqlock);
        uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
        spin_unlock_irq(&queue->irqlock);
}

static const struct vb2_ops uvc_queue_qops = {
        .queue_setup = uvc_queue_setup,
        .buf_prepare = uvc_buffer_prepare,
        .buf_queue = uvc_buffer_queue,
        .buf_finish = uvc_buffer_finish,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .start_streaming = uvc_start_streaming,
        .stop_streaming = uvc_stop_streaming,
};

static const struct vb2_ops uvc_meta_queue_qops = {
        .queue_setup = uvc_queue_setup,
        .buf_prepare = uvc_buffer_prepare,
        .buf_queue = uvc_buffer_queue,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .stop_streaming = uvc_stop_streaming,
};

int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
                   int drop_corrupted)
{
        int ret;

        queue->queue.type = type;
        queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
        queue->queue.drv_priv = queue;
        queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
        queue->queue.mem_ops = &vb2_vmalloc_memops;
        queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
                | V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
        queue->queue.lock = &queue->mutex;

        switch (type) {
        case V4L2_BUF_TYPE_META_CAPTURE:
                queue->queue.ops = &uvc_meta_queue_qops;
                break;
        default:
                queue->queue.io_modes |= VB2_DMABUF;
                queue->queue.ops = &uvc_queue_qops;
                break;
        }

        ret = vb2_queue_init(&queue->queue);
        if (ret)
                return ret;

        mutex_init(&queue->mutex);
        spin_lock_init(&queue->irqlock);
        INIT_LIST_HEAD(&queue->irqqueue);
        queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;

        return 0;
}

void uvc_queue_release(struct uvc_video_queue *queue)
{
        mutex_lock(&queue->mutex);
        vb2_queue_release(&queue->queue);
        mutex_unlock(&queue->mutex);
}

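/*
 * V4L2 queue operations
 *
 * These helpers wrap the corresponding videobuf2 calls, serializing them
 * with the queue mutex.
 */
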
int uvc_request_buffers(struct uvc_video_queue *queue,
                        struct v4l2_requestbuffers *rb)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_reqbufs(&queue->queue, rb);
        mutex_unlock(&queue->mutex);

        return ret ? ret : rb->count;
}

int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_querybuf(&queue->queue, buf);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_create_buffers(struct uvc_video_queue *queue,
                       struct v4l2_create_buffers *cb)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_create_bufs(&queue->queue, cb);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_buffer(struct uvc_video_queue *queue,
                     struct media_device *mdev, struct v4l2_buffer *buf)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_qbuf(&queue->queue, mdev, buf);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_export_buffer(struct uvc_video_queue *queue,
                      struct v4l2_exportbuffer *exp)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_expbuf(&queue->queue, exp);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
                       int nonblocking)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_streamon(&queue->queue, type);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_streamoff(&queue->queue, type);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
        return vb2_mmap(&queue->queue, vma);
}

#ifndef CONFIG_MMU
unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
                                          unsigned long pgoff)
{
        return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif

__poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
                        poll_table *wait)
{
        __poll_t ret;

        mutex_lock(&queue->mutex);
        ret = vb2_poll(&queue->queue, file, wait);
        mutex_unlock(&queue->mutex);

        return ret;
}

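/*
 * Check if buffers have been allocated.
 */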
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
        int allocated;

        mutex_lock(&queue->mutex);
        allocated = vb2_is_busy(&queue->queue);
        mutex_unlock(&queue->mutex);

        return allocated;
}

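/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the IRQ queue as erroneous and
 * returns them to videobuf2, waking up any userspace process waiting on
 * them. If @disconnect is set, the UVC_QUEUE_DISCONNECTED flag is also set,
 * and subsequent attempts to queue buffers will fail with -ENODEV.
 *
 * This function acquires the IRQ spinlock and can be called from interrupt
 * context.
 */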
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);

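        /*
         * The disconnected flag must be set while holding the irqlock to
         * avoid racing with uvc_buffer_queue(): a buffer added to the IRQ
         * queue after it has been flushed above would otherwise never be
         * returned to userspace.
         */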
        if (disconnect)
                queue->flags |= UVC_QUEUE_DISCONNECTED;
        spin_unlock_irqrestore(&queue->irqlock, flags);
}

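/*
 * Return the first buffer on the IRQ queue, or NULL if the queue is empty.
 * The caller must hold the queue irqlock; uvc_queue_get_current_buffer() is
 * the locked wrapper.
 */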
static struct uvc_buffer *
__uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
{
        if (list_empty(&queue->irqqueue))
                return NULL;

        return list_first_entry(&queue->irqqueue, struct uvc_buffer, queue);
}

struct uvc_buffer *uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
{
        struct uvc_buffer *nextbuf;
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        nextbuf = __uvc_queue_get_current_buffer(queue);
        spin_unlock_irqrestore(&queue->irqlock, flags);

        return nextbuf;
}

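/*
 * uvc_queue_buffer_requeue: Requeue a buffer on the internal IRQ queue.
 *
 * Reuse a buffer through the internal queue without going through the
 * videobuf2 prepare step again. If the device has been disconnected,
 * uvc_buffer_queue() returns the buffer to userspace with an error.
 */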
static void uvc_queue_buffer_requeue(struct uvc_video_queue *queue,
                                     struct uvc_buffer *buf)
{
        buf->error = 0;
        buf->state = UVC_BUF_STATE_QUEUED;
        buf->bytesused = 0;
        vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);

        uvc_buffer_queue(&buf->buf.vb2_buf);
}

static void uvc_queue_buffer_complete(struct kref *ref)
{
        struct uvc_buffer *buf = container_of(ref, struct uvc_buffer, ref);
        struct vb2_buffer *vb = &buf->buf.vb2_buf;
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);

        if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
                uvc_queue_buffer_requeue(queue, buf);
                return;
        }

        buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
        vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
        vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

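/*
 * Release a reference on the buffer. Complete the buffer when the last
 * reference is released.
 */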
void uvc_queue_buffer_release(struct uvc_buffer *buf)
{
        kref_put(&buf->ref, uvc_queue_buffer_complete);
}

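/*
 * Remove this buffer from the IRQ queue, drop the queue's reference on it,
 * and return the new first buffer (or NULL if the queue is empty). The
 * buffer completes only once its last reference is released.
 */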
struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
                                         struct uvc_buffer *buf)
{
        struct uvc_buffer *nextbuf;
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        list_del(&buf->queue);
        nextbuf = __uvc_queue_get_current_buffer(queue);
        spin_unlock_irqrestore(&queue->irqlock, flags);

        uvc_queue_buffer_release(buf);

        return nextbuf;
}