/*
 * videobuf2-v4l2.c - V4L2 driver helper framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>

#include <media/videobuf2-v4l2.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(q, level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			pr_info("vb2-v4l2: [%s] %s: " fmt,		\
				(q)->name, __func__, ## arg);		\
	} while (0)

#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_IN_REQUEST | \
				 V4L2_BUF_FLAG_REQUEST_FD | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/*
 * Output buffer flags that are passed through from userspace; they are
 * cleared again for capture buffers.
 */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | \
				 V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | \
				 V4L2_BUF_FLAG_TIMECODE | \
				 V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF)

/*
 * __verify_planes_array() - verify that the planes array passed in struct
 * v4l2_buffer from userspace can be safely used
 */
static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
		return 0;

	/* Is memory for copying plane information present? */
	if (b->m.planes == NULL) {
		dprintk(vb->vb2_queue, 1,
			"multi-planar buffer passed but planes array not provided\n");
		return -EINVAL;
	}

	if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
		dprintk(vb->vb2_queue, 1,
			"incorrect planes array length, expected %d, got %d\n",
			vb->num_planes, b->length);
		return -EINVAL;
	}

	return 0;
}

static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
{
	return __verify_planes_array(vb, pb);
}

/*
 * __verify_length() - Verify that the bytesused value for each plane fits in
 * the plane length and that the data offset doesn't exceed the bytesused value.
 */
static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	unsigned int length;
	unsigned int bytesused;
	unsigned int plane;

	if (V4L2_TYPE_IS_CAPTURE(b->type))
		return 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		for (plane = 0; plane < vb->num_planes; ++plane) {
			length = (b->memory == VB2_MEMORY_USERPTR ||
				  b->memory == VB2_MEMORY_DMABUF)
				? b->m.planes[plane].length
				: vb->planes[plane].length;
			bytesused = b->m.planes[plane].bytesused
				  ? b->m.planes[plane].bytesused : length;

			if (b->m.planes[plane].bytesused > length)
				return -EINVAL;

			if (b->m.planes[plane].data_offset > 0 &&
			    b->m.planes[plane].data_offset >= bytesused)
				return -EINVAL;
		}
	} else {
		length = (b->memory == VB2_MEMORY_USERPTR)
			? b->length : vb->planes[0].length;

		if (b->bytesused > length)
			return -EINVAL;
	}

	return 0;
}

static void __init_vb2_v4l2_buffer(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->request_fd = -1;
}

static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
{
	const struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;

	if (q->is_output) {
		/*
		 * For output buffers copy the timestamp if needed,
		 * and the timecode field and flag if needed.
		 */
		if (q->copy_timestamp)
			vb->timestamp = v4l2_buffer_get_timestamp(b);
		vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
			vbuf->timecode = b->timecode;
	}
}

static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
	static bool check_once;

	if (check_once)
		return;

	check_once = true;

	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
	if (vb->vb2_queue->allow_zero_bytesused)
		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
	else
		pr_warn("use the actual size instead.\n");
}

static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_plane *planes = vbuf->planes;
	unsigned int plane;
	int ret;

	ret = __verify_length(vb, b);
	if (ret < 0) {
		dprintk(q, 1, "plane parameters verification failed: %d\n", ret);
		return ret;
	}
	if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
		/*
		 * If the field order is ALTERNATE, then each queued output
		 * buffer must be marked as TOP or BOTTOM; ALTERNATE itself
		 * is not a valid per-buffer field value.
		 */
		dprintk(q, 1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
		return -EINVAL;
	}
	vbuf->sequence = 0;
	vbuf->request_fd = -1;
	vbuf->is_held = false;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		case VB2_MEMORY_DMABUF:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		default:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.offset =
					vb->planes[plane].m.offset;
				planes[plane].length =
					vb->planes[plane].length;
			}
			break;
		}

		/* Fill in driver-provided payload information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * If bytesused == 0 for the output buffer, then fall
			 * back to the full plane size. In that case userspace
			 * clearly never bothered to set it and it's a safe
			 * assumption that they really meant to use the full
			 * plane sizes.
			 *
			 * Some drivers, e.g. old codec drivers, use
			 * bytesused == 0 as a way to indicate that streaming
			 * is finished. In that case, the driver should use
			 * the allow_zero_bytesused flag to keep old userspace
			 * applications working.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				struct vb2_plane *pdst = &planes[plane];
				struct v4l2_plane *psrc = &b->m.planes[plane];

				if (psrc->bytesused == 0)
					vb2_warn_zero_bytesused(vb);

				if (vb->vb2_queue->allow_zero_bytesused)
					pdst->bytesused = psrc->bytesused;
				else
					pdst->bytesused = psrc->bytesused ?
						psrc->bytesused : pdst->length;
				pdst->data_offset = psrc->data_offset;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use the planes array, so copy
		 * the relevant fields of struct v4l2_buffer into plane 0 of
		 * our internal vb2_plane array.
		 *
		 * If bytesused == 0 for the output buffer, then fall back to
		 * the full buffer size as that's a sensible default.
		 *
		 * Some drivers, e.g. old codec drivers, use bytesused == 0 as
		 * a way to indicate that streaming is finished. In that case,
		 * the driver should use the allow_zero_bytesused flag to keep
		 * old userspace applications working.
		 */
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			planes[0].m.userptr = b->m.userptr;
			planes[0].length = b->length;
			break;
		case VB2_MEMORY_DMABUF:
			planes[0].m.fd = b->m.fd;
			planes[0].length = b->length;
			break;
		default:
			planes[0].m.offset = vb->planes[0].m.offset;
			planes[0].length = vb->planes[0].length;
			break;
		}

		planes[0].data_offset = 0;
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			if (b->bytesused == 0)
				vb2_warn_zero_bytesused(vb);

			if (vb->vb2_queue->allow_zero_bytesused)
				planes[0].bytesused = b->bytesused;
			else
				planes[0].bytesused = b->bytesused ?
					b->bytesused : planes[0].length;
		} else
			planes[0].bytesused = 0;

	}

	/* Zero the flags that vb2 manages itself */
	vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if (!vb->vb2_queue->copy_timestamp || V4L2_TYPE_IS_CAPTURE(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so that needs to be copied here.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vbuf->field = b->field;
		if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
			vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
		/* Zero the last flag, this is a signal from driver to userspace */
		vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
	}

	return 0;
}

static void set_buffer_cache_hints(struct vb2_queue *q,
				   struct vb2_buffer *vb,
				   struct v4l2_buffer *b)
{
	if (!vb2_queue_allows_cache_hints(q)) {
		/*
		 * Clear the cache hint flags if the queue does not support
		 * userspace hints, to signal to userspace that these flags
		 * have no effect here.
		 */
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_CLEAN;
		return;
	}

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE)
		vb->skip_cache_sync_on_finish = 1;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN)
		vb->skip_cache_sync_on_prepare = 1;
}

static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
				    struct v4l2_buffer *b, bool is_prepare,
				    struct media_request **p_req)
{
	const char *opname = is_prepare ? "prepare_buf" : "qbuf";
	struct media_request *req;
	struct vb2_v4l2_buffer *vbuf;
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "%s: invalid buffer type\n", opname);
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "%s: buffer index out of range\n", opname);
		return -EINVAL;
	}

	if (q->bufs[b->index] == NULL) {
		/* Should never happen */
		dprintk(q, 1, "%s: buffer is NULL\n", opname);
		return -EINVAL;
	}

	if (b->memory != q->memory) {
		dprintk(q, 1, "%s: invalid memory type\n", opname);
		return -EINVAL;
	}

	vb = q->bufs[b->index];
	vbuf = to_vb2_v4l2_buffer(vb);
	ret = __verify_planes_array(vb, b);
	if (ret)
		return ret;

	if (!is_prepare && (b->flags & V4L2_BUF_FLAG_REQUEST_FD) &&
	    vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "%s: buffer is not in dequeued state\n", opname);
		return -EINVAL;
	}

	if (!vb->prepared) {
		set_buffer_cache_hints(q, vb, b);
		/* Copy the relevant information provided by userspace */
		memset(vbuf->planes, 0,
		       sizeof(vbuf->planes[0]) * vb->num_planes);
		ret = vb2_fill_vb2_v4l2_buffer(vb, b);
		if (ret)
			return ret;
	}

	if (is_prepare)
		return 0;

	if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		if (q->requires_requests) {
			dprintk(q, 1, "%s: queue requires requests\n", opname);
			return -EBADR;
		}
		if (q->uses_requests) {
			dprintk(q, 1, "%s: queue uses requests\n", opname);
			return -EBUSY;
		}
		return 0;
	} else if (!q->supports_requests) {
		dprintk(q, 1, "%s: queue does not support requests\n", opname);
		return -EBADR;
	} else if (q->uses_qbuf) {
		dprintk(q, 1, "%s: queue does not use requests\n", opname);
		return -EBUSY;
	}

	/*
	 * For proper locking when queueing a request you need to be able
	 * to lock access to the vb2 queue, so check that there is a lock
	 * that we can use. In addition p_req must be non-NULL.
	 */
	if (WARN_ON(!q->lock || !p_req))
		return -EINVAL;

	/*
	 * Make sure this op is implemented by the driver. It's easy to forget
	 * this callback, but it is important when canceling a buffer in a
	 * queued request.
	 */
	if (WARN_ON(!q->ops->buf_request_complete))
		return -EINVAL;

	/*
	 * Output queues that are used with requests must also implement
	 * buf_out_validate, since the buffer is validated when the request
	 * is queued rather than at QBUF time.
	 */
	if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
		     q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
		    !q->ops->buf_out_validate))
		return -EINVAL;

	req = media_request_get_by_fd(mdev, b->request_fd);
	if (IS_ERR(req)) {
		dprintk(q, 1, "%s: invalid request_fd\n", opname);
		return PTR_ERR(req);
	}

	/*
	 * Early sanity check: a buffer can only be added to a request that
	 * is still being updated, i.e. not yet queued.
	 */
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_UPDATING) {
		dprintk(q, 1, "%s: request is not idle\n", opname);
		media_request_put(req);
		return -EBUSY;
	}

	*p_req = req;
	vbuf->request_fd = b->request_fd;

	return 0;
}

/*
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
{
	struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	/* Copy back data such as timestamp, flags, etc. */
	b->index = vb->index;
	b->type = vb->type;
	b->memory = vb->memory;
	b->bytesused = 0;

	b->flags = vbuf->flags;
	b->field = vbuf->field;
	v4l2_buffer_set_timestamp(b, vb->timestamp);
	b->timecode = vbuf->timecode;
	b->sequence = vbuf->sequence;
	b->reserved2 = 0;
	b->request_fd = 0;

	if (q->is_multiplanar) {
		/*
		 * Fill in the per-plane information in the planes array
		 * that userspace provided.
		 */
		b->length = vb->num_planes;
		for (plane = 0; plane < vb->num_planes; ++plane) {
			struct v4l2_plane *pdst = &b->m.planes[plane];
			struct vb2_plane *psrc = &vb->planes[plane];

			pdst->bytesused = psrc->bytesused;
			pdst->length = psrc->length;
			if (q->memory == VB2_MEMORY_MMAP)
				pdst->m.mem_offset = psrc->m.offset;
			else if (q->memory == VB2_MEMORY_USERPTR)
				pdst->m.userptr = psrc->m.userptr;
			else if (q->memory == VB2_MEMORY_DMABUF)
				pdst->m.fd = psrc->m.fd;
			pdst->data_offset = psrc->data_offset;
			memset(pdst->reserved, 0, sizeof(pdst->reserved));
		}
	} else {
		/*
		 * Single-planar API: copy the data of the only plane
		 * directly into struct v4l2_buffer.
		 */
		b->length = vb->planes[0].length;
		b->bytesused = vb->planes[0].bytesused;
		if (q->memory == VB2_MEMORY_MMAP)
			b->m.offset = vb->planes[0].m.offset;
		else if (q->memory == VB2_MEMORY_USERPTR)
			b->m.userptr = vb->planes[0].m.userptr;
		else if (q->memory == VB2_MEMORY_DMABUF)
			b->m.fd = vb->planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
	if (!q->copy_timestamp) {
		/*
		 * For non-COPY timestamps, drop timestamp source bits
		 * and obtain the timestamp source from the queue.
		 */
		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_IN_REQUEST:
		b->flags |= V4L2_BUF_FLAG_IN_REQUEST;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		fallthrough;
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARING:
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	if ((vb->state == VB2_BUF_STATE_DEQUEUED ||
	     vb->state == VB2_BUF_STATE_IN_REQUEST) &&
	    vb->synced && vb->prepared)
		b->flags |= V4L2_BUF_FLAG_PREPARED;

	if (vb2_buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
	if (vbuf->request_fd >= 0) {
		b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
		b->request_fd = vbuf->request_fd;
	}
}

/*
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * v4l2_buffer by the userspace. The caller has already verified that struct
 * v4l2_buffer has a valid number of planes.
 */
static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	unsigned int plane;

	if (!vb->vb2_queue->copy_timestamp)
		vb->timestamp = 0;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) {
			planes[plane].m = vbuf->planes[plane].m;
			planes[plane].length = vbuf->planes[plane].length;
		}
		planes[plane].bytesused = vbuf->planes[plane].bytesused;
		planes[plane].data_offset = vbuf->planes[plane].data_offset;
	}
	return 0;
}

static const struct vb2_buf_ops v4l2_buf_ops = {
	.verify_planes_array	= __verify_planes_array_core,
	.init_buffer		= __init_vb2_v4l2_buffer,
	.fill_user_buffer	= __fill_v4l2_buffer,
	.fill_vb2_buffer	= __fill_vb2_buffer,
	.copy_timestamp		= __copy_timestamp,
};

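/*
 * vb2_find_timestamp() - find the index of a buffer whose timestamp was
 * copied from userspace (copied_timestamp is set) and matches @timestamp.
 * The search starts at @start_idx; returns the buffer index or -1 if no
 * match is found.
 */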
int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
		       unsigned int start_idx)
{
	unsigned int i;

	for (i = start_idx; i < q->num_buffers; i++)
		if (q->bufs[i]->copied_timestamp &&
		    q->bufs[i]->timestamp == timestamp)
			return i;
	return -1;
}
EXPORT_SYMBOL_GPL(vb2_find_timestamp);

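/*
 * vb2_find_buffer() - return the first buffer whose timestamp was copied
 * from userspace and matches @timestamp, or NULL if there is none.
 */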
struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp)
{
	unsigned int i;

	for (i = 0; i < q->num_buffers; i++)
		if (q->bufs[i]->copied_timestamp &&
		    q->bufs[i]->timestamp == timestamp)
			return vb2_get_buffer(q, i);
	return NULL;
}
EXPORT_SYMBOL_GPL(vb2_find_buffer);

/*
 * vb2_querybuf() - query video buffer information
 * @q:	vb2 queue
 * @b:	buffer struct passed from userspace to the VIDIOC_QUERYBUF handler
 *	in the driver
 *
 * Should be called from the VIDIOC_QUERYBUF ioctl handler in the driver.
 * This function fills the relevant information for userspace.
 *
 * The return values from this function are intended to be directly returned
 * from the VIDIOC_QUERYBUF handler in the driver.
 */
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "wrong buffer type\n");
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "buffer index out of range\n");
		return -EINVAL;
	}
	vb = q->bufs[b->index];
	ret = __verify_planes_array(vb, b);
	if (!ret)
		vb2_core_querybuf(q, b->index, b);
	return ret;
}
EXPORT_SYMBOL(vb2_querybuf);

static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
{
	*caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
	if (q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
	if (q->io_modes & VB2_USERPTR)
		*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
	if (q->io_modes & VB2_DMABUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
	if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
	if (q->allow_cache_hints && q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
	if (q->supports_requests)
		*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
#endif
}

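/*
 * validate_memory_flags() - sanitize the memory flags passed by userspace:
 * keep only V4L2_MEMORY_FLAG_NON_COHERENT, and only for MMAP queues that
 * allow cache hints; all other flag bits are cleared.
 */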
static void validate_memory_flags(struct vb2_queue *q,
				  int memory,
				  u32 *flags)
{
	if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
		/*
		 * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
		 * but in order to avoid bugs we zero out all bits.
		 */
		*flags = 0;
	} else {
		/* Clear all unknown flags. */
		*flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
	}
}

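/*
 * vb2_reqbufs() - VIDIOC_REQBUFS helper: validates the memory type, reports
 * the queue capabilities, sanitizes the memory flags and forwards the
 * request to vb2_core_reqbufs().
 */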
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	int ret = vb2_verify_memory_type(q, req->memory, req->type);
	u32 flags = req->flags;

	fill_buf_caps(q, &req->capabilities);
	validate_memory_flags(q, req->memory, &flags);
	req->flags = flags;
	return ret ? ret : vb2_core_reqbufs(q, req->memory,
					    req->flags, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);

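/*
 * vb2_prepare_buf() - VIDIOC_PREPARE_BUF helper. Buffers that are bound to
 * a media request (V4L2_BUF_FLAG_REQUEST_FD set) cannot be prepared
 * separately and are rejected with -EINVAL.
 */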
int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
		    struct v4l2_buffer *b)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
		return -EINVAL;

	ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);

	return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);

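/*
 * vb2_create_bufs() - VIDIOC_CREATE_BUFS helper: derives the requested
 * number of planes and plane sizes from the supplied format and hands the
 * allocation off to vb2_core_create_bufs().
 */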
int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned requested_planes = 1;
	unsigned requested_sizes[VIDEO_MAX_PLANES];
	struct v4l2_format *f = &create->format;
	int ret = vb2_verify_memory_type(q, create->memory, f->type);
	unsigned i;

	fill_buf_caps(q, &create->capabilities);
	validate_memory_flags(q, create->memory, &create->flags);
	create->index = q->num_buffers;
	if (create->count == 0)
		return ret != -EBUSY ? ret : 0;

	switch (f->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		requested_planes = f->fmt.pix_mp.num_planes;
		if (requested_planes == 0 ||
		    requested_planes > VIDEO_MAX_PLANES)
			return -EINVAL;
		for (i = 0; i < requested_planes; i++)
			requested_sizes[i] =
				f->fmt.pix_mp.plane_fmt[i].sizeimage;
		break;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		requested_sizes[0] = f->fmt.pix.sizeimage;
		break;
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.vbi.samples_per_line *
			(f->fmt.vbi.count[0] + f->fmt.vbi.count[1]);
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.sliced.io_size;
		break;
	case V4L2_BUF_TYPE_SDR_CAPTURE:
	case V4L2_BUF_TYPE_SDR_OUTPUT:
		requested_sizes[0] = f->fmt.sdr.buffersize;
		break;
	case V4L2_BUF_TYPE_META_CAPTURE:
	case V4L2_BUF_TYPE_META_OUTPUT:
		requested_sizes[0] = f->fmt.meta.buffersize;
		break;
	default:
		return -EINVAL;
	}
	for (i = 0; i < requested_planes; i++)
		if (requested_sizes[i] == 0)
			return -EINVAL;
	return ret ? ret : vb2_core_create_bufs(q, create->memory,
						create->flags,
						&create->count,
						requested_planes,
						requested_sizes);
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);

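/*
 * vb2_qbuf() - VIDIOC_QBUF helper: validates the buffer and, when
 * V4L2_BUF_FLAG_REQUEST_FD is set, looks up and holds the media request,
 * then queues the buffer with vb2_core_qbuf().
 */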
int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
	     struct v4l2_buffer *b)
{
	struct media_request *req = NULL;
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
	if (ret)
		return ret;
	ret = vb2_core_qbuf(q, b->index, b, req);
	if (req)
		media_request_put(req);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_qbuf);

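/*
 * vb2_dqbuf() - VIDIOC_DQBUF helper: dequeues a buffer from the vb2 core,
 * records whether the last buffer of a capture stream was dequeued and
 * clears V4L2_BUF_FLAG_DONE before returning to userspace.
 */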
int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(q, 1, "invalid buffer type\n");
		return -EINVAL;
	}

	ret = vb2_core_dqbuf(q, NULL, b, nonblocking);

	if (!q->is_output &&
	    b->flags & V4L2_BUF_FLAG_DONE &&
	    b->flags & V4L2_BUF_FLAG_LAST)
		q->last_buffer_dequeued = true;

	/*
	 * After calling VIDIOC_DQBUF, V4L2_BUF_FLAG_DONE must be
	 * cleared.
	 */
	b->flags &= ~V4L2_BUF_FLAG_DONE;

	return ret;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);

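/*
 * vb2_streamon()/vb2_streamoff() - VIDIOC_STREAMON/STREAMOFF helpers; they
 * only refuse to run while file I/O emulation is active and otherwise
 * forward to the vb2 core.
 */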
int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamon(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamon);

int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamoff(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamoff);

int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
	return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
			       eb->plane, eb->flags);
}
EXPORT_SYMBOL_GPL(vb2_expbuf);

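/*
 * vb2_queue_init_name() - initialize a vb2 queue for V4L2 use and give it a
 * name used in the debug log. Fills in the V4L2-specific buffer ops and
 * derives is_multiplanar, is_output and copy_timestamp from the queue type
 * and timestamp flags before calling vb2_core_queue_init().
 */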
int vb2_queue_init_name(struct vb2_queue *q, const char *name)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q) ||
	    WARN_ON(q->timestamp_flags &
		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
		return -EINVAL;

	/* Warn that the driver should choose an appropriate timestamp type */
	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);

	/* Warn if vb2_memory does not match with v4l2_memory */
	if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
		|| WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
		|| WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
		return -EINVAL;

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);

	q->buf_ops = &v4l2_buf_ops;
	q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
	/*
	 * For compatibility with videobuf: if QBUF hasn't been called yet,
	 * then return EPOLLERR as well. This only affects capture queues,
	 * output queues will always initialize waiting_for_buffers to false.
	 */
	q->quirk_poll_must_check_waiting_for_buffers = true;

	if (name)
		strscpy(q->name, name, sizeof(q->name));
	else
		q->name[0] = '\0';

	return vb2_core_queue_init(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_init_name);

int vb2_queue_init(struct vb2_queue *q)
{
	return vb2_queue_init_name(q, NULL);
}
EXPORT_SYMBOL_GPL(vb2_queue_init);

void vb2_queue_release(struct vb2_queue *q)
{
	vb2_core_queue_release(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);

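/*
 * vb2_queue_change_type() - change the queue type; only allowed while the
 * queue is not busy, i.e. no buffers have been allocated.
 */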
int vb2_queue_change_type(struct vb2_queue *q, unsigned int type)
{
	if (type == q->type)
		return 0;

	if (vb2_is_busy(q))
		return -EBUSY;

	q->type = type;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_queue_change_type);

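/*
 * vb2_poll() - poll helper that combines the vb2 core poll state with
 * pending V4L2 events when the device uses struct v4l2_fh.
 */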
__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t res;

	res = vb2_core_poll(q, file, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			res |= EPOLLPRI;
	}

	return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);


/*
 * The following functions are not part of the vb2 core API, but are helper
 * functions that plug into struct v4l2_ioctl_ops and struct
 * v4l2_file_operations for drivers that let vb2 handle these operations
 * directly.
 */

int vb2_ioctl_reqbufs(struct file *file, void *priv,
			  struct v4l2_requestbuffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
	u32 flags = p->flags;

	fill_buf_caps(vdev->queue, &p->capabilities);
	validate_memory_flags(vdev->queue, p->memory, &flags);
	p->flags = flags;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	res = vb2_core_reqbufs(vdev->queue, p->memory, p->flags, &p->count);
	/*
	 * If count == 0, then the owner has released all buffers and is
	 * no longer owner of the queue. Otherwise we have a new owner.
	 */
	if (res == 0)
		vdev->queue->owner = p->count ? file->private_data : NULL;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);

int vb2_ioctl_create_bufs(struct file *file, void *priv,
			  struct v4l2_create_buffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory,
			p->format.type);

	p->index = vdev->queue->num_buffers;
	fill_buf_caps(vdev->queue, &p->capabilities);
	validate_memory_flags(vdev->queue, p->memory, &p->flags);
	/*
	 * If count == 0, then just check if memory and type are valid.
	 * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
	 */
	if (p->count == 0)
		return res != -EBUSY ? res : 0;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;

	res = vb2_create_bufs(vdev->queue, p);
	if (res == 0)
		vdev->queue->owner = file->private_data;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);

int vb2_ioctl_prepare_buf(struct file *file, void *priv,
			  struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);

int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
	return vb2_querybuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);

int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);

int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);

int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_streamon(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);

int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_streamoff(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);

int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_expbuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);

/* v4l2_file_operations helpers */

int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_mmap(vdev->queue, vma);
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);

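/*
 * _vb2_fop_release()/vb2_fop_release() - file release helpers: release the
 * queue (under the given lock, if any) when the file handle that owns the
 * queue is closed, then release the v4l2 file handle.
 */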
int _vb2_fop_release(struct file *file, struct mutex *lock)
{
	struct video_device *vdev = video_devdata(file);

	if (lock)
		mutex_lock(lock);
	if (file->private_data == vdev->queue->owner) {
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
	}
	if (lock)
		mutex_unlock(lock);
	return v4l2_fh_release(file);
}
EXPORT_SYMBOL_GPL(_vb2_fop_release);

int vb2_fop_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;

	return _vb2_fop_release(file, lock);
}
EXPORT_SYMBOL_GPL(vb2_fop_release);

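/*
 * vb2_fop_write()/vb2_fop_read() - file I/O helpers built on top of the vb2
 * read/write emulation; serialize on the queue or video device lock and,
 * once the emulation is running, make the calling file handle the queue
 * owner.
 */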
ssize_t vb2_fop_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_WRITE))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev->queue, file))
		goto exit;
	err = vb2_write(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_write);

ssize_t vb2_fop_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_READ))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev->queue, file))
		goto exit;
	err = vb2_read(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_read);

__poll_t vb2_fop_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *q = vdev->queue;
	struct mutex *lock = q->lock ? q->lock : vdev->lock;
	__poll_t res;
	void *fileio;

	/*
	 * If this helper doesn't know how to lock, then you shouldn't be using
	 * it but you should write your own.
	 */
	WARN_ON(!lock);

	if (lock && mutex_lock_interruptible(lock))
		return EPOLLERR;

	fileio = q->fileio;

	res = vb2_poll(vdev->queue, file, wait);

	/* If fileio was started, then we have a new queue owner. */
	if (!fileio && q->fileio)
		q->owner = file->private_data;
	if (lock)
		mutex_unlock(lock);
	return res;
}
EXPORT_SYMBOL_GPL(vb2_fop_poll);

#ifndef CONFIG_MMU
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif

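/*
 * vb2_video_unregister_device() - unregister the video device and release
 * its vb2 queue if the queue still has an owner, i.e. an open file handle
 * that allocated buffers.
 */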
void vb2_video_unregister_device(struct video_device *vdev)
{
	/* Check if vdev was ever registered at all */
	if (!vdev || !video_is_registered(vdev))
		return;

	/*
	 * Calling this function only makes sense if vdev->queue is set.
	 * If it is NULL, then just call video_unregister_device() instead.
	 */
	WARN_ON(!vdev->queue);

	/*
	 * Take a reference to the device since video_unregister_device()
	 * calls device_unregister(), but we don't want that to release
	 * the device since we want to clean up the queue first.
	 */
	get_device(&vdev->dev);
	video_unregister_device(vdev);
	if (vdev->queue && vdev->queue->owner) {
		struct mutex *lock = vdev->queue->lock ?
			vdev->queue->lock : vdev->lock;

		if (lock)
			mutex_lock(lock);
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
		if (lock)
			mutex_unlock(lock);
	}
	/*
	 * Now we put the device, and in most cases this will release
	 * everything.
	 */
	put_device(&vdev->dev);
}
EXPORT_SYMBOL_GPL(vb2_video_unregister_device);

/* vb2_ops helpers. Only use if vq->lock is non-NULL. */

void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);

void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);

/*
 * vb2_request_validate() - check that the request contains at least one
 * buffer and run the prepare op of every request object that provides one;
 * if any prepare fails, unprepare the objects that were already prepared.
 */
int vb2_request_validate(struct media_request *req)
{
	struct media_request_object *obj;
	int ret = 0;

	if (!vb2_request_buffer_cnt(req))
		return -ENOENT;

	list_for_each_entry(obj, &req->objects, list) {
		if (!obj->ops->prepare)
			continue;

		ret = obj->ops->prepare(obj);
		if (ret)
			break;
	}

	if (ret) {
		list_for_each_entry_continue_reverse(obj, &req->objects, list)
			if (obj->ops->unprepare)
				obj->ops->unprepare(obj);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_request_validate);

void vb2_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
		if (obj->ops->queue)
			obj->ops->queue(obj);
}
EXPORT_SYMBOL_GPL(vb2_request_queue);

MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");