![]() |
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Memory-to-memory device framework for Video for Linux 2.
 *
 * Helper functions for devices that use memory buffers for both source
 * and destination.
 *
 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */

#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H

#include <media/videobuf2-v4l2.h>

/**
 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 * @device_run:	required. Begin the actual job (transaction) inside this
 *		callback.
 *		The job does NOT have to end before this callback returns
 *		(and it will be the usual case). When the job finishes,
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish()
 *		has to be called.
 * @job_ready:	optional. Should return 0 if the driver does not have a job
 *		fully prepared to run yet (i.e. it will not be able to finish a
 *		transaction without sleeping). If not provided, it will be
 *		assumed that one source and one destination buffer are all
 *		that is required for the driver to perform one full transaction.
 *		This method may not sleep.
 * @job_abort:	optional. Informs the driver that it has to abort the currently
 *		running transaction as soon as possible (i.e. as soon as it can
 *		stop the device safely; e.g. in the next interrupt handler),
 *		even if the transaction would not have been finished by then.
 *		After the driver performs the necessary steps, it has to call
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as
 *		if the transaction ended normally.
 *		This function does not have to (and will usually not) wait
 *		until the device enters a state when it can be stopped.
 */
struct v4l2_m2m_ops {
	void (*device_run)(void *priv);
	int (*job_ready)(void *priv);
	void (*job_abort)(void *priv);
};

struct video_device;
struct v4l2_m2m_dev;

/**
 * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
 *	processed
 *
 * @q:		pointer to struct &vb2_queue
 * @rdy_queue:	list of buffers (struct &v4l2_m2m_buffer, linked via their
 *		@list member) ready to be processed
 * @rdy_spinlock: spin lock to protect the struct usage
 * @num_rdy:	number of buffers ready to be processed
 * @buffered:	is the queue buffered?
 *
 * Queue for buffers ready to be processed as soon as this
 * instance receives access to the device.
 */
struct v4l2_m2m_queue_ctx {
	struct vb2_queue q;

	struct list_head rdy_queue;
	spinlock_t rdy_spinlock;
	u8 num_rdy;
	bool buffered;
};

/**
 * struct v4l2_m2m_ctx - Memory to memory context structure
 *
 * @q_lock: struct &mutex lock
 * @new_frame: valid in the device_run callback: if true, then this
 *		starts a new frame; if false, then this is a new slice
 *		for an existing frame. This is always true unless
 *		V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which
 *		indicates slicing support.
 * @is_draining: indicates device is in draining phase
 * @last_src_buf: indicate the last source buffer for draining
 * @next_buf_last: next capture queued buffer will be tagged as last
 * @has_stopped: indicate the device has been stopped
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @cap_q_ctx: Capture (output to memory) queue context
 * @out_q_ctx: Output (input from memory) queue context
 * @queue: List of memory to memory contexts
 * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
 *		%TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
 * @finished: Wait queue used to signalize when a job queue finished.
 * @priv: Instance private data
 *
 * The memory to memory context is specific to a file handle, NOT to e.g.
 * a device.
 */
struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex *q_lock;

	bool new_frame;

	bool is_draining;
	struct vb2_v4l2_buffer *last_src_buf;
	bool next_buf_last;
	bool has_stopped;

	/* internal use only */
	struct v4l2_m2m_dev *m2m_dev;

	struct v4l2_m2m_queue_ctx cap_q_ctx;

	struct v4l2_m2m_queue_ctx out_q_ctx;

	/* For device job queue */
	struct list_head queue;
	unsigned long job_flags;
	wait_queue_head_t finished;

	void *priv;
};

/**
 * struct v4l2_m2m_buffer - Memory to memory buffer
 *
 * @vb: pointer to struct &vb2_v4l2_buffer
 * @list: list of m2m buffers
 */
struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head list;
};

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type);

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after &v4l2_m2m_ops->device_run
 * callback has been called on the driver. To prevent recursion, it should
 * not be called directly from the &v4l2_m2m_ops->device_run callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with
 * state and inform the framework that a job has been finished and have it
 * clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
 *
 * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this
 * function instead of job_finish() to take held buffers into account. It is
 * optional for other drivers.
 *
 * This function removes the source buffer from the ready list and returns
 * it with the given state. The same is done for the destination buffer, unless
 * it is marked 'held'. In that case the buffer is kept on the ready list.
 *
 * After that the job is finished (see job_finish()).
 *
 * This allows for multiple output buffers to be used to fill in a single
 * capture buffer. This is typically used by stateless decoders where
 * multiple e.g. H.264 slices contribute to a single decoded frame.
 */
void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state);

/**
 * v4l2_m2m_buf_done() - complete a buffer with the given state
 *
 * @buf: pointer to struct &vb2_v4l2_buffer
 * @state: final vb2 buffer state passed to vb2_buffer_done()
 *
 * Thin wrapper around vb2_buffer_done() for the embedded vb2 buffer.
 */
static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}

/**
 * v4l2_m2m_clear_state() - clear encoding/decoding state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void
v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx)
{
	m2m_ctx->next_buf_last = false;
	m2m_ctx->is_draining = false;
	m2m_ctx->has_stopped = false;
}

/**
 * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void
v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	m2m_ctx->next_buf_last = false;
	m2m_ctx->is_draining = false;
	m2m_ctx->has_stopped = true;
}

/**
 * v4l2_m2m_dst_buf_is_last() - return the current encoding/decoding session
 * draining management state of next queued capture buffer
 *
 * This last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to notify
 * the end of the capture session.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline bool
v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->is_draining && m2m_ctx->next_buf_last;
}

/**
 * v4l2_m2m_has_stopped() - return the current encoding/decoding session
 * stopped state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline bool
v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->has_stopped;
}

/**
 * v4l2_m2m_is_last_draining_src_buf() - return the output buffer draining
 * state in the current encoding/decoding session
 *
 * This will identify the last output buffer queued before a session stop
 * was required, leading to an actual encoding/decoding session stop state
 * in the encoding/decoding process after being processed.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
static inline bool
v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx,
				  struct vb2_v4l2_buffer *vbuf)
{
	return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf;
}

/**
 * v4l2_m2m_last_buffer_done() - marks the buffer with LAST flag and DONE
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf);

/**
 * v4l2_m2m_suspend() - stop new jobs from being run and wait for current job
 * to finish
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in the suspend hook. Stop new jobs from being run, and
 * wait for current running job to finish.
 */
void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_resume() - resume job running and try to run a queued job
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in the resume hook. This reverts the operation of
 * v4l2_m2m_suspend() and allows job to be run. Also try to run a queued job if
 * there is any.
 */
void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @reqbufs: pointer to struct &v4l2_requestbuffers
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf);

/**
 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @create: pointer to struct &v4l2_create_buffers
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @eb: pointer to struct &v4l2_exportbuffer
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type);

/**
 * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding
 * session state when a start of streaming of a video queue is requested
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @q: queue
 */
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					   struct vb2_queue *q);

/**
 * v4l2_m2m_update_stop_streaming_state() -  update the encoding/decoding
 * session state when a stop of streaming of a video queue is requested
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @q: queue
 */
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					  struct vb2_queue *q);

/**
 * v4l2_m2m_encoder_cmd() - execute an encoder command
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @ec: pointer to the encoder command
 */
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_encoder_cmd *ec);

/**
 * v4l2_m2m_decoder_cmd() - execute a decoder command
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @dc: pointer to the decoder command
 */
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_decoder_cmd *dc);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @wait: pointer to struct &poll_table_struct
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while read will be
 * returned in case of the destination queue.
 */
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vma: pointer to struct &vm_area_struct
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuffer, which will receive normal per-queue offsets and
 * proper videobuf queue pointers. The differentiation is made outside videobuf
 * by adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to videobuf. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma);

#ifndef CONFIG_MMU
/* no-MMU counterpart of the mmap support above (see v4l2_m2m_mmap()) */
unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags);
#endif

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * @m2m_ops: pointer to struct v4l2_m2m_ops
 *
 * Usually called from driver's ``probe()`` function.
 *
 * Return: returns an opaque pointer to the internal data to handle M2M context
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);

#if defined(CONFIG_MEDIA_CONTROLLER)
/* register/unregister the m2m device with the media controller framework;
 * no-op stubs are provided when CONFIG_MEDIA_CONTROLLER is disabled */
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
			struct video_device *vdev, int function);
#else
static inline void
v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
}

static inline int
v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	return 0;
}
#endif

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Usually called from driver's ``remove()`` function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function
 *	to be used for initializing videobuf_queues
 *
 * Usually called from driver's ``open()`` function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));

/* mark the output (source) queue as buffered, allowing jobs to be scheduled
 * on it without queued v4l2 buffers (see v4l2_m2m_try_schedule()) */
static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->out_q_ctx.buffered = buffered;
}

/* mark the capture (destination) queue as buffered, allowing jobs to be
 * scheduled on it without queued v4l2 buffers */
static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->cap_q_ctx.buffered = buffered;
}

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 *
 * Call from videobuf_queue_ops->ops->buf_queue, videobuf_queue_ops callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf);

/**
 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
 * use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->out_q_ctx.num_rdy;
}

/**
 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
 * ready for use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->cap_q_ctx.num_rdy;
}

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_last_src_buf() - return last source buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_src_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->out_q_ctx.q;
}

/**
 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->cap_q_ctx.q;
}

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 * buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 * ready buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_buf_remove_by_buf() - take off exact buffer from the list of ready
 * buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @vbuf: the buffer to be removed
 */
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf);

/**
 * v4l2_m2m_src_buf_remove_by_buf() - take off exact source buffer from the list
 * of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf);
}

/**
 * v4l2_m2m_dst_buf_remove_by_buf() - take off exact destination buffer from the
 * list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf);
}

/* take off the buffer with the given index from the list of ready buffers
 * and return it (or NULL presumably if not found — implemented in
 * v4l2-mem2mem.c; NOTE(review): confirm the not-found return value there) */
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx);

/* source-queue variant of v4l2_m2m_buf_remove_by_idx() */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx);
}

/* destination-queue variant of v4l2_m2m_buf_remove_by_idx() */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}

/**
 * v4l2_m2m_buf_copy_metadata() - copy buffer metadata from
 * the output buffer to the capture buffer
 *
 * @out_vb: the output buffer that is the source of the metadata.
 * @cap_vb: the capture buffer that will receive the metadata.
 * @copy_frame_flags: copy the KEY/B/PFRAME flags as well.
 *
 * This helper function copies the timestamp, timecode (if the TIMECODE
 * buffer flag was set), field and the TIMECODE, KEYFRAME, BFRAME, PFRAME
 * and TSTAMP_SRC_MASK flags from @out_vb to @cap_vb.
 *
 * If @copy_frame_flags is false, then the KEYFRAME, BFRAME and PFRAME
 * flags are not copied. This is typically needed for encoders that
 * set these bits explicitly.
 */
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags);

/* v4l2 request helper */

void v4l2_m2m_request_queue(struct media_request *req);

/* v4l2 ioctl helpers: ready-made ioctl handlers that forward to the
 * v4l2_m2m_*() multiplexers above, for drivers whose file handle private
 * data embeds the m2m context */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
				struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
				struct v4l2_exportbuffer *eb);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
			       struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
				enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
				enum v4l2_buf_type type);
int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *fh,
			       struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *fh,
			       struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
				   struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
				   struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
					     struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);

#endif /* _MEDIA_V4L2_MEM2MEM_H */
[ Source navigation ] | [ Diff markup ] | [ Identifier search ] | [ general search ] |
This page was automatically generated by the 2.1.0 LXR engine. The LXR team |
![]() ![]() |