/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Memory-to-memory device framework for Video for Linux 2.
 *
 * Helper functions for devices that use memory buffers for both source
 * and destination.
 *
 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */

#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H

#include <media/videobuf2-v4l2.h>

/**
 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 * @device_run:	required. Begin the actual job (transaction) inside this
 *		callback.
 *		The job does NOT have to end before this callback returns
 *		(and usually it will not). When the job finishes,
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish()
 *		has to be called.
 * @job_ready:	optional. Should return 0 if the driver does not have a job
 *		fully prepared to run yet (i.e. it will not be able to finish a
 *		transaction without sleeping). If not provided, it will be
 *		assumed that one source and one destination buffer are all
 *		that is required for the driver to perform one full transaction.
 *		This method may not sleep.
 * @job_abort:	optional. Informs the driver that it has to abort the currently
 *		running transaction as soon as possible (i.e. as soon as it can
 *		stop the device safely; e.g. in the next interrupt handler),
 *		even if the transaction would not have been finished by then.
 *		After the driver performs the necessary steps, it has to call
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as
 *		if the transaction ended normally.
 *		This function does not have to (and will usually not) wait
 *		until the device enters a state when it can be stopped.
 */
struct v4l2_m2m_ops {
	void (*device_run)(void *priv);
	int (*job_ready)(void *priv);
	void (*job_abort)(void *priv);
};

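/*
 * Illustrative sketch (not part of this header): a minimal set of callbacks
 * for a hypothetical driver "foo". device_run() only kicks the hardware;
 * completion is signalled later from the interrupt handler (see the sketch
 * after v4l2_m2m_job_finish() below). foo_ctx and foo_hw_start() are
 * assumptions, not real APIs.
 *
 *	static void foo_device_run(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *
 *		foo_hw_start(ctx);	// hypothetical hardware kick-off
 *	}
 *
 *	static const struct v4l2_m2m_ops foo_m2m_ops = {
 *		.device_run	= foo_device_run,
 *	};
 */
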
struct video_device;
struct v4l2_m2m_dev;

/**
 * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
 *	processed
 *
 * @q:		embedded struct &vb2_queue
 * @rdy_queue:	list of buffers ready to be processed
 * @rdy_spinlock: spin lock to protect the struct usage
 * @num_rdy:	number of buffers ready to be processed
 * @buffered:	is the queue buffered?
 *
 * Queue for buffers ready to be processed as soon as this
 * instance receives access to the device.
 */
struct v4l2_m2m_queue_ctx {
	struct vb2_queue	q;

	struct list_head	rdy_queue;
	spinlock_t		rdy_spinlock;
	u8			num_rdy;
	bool			buffered;
};

/**
 * struct v4l2_m2m_ctx - Memory to memory context structure
 *
 * @q_lock: struct &mutex lock
 * @new_frame: valid in the device_run callback: if true, then this
 *		starts a new frame; if false, then this is a new slice
 *		for an existing frame. This is always true unless
 *		V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which
 *		indicates slicing support.
 * @is_draining: indicates device is in draining phase
 * @last_src_buf: indicates the last source buffer for draining
 * @next_buf_last: next queued capture buffer will be tagged as last
 * @has_stopped: indicates the device has been stopped
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @cap_q_ctx: Capture (output to memory) queue context
 * @out_q_ctx: Output (input from memory) queue context
 * @queue: List of memory to memory contexts
 * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
 *		%TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
 * @finished: Wait queue used to signal when a job has finished.
 * @priv: Instance private data
 *
 * The memory to memory context is specific to a file handle, NOT to e.g.
 * a device.
 */
struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex			*q_lock;

	bool				new_frame;

	bool				is_draining;
	struct vb2_v4l2_buffer		*last_src_buf;
	bool				next_buf_last;
	bool				has_stopped;

	/* internal use only */
	struct v4l2_m2m_dev		*m2m_dev;

	struct v4l2_m2m_queue_ctx	cap_q_ctx;

	struct v4l2_m2m_queue_ctx	out_q_ctx;

	/* For device job queue */
	struct list_head		queue;
	unsigned long			job_flags;
	wait_queue_head_t		finished;

	void				*priv;
};

/**
 * struct v4l2_m2m_buffer - Memory to memory buffer
 *
 * @vb: embedded struct &vb2_v4l2_buffer
 * @list: list of m2m buffers
 */
struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;
};

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				       enum v4l2_buf_type type);

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the &v4l2_m2m_ops->device_run
 * callback has been called on the driver. To prevent recursion, it should
 * not be called directly from the &v4l2_m2m_ops->device_run callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx);

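/*
 * Illustrative sketch (not part of this header): a hypothetical interrupt
 * handler for the "foo" driver from the earlier sketch, returning the
 * processed buffers and yielding the device. It assumes a driver context
 * struct foo_ctx that embeds a struct v4l2_fh named "fh". Note that the job
 * is finished here, not in device_run().
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_dev *foo = data;
 *		struct foo_ctx *ctx = v4l2_m2m_get_curr_priv(foo->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *
 *		v4l2_m2m_job_finish(foo->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */
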
/**
 * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with
 * state and inform the framework that a job has been finished and have it
 * clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
 *
 * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this
 * function instead of job_finish() to take held buffers into account. It is
 * optional for other drivers.
 *
 * This function removes the source buffer from the ready list and returns
 * it with the given state. The same is done for the destination buffer, unless
 * it is marked 'held'. In that case the buffer is kept on the ready list.
 *
 * After that the job is finished (see job_finish()).
 *
 * This allows multiple output buffers to be used to fill in a single
 * capture buffer. This is typically used by stateless decoders where
 * multiple output buffers, each carrying e.g. one H.264 slice, contribute
 * to a single decoded frame.
 */
void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state);

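/**
 * v4l2_m2m_buf_done() - return a buffer to userspace with the given state
 *
 * @buf: pointer to struct &vb2_v4l2_buffer
 * @state: vb2 buffer state, e.g. %VB2_BUF_STATE_DONE or %VB2_BUF_STATE_ERROR
 */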
static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}

/**
 * v4l2_m2m_clear_state() - clear encoding/decoding state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void
v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx)
{
	m2m_ctx->next_buf_last = false;
	m2m_ctx->is_draining = false;
	m2m_ctx->has_stopped = false;
}

/**
 * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void
v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	m2m_ctx->next_buf_last = false;
	m2m_ctx->is_draining = false;
	m2m_ctx->has_stopped = true;
}

/**
 * v4l2_m2m_dst_buf_is_last() - return whether the next queued capture buffer
 * of the current encoding/decoding session must be tagged as the last one
 *
 * This last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to notify
 * the end of the capture session.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline bool
v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->is_draining && m2m_ctx->next_buf_last;
}

/**
 * v4l2_m2m_has_stopped() - return the current encoding/decoding session
 * stopped state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline bool
v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->has_stopped;
}

/**
 * v4l2_m2m_is_last_draining_src_buf() - return whether the given output buffer
 * is the last one queued before a stop of the current encoding/decoding
 * session was requested
 *
 * Once this buffer has been processed, the session enters the stopped state.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
static inline bool
v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx,
				  struct vb2_v4l2_buffer *vbuf)
{
	return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf;
}

/**
 * v4l2_m2m_last_buffer_done() - marks the buffer with LAST flag and DONE
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf);

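/*
 * Illustrative sketch (not part of this header): handling a drain in the
 * completion path of the hypothetical "foo" driver. If the source buffer
 * that just finished was the last one queued before a stop was requested,
 * the destination buffer is returned with V4L2_BUF_FLAG_LAST set (which also
 * marks the session as stopped); otherwise it is returned normally.
 *
 *	if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src))
 *		v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, dst);
 *	else
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 */
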
/**
 * v4l2_m2m_suspend() - stop new jobs from being run and wait for current job
 * to finish
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in the suspend hook. Stops new jobs from being run, and
 * waits for the currently running job to finish.
 */
void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_resume() - resume job running and try to run a queued job
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in the resume hook. This reverts the operation of
 * v4l2_m2m_suspend() and allows jobs to be run again. It also tries to run a
 * queued job if there is one.
 */
void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @reqbufs: pointer to struct &v4l2_requestbuffers
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf);

/**
 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @create: pointer to struct &v4l2_create_buffers
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @eb: pointer to struct &v4l2_exportbuffer
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_exportbuffer *eb);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type);

/**
 * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding
 * session state when a start of streaming of a video queue is requested
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @q: pointer to struct &vb2_queue
 */
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					   struct vb2_queue *q);

/**
 * v4l2_m2m_update_stop_streaming_state() - update the encoding/decoding
 * session state when a stop of streaming of a video queue is requested
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @q: pointer to struct &vb2_queue
 */
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					  struct vb2_queue *q);

/**
 * v4l2_m2m_encoder_cmd() - execute an encoder command
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @ec: pointer to the encoder command
 */
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_encoder_cmd *ec);

/**
 * v4l2_m2m_decoder_cmd() - execute a decoder command
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @dc: pointer to the decoder command
 */
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_decoder_cmd *dc);

/**
 * v4l2_m2m_poll() - poll replacement for m2m devices
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @wait: pointer to struct &poll_table_struct
 *
 * Call from the driver's poll() function. Polls both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this indicates
 * that a non-blocking write can be performed; if one is available from the
 * destination queue, a non-blocking read can be performed.
 */
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vma: pointer to struct &vm_area_struct
 *
 * Call from the driver's mmap() function. Handles mmap() for both queues
 * seamlessly for the videobuf2 layer, which receives normal per-queue offsets
 * and proper vb2 queue pointers. The differentiation is made outside vb2 by
 * adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to vb2. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma);

#ifndef CONFIG_MMU
unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags);
#endif

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * @m2m_ops: pointer to struct v4l2_m2m_ops
 *
 * Usually called from driver's ``probe()`` function.
 *
 * Return: an opaque pointer to the internal data to handle M2M context
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);

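/*
 * Illustrative sketch (not part of this header): registering the m2m
 * framework in a hypothetical probe() function of the "foo" driver. On
 * failure an ERR_PTR() value is returned, so IS_ERR()/PTR_ERR() apply.
 *
 *	foo->m2m_dev = v4l2_m2m_init(&foo_m2m_ops);
 *	if (IS_ERR(foo->m2m_dev))
 *		return PTR_ERR(foo->m2m_dev);
 */
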
#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
			struct video_device *vdev, int function);
#else
static inline void
v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
}

static inline int
v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	return 0;
}
#endif

/**
 * v4l2_m2m_release() - cleans up and frees an m2m_dev structure
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Usually called from driver's ``remove()`` function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize an m2m context
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function
 *	to be used for initializing vb2 queues
 *
 * Usually called from driver's ``open()`` function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));

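/*
 * Illustrative sketch (not part of this header): creating the per-file m2m
 * context in a hypothetical open() function, with a queue_init callback that
 * sets up both vb2 queues. foo_qops and the chosen buffer types are
 * assumptions; fields not shown (mem_ops, lock, dev, ...) must be filled in
 * as for any other vb2 queue.
 *
 *	static int foo_queue_init(void *priv, struct vb2_queue *src_vq,
 *				  struct vb2_queue *dst_vq)
 *	{
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->drv_priv = priv;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->ops = &foo_qops;
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		dst_vq->drv_priv = priv;
 *		dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		dst_vq->ops = &foo_qops;
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	// in foo_open():
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(foo->m2m_dev, ctx,
 *					    foo_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 */
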
static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->out_q_ctx.buffered = buffered;
}

static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->cap_q_ctx.buffered = buffered;
}

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 *
 * Call from the vb2_ops buf_queue callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf);

/**
 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
 * use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->out_q_ctx.num_rdy;
}

/**
 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
 * ready for use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->cap_q_ctx.num_rdy;
}

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_last_src_buf() - return last source buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_src_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->out_q_ctx.q;
}

/**
 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->cap_q_ctx.q;
}

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 * buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 * ready buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}

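/*
 * Illustrative sketch (not part of this header): returning all ready buffers
 * in a hypothetical stop_streaming() vb2 callback, so that userspace gets
 * them back when streaming stops.
 *
 *	static void foo_stop_streaming(struct vb2_queue *q)
 *	{
 *		struct foo_ctx *ctx = vb2_get_drv_priv(q);
 *		struct vb2_v4l2_buffer *vbuf;
 *
 *		for (;;) {
 *			if (V4L2_TYPE_IS_OUTPUT(q->type))
 *				vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *			else
 *				vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *			if (!vbuf)
 *				break;
 *			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
 *		}
 *	}
 */
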
/**
 * v4l2_m2m_buf_remove_by_buf() - take off exact buffer from the list of ready
 * buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @vbuf: the buffer to be removed
 */
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf);

/**
 * v4l2_m2m_src_buf_remove_by_buf() - take off exact source buffer from the list
 * of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf);
}

/**
 * v4l2_m2m_dst_buf_remove_by_buf() - take off exact destination buffer from the
 * list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf);
}

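/**
 * v4l2_m2m_buf_remove_by_idx() - take off the buffer with the given index from
 * the list of ready buffers and return it, or NULL if it was not found
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @idx: index of the buffer to be removed
 */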
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx);

static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx);
}

static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}

/**
 * v4l2_m2m_buf_copy_metadata() - copy buffer metadata from
 * the output buffer to the capture buffer
 *
 * @out_vb: the output buffer that is the source of the metadata.
 * @cap_vb: the capture buffer that will receive the metadata.
 * @copy_frame_flags: copy the KEY/B/PFRAME flags as well.
 *
 * This helper function copies the timestamp, timecode (if the TIMECODE
 * buffer flag was set), field and the TIMECODE, KEYFRAME, BFRAME, PFRAME
 * and TSTAMP_SRC_MASK flags from @out_vb to @cap_vb.
 *
 * If @copy_frame_flags is false, then the KEYFRAME, BFRAME and PFRAME
 * flags are not copied. This is typically needed for encoders that
 * set these bits explicitly.
 */
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags);

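/*
 * Illustrative sketch (not part of this header): in device_run() of the
 * hypothetical "foo" driver, propagating the timestamp and flags from the
 * source buffer to the destination buffer before starting the job.
 *
 *	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *	v4l2_m2m_buf_copy_metadata(src, dst, true);
 */
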
/* v4l2 request helper */

void v4l2_m2m_request_queue(struct media_request *req);

/* v4l2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
				struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
				struct v4l2_exportbuffer *eb);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
			       struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
				enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
				enum v4l2_buf_type type);
int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *fh,
			       struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *fh,
			       struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
				   struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
				   struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
					     struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);

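/*
 * Illustrative sketch (not part of this header): most m2m drivers can wire
 * these helpers straight into their ops tables; the "foo" names below are
 * hypothetical.
 *
 *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
 *		// format negotiation ioctls omitted
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 *
 *	static const struct v4l2_file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */
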
#endif /* _MEDIA_V4L2_MEM2MEM_H */