cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

v4l2-mem2mem.c (44404B)
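
For orientation before the listing: a minimal sketch, assuming a hypothetical driver (all foo_* names are illustrative and not part of this file), of how a mem2mem driver typically uses this framework. The driver supplies a struct v4l2_m2m_ops with at least .device_run, creates the device with v4l2_m2m_init(), and reports each finished transaction with v4l2_m2m_job_finish() so the framework can schedule the next job.

#include <linux/interrupt.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

/* Hypothetical driver-side context types. */
struct foo_dev {
	struct v4l2_m2m_dev *m2m_dev;
};

struct foo_ctx {
	struct v4l2_fh fh;	/* fh.m2m_ctx is set up by v4l2_m2m_ctx_init() */
	struct foo_dev *foo;
};

/* .device_run: the framework calls this once a job has been scheduled. */
static void foo_device_run(void *priv)
{
	struct foo_ctx *ctx = priv;
	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	/* Program the hardware with src/dst and start the transaction here. */
}

static const struct v4l2_m2m_ops foo_m2m_ops = {
	.device_run = foo_device_run,
};

/* Completion interrupt: return both buffers and let the next job run. */
static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_dev *foo = data;
	struct foo_ctx *ctx = v4l2_m2m_get_curr_priv(foo->m2m_dev);
	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
	v4l2_m2m_job_finish(foo->m2m_dev, ctx->fh.m2m_ctx);

	return IRQ_HANDLED;
}

In probe() such a driver would call v4l2_m2m_init(&foo_m2m_ops), and in its open() v4l2_m2m_ctx_init() with a queue_init callback; both helpers are implemented further down in this file.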


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
      4 *
      5 * Helper functions for devices that use videobuf buffers for both their
      6 * source and destination.
      7 *
      8 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
      9 * Pawel Osciak, <pawel@osciak.com>
     10 * Marek Szyprowski, <m.szyprowski@samsung.com>
     11 */
     12#include <linux/module.h>
     13#include <linux/sched.h>
     14#include <linux/slab.h>
     15
     16#include <media/media-device.h>
     17#include <media/videobuf2-v4l2.h>
     18#include <media/v4l2-mem2mem.h>
     19#include <media/v4l2-dev.h>
     20#include <media/v4l2-device.h>
     21#include <media/v4l2-fh.h>
     22#include <media/v4l2-event.h>
     23
     24MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
     25MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
     26MODULE_LICENSE("GPL");
     27
     28static bool debug;
     29module_param(debug, bool, 0644);
     30
     31#define dprintk(fmt, arg...)						\
     32	do {								\
     33		if (debug)						\
     34			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
     35	} while (0)
     36
     37
     38/* Instance is already queued on the job_queue */
     39#define TRANS_QUEUED		(1 << 0)
     40/* Instance is currently running in hardware */
     41#define TRANS_RUNNING		(1 << 1)
     42/* Instance is currently aborting */
     43#define TRANS_ABORT		(1 << 2)
     44
     45
     46/* The job queue is not running new jobs */
     47#define QUEUE_PAUSED		(1 << 0)
     48
     49
     50/* Offset base for buffers on the destination queue - used to distinguish
     51 * between source and destination buffers when mmapping - they receive the same
     52 * offsets but for different queues */
     53#define DST_QUEUE_OFF_BASE	(1 << 30)
     54
     55enum v4l2_m2m_entity_type {
     56	MEM2MEM_ENT_TYPE_SOURCE,
     57	MEM2MEM_ENT_TYPE_SINK,
     58	MEM2MEM_ENT_TYPE_PROC
     59};
     60
     61static const char * const m2m_entity_name[] = {
     62	"source",
     63	"sink",
     64	"proc"
     65};
     66
     67/**
     68 * struct v4l2_m2m_dev - per-device context
     69 * @source:		&struct media_entity pointer with the source entity
     70 *			Used only when the M2M device is registered via
     71 *			v4l2_m2m_register_media_controller().
     72 * @source_pad:		&struct media_pad with the source pad.
     73 *			Used only when the M2M device is registered via
     74 *			v4l2_m2m_register_media_controller().
     75 * @sink:		&struct media_entity pointer with the sink entity
     76 *			Used only when the M2M device is registered via
     77 *			v4l2_m2m_register_media_controller().
     78 * @sink_pad:		&struct media_pad with the sink pad.
     79 *			Used only when the M2M device is registered via
     80 *			v4l2_m2m_register_media_controller().
     81 * @proc:		&struct media_entity pointer with the M2M device itself.
     82 * @proc_pads:		&struct media_pad with the @proc pads.
     83 *			Used only when the M2M device is registered via
      84 *			v4l2_m2m_register_media_controller().
     85 * @intf_devnode:	&struct media_intf devnode pointer with the interface
      86 *			that controls the M2M device.
     87 * @curr_ctx:		currently running instance
     88 * @job_queue:		instances queued to run
     89 * @job_spinlock:	protects job_queue
     90 * @job_work:		worker to run queued jobs.
     91 * @job_queue_flags:	flags of the queue status, %QUEUE_PAUSED.
     92 * @m2m_ops:		driver callbacks
     93 */
     94struct v4l2_m2m_dev {
     95	struct v4l2_m2m_ctx	*curr_ctx;
     96#ifdef CONFIG_MEDIA_CONTROLLER
     97	struct media_entity	*source;
     98	struct media_pad	source_pad;
     99	struct media_entity	sink;
    100	struct media_pad	sink_pad;
    101	struct media_entity	proc;
    102	struct media_pad	proc_pads[2];
    103	struct media_intf_devnode *intf_devnode;
    104#endif
    105
    106	struct list_head	job_queue;
    107	spinlock_t		job_spinlock;
    108	struct work_struct	job_work;
    109	unsigned long		job_queue_flags;
    110
    111	const struct v4l2_m2m_ops *m2m_ops;
    112};
    113
    114static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
    115						enum v4l2_buf_type type)
    116{
    117	if (V4L2_TYPE_IS_OUTPUT(type))
    118		return &m2m_ctx->out_q_ctx;
    119	else
    120		return &m2m_ctx->cap_q_ctx;
    121}
    122
    123struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
    124				       enum v4l2_buf_type type)
    125{
    126	struct v4l2_m2m_queue_ctx *q_ctx;
    127
    128	q_ctx = get_queue_ctx(m2m_ctx, type);
    129	if (!q_ctx)
    130		return NULL;
    131
    132	return &q_ctx->q;
    133}
    134EXPORT_SYMBOL(v4l2_m2m_get_vq);
    135
    136struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
    137{
    138	struct v4l2_m2m_buffer *b;
    139	unsigned long flags;
    140
    141	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
    142
    143	if (list_empty(&q_ctx->rdy_queue)) {
    144		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
    145		return NULL;
    146	}
    147
    148	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
    149	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
    150	return &b->vb;
    151}
    152EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
    153
    154struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
    155{
    156	struct v4l2_m2m_buffer *b;
    157	unsigned long flags;
    158
    159	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
    160
    161	if (list_empty(&q_ctx->rdy_queue)) {
    162		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
    163		return NULL;
    164	}
    165
    166	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
    167	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
    168	return &b->vb;
    169}
    170EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);
    171
    172struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
    173{
    174	struct v4l2_m2m_buffer *b;
    175	unsigned long flags;
    176
    177	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
    178	if (list_empty(&q_ctx->rdy_queue)) {
    179		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
    180		return NULL;
    181	}
    182	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
    183	list_del(&b->list);
    184	q_ctx->num_rdy--;
    185	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
    186
    187	return &b->vb;
    188}
    189EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
    190
    191void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
    192				struct vb2_v4l2_buffer *vbuf)
    193{
    194	struct v4l2_m2m_buffer *b;
    195	unsigned long flags;
    196
    197	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
    198	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
    199	list_del(&b->list);
    200	q_ctx->num_rdy--;
    201	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
    202}
    203EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);
    204
    205struct vb2_v4l2_buffer *
    206v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
    207
    208{
    209	struct v4l2_m2m_buffer *b, *tmp;
    210	struct vb2_v4l2_buffer *ret = NULL;
    211	unsigned long flags;
    212
    213	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
    214	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
    215		if (b->vb.vb2_buf.index == idx) {
    216			list_del(&b->list);
    217			q_ctx->num_rdy--;
    218			ret = &b->vb;
    219			break;
    220		}
    221	}
    222	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
    223
    224	return ret;
    225}
    226EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
    227
    228/*
    229 * Scheduling handlers
    230 */
    231
    232void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
    233{
    234	unsigned long flags;
    235	void *ret = NULL;
    236
    237	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
    238	if (m2m_dev->curr_ctx)
    239		ret = m2m_dev->curr_ctx->priv;
    240	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
    241
    242	return ret;
    243}
    244EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
    245
    246/**
    247 * v4l2_m2m_try_run() - select next job to perform and run it if possible
    248 * @m2m_dev: per-device context
    249 *
    250 * Get next transaction (if present) from the waiting jobs list and run it.
    251 *
    252 * Note that this function can run on a given v4l2_m2m_ctx context,
    253 * but call .device_run for another context.
    254 */
    255static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
    256{
    257	unsigned long flags;
    258
    259	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
    260	if (NULL != m2m_dev->curr_ctx) {
    261		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
    262		dprintk("Another instance is running, won't run now\n");
    263		return;
    264	}
    265
    266	if (list_empty(&m2m_dev->job_queue)) {
    267		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
    268		dprintk("No job pending\n");
    269		return;
    270	}
    271
    272	if (m2m_dev->job_queue_flags & QUEUE_PAUSED) {
    273		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
    274		dprintk("Running new jobs is paused\n");
    275		return;
    276	}
    277
    278	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
    279				   struct v4l2_m2m_ctx, queue);
    280	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
    281	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
    282
    283	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
    284	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
    285}
    286
    287/*
    288 * __v4l2_m2m_try_queue() - queue a job
    289 * @m2m_dev: m2m device
    290 * @m2m_ctx: m2m context
    291 *
    292 * Check if this context is ready to queue a job.
    293 *
    294 * This function can run in interrupt context.
    295 */
    296static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
    297				 struct v4l2_m2m_ctx *m2m_ctx)
    298{
    299	unsigned long flags_job;
    300	struct vb2_v4l2_buffer *dst, *src;
    301
    302	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
    303
    304	if (!m2m_ctx->out_q_ctx.q.streaming
    305	    || !m2m_ctx->cap_q_ctx.q.streaming) {
    306		dprintk("Streaming needs to be on for both queues\n");
    307		return;
    308	}
    309
    310	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
    311
    312	/* If the context is aborted then don't schedule it */
    313	if (m2m_ctx->job_flags & TRANS_ABORT) {
    314		dprintk("Aborted context\n");
    315		goto job_unlock;
    316	}
    317
    318	if (m2m_ctx->job_flags & TRANS_QUEUED) {
    319		dprintk("On job queue already\n");
    320		goto job_unlock;
    321	}
    322
    323	src = v4l2_m2m_next_src_buf(m2m_ctx);
    324	dst = v4l2_m2m_next_dst_buf(m2m_ctx);
    325	if (!src && !m2m_ctx->out_q_ctx.buffered) {
    326		dprintk("No input buffers available\n");
    327		goto job_unlock;
    328	}
    329	if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
    330		dprintk("No output buffers available\n");
    331		goto job_unlock;
    332	}
    333
    334	m2m_ctx->new_frame = true;
    335
    336	if (src && dst && dst->is_held &&
    337	    dst->vb2_buf.copied_timestamp &&
    338	    dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
    339		dprintk("Timestamp mismatch, returning held capture buffer\n");
    340		dst->is_held = false;
    341		v4l2_m2m_dst_buf_remove(m2m_ctx);
    342		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
    343		dst = v4l2_m2m_next_dst_buf(m2m_ctx);
    344
    345		if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
    346			dprintk("No output buffers available after returning held buffer\n");
    347			goto job_unlock;
    348		}
    349	}
    350
    351	if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
    352			   VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
    353		m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
    354			dst->vb2_buf.timestamp != src->vb2_buf.timestamp;
    355
    356	if (m2m_ctx->has_stopped) {
    357		dprintk("Device has stopped\n");
    358		goto job_unlock;
    359	}
    360
    361	if (m2m_dev->m2m_ops->job_ready
    362		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
    363		dprintk("Driver not ready\n");
    364		goto job_unlock;
    365	}
    366
    367	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
    368	m2m_ctx->job_flags |= TRANS_QUEUED;
    369
    370job_unlock:
    371	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
    372}
    373
    374/**
    375 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
    376 * @m2m_ctx: m2m context
    377 *
    378 * Check if this context is ready to queue a job. If suitable,
    379 * run the next queued job on the mem2mem device.
    380 *
    381 * This function shouldn't run in interrupt context.
    382 *
    383 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
    384 * and then run another job for another context.
    385 */
    386void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
    387{
    388	struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;
    389
    390	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
    391	v4l2_m2m_try_run(m2m_dev);
    392}
    393EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
    394
    395/**
     396 * v4l2_m2m_device_run_work() - run pending jobs for the m2m device
    397 * @work: Work structure used for scheduling the execution of this function.
    398 */
    399static void v4l2_m2m_device_run_work(struct work_struct *work)
    400{
    401	struct v4l2_m2m_dev *m2m_dev =
    402		container_of(work, struct v4l2_m2m_dev, job_work);
    403
    404	v4l2_m2m_try_run(m2m_dev);
    405}
    406
    407/**
    408 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
    409 * @m2m_ctx: m2m context with jobs to be canceled
    410 *
     411 * In case streamoff or release is called on any context:
     412 * 1] If the context is currently running, then the job will be aborted
    413 * 2] If the context is queued, then the context will be removed from
    414 *    the job_queue
    415 */
    416static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
    417{
    418	struct v4l2_m2m_dev *m2m_dev;
    419	unsigned long flags;
    420
    421	m2m_dev = m2m_ctx->m2m_dev;
    422	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
    423
    424	m2m_ctx->job_flags |= TRANS_ABORT;
    425	if (m2m_ctx->job_flags & TRANS_RUNNING) {
    426		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
    427		if (m2m_dev->m2m_ops->job_abort)
    428			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
    429		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
    430		wait_event(m2m_ctx->finished,
    431				!(m2m_ctx->job_flags & TRANS_RUNNING));
    432	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
    433		list_del(&m2m_ctx->queue);
    434		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
    435		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
    436		dprintk("m2m_ctx: %p had been on queue and was removed\n",
    437			m2m_ctx);
    438	} else {
    439		/* Do nothing, was not on queue/running */
    440		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
    441	}
    442}
    443
    444/*
    445 * Schedule the next job, called from v4l2_m2m_job_finish() or
    446 * v4l2_m2m_buf_done_and_job_finish().
    447 */
    448static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
    449				       struct v4l2_m2m_ctx *m2m_ctx)
    450{
    451	/*
    452	 * This instance might have more buffers ready, but since we do not
    453	 * allow more than one job on the job_queue per instance, each has
    454	 * to be scheduled separately after the previous one finishes.
    455	 */
    456	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
    457
    458	/*
    459	 * We might be running in atomic context,
    460	 * but the job must be run in non-atomic context.
    461	 */
    462	schedule_work(&m2m_dev->job_work);
    463}
    464
    465/*
    466 * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
    467 * v4l2_m2m_buf_done_and_job_finish().
    468 */
    469static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
    470				 struct v4l2_m2m_ctx *m2m_ctx)
    471{
    472	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
    473		dprintk("Called by an instance not currently running\n");
    474		return false;
    475	}
    476
    477	list_del(&m2m_dev->curr_ctx->queue);
    478	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
    479	wake_up(&m2m_dev->curr_ctx->finished);
    480	m2m_dev->curr_ctx = NULL;
    481	return true;
    482}
    483
    484void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
    485			 struct v4l2_m2m_ctx *m2m_ctx)
    486{
    487	unsigned long flags;
    488	bool schedule_next;
    489
    490	/*
    491	 * This function should not be used for drivers that support
    492	 * holding capture buffers. Those should use
    493	 * v4l2_m2m_buf_done_and_job_finish() instead.
    494	 */
    495	WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
    496		VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
    497	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
    498	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
    499	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
    500
    501	if (schedule_next)
    502		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
    503}
    504EXPORT_SYMBOL(v4l2_m2m_job_finish);
    505
    506void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
    507				      struct v4l2_m2m_ctx *m2m_ctx,
    508				      enum vb2_buffer_state state)
    509{
    510	struct vb2_v4l2_buffer *src_buf, *dst_buf;
    511	bool schedule_next = false;
    512	unsigned long flags;
    513
    514	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
    515	src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
    516	dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
    517
    518	if (WARN_ON(!src_buf || !dst_buf))
    519		goto unlock;
    520	dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
    521	if (!dst_buf->is_held) {
    522		v4l2_m2m_dst_buf_remove(m2m_ctx);
    523		v4l2_m2m_buf_done(dst_buf, state);
    524	}
    525	/*
    526	 * If the request API is being used, returning the OUTPUT
     527	 * (src) buffer will wake up any process waiting on the
    528	 * request file descriptor.
    529	 *
    530	 * Therefore, return the CAPTURE (dst) buffer first,
    531	 * to avoid signalling the request file descriptor
    532	 * before the CAPTURE buffer is done.
    533	 */
    534	v4l2_m2m_buf_done(src_buf, state);
    535	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
    536unlock:
    537	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
    538
    539	if (schedule_next)
    540		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
    541}
    542EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
    543
    544void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev)
    545{
    546	unsigned long flags;
    547	struct v4l2_m2m_ctx *curr_ctx;
    548
    549	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
    550	m2m_dev->job_queue_flags |= QUEUE_PAUSED;
    551	curr_ctx = m2m_dev->curr_ctx;
    552	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
    553
    554	if (curr_ctx)
    555		wait_event(curr_ctx->finished,
    556			   !(curr_ctx->job_flags & TRANS_RUNNING));
    557}
    558EXPORT_SYMBOL(v4l2_m2m_suspend);
    559
    560void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev)
    561{
    562	unsigned long flags;
    563
    564	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
    565	m2m_dev->job_queue_flags &= ~QUEUE_PAUSED;
    566	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
    567
    568	v4l2_m2m_try_run(m2m_dev);
    569}
    570EXPORT_SYMBOL(v4l2_m2m_resume);
    571
    572int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
    573		     struct v4l2_requestbuffers *reqbufs)
    574{
    575	struct vb2_queue *vq;
    576	int ret;
    577
    578	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
    579	ret = vb2_reqbufs(vq, reqbufs);
     580	/* If count == 0, then the owner has released all buffers and is
     581	   no longer the owner of the queue. Otherwise we have an owner. */
    582	if (ret == 0)
    583		vq->owner = reqbufs->count ? file->private_data : NULL;
    584
    585	return ret;
    586}
    587EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
    588
    589static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
    590				       struct v4l2_buffer *buf)
    591{
    592	/* Adjust MMAP memory offsets for the CAPTURE queue */
    593	if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) {
    594		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
    595			unsigned int i;
    596
    597			for (i = 0; i < buf->length; ++i)
    598				buf->m.planes[i].m.mem_offset
    599					+= DST_QUEUE_OFF_BASE;
    600		} else {
    601			buf->m.offset += DST_QUEUE_OFF_BASE;
    602		}
    603	}
    604}
    605
    606int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
    607		      struct v4l2_buffer *buf)
    608{
    609	struct vb2_queue *vq;
    610	int ret;
    611
    612	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
    613	ret = vb2_querybuf(vq, buf);
    614	if (ret)
    615		return ret;
    616
    617	/* Adjust MMAP memory offsets for the CAPTURE queue */
    618	v4l2_m2m_adjust_mem_offset(vq, buf);
    619
    620	return 0;
    621}
    622EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
    623
    624/*
    625 * This will add the LAST flag and mark the buffer management
    626 * state as stopped.
    627 * This is called when the last capture buffer must be flagged as LAST
    628 * in draining mode from the encoder/decoder driver buf_queue() callback
    629 * or from v4l2_update_last_buf_state() when a capture buffer is available.
    630 */
    631void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
    632			       struct vb2_v4l2_buffer *vbuf)
    633{
    634	vbuf->flags |= V4L2_BUF_FLAG_LAST;
    635	vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
    636
    637	v4l2_m2m_mark_stopped(m2m_ctx);
    638}
    639EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done);
    640
    641/* When stop command is issued, update buffer management state */
    642static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx)
    643{
    644	struct vb2_v4l2_buffer *next_dst_buf;
    645
    646	if (m2m_ctx->is_draining)
    647		return -EBUSY;
    648
    649	if (m2m_ctx->has_stopped)
    650		return 0;
    651
    652	m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx);
    653	m2m_ctx->is_draining = true;
    654
    655	/*
    656	 * The processing of the last output buffer queued before
    657	 * the STOP command is expected to mark the buffer management
    658	 * state as stopped with v4l2_m2m_mark_stopped().
    659	 */
    660	if (m2m_ctx->last_src_buf)
    661		return 0;
    662
    663	/*
    664	 * In case the output queue is empty, try to mark the last capture
    665	 * buffer as LAST.
    666	 */
    667	next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
    668	if (!next_dst_buf) {
    669		/*
    670		 * Wait for the next queued one in encoder/decoder driver
    671		 * buf_queue() callback using the v4l2_m2m_dst_buf_is_last()
    672		 * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet
    673		 * streaming.
    674		 */
    675		m2m_ctx->next_buf_last = true;
    676		return 0;
    677	}
    678
    679	v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf);
    680
    681	return 0;
    682}
    683
    684/*
    685 * Updates the encoding/decoding buffer management state, should
     686 * be called from the encoder/decoder driver's start_streaming()
    687 */
    688void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
    689					   struct vb2_queue *q)
    690{
    691	/* If start streaming again, untag the last output buffer */
    692	if (V4L2_TYPE_IS_OUTPUT(q->type))
    693		m2m_ctx->last_src_buf = NULL;
    694}
    695EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state);
    696
    697/*
    698 * Updates the encoding/decoding buffer management state, should
     699 * be called from the encoder/decoder driver's stop_streaming()
    700 */
    701void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
    702					  struct vb2_queue *q)
    703{
    704	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
    705		/*
     706		 * If in draining state, either mark the next dst buffer as
     707		 * done now, or flag the next one to be marked as done later:
     708		 * in the encoder/decoder driver buf_queue() callback using
     709		 * the v4l2_m2m_dst_buf_is_last() helper, or in v4l2_m2m_qbuf()
     710		 * if the encoder/decoder is not yet streaming.
    711		 */
    712		if (m2m_ctx->is_draining) {
    713			struct vb2_v4l2_buffer *next_dst_buf;
    714
    715			m2m_ctx->last_src_buf = NULL;
    716			next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
    717			if (!next_dst_buf)
    718				m2m_ctx->next_buf_last = true;
    719			else
    720				v4l2_m2m_last_buffer_done(m2m_ctx,
    721							  next_dst_buf);
    722		}
    723	} else {
    724		v4l2_m2m_clear_state(m2m_ctx);
    725	}
    726}
    727EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state);
    728
    729static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx,
    730					 struct vb2_queue *q)
    731{
    732	struct vb2_buffer *vb;
    733	struct vb2_v4l2_buffer *vbuf;
    734	unsigned int i;
    735
    736	if (WARN_ON(q->is_output))
    737		return;
    738	if (list_empty(&q->queued_list))
    739		return;
    740
    741	vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry);
    742	for (i = 0; i < vb->num_planes; i++)
    743		vb2_set_plane_payload(vb, i, 0);
    744
    745	/*
    746	 * Since the buffer hasn't been queued to the ready queue,
     747	 * mark it active and owned before marking it LAST and DONE.
    748	 */
    749	vb->state = VB2_BUF_STATE_ACTIVE;
    750	atomic_inc(&q->owned_by_drv_count);
    751
    752	vbuf = to_vb2_v4l2_buffer(vb);
    753	vbuf->field = V4L2_FIELD_NONE;
    754
    755	v4l2_m2m_last_buffer_done(m2m_ctx, vbuf);
    756}
    757
    758int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
    759		  struct v4l2_buffer *buf)
    760{
    761	struct video_device *vdev = video_devdata(file);
    762	struct vb2_queue *vq;
    763	int ret;
    764
    765	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
    766	if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
    767	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
    768		dprintk("%s: requests cannot be used with capture buffers\n",
    769			__func__);
    770		return -EPERM;
    771	}
    772
    773	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
    774	if (ret)
    775		return ret;
    776
    777	/* Adjust MMAP memory offsets for the CAPTURE queue */
    778	v4l2_m2m_adjust_mem_offset(vq, buf);
    779
    780	/*
    781	 * If the capture queue is streaming, but streaming hasn't started
     782	 * on the device and it was asked to stop, mark the previously queued
     783	 * buffer as DONE with the LAST flag since it won't be queued on the
    784	 * device.
    785	 */
    786	if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
    787	    vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) &&
    788	   (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx)))
    789		v4l2_m2m_force_last_buf_done(m2m_ctx, vq);
    790	else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
    791		v4l2_m2m_try_schedule(m2m_ctx);
    792
    793	return 0;
    794}
    795EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
    796
    797int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
    798		   struct v4l2_buffer *buf)
    799{
    800	struct vb2_queue *vq;
    801	int ret;
    802
    803	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
    804	ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
    805	if (ret)
    806		return ret;
    807
    808	/* Adjust MMAP memory offsets for the CAPTURE queue */
    809	v4l2_m2m_adjust_mem_offset(vq, buf);
    810
    811	return 0;
    812}
    813EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
    814
    815int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
    816			 struct v4l2_buffer *buf)
    817{
    818	struct video_device *vdev = video_devdata(file);
    819	struct vb2_queue *vq;
    820	int ret;
    821
    822	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
    823	ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
    824	if (ret)
    825		return ret;
    826
    827	/* Adjust MMAP memory offsets for the CAPTURE queue */
    828	v4l2_m2m_adjust_mem_offset(vq, buf);
    829
    830	return 0;
    831}
    832EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
    833
    834int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
    835			 struct v4l2_create_buffers *create)
    836{
    837	struct vb2_queue *vq;
    838
    839	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
    840	return vb2_create_bufs(vq, create);
    841}
    842EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
    843
    844int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
    845		  struct v4l2_exportbuffer *eb)
    846{
    847	struct vb2_queue *vq;
    848
    849	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
    850	return vb2_expbuf(vq, eb);
    851}
    852EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
    853
    854int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
    855		      enum v4l2_buf_type type)
    856{
    857	struct vb2_queue *vq;
    858	int ret;
    859
    860	vq = v4l2_m2m_get_vq(m2m_ctx, type);
    861	ret = vb2_streamon(vq, type);
    862	if (!ret)
    863		v4l2_m2m_try_schedule(m2m_ctx);
    864
    865	return ret;
    866}
    867EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
    868
    869int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
    870		       enum v4l2_buf_type type)
    871{
    872	struct v4l2_m2m_dev *m2m_dev;
    873	struct v4l2_m2m_queue_ctx *q_ctx;
    874	unsigned long flags_job, flags;
    875	int ret;
    876
    877	/* wait until the current context is dequeued from job_queue */
    878	v4l2_m2m_cancel_job(m2m_ctx);
    879
    880	q_ctx = get_queue_ctx(m2m_ctx, type);
    881	ret = vb2_streamoff(&q_ctx->q, type);
    882	if (ret)
    883		return ret;
    884
    885	m2m_dev = m2m_ctx->m2m_dev;
    886	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
    887	/* We should not be scheduled anymore, since we're dropping a queue. */
    888	if (m2m_ctx->job_flags & TRANS_QUEUED)
    889		list_del(&m2m_ctx->queue);
    890	m2m_ctx->job_flags = 0;
    891
    892	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
    893	/* Drop queue, since streamoff returns device to the same state as after
    894	 * calling reqbufs. */
    895	INIT_LIST_HEAD(&q_ctx->rdy_queue);
    896	q_ctx->num_rdy = 0;
    897	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
    898
    899	if (m2m_dev->curr_ctx == m2m_ctx) {
    900		m2m_dev->curr_ctx = NULL;
    901		wake_up(&m2m_ctx->finished);
    902	}
    903	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
    904
    905	return 0;
    906}
    907EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
    908
    909static __poll_t v4l2_m2m_poll_for_data(struct file *file,
    910				       struct v4l2_m2m_ctx *m2m_ctx,
    911				       struct poll_table_struct *wait)
    912{
    913	struct vb2_queue *src_q, *dst_q;
    914	__poll_t rc = 0;
    915	unsigned long flags;
    916
    917	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
    918	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
    919
    920	/*
     921	 * There has to be at least one buffer queued on each queued_list, which
     922	 * means a buffer is either in the driver already or waiting for the
     923	 * driver to claim it and start processing.
    924	 */
    925	if ((!src_q->streaming || src_q->error ||
    926	     list_empty(&src_q->queued_list)) &&
    927	    (!dst_q->streaming || dst_q->error ||
    928	     list_empty(&dst_q->queued_list)))
    929		return EPOLLERR;
    930
    931	spin_lock_irqsave(&src_q->done_lock, flags);
    932	if (!list_empty(&src_q->done_list))
    933		rc |= EPOLLOUT | EPOLLWRNORM;
    934	spin_unlock_irqrestore(&src_q->done_lock, flags);
    935
    936	spin_lock_irqsave(&dst_q->done_lock, flags);
    937	/*
    938	 * If the last buffer was dequeued from the capture queue, signal
    939	 * userspace. DQBUF(CAPTURE) will return -EPIPE.
    940	 */
    941	if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued)
    942		rc |= EPOLLIN | EPOLLRDNORM;
    943	spin_unlock_irqrestore(&dst_q->done_lock, flags);
    944
    945	return rc;
    946}
    947
    948__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
    949		       struct poll_table_struct *wait)
    950{
    951	struct video_device *vfd = video_devdata(file);
    952	struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx);
    953	struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
    954	__poll_t req_events = poll_requested_events(wait);
    955	__poll_t rc = 0;
    956
    957	/*
    958	 * poll_wait() MUST be called on the first invocation on all the
    959	 * potential queues of interest, even if we are not interested in their
    960	 * events during this first call. Failure to do so will result in
     961	 * events during this first call. Failure to do so will result in a
     962	 * queue's events being ignored because the poll_table won't be capable
    963	 */
    964	poll_wait(file, &src_q->done_wq, wait);
    965	poll_wait(file, &dst_q->done_wq, wait);
    966
    967	if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
    968		rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);
    969
    970	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
    971		struct v4l2_fh *fh = file->private_data;
    972
    973		poll_wait(file, &fh->wait, wait);
    974		if (v4l2_event_pending(fh))
    975			rc |= EPOLLPRI;
    976	}
    977
    978	return rc;
    979}
    980EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
    981
    982int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
    983			 struct vm_area_struct *vma)
    984{
    985	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
    986	struct vb2_queue *vq;
    987
    988	if (offset < DST_QUEUE_OFF_BASE) {
    989		vq = v4l2_m2m_get_src_vq(m2m_ctx);
    990	} else {
    991		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
    992		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
    993	}
    994
    995	return vb2_mmap(vq, vma);
    996}
    997EXPORT_SYMBOL(v4l2_m2m_mmap);
    998
    999#ifndef CONFIG_MMU
   1000unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
   1001					 unsigned long len, unsigned long pgoff,
   1002					 unsigned long flags)
   1003{
   1004	struct v4l2_fh *fh = file->private_data;
   1005	unsigned long offset = pgoff << PAGE_SHIFT;
   1006	struct vb2_queue *vq;
   1007
   1008	if (offset < DST_QUEUE_OFF_BASE) {
   1009		vq = v4l2_m2m_get_src_vq(fh->m2m_ctx);
   1010	} else {
   1011		vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx);
   1012		pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
   1013	}
   1014
   1015	return vb2_get_unmapped_area(vq, addr, len, pgoff, flags);
   1016}
   1017EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area);
   1018#endif
   1019
   1020#if defined(CONFIG_MEDIA_CONTROLLER)
   1021void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
   1022{
   1023	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
   1024	media_devnode_remove(m2m_dev->intf_devnode);
   1025
   1026	media_entity_remove_links(m2m_dev->source);
   1027	media_entity_remove_links(&m2m_dev->sink);
   1028	media_entity_remove_links(&m2m_dev->proc);
   1029	media_device_unregister_entity(m2m_dev->source);
   1030	media_device_unregister_entity(&m2m_dev->sink);
   1031	media_device_unregister_entity(&m2m_dev->proc);
   1032	kfree(m2m_dev->source->name);
   1033	kfree(m2m_dev->sink.name);
   1034	kfree(m2m_dev->proc.name);
   1035}
   1036EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);
   1037
   1038static int v4l2_m2m_register_entity(struct media_device *mdev,
   1039	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
   1040	struct video_device *vdev, int function)
   1041{
   1042	struct media_entity *entity;
   1043	struct media_pad *pads;
   1044	char *name;
   1045	unsigned int len;
   1046	int num_pads;
   1047	int ret;
   1048
   1049	switch (type) {
   1050	case MEM2MEM_ENT_TYPE_SOURCE:
   1051		entity = m2m_dev->source;
   1052		pads = &m2m_dev->source_pad;
   1053		pads[0].flags = MEDIA_PAD_FL_SOURCE;
   1054		num_pads = 1;
   1055		break;
   1056	case MEM2MEM_ENT_TYPE_SINK:
   1057		entity = &m2m_dev->sink;
   1058		pads = &m2m_dev->sink_pad;
   1059		pads[0].flags = MEDIA_PAD_FL_SINK;
   1060		num_pads = 1;
   1061		break;
   1062	case MEM2MEM_ENT_TYPE_PROC:
   1063		entity = &m2m_dev->proc;
   1064		pads = m2m_dev->proc_pads;
   1065		pads[0].flags = MEDIA_PAD_FL_SINK;
   1066		pads[1].flags = MEDIA_PAD_FL_SOURCE;
   1067		num_pads = 2;
   1068		break;
   1069	default:
   1070		return -EINVAL;
   1071	}
   1072
   1073	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
   1074	if (type != MEM2MEM_ENT_TYPE_PROC) {
   1075		entity->info.dev.major = VIDEO_MAJOR;
   1076		entity->info.dev.minor = vdev->minor;
   1077	}
   1078	len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
   1079	name = kmalloc(len, GFP_KERNEL);
   1080	if (!name)
   1081		return -ENOMEM;
   1082	snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
   1083	entity->name = name;
   1084	entity->function = function;
   1085
   1086	ret = media_entity_pads_init(entity, num_pads, pads);
   1087	if (ret)
   1088		return ret;
   1089	ret = media_device_register_entity(mdev, entity);
   1090	if (ret)
   1091		return ret;
   1092
   1093	return 0;
   1094}
   1095
   1096int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
   1097		struct video_device *vdev, int function)
   1098{
   1099	struct media_device *mdev = vdev->v4l2_dev->mdev;
   1100	struct media_link *link;
   1101	int ret;
   1102
   1103	if (!mdev)
   1104		return 0;
   1105
    1106	/* A memory-to-memory device consists of two
    1107	 * DMA engine entities and one video processing entity.
    1108	 * The DMA engine entities are linked to a V4L interface.
    1109	 */
   1110
   1111	/* Create the three entities with their pads */
   1112	m2m_dev->source = &vdev->entity;
   1113	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
   1114			MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
   1115	if (ret)
   1116		return ret;
   1117	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
   1118			MEM2MEM_ENT_TYPE_PROC, vdev, function);
   1119	if (ret)
   1120		goto err_rel_entity0;
   1121	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
   1122			MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
   1123	if (ret)
   1124		goto err_rel_entity1;
   1125
   1126	/* Connect the three entities */
   1127	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
   1128			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
   1129	if (ret)
   1130		goto err_rel_entity2;
   1131
   1132	ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
   1133			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
   1134	if (ret)
   1135		goto err_rm_links0;
   1136
   1137	/* Create video interface */
   1138	m2m_dev->intf_devnode = media_devnode_create(mdev,
   1139			MEDIA_INTF_T_V4L_VIDEO, 0,
   1140			VIDEO_MAJOR, vdev->minor);
   1141	if (!m2m_dev->intf_devnode) {
   1142		ret = -ENOMEM;
   1143		goto err_rm_links1;
   1144	}
   1145
   1146	/* Connect the two DMA engines to the interface */
   1147	link = media_create_intf_link(m2m_dev->source,
   1148			&m2m_dev->intf_devnode->intf,
   1149			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
   1150	if (!link) {
   1151		ret = -ENOMEM;
   1152		goto err_rm_devnode;
   1153	}
   1154
   1155	link = media_create_intf_link(&m2m_dev->sink,
   1156			&m2m_dev->intf_devnode->intf,
   1157			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
   1158	if (!link) {
   1159		ret = -ENOMEM;
   1160		goto err_rm_intf_link;
   1161	}
   1162	return 0;
   1163
   1164err_rm_intf_link:
   1165	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
   1166err_rm_devnode:
   1167	media_devnode_remove(m2m_dev->intf_devnode);
   1168err_rm_links1:
   1169	media_entity_remove_links(&m2m_dev->sink);
   1170err_rm_links0:
   1171	media_entity_remove_links(&m2m_dev->proc);
   1172	media_entity_remove_links(m2m_dev->source);
   1173err_rel_entity2:
   1174	media_device_unregister_entity(&m2m_dev->proc);
   1175	kfree(m2m_dev->proc.name);
   1176err_rel_entity1:
   1177	media_device_unregister_entity(&m2m_dev->sink);
   1178	kfree(m2m_dev->sink.name);
   1179err_rel_entity0:
   1180	media_device_unregister_entity(m2m_dev->source);
   1181	kfree(m2m_dev->source->name);
   1182	return ret;
   1184}
   1185EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
   1186#endif
   1187
   1188struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
   1189{
   1190	struct v4l2_m2m_dev *m2m_dev;
   1191
   1192	if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
   1193		return ERR_PTR(-EINVAL);
   1194
   1195	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
   1196	if (!m2m_dev)
   1197		return ERR_PTR(-ENOMEM);
   1198
   1199	m2m_dev->curr_ctx = NULL;
   1200	m2m_dev->m2m_ops = m2m_ops;
   1201	INIT_LIST_HEAD(&m2m_dev->job_queue);
   1202	spin_lock_init(&m2m_dev->job_spinlock);
   1203	INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);
   1204
   1205	return m2m_dev;
   1206}
   1207EXPORT_SYMBOL_GPL(v4l2_m2m_init);
   1208
   1209void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
   1210{
   1211	kfree(m2m_dev);
   1212}
   1213EXPORT_SYMBOL_GPL(v4l2_m2m_release);
   1214
   1215struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
   1216		void *drv_priv,
   1217		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
   1218{
   1219	struct v4l2_m2m_ctx *m2m_ctx;
   1220	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
   1221	int ret;
   1222
   1223	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
   1224	if (!m2m_ctx)
   1225		return ERR_PTR(-ENOMEM);
   1226
   1227	m2m_ctx->priv = drv_priv;
   1228	m2m_ctx->m2m_dev = m2m_dev;
   1229	init_waitqueue_head(&m2m_ctx->finished);
   1230
   1231	out_q_ctx = &m2m_ctx->out_q_ctx;
   1232	cap_q_ctx = &m2m_ctx->cap_q_ctx;
   1233
   1234	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
   1235	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
   1236	spin_lock_init(&out_q_ctx->rdy_spinlock);
   1237	spin_lock_init(&cap_q_ctx->rdy_spinlock);
   1238
   1239	INIT_LIST_HEAD(&m2m_ctx->queue);
   1240
   1241	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
   1242
   1243	if (ret)
   1244		goto err;
   1245	/*
    1246	 * Both queues should use the same mutex to lock the m2m context.
   1247	 * This lock is used in some v4l2_m2m_* helpers.
   1248	 */
   1249	if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
   1250		ret = -EINVAL;
   1251		goto err;
   1252	}
   1253	m2m_ctx->q_lock = out_q_ctx->q.lock;
   1254
   1255	return m2m_ctx;
   1256err:
   1257	kfree(m2m_ctx);
   1258	return ERR_PTR(ret);
   1259}
   1260EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
   1261
   1262void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
   1263{
   1264	/* wait until the current context is dequeued from job_queue */
   1265	v4l2_m2m_cancel_job(m2m_ctx);
   1266
   1267	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
   1268	vb2_queue_release(&m2m_ctx->out_q_ctx.q);
   1269
   1270	kfree(m2m_ctx);
   1271}
   1272EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
   1273
   1274void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
   1275		struct vb2_v4l2_buffer *vbuf)
   1276{
   1277	struct v4l2_m2m_buffer *b = container_of(vbuf,
   1278				struct v4l2_m2m_buffer, vb);
   1279	struct v4l2_m2m_queue_ctx *q_ctx;
   1280	unsigned long flags;
   1281
   1282	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
   1283	if (!q_ctx)
   1284		return;
   1285
   1286	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
   1287	list_add_tail(&b->list, &q_ctx->rdy_queue);
   1288	q_ctx->num_rdy++;
   1289	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
   1290}
   1291EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
   1292
   1293void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
   1294				struct vb2_v4l2_buffer *cap_vb,
   1295				bool copy_frame_flags)
   1296{
   1297	u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
   1298
   1299	if (copy_frame_flags)
   1300		mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
   1301			V4L2_BUF_FLAG_BFRAME;
   1302
   1303	cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;
   1304
   1305	if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
   1306		cap_vb->timecode = out_vb->timecode;
   1307	cap_vb->field = out_vb->field;
   1308	cap_vb->flags &= ~mask;
   1309	cap_vb->flags |= out_vb->flags & mask;
   1310	cap_vb->vb2_buf.copied_timestamp = 1;
   1311}
   1312EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);
   1313
   1314void v4l2_m2m_request_queue(struct media_request *req)
   1315{
   1316	struct media_request_object *obj, *obj_safe;
   1317	struct v4l2_m2m_ctx *m2m_ctx = NULL;
   1318
   1319	/*
   1320	 * Queue all objects. Note that buffer objects are at the end of the
   1321	 * objects list, after all other object types. Once buffer objects
   1322	 * are queued, the driver might delete them immediately (if the driver
   1323	 * processes the buffer at once), so we have to use
   1324	 * list_for_each_entry_safe() to handle the case where the object we
   1325	 * queue is deleted.
   1326	 */
   1327	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
   1328		struct v4l2_m2m_ctx *m2m_ctx_obj;
   1329		struct vb2_buffer *vb;
   1330
   1331		if (!obj->ops->queue)
   1332			continue;
   1333
   1334		if (vb2_request_object_is_buffer(obj)) {
   1335			/* Sanity checks */
   1336			vb = container_of(obj, struct vb2_buffer, req_obj);
   1337			WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
   1338			m2m_ctx_obj = container_of(vb->vb2_queue,
   1339						   struct v4l2_m2m_ctx,
   1340						   out_q_ctx.q);
   1341			WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
   1342			m2m_ctx = m2m_ctx_obj;
   1343		}
   1344
   1345		/*
   1346		 * The buffer we queue here can in theory be immediately
   1347		 * unbound, hence the use of list_for_each_entry_safe()
   1348		 * above and why we call the queue op last.
   1349		 */
   1350		obj->ops->queue(obj);
   1351	}
   1352
   1353	WARN_ON(!m2m_ctx);
   1354
   1355	if (m2m_ctx)
   1356		v4l2_m2m_try_schedule(m2m_ctx);
   1357}
   1358EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
   1359
   1360/* Videobuf2 ioctl helpers */
   1361
   1362int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
   1363				struct v4l2_requestbuffers *rb)
   1364{
   1365	struct v4l2_fh *fh = file->private_data;
   1366
   1367	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
   1368}
   1369EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
   1370
   1371int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
   1372				struct v4l2_create_buffers *create)
   1373{
   1374	struct v4l2_fh *fh = file->private_data;
   1375
   1376	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
   1377}
   1378EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
   1379
   1380int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
   1381				struct v4l2_buffer *buf)
   1382{
   1383	struct v4l2_fh *fh = file->private_data;
   1384
   1385	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
   1386}
   1387EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
   1388
   1389int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
   1390				struct v4l2_buffer *buf)
   1391{
   1392	struct v4l2_fh *fh = file->private_data;
   1393
   1394	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
   1395}
   1396EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
   1397
   1398int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
   1399				struct v4l2_buffer *buf)
   1400{
   1401	struct v4l2_fh *fh = file->private_data;
   1402
   1403	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
   1404}
   1405EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
   1406
   1407int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
   1408			       struct v4l2_buffer *buf)
   1409{
   1410	struct v4l2_fh *fh = file->private_data;
   1411
   1412	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
   1413}
   1414EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);
   1415
   1416int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
   1417				struct v4l2_exportbuffer *eb)
   1418{
   1419	struct v4l2_fh *fh = file->private_data;
   1420
   1421	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
   1422}
   1423EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
   1424
   1425int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
   1426				enum v4l2_buf_type type)
   1427{
   1428	struct v4l2_fh *fh = file->private_data;
   1429
   1430	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
   1431}
   1432EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
   1433
   1434int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
   1435				enum v4l2_buf_type type)
   1436{
   1437	struct v4l2_fh *fh = file->private_data;
   1438
   1439	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
   1440}
   1441EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
   1442
   1443int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
   1444				   struct v4l2_encoder_cmd *ec)
   1445{
   1446	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
   1447		return -EINVAL;
   1448
   1449	ec->flags = 0;
   1450	return 0;
   1451}
   1452EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);
   1453
   1454int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
   1455				   struct v4l2_decoder_cmd *dc)
   1456{
   1457	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
   1458		return -EINVAL;
   1459
   1460	dc->flags = 0;
   1461
   1462	if (dc->cmd == V4L2_DEC_CMD_STOP) {
   1463		dc->stop.pts = 0;
   1464	} else if (dc->cmd == V4L2_DEC_CMD_START) {
   1465		dc->start.speed = 0;
   1466		dc->start.format = V4L2_DEC_START_FMT_NONE;
   1467	}
   1468	return 0;
   1469}
   1470EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);
   1471
   1472/*
    1473 * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START.
    1474 * Should be called from the encoder driver's encoder_cmd() callback.
   1475 */
   1476int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
   1477			 struct v4l2_encoder_cmd *ec)
   1478{
   1479	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
   1480		return -EINVAL;
   1481
   1482	if (ec->cmd == V4L2_ENC_CMD_STOP)
   1483		return v4l2_update_last_buf_state(m2m_ctx);
   1484
   1485	if (m2m_ctx->is_draining)
   1486		return -EBUSY;
   1487
   1488	if (m2m_ctx->has_stopped)
   1489		m2m_ctx->has_stopped = false;
   1490
   1491	return 0;
   1492}
   1493EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd);
   1494
   1495/*
    1496 * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START.
    1497 * Should be called from the decoder driver's decoder_cmd() callback.
   1498 */
   1499int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
   1500			 struct v4l2_decoder_cmd *dc)
   1501{
   1502	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
   1503		return -EINVAL;
   1504
   1505	if (dc->cmd == V4L2_DEC_CMD_STOP)
   1506		return v4l2_update_last_buf_state(m2m_ctx);
   1507
   1508	if (m2m_ctx->is_draining)
   1509		return -EBUSY;
   1510
   1511	if (m2m_ctx->has_stopped)
   1512		m2m_ctx->has_stopped = false;
   1513
   1514	return 0;
   1515}
   1516EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd);
   1517
   1518int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
   1519			       struct v4l2_encoder_cmd *ec)
   1520{
   1521	struct v4l2_fh *fh = file->private_data;
   1522
   1523	return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec);
   1524}
   1525EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd);
   1526
   1527int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
   1528			       struct v4l2_decoder_cmd *dc)
   1529{
   1530	struct v4l2_fh *fh = file->private_data;
   1531
   1532	return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
   1533}
   1534EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd);
   1535
   1536int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
   1537					     struct v4l2_decoder_cmd *dc)
   1538{
   1539	if (dc->cmd != V4L2_DEC_CMD_FLUSH)
   1540		return -EINVAL;
   1541
   1542	dc->flags = 0;
   1543
   1544	return 0;
   1545}
   1546EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);
   1547
   1548int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
   1549					 struct v4l2_decoder_cmd *dc)
   1550{
   1551	struct v4l2_fh *fh = file->private_data;
   1552	struct vb2_v4l2_buffer *out_vb, *cap_vb;
   1553	struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
   1554	unsigned long flags;
   1555	int ret;
   1556
   1557	ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
   1558	if (ret < 0)
   1559		return ret;
   1560
   1561	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
   1562	out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
   1563	cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);
   1564
   1565	/*
   1566	 * If there is an out buffer pending, then clear any HOLD flag.
   1567	 *
   1568	 * By clearing this flag we ensure that when this output
   1569	 * buffer is processed any held capture buffer will be released.
   1570	 */
   1571	if (out_vb) {
   1572		out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
   1573	} else if (cap_vb && cap_vb->is_held) {
   1574		/*
   1575		 * If there were no output buffers, but there is a
   1576		 * capture buffer that is held, then release that
   1577		 * buffer.
   1578		 */
   1579		cap_vb->is_held = false;
   1580		v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
   1581		v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
   1582	}
   1583	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
   1584
   1585	return 0;
   1586}
   1587EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);
   1588
   1589/*
    1590 * v4l2_file_operations helpers. It is assumed here that the same lock is
    1591 * used for the output and the capture buffer queue.
   1592 */
   1593
   1594int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
   1595{
   1596	struct v4l2_fh *fh = file->private_data;
   1597
   1598	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
   1599}
   1600EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
   1601
   1602__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
   1603{
   1604	struct v4l2_fh *fh = file->private_data;
   1605	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
   1606	__poll_t ret;
   1607
   1608	if (m2m_ctx->q_lock)
   1609		mutex_lock(m2m_ctx->q_lock);
   1610
   1611	ret = v4l2_m2m_poll(file, m2m_ctx, wait);
   1612
   1613	if (m2m_ctx->q_lock)
   1614		mutex_unlock(m2m_ctx->q_lock);
   1615
   1616	return ret;
   1617}
   1618EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
   1619