cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

videobuf2-core.c (78632B)


      1/*
      2 * videobuf2-core.c - video buffer 2 core framework
      3 *
      4 * Copyright (C) 2010 Samsung Electronics
      5 *
      6 * Author: Pawel Osciak <pawel@osciak.com>
      7 *	   Marek Szyprowski <m.szyprowski@samsung.com>
      8 *
      9 * The vb2_thread implementation was based on code from videobuf-dvb.c:
     10 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
     11 *
     12 * This program is free software; you can redistribute it and/or modify
     13 * it under the terms of the GNU General Public License as published by
     14 * the Free Software Foundation.
     15 */
     16
     17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     18
     19#include <linux/err.h>
     20#include <linux/kernel.h>
     21#include <linux/module.h>
     22#include <linux/mm.h>
     23#include <linux/poll.h>
     24#include <linux/slab.h>
     25#include <linux/sched.h>
     26#include <linux/freezer.h>
     27#include <linux/kthread.h>
     28
     29#include <media/videobuf2-core.h>
     30#include <media/v4l2-mc.h>
     31
     32#include <trace/events/vb2.h>
     33
     34static int debug;
     35module_param(debug, int, 0644);
     36
     37#define dprintk(q, level, fmt, arg...)					\
     38	do {								\
     39		if (debug >= level)					\
     40			pr_info("[%s] %s: " fmt, (q)->name, __func__,	\
     41				## arg);				\
     42	} while (0)
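
/*
 * Usage note: the log level above is tunable at runtime through the
 * module parameter, typically (module name may differ by kernel
 * config):
 *
 *   echo 2 > /sys/module/videobuf2_common/parameters/debug
 *
 * Level 1 is used for errors; level 2 and up add op tracing through
 * the call_*op() wrappers below.
 */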
     43
     44#ifdef CONFIG_VIDEO_ADV_DEBUG
     45
     46/*
     47 * If advanced debugging is on, then count how often each op is called
      48 * successfully; the counters are kept either per-buffer or per-queue.
     49 *
     50 * This makes it easy to check that the 'init' and 'cleanup'
     51 * (and variations thereof) stay balanced.
     52 */
     53
     54#define log_memop(vb, op)						\
     55	dprintk((vb)->vb2_queue, 2, "call_memop(%d, %s)%s\n",		\
     56		(vb)->index, #op,					\
     57		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")
     58
     59#define call_memop(vb, op, args...)					\
     60({									\
     61	struct vb2_queue *_q = (vb)->vb2_queue;				\
     62	int err;							\
     63									\
     64	log_memop(vb, op);						\
     65	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
     66	if (!err)							\
     67		(vb)->cnt_mem_ ## op++;					\
     68	err;								\
     69})
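
/*
 * For example, call_memop(vb, alloc, ...) expands roughly to:
 *
 *	log_memop(vb, alloc);
 *	err = _q->mem_ops->alloc ? _q->mem_ops->alloc(...) : 0;
 *	if (!err)
 *		(vb)->cnt_mem_alloc++;
 *
 * so every successful memop bumps a per-buffer counter, which
 * __vb2_queue_free() later checks for balance (alloc vs put, etc.).
 */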
     70
     71#define call_ptr_memop(op, vb, args...)					\
     72({									\
     73	struct vb2_queue *_q = (vb)->vb2_queue;				\
     74	void *ptr;							\
     75									\
     76	log_memop(vb, op);						\
     77	ptr = _q->mem_ops->op ? _q->mem_ops->op(vb, args) : NULL;	\
     78	if (!IS_ERR_OR_NULL(ptr))					\
     79		(vb)->cnt_mem_ ## op++;					\
     80	ptr;								\
     81})
     82
     83#define call_void_memop(vb, op, args...)				\
     84({									\
     85	struct vb2_queue *_q = (vb)->vb2_queue;				\
     86									\
     87	log_memop(vb, op);						\
     88	if (_q->mem_ops->op)						\
     89		_q->mem_ops->op(args);					\
     90	(vb)->cnt_mem_ ## op++;						\
     91})
     92
     93#define log_qop(q, op)							\
     94	dprintk(q, 2, "call_qop(%s)%s\n", #op,				\
     95		(q)->ops->op ? "" : " (nop)")
     96
     97#define call_qop(q, op, args...)					\
     98({									\
     99	int err;							\
    100									\
    101	log_qop(q, op);							\
    102	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
    103	if (!err)							\
    104		(q)->cnt_ ## op++;					\
    105	err;								\
    106})
    107
    108#define call_void_qop(q, op, args...)					\
    109({									\
    110	log_qop(q, op);							\
    111	if ((q)->ops->op)						\
    112		(q)->ops->op(args);					\
    113	(q)->cnt_ ## op++;						\
    114})
    115
    116#define log_vb_qop(vb, op, args...)					\
    117	dprintk((vb)->vb2_queue, 2, "call_vb_qop(%d, %s)%s\n",		\
    118		(vb)->index, #op,					\
    119		(vb)->vb2_queue->ops->op ? "" : " (nop)")
    120
    121#define call_vb_qop(vb, op, args...)					\
    122({									\
    123	int err;							\
    124									\
    125	log_vb_qop(vb, op);						\
    126	err = (vb)->vb2_queue->ops->op ?				\
    127		(vb)->vb2_queue->ops->op(args) : 0;			\
    128	if (!err)							\
    129		(vb)->cnt_ ## op++;					\
    130	err;								\
    131})
    132
    133#define call_void_vb_qop(vb, op, args...)				\
    134({									\
    135	log_vb_qop(vb, op);						\
    136	if ((vb)->vb2_queue->ops->op)					\
    137		(vb)->vb2_queue->ops->op(args);				\
    138	(vb)->cnt_ ## op++;						\
    139})
    140
    141#else
    142
    143#define call_memop(vb, op, args...)					\
    144	((vb)->vb2_queue->mem_ops->op ?					\
    145		(vb)->vb2_queue->mem_ops->op(args) : 0)
    146
    147#define call_ptr_memop(op, vb, args...)					\
    148	((vb)->vb2_queue->mem_ops->op ?					\
    149		(vb)->vb2_queue->mem_ops->op(vb, args) : NULL)
    150
    151#define call_void_memop(vb, op, args...)				\
    152	do {								\
    153		if ((vb)->vb2_queue->mem_ops->op)			\
    154			(vb)->vb2_queue->mem_ops->op(args);		\
    155	} while (0)
    156
    157#define call_qop(q, op, args...)					\
    158	((q)->ops->op ? (q)->ops->op(args) : 0)
    159
    160#define call_void_qop(q, op, args...)					\
    161	do {								\
    162		if ((q)->ops->op)					\
    163			(q)->ops->op(args);				\
    164	} while (0)
    165
    166#define call_vb_qop(vb, op, args...)					\
    167	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)
    168
    169#define call_void_vb_qop(vb, op, args...)				\
    170	do {								\
    171		if ((vb)->vb2_queue->ops->op)				\
    172			(vb)->vb2_queue->ops->op(args);			\
    173	} while (0)
    174
    175#endif
    176
    177#define call_bufop(q, op, args...)					\
    178({									\
    179	int ret = 0;							\
    180	if (q && q->buf_ops && q->buf_ops->op)				\
    181		ret = q->buf_ops->op(args);				\
    182	ret;								\
    183})
    184
    185#define call_void_bufop(q, op, args...)					\
    186({									\
    187	if (q && q->buf_ops && q->buf_ops->op)				\
    188		q->buf_ops->op(args);					\
    189})
    190
    191static void __vb2_queue_cancel(struct vb2_queue *q);
    192static void __enqueue_in_driver(struct vb2_buffer *vb);
    193
    194static const char *vb2_state_name(enum vb2_buffer_state s)
    195{
    196	static const char * const state_names[] = {
    197		[VB2_BUF_STATE_DEQUEUED] = "dequeued",
    198		[VB2_BUF_STATE_IN_REQUEST] = "in request",
    199		[VB2_BUF_STATE_PREPARING] = "preparing",
    200		[VB2_BUF_STATE_QUEUED] = "queued",
    201		[VB2_BUF_STATE_ACTIVE] = "active",
    202		[VB2_BUF_STATE_DONE] = "done",
    203		[VB2_BUF_STATE_ERROR] = "error",
    204	};
    205
    206	if ((unsigned int)(s) < ARRAY_SIZE(state_names))
    207		return state_names[s];
    208	return "unknown";
    209}
    210
    211/*
    212 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
    213 */
    214static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
    215{
    216	struct vb2_queue *q = vb->vb2_queue;
    217	void *mem_priv;
    218	int plane;
    219	int ret = -ENOMEM;
    220
    221	/*
    222	 * Allocate memory for all planes in this buffer
    223	 * NOTE: mmapped areas should be page aligned
    224	 */
    225	for (plane = 0; plane < vb->num_planes; ++plane) {
    226		/* Memops alloc requires size to be page aligned. */
    227		unsigned long size = PAGE_ALIGN(vb->planes[plane].length);
    228
    229		/* Did it wrap around? */
    230		if (size < vb->planes[plane].length)
    231			goto free;
    232
    233		mem_priv = call_ptr_memop(alloc,
    234					  vb,
    235					  q->alloc_devs[plane] ? : q->dev,
    236					  size);
    237		if (IS_ERR_OR_NULL(mem_priv)) {
    238			if (mem_priv)
    239				ret = PTR_ERR(mem_priv);
    240			goto free;
    241		}
    242
    243		/* Associate allocator private data with this plane */
    244		vb->planes[plane].mem_priv = mem_priv;
    245	}
    246
    247	return 0;
    248free:
    249	/* Free already allocated memory if one of the allocations failed */
    250	for (; plane > 0; --plane) {
    251		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
    252		vb->planes[plane - 1].mem_priv = NULL;
    253	}
    254
    255	return ret;
    256}
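
/*
 * Worked example for the wrap-around check above: on a 32-bit kernel
 * with 4 KiB pages, a plane length of 0xfffff001 would PAGE_ALIGN() to
 * 0x100000000, which truncates to 0 in an unsigned long. The resulting
 * size < length, so the request is rejected instead of allocating a
 * zero-sized area.
 */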
    257
    258/*
    259 * __vb2_buf_mem_free() - free memory of the given buffer
    260 */
    261static void __vb2_buf_mem_free(struct vb2_buffer *vb)
    262{
    263	unsigned int plane;
    264
    265	for (plane = 0; plane < vb->num_planes; ++plane) {
    266		call_void_memop(vb, put, vb->planes[plane].mem_priv);
    267		vb->planes[plane].mem_priv = NULL;
    268		dprintk(vb->vb2_queue, 3, "freed plane %d of buffer %d\n",
    269			plane, vb->index);
    270	}
    271}
    272
    273/*
    274 * __vb2_buf_userptr_put() - release userspace memory associated with
    275 * a USERPTR buffer
    276 */
    277static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
    278{
    279	unsigned int plane;
    280
    281	for (plane = 0; plane < vb->num_planes; ++plane) {
    282		if (vb->planes[plane].mem_priv)
    283			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
    284		vb->planes[plane].mem_priv = NULL;
    285	}
    286}
    287
    288/*
    289 * __vb2_plane_dmabuf_put() - release memory associated with
    290 * a DMABUF shared plane
    291 */
    292static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
    293{
    294	if (!p->mem_priv)
    295		return;
    296
    297	if (p->dbuf_mapped)
    298		call_void_memop(vb, unmap_dmabuf, p->mem_priv);
    299
    300	call_void_memop(vb, detach_dmabuf, p->mem_priv);
    301	dma_buf_put(p->dbuf);
    302	p->mem_priv = NULL;
    303	p->dbuf = NULL;
    304	p->dbuf_mapped = 0;
    305}
    306
    307/*
    308 * __vb2_buf_dmabuf_put() - release memory associated with
    309 * a DMABUF shared buffer
    310 */
    311static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
    312{
    313	unsigned int plane;
    314
    315	for (plane = 0; plane < vb->num_planes; ++plane)
    316		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
    317}
    318
    319/*
    320 * __vb2_buf_mem_prepare() - call ->prepare() on buffer's private memory
    321 * to sync caches
    322 */
    323static void __vb2_buf_mem_prepare(struct vb2_buffer *vb)
    324{
    325	unsigned int plane;
    326
    327	if (vb->synced)
    328		return;
    329
    330	vb->synced = 1;
    331	for (plane = 0; plane < vb->num_planes; ++plane)
    332		call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
    333}
    334
    335/*
    336 * __vb2_buf_mem_finish() - call ->finish on buffer's private memory
    337 * to sync caches
    338 */
    339static void __vb2_buf_mem_finish(struct vb2_buffer *vb)
    340{
    341	unsigned int plane;
    342
    343	if (!vb->synced)
    344		return;
    345
    346	vb->synced = 0;
    347	for (plane = 0; plane < vb->num_planes; ++plane)
    348		call_void_memop(vb, finish, vb->planes[plane].mem_priv);
    349}
    350
    351/*
    352 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
    353 * the buffer.
    354 */
    355static void __setup_offsets(struct vb2_buffer *vb)
    356{
    357	struct vb2_queue *q = vb->vb2_queue;
    358	unsigned int plane;
    359	unsigned long off = 0;
    360
    361	if (vb->index) {
    362		struct vb2_buffer *prev = q->bufs[vb->index - 1];
    363		struct vb2_plane *p = &prev->planes[prev->num_planes - 1];
    364
    365		off = PAGE_ALIGN(p->m.offset + p->length);
    366	}
    367
    368	for (plane = 0; plane < vb->num_planes; ++plane) {
    369		vb->planes[plane].m.offset = off;
    370
    371		dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n",
    372				vb->index, plane, off);
    373
    374		off += vb->planes[plane].length;
    375		off = PAGE_ALIGN(off);
    376	}
    377}
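
/*
 * Example of the layout produced above, assuming two buffers with two
 * planes of 5000 and 3000 bytes each on 4 KiB pages:
 *
 *	buf 0 plane 0: offset 0x0000	buf 0 plane 1: offset 0x2000
 *	buf 1 plane 0: offset 0x3000	buf 1 plane 1: offset 0x5000
 *
 * Userspace passes such a per-plane "cookie" back as the mmap() offset
 * to map the corresponding plane.
 */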
    378
    379static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
    380{
    381	/*
    382	 * DMA exporter should take care of cache syncs, so we can avoid
    383	 * explicit ->prepare()/->finish() syncs. For other ->memory types
     384	 * we always need ->prepare() and/or ->finish() cache sync.
    385	 */
    386	if (q->memory == VB2_MEMORY_DMABUF) {
    387		vb->skip_cache_sync_on_finish = 1;
    388		vb->skip_cache_sync_on_prepare = 1;
    389		return;
    390	}
    391
    392	/*
    393	 * ->finish() cache sync can be avoided when queue direction is
    394	 * TO_DEVICE.
    395	 */
    396	if (q->dma_dir == DMA_TO_DEVICE)
    397		vb->skip_cache_sync_on_finish = 1;
    398}
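
/*
 * Rationale for the DMA_TO_DEVICE case above: the device only reads
 * such buffers, so the CPU cache invalidation that ->finish() would
 * perform is unnecessary; a writeback in ->prepare() before the
 * transfer is sufficient.
 */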
    399
    400/*
    401 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
     402	 * video buffer memory for all buffers/planes on the queue and initialize the
    403 * queue
    404 *
    405 * Returns the number of buffers successfully allocated.
    406 */
    407static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
    408			     unsigned int num_buffers, unsigned int num_planes,
    409			     const unsigned plane_sizes[VB2_MAX_PLANES])
    410{
    411	unsigned int buffer, plane;
    412	struct vb2_buffer *vb;
    413	int ret;
    414
     415	/* Ensure that q->num_buffers+num_buffers does not exceed VB2_MAX_FRAME */
    416	num_buffers = min_t(unsigned int, num_buffers,
    417			    VB2_MAX_FRAME - q->num_buffers);
    418
    419	for (buffer = 0; buffer < num_buffers; ++buffer) {
    420		/* Allocate videobuf buffer structures */
    421		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
    422		if (!vb) {
    423			dprintk(q, 1, "memory alloc for buffer struct failed\n");
    424			break;
    425		}
    426
    427		vb->state = VB2_BUF_STATE_DEQUEUED;
    428		vb->vb2_queue = q;
    429		vb->num_planes = num_planes;
    430		vb->index = q->num_buffers + buffer;
    431		vb->type = q->type;
    432		vb->memory = memory;
    433		init_buffer_cache_hints(q, vb);
    434		for (plane = 0; plane < num_planes; ++plane) {
    435			vb->planes[plane].length = plane_sizes[plane];
    436			vb->planes[plane].min_length = plane_sizes[plane];
    437		}
    438		call_void_bufop(q, init_buffer, vb);
    439
    440		q->bufs[vb->index] = vb;
    441
    442		/* Allocate video buffer memory for the MMAP type */
    443		if (memory == VB2_MEMORY_MMAP) {
    444			ret = __vb2_buf_mem_alloc(vb);
    445			if (ret) {
    446				dprintk(q, 1, "failed allocating memory for buffer %d\n",
    447					buffer);
    448				q->bufs[vb->index] = NULL;
    449				kfree(vb);
    450				break;
    451			}
    452			__setup_offsets(vb);
    453			/*
    454			 * Call the driver-provided buffer initialization
    455			 * callback, if given. An error in initialization
    456			 * results in queue setup failure.
    457			 */
    458			ret = call_vb_qop(vb, buf_init, vb);
    459			if (ret) {
    460				dprintk(q, 1, "buffer %d %p initialization failed\n",
    461					buffer, vb);
    462				__vb2_buf_mem_free(vb);
    463				q->bufs[vb->index] = NULL;
    464				kfree(vb);
    465				break;
    466			}
    467		}
    468	}
    469
    470	dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n",
    471		buffer, num_planes);
    472
    473	return buffer;
    474}
    475
    476/*
    477 * __vb2_free_mem() - release all video buffer memory for a given queue
    478 */
    479static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
    480{
    481	unsigned int buffer;
    482	struct vb2_buffer *vb;
    483
    484	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
    485	     ++buffer) {
    486		vb = q->bufs[buffer];
    487		if (!vb)
    488			continue;
    489
     490		/* Free MMAP buffers or release DMABUF/USERPTR buffers */
    491		if (q->memory == VB2_MEMORY_MMAP)
    492			__vb2_buf_mem_free(vb);
    493		else if (q->memory == VB2_MEMORY_DMABUF)
    494			__vb2_buf_dmabuf_put(vb);
    495		else
    496			__vb2_buf_userptr_put(vb);
    497	}
    498}
    499
    500/*
     501 * __vb2_queue_free() - free buffers at the end of the queue: video memory and
     502 * related information. If no buffers are left, return the queue to an
     503 * uninitialized state. Might be called even if the queue has already been freed.
    504 */
    505static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
    506{
    507	unsigned int buffer;
    508
    509	/*
    510	 * Sanity check: when preparing a buffer the queue lock is released for
    511	 * a short while (see __buf_prepare for the details), which would allow
    512	 * a race with a reqbufs which can call this function. Removing the
    513	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
    514	 * check if any of the buffers is in the state PREPARING, and if so we
    515	 * just return -EAGAIN.
    516	 */
    517	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
    518	     ++buffer) {
    519		if (q->bufs[buffer] == NULL)
    520			continue;
    521		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
    522			dprintk(q, 1, "preparing buffers, cannot free\n");
    523			return -EAGAIN;
    524		}
    525	}
    526
    527	/* Call driver-provided cleanup function for each buffer, if provided */
    528	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
    529	     ++buffer) {
    530		struct vb2_buffer *vb = q->bufs[buffer];
    531
    532		if (vb && vb->planes[0].mem_priv)
    533			call_void_vb_qop(vb, buf_cleanup, vb);
    534	}
    535
    536	/* Release video buffer memory */
    537	__vb2_free_mem(q, buffers);
    538
    539#ifdef CONFIG_VIDEO_ADV_DEBUG
    540	/*
     541	 * Check that all the calls were balanced during the life-time of this
    542	 * queue. If not (or if the debug level is 1 or up), then dump the
    543	 * counters to the kernel log.
    544	 */
    545	if (q->num_buffers) {
    546		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
    547				  q->cnt_wait_prepare != q->cnt_wait_finish;
    548
    549		if (unbalanced || debug) {
    550			pr_info("counters for queue %p:%s\n", q,
    551				unbalanced ? " UNBALANCED!" : "");
    552			pr_info("     setup: %u start_streaming: %u stop_streaming: %u\n",
    553				q->cnt_queue_setup, q->cnt_start_streaming,
    554				q->cnt_stop_streaming);
    555			pr_info("     wait_prepare: %u wait_finish: %u\n",
    556				q->cnt_wait_prepare, q->cnt_wait_finish);
    557		}
    558		q->cnt_queue_setup = 0;
    559		q->cnt_wait_prepare = 0;
    560		q->cnt_wait_finish = 0;
    561		q->cnt_start_streaming = 0;
    562		q->cnt_stop_streaming = 0;
    563	}
    564	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
    565		struct vb2_buffer *vb = q->bufs[buffer];
    566		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
    567				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
    568				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
    569				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
    570				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
    571				  vb->cnt_buf_queue != vb->cnt_buf_done ||
    572				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
    573				  vb->cnt_buf_init != vb->cnt_buf_cleanup;
    574
    575		if (unbalanced || debug) {
    576			pr_info("   counters for queue %p, buffer %d:%s\n",
    577				q, buffer, unbalanced ? " UNBALANCED!" : "");
    578			pr_info("     buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
    579				vb->cnt_buf_init, vb->cnt_buf_cleanup,
    580				vb->cnt_buf_prepare, vb->cnt_buf_finish);
    581			pr_info("     buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n",
    582				vb->cnt_buf_out_validate, vb->cnt_buf_queue,
    583				vb->cnt_buf_done, vb->cnt_buf_request_complete);
    584			pr_info("     alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
    585				vb->cnt_mem_alloc, vb->cnt_mem_put,
    586				vb->cnt_mem_prepare, vb->cnt_mem_finish,
    587				vb->cnt_mem_mmap);
    588			pr_info("     get_userptr: %u put_userptr: %u\n",
    589				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
    590			pr_info("     attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
    591				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
    592				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
    593			pr_info("     get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
    594				vb->cnt_mem_get_dmabuf,
    595				vb->cnt_mem_num_users,
    596				vb->cnt_mem_vaddr,
    597				vb->cnt_mem_cookie);
    598		}
    599	}
    600#endif
    601
    602	/* Free videobuf buffers */
    603	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
    604	     ++buffer) {
    605		kfree(q->bufs[buffer]);
    606		q->bufs[buffer] = NULL;
    607	}
    608
    609	q->num_buffers -= buffers;
    610	if (!q->num_buffers) {
    611		q->memory = VB2_MEMORY_UNKNOWN;
    612		INIT_LIST_HEAD(&q->queued_list);
    613	}
    614	return 0;
    615}
    616
    617bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
    618{
    619	unsigned int plane;
    620	for (plane = 0; plane < vb->num_planes; ++plane) {
    621		void *mem_priv = vb->planes[plane].mem_priv;
    622		/*
    623		 * If num_users() has not been provided, call_memop
     624		 * will return 0; apparently nobody cares about this
    625		 * case anyway. If num_users() returns more than 1,
    626		 * we are not the only user of the plane's memory.
    627		 */
    628		if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
    629			return true;
    630	}
    631	return false;
    632}
    633EXPORT_SYMBOL(vb2_buffer_in_use);
    634
    635/*
    636 * __buffers_in_use() - return true if any buffers on the queue are in use and
     637 * the queue cannot be freed (by means of a REQBUFS(0) call)
    638 */
    639static bool __buffers_in_use(struct vb2_queue *q)
    640{
    641	unsigned int buffer;
    642	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
    643		if (vb2_buffer_in_use(q, q->bufs[buffer]))
    644			return true;
    645	}
    646	return false;
    647}
    648
    649void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
    650{
    651	call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
    652}
    653EXPORT_SYMBOL_GPL(vb2_core_querybuf);
    654
    655/*
    656 * __verify_userptr_ops() - verify that all memory operations required for
    657 * USERPTR queue type have been provided
    658 */
    659static int __verify_userptr_ops(struct vb2_queue *q)
    660{
    661	if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
    662	    !q->mem_ops->put_userptr)
    663		return -EINVAL;
    664
    665	return 0;
    666}
    667
    668/*
    669 * __verify_mmap_ops() - verify that all memory operations required for
    670 * MMAP queue type have been provided
    671 */
    672static int __verify_mmap_ops(struct vb2_queue *q)
    673{
    674	if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
    675	    !q->mem_ops->put || !q->mem_ops->mmap)
    676		return -EINVAL;
    677
    678	return 0;
    679}
    680
    681/*
    682 * __verify_dmabuf_ops() - verify that all memory operations required for
    683 * DMABUF queue type have been provided
    684 */
    685static int __verify_dmabuf_ops(struct vb2_queue *q)
    686{
    687	if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
    688	    !q->mem_ops->detach_dmabuf  || !q->mem_ops->map_dmabuf ||
    689	    !q->mem_ops->unmap_dmabuf)
    690		return -EINVAL;
    691
    692	return 0;
    693}
    694
    695int vb2_verify_memory_type(struct vb2_queue *q,
    696		enum vb2_memory memory, unsigned int type)
    697{
    698	if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR &&
    699	    memory != VB2_MEMORY_DMABUF) {
    700		dprintk(q, 1, "unsupported memory type\n");
    701		return -EINVAL;
    702	}
    703
    704	if (type != q->type) {
    705		dprintk(q, 1, "requested type is incorrect\n");
    706		return -EINVAL;
    707	}
    708
    709	/*
    710	 * Make sure all the required memory ops for given memory type
    711	 * are available.
    712	 */
    713	if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
    714		dprintk(q, 1, "MMAP for current setup unsupported\n");
    715		return -EINVAL;
    716	}
    717
    718	if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
    719		dprintk(q, 1, "USERPTR for current setup unsupported\n");
    720		return -EINVAL;
    721	}
    722
    723	if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
    724		dprintk(q, 1, "DMABUF for current setup unsupported\n");
    725		return -EINVAL;
    726	}
    727
    728	/*
    729	 * Place the busy tests at the end: -EBUSY can be ignored when
    730	 * create_bufs is called with count == 0, but count == 0 should still
    731	 * do the memory and type validation.
    732	 */
    733	if (vb2_fileio_is_active(q)) {
    734		dprintk(q, 1, "file io in progress\n");
    735		return -EBUSY;
    736	}
    737	return 0;
    738}
    739EXPORT_SYMBOL(vb2_verify_memory_type);
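
/*
 * For example, a driver that only implements the MMAP and DMABUF paths
 * would set q->io_modes = VB2_MMAP | VB2_DMABUF and supply the matching
 * mem_ops; the __verify_*_ops() helpers above then make REQBUFS or
 * CREATE_BUFS with VB2_MEMORY_USERPTR fail with -EINVAL.
 */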
    740
    741static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem)
    742{
    743	q->non_coherent_mem = 0;
    744
    745	if (!vb2_queue_allows_cache_hints(q))
    746		return;
    747	q->non_coherent_mem = non_coherent_mem;
    748}
    749
    750static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem)
    751{
    752	if (non_coherent_mem != q->non_coherent_mem) {
    753		dprintk(q, 1, "memory coherency model mismatch\n");
    754		return false;
    755	}
    756	return true;
    757}
    758
    759int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
    760		     unsigned int flags, unsigned int *count)
    761{
    762	unsigned int num_buffers, allocated_buffers, num_planes = 0;
    763	unsigned plane_sizes[VB2_MAX_PLANES] = { };
    764	bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
    765	unsigned int i;
    766	int ret;
    767
    768	if (q->streaming) {
    769		dprintk(q, 1, "streaming active\n");
    770		return -EBUSY;
    771	}
    772
    773	if (q->waiting_in_dqbuf && *count) {
    774		dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
    775		return -EBUSY;
    776	}
    777
    778	if (*count == 0 || q->num_buffers != 0 ||
    779	    (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) ||
    780	    !verify_coherency_flags(q, non_coherent_mem)) {
    781		/*
    782		 * We already have buffers allocated, so first check if they
    783		 * are not in use and can be freed.
    784		 */
    785		mutex_lock(&q->mmap_lock);
    786		if (debug && q->memory == VB2_MEMORY_MMAP &&
    787		    __buffers_in_use(q))
    788			dprintk(q, 1, "memory in use, orphaning buffers\n");
    789
    790		/*
    791		 * Call queue_cancel to clean up any buffers in the
    792		 * QUEUED state which is possible if buffers were prepared or
    793		 * queued without ever calling STREAMON.
    794		 */
    795		__vb2_queue_cancel(q);
    796		ret = __vb2_queue_free(q, q->num_buffers);
    797		mutex_unlock(&q->mmap_lock);
    798		if (ret)
    799			return ret;
    800
    801		/*
    802		 * In case of REQBUFS(0) return immediately without calling
    803		 * driver's queue_setup() callback and allocating resources.
    804		 */
    805		if (*count == 0)
    806			return 0;
    807	}
    808
    809	/*
    810	 * Make sure the requested values and current defaults are sane.
    811	 */
    812	WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME);
    813	num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
    814	num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
    815	memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
    816	q->memory = memory;
    817	set_queue_coherency(q, non_coherent_mem);
    818
    819	/*
    820	 * Ask the driver how many buffers and planes per buffer it requires.
    821	 * Driver also sets the size and allocator context for each plane.
    822	 */
    823	ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
    824		       plane_sizes, q->alloc_devs);
    825	if (ret)
    826		return ret;
    827
    828	/* Check that driver has set sane values */
    829	if (WARN_ON(!num_planes))
    830		return -EINVAL;
    831
    832	for (i = 0; i < num_planes; i++)
    833		if (WARN_ON(!plane_sizes[i]))
    834			return -EINVAL;
    835
    836	/* Finally, allocate buffers and video memory */
    837	allocated_buffers =
    838		__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
    839	if (allocated_buffers == 0) {
    840		dprintk(q, 1, "memory allocation failed\n");
    841		return -ENOMEM;
    842	}
    843
    844	/*
    845	 * There is no point in continuing if we can't allocate the minimum
    846	 * number of buffers needed by this vb2_queue.
    847	 */
    848	if (allocated_buffers < q->min_buffers_needed)
    849		ret = -ENOMEM;
    850
    851	/*
    852	 * Check if driver can handle the allocated number of buffers.
    853	 */
    854	if (!ret && allocated_buffers < num_buffers) {
    855		num_buffers = allocated_buffers;
    856		/*
    857		 * num_planes is set by the previous queue_setup(), but since it
    858		 * signals to queue_setup() whether it is called from create_bufs()
    859		 * vs reqbufs() we zero it here to signal that queue_setup() is
    860		 * called for the reqbufs() case.
    861		 */
    862		num_planes = 0;
    863
    864		ret = call_qop(q, queue_setup, q, &num_buffers,
    865			       &num_planes, plane_sizes, q->alloc_devs);
    866
    867		if (!ret && allocated_buffers < num_buffers)
    868			ret = -ENOMEM;
    869
    870		/*
    871		 * Either the driver has accepted a smaller number of buffers,
    872		 * or .queue_setup() returned an error
    873		 */
    874	}
    875
    876	mutex_lock(&q->mmap_lock);
    877	q->num_buffers = allocated_buffers;
    878
    879	if (ret < 0) {
    880		/*
    881		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
    882		 * from q->num_buffers.
    883		 */
    884		__vb2_queue_free(q, allocated_buffers);
    885		mutex_unlock(&q->mmap_lock);
    886		return ret;
    887	}
    888	mutex_unlock(&q->mmap_lock);
    889
    890	/*
    891	 * Return the number of successfully allocated buffers
    892	 * to the userspace.
    893	 */
    894	*count = allocated_buffers;
    895	q->waiting_for_buffers = !q->is_output;
    896
    897	return 0;
    898}
    899EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
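
/*
 * Sketch of the negotiation above from the driver side: a hypothetical
 * queue_setup() implementation (my_queue_setup, MY_IMAGE_SIZE and the
 * clamp limits are made up for illustration) that validates sizes for
 * the create_bufs() case and picks one plane of a fixed size otherwise.
 */
#if 0	/* illustration only, not built */
static int my_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
			  unsigned int *num_planes, unsigned int sizes[],
			  struct device *alloc_devs[])
{
	/* create_bufs() passes a non-zero *num_planes: only validate */
	if (*num_planes)
		return sizes[0] < MY_IMAGE_SIZE ? -EINVAL : 0;

	/* reqbufs() case: clamp the count and describe one plane */
	*num_buffers = clamp(*num_buffers, 3U, 8U);
	*num_planes = 1;
	sizes[0] = MY_IMAGE_SIZE;
	return 0;
}
#endif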
    900
    901int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
    902			 unsigned int flags, unsigned int *count,
    903			 unsigned int requested_planes,
    904			 const unsigned int requested_sizes[])
    905{
    906	unsigned int num_planes = 0, num_buffers, allocated_buffers;
    907	unsigned plane_sizes[VB2_MAX_PLANES] = { };
    908	bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
    909	int ret;
    910
    911	if (q->num_buffers == VB2_MAX_FRAME) {
    912		dprintk(q, 1, "maximum number of buffers already allocated\n");
    913		return -ENOBUFS;
    914	}
    915
    916	if (!q->num_buffers) {
    917		if (q->waiting_in_dqbuf && *count) {
    918			dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
    919			return -EBUSY;
    920		}
    921		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
    922		q->memory = memory;
    923		q->waiting_for_buffers = !q->is_output;
    924		set_queue_coherency(q, non_coherent_mem);
    925	} else {
    926		if (q->memory != memory) {
    927			dprintk(q, 1, "memory model mismatch\n");
    928			return -EINVAL;
    929		}
    930		if (!verify_coherency_flags(q, non_coherent_mem))
    931			return -EINVAL;
    932	}
    933
    934	num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
    935
    936	if (requested_planes && requested_sizes) {
    937		num_planes = requested_planes;
    938		memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes));
    939	}
    940
    941	/*
     942	 * Ask the driver whether the requested number of buffers, planes per
    943	 * buffer and their sizes are acceptable
    944	 */
    945	ret = call_qop(q, queue_setup, q, &num_buffers,
    946		       &num_planes, plane_sizes, q->alloc_devs);
    947	if (ret)
    948		return ret;
    949
    950	/* Finally, allocate buffers and video memory */
    951	allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
    952				num_planes, plane_sizes);
    953	if (allocated_buffers == 0) {
    954		dprintk(q, 1, "memory allocation failed\n");
    955		return -ENOMEM;
    956	}
    957
    958	/*
    959	 * Check if driver can handle the so far allocated number of buffers.
    960	 */
    961	if (allocated_buffers < num_buffers) {
    962		num_buffers = allocated_buffers;
    963
    964		/*
     965		 * q->num_buffers contains the total number of buffers that the
    966		 * queue driver has set up
    967		 */
    968		ret = call_qop(q, queue_setup, q, &num_buffers,
    969			       &num_planes, plane_sizes, q->alloc_devs);
    970
    971		if (!ret && allocated_buffers < num_buffers)
    972			ret = -ENOMEM;
    973
    974		/*
    975		 * Either the driver has accepted a smaller number of buffers,
    976		 * or .queue_setup() returned an error
    977		 */
    978	}
    979
    980	mutex_lock(&q->mmap_lock);
    981	q->num_buffers += allocated_buffers;
    982
    983	if (ret < 0) {
    984		/*
    985		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
    986		 * from q->num_buffers.
    987		 */
    988		__vb2_queue_free(q, allocated_buffers);
    989		mutex_unlock(&q->mmap_lock);
    990		return -ENOMEM;
    991	}
    992	mutex_unlock(&q->mmap_lock);
    993
    994	/*
    995	 * Return the number of successfully allocated buffers
    996	 * to the userspace.
    997	 */
    998	*count = allocated_buffers;
    999
   1000	return 0;
   1001}
   1002EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
   1003
   1004void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
   1005{
   1006	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
   1007		return NULL;
   1008
   1009	return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv);
   1010
   1011}
   1012EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
   1013
   1014void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
   1015{
   1016	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
   1017		return NULL;
   1018
   1019	return call_ptr_memop(cookie, vb, vb->planes[plane_no].mem_priv);
   1020}
   1021EXPORT_SYMBOL_GPL(vb2_plane_cookie);
   1022
   1023void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
   1024{
   1025	struct vb2_queue *q = vb->vb2_queue;
   1026	unsigned long flags;
   1027
   1028	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
   1029		return;
   1030
   1031	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
   1032		    state != VB2_BUF_STATE_ERROR &&
   1033		    state != VB2_BUF_STATE_QUEUED))
   1034		state = VB2_BUF_STATE_ERROR;
   1035
   1036#ifdef CONFIG_VIDEO_ADV_DEBUG
   1037	/*
   1038	 * Although this is not a callback, it still does have to balance
   1039	 * with the buf_queue op. So update this counter manually.
   1040	 */
   1041	vb->cnt_buf_done++;
   1042#endif
   1043	dprintk(q, 4, "done processing on buffer %d, state: %s\n",
   1044		vb->index, vb2_state_name(state));
   1045
   1046	if (state != VB2_BUF_STATE_QUEUED)
   1047		__vb2_buf_mem_finish(vb);
   1048
   1049	spin_lock_irqsave(&q->done_lock, flags);
   1050	if (state == VB2_BUF_STATE_QUEUED) {
   1051		vb->state = VB2_BUF_STATE_QUEUED;
   1052	} else {
   1053		/* Add the buffer to the done buffers list */
   1054		list_add_tail(&vb->done_entry, &q->done_list);
   1055		vb->state = state;
   1056	}
   1057	atomic_dec(&q->owned_by_drv_count);
   1058
   1059	if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
   1060		media_request_object_unbind(&vb->req_obj);
   1061		media_request_object_put(&vb->req_obj);
   1062	}
   1063
   1064	spin_unlock_irqrestore(&q->done_lock, flags);
   1065
   1066	trace_vb2_buf_done(q, vb);
   1067
   1068	switch (state) {
   1069	case VB2_BUF_STATE_QUEUED:
   1070		return;
   1071	default:
   1072		/* Inform any processes that may be waiting for buffers */
   1073		wake_up(&q->done_wq);
   1074		break;
   1075	}
   1076}
   1077EXPORT_SYMBOL_GPL(vb2_buffer_done);
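
/*
 * Typical vb2_buffer_done() call site, sketched for a hypothetical
 * capture driver's interrupt handler (struct my_dev and its fields are
 * made up): the ACTIVE buffer handed over via buf_queue is completed
 * with DONE or ERROR, which moves it to done_list and wakes dqbuf()
 * waiters.
 */
#if 0	/* illustration only, not built */
static irqreturn_t my_irq(int irq, void *priv)
{
	struct my_dev *dev = priv;
	struct vb2_buffer *vb = dev->current_vb;

	vb->timestamp = ktime_get_ns();
	vb2_set_plane_payload(vb, 0, dev->frame_bytes);
	vb2_buffer_done(vb, dev->hw_error ? VB2_BUF_STATE_ERROR
					  : VB2_BUF_STATE_DONE);
	return IRQ_HANDLED;
}
#endif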
   1078
   1079void vb2_discard_done(struct vb2_queue *q)
   1080{
   1081	struct vb2_buffer *vb;
   1082	unsigned long flags;
   1083
   1084	spin_lock_irqsave(&q->done_lock, flags);
   1085	list_for_each_entry(vb, &q->done_list, done_entry)
   1086		vb->state = VB2_BUF_STATE_ERROR;
   1087	spin_unlock_irqrestore(&q->done_lock, flags);
   1088}
   1089EXPORT_SYMBOL_GPL(vb2_discard_done);
   1090
   1091/*
   1092 * __prepare_mmap() - prepare an MMAP buffer
   1093 */
   1094static int __prepare_mmap(struct vb2_buffer *vb)
   1095{
   1096	int ret = 0;
   1097
   1098	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
   1099			 vb, vb->planes);
   1100	return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
   1101}
   1102
   1103/*
   1104 * __prepare_userptr() - prepare a USERPTR buffer
   1105 */
   1106static int __prepare_userptr(struct vb2_buffer *vb)
   1107{
   1108	struct vb2_plane planes[VB2_MAX_PLANES];
   1109	struct vb2_queue *q = vb->vb2_queue;
   1110	void *mem_priv;
   1111	unsigned int plane;
   1112	int ret = 0;
   1113	bool reacquired = vb->planes[0].mem_priv == NULL;
   1114
   1115	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
   1116	/* Copy relevant information provided by the userspace */
   1117	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
   1118			 vb, planes);
   1119	if (ret)
   1120		return ret;
   1121
   1122	for (plane = 0; plane < vb->num_planes; ++plane) {
   1123		/* Skip the plane if already verified */
   1124		if (vb->planes[plane].m.userptr &&
   1125			vb->planes[plane].m.userptr == planes[plane].m.userptr
   1126			&& vb->planes[plane].length == planes[plane].length)
   1127			continue;
   1128
   1129		dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n",
   1130			plane);
   1131
   1132		/* Check if the provided plane buffer is large enough */
   1133		if (planes[plane].length < vb->planes[plane].min_length) {
   1134			dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n",
   1135						planes[plane].length,
   1136						vb->planes[plane].min_length,
   1137						plane);
   1138			ret = -EINVAL;
   1139			goto err;
   1140		}
   1141
   1142		/* Release previously acquired memory if present */
   1143		if (vb->planes[plane].mem_priv) {
   1144			if (!reacquired) {
   1145				reacquired = true;
   1146				vb->copied_timestamp = 0;
   1147				call_void_vb_qop(vb, buf_cleanup, vb);
   1148			}
   1149			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
   1150		}
   1151
   1152		vb->planes[plane].mem_priv = NULL;
   1153		vb->planes[plane].bytesused = 0;
   1154		vb->planes[plane].length = 0;
   1155		vb->planes[plane].m.userptr = 0;
   1156		vb->planes[plane].data_offset = 0;
   1157
   1158		/* Acquire each plane's memory */
   1159		mem_priv = call_ptr_memop(get_userptr,
   1160					  vb,
   1161					  q->alloc_devs[plane] ? : q->dev,
   1162					  planes[plane].m.userptr,
   1163					  planes[plane].length);
   1164		if (IS_ERR(mem_priv)) {
   1165			dprintk(q, 1, "failed acquiring userspace memory for plane %d\n",
   1166				plane);
   1167			ret = PTR_ERR(mem_priv);
   1168			goto err;
   1169		}
   1170		vb->planes[plane].mem_priv = mem_priv;
   1171	}
   1172
   1173	/*
   1174	 * Now that everything is in order, copy relevant information
   1175	 * provided by userspace.
   1176	 */
   1177	for (plane = 0; plane < vb->num_planes; ++plane) {
   1178		vb->planes[plane].bytesused = planes[plane].bytesused;
   1179		vb->planes[plane].length = planes[plane].length;
   1180		vb->planes[plane].m.userptr = planes[plane].m.userptr;
   1181		vb->planes[plane].data_offset = planes[plane].data_offset;
   1182	}
   1183
   1184	if (reacquired) {
   1185		/*
   1186		 * One or more planes changed, so we must call buf_init to do
   1187		 * the driver-specific initialization on the newly acquired
   1188		 * buffer, if provided.
   1189		 */
   1190		ret = call_vb_qop(vb, buf_init, vb);
   1191		if (ret) {
   1192			dprintk(q, 1, "buffer initialization failed\n");
   1193			goto err;
   1194		}
   1195	}
   1196
   1197	ret = call_vb_qop(vb, buf_prepare, vb);
   1198	if (ret) {
   1199		dprintk(q, 1, "buffer preparation failed\n");
   1200		call_void_vb_qop(vb, buf_cleanup, vb);
   1201		goto err;
   1202	}
   1203
   1204	return 0;
   1205err:
   1206	/* In case of errors, release planes that were already acquired */
   1207	for (plane = 0; plane < vb->num_planes; ++plane) {
   1208		if (vb->planes[plane].mem_priv)
   1209			call_void_memop(vb, put_userptr,
   1210				vb->planes[plane].mem_priv);
   1211		vb->planes[plane].mem_priv = NULL;
   1212		vb->planes[plane].m.userptr = 0;
   1213		vb->planes[plane].length = 0;
   1214	}
   1215
   1216	return ret;
   1217}
   1218
   1219/*
   1220 * __prepare_dmabuf() - prepare a DMABUF buffer
   1221 */
   1222static int __prepare_dmabuf(struct vb2_buffer *vb)
   1223{
   1224	struct vb2_plane planes[VB2_MAX_PLANES];
   1225	struct vb2_queue *q = vb->vb2_queue;
   1226	void *mem_priv;
   1227	unsigned int plane;
   1228	int ret = 0;
   1229	bool reacquired = vb->planes[0].mem_priv == NULL;
   1230
   1231	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
   1232	/* Copy relevant information provided by the userspace */
   1233	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
   1234			 vb, planes);
   1235	if (ret)
   1236		return ret;
   1237
   1238	for (plane = 0; plane < vb->num_planes; ++plane) {
   1239		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
   1240
   1241		if (IS_ERR_OR_NULL(dbuf)) {
   1242			dprintk(q, 1, "invalid dmabuf fd for plane %d\n",
   1243				plane);
   1244			ret = -EINVAL;
   1245			goto err;
   1246		}
   1247
   1248		/* use DMABUF size if length is not provided */
   1249		if (planes[plane].length == 0)
   1250			planes[plane].length = dbuf->size;
   1251
   1252		if (planes[plane].length < vb->planes[plane].min_length) {
   1253			dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
   1254				planes[plane].length, plane,
   1255				vb->planes[plane].min_length);
   1256			dma_buf_put(dbuf);
   1257			ret = -EINVAL;
   1258			goto err;
   1259		}
   1260
   1261		/* Skip the plane if already verified */
   1262		if (dbuf == vb->planes[plane].dbuf &&
   1263			vb->planes[plane].length == planes[plane].length) {
   1264			dma_buf_put(dbuf);
   1265			continue;
   1266		}
   1267
   1268		dprintk(q, 3, "buffer for plane %d changed\n", plane);
   1269
   1270		if (!reacquired) {
   1271			reacquired = true;
   1272			vb->copied_timestamp = 0;
   1273			call_void_vb_qop(vb, buf_cleanup, vb);
   1274		}
   1275
   1276		/* Release previously acquired memory if present */
   1277		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
   1278		vb->planes[plane].bytesused = 0;
   1279		vb->planes[plane].length = 0;
   1280		vb->planes[plane].m.fd = 0;
   1281		vb->planes[plane].data_offset = 0;
   1282
   1283		/* Acquire each plane's memory */
   1284		mem_priv = call_ptr_memop(attach_dmabuf,
   1285					  vb,
   1286					  q->alloc_devs[plane] ? : q->dev,
   1287					  dbuf,
   1288					  planes[plane].length);
   1289		if (IS_ERR(mem_priv)) {
   1290			dprintk(q, 1, "failed to attach dmabuf\n");
   1291			ret = PTR_ERR(mem_priv);
   1292			dma_buf_put(dbuf);
   1293			goto err;
   1294		}
   1295
   1296		vb->planes[plane].dbuf = dbuf;
   1297		vb->planes[plane].mem_priv = mem_priv;
   1298	}
   1299
   1300	/*
    1301	 * This pins the buffer(s) with dma_buf_map_attachment(). It's done
    1302	 * here, while queueing the buffer(s), instead of just before the DMA,
    1303	 * so userspace knows sooner rather than later if the dma-buf map fails.
   1304	 */
   1305	for (plane = 0; plane < vb->num_planes; ++plane) {
   1306		if (vb->planes[plane].dbuf_mapped)
   1307			continue;
   1308
   1309		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
   1310		if (ret) {
   1311			dprintk(q, 1, "failed to map dmabuf for plane %d\n",
   1312				plane);
   1313			goto err;
   1314		}
   1315		vb->planes[plane].dbuf_mapped = 1;
   1316	}
   1317
   1318	/*
   1319	 * Now that everything is in order, copy relevant information
   1320	 * provided by userspace.
   1321	 */
   1322	for (plane = 0; plane < vb->num_planes; ++plane) {
   1323		vb->planes[plane].bytesused = planes[plane].bytesused;
   1324		vb->planes[plane].length = planes[plane].length;
   1325		vb->planes[plane].m.fd = planes[plane].m.fd;
   1326		vb->planes[plane].data_offset = planes[plane].data_offset;
   1327	}
   1328
   1329	if (reacquired) {
   1330		/*
   1331		 * Call driver-specific initialization on the newly acquired buffer,
   1332		 * if provided.
   1333		 */
   1334		ret = call_vb_qop(vb, buf_init, vb);
   1335		if (ret) {
   1336			dprintk(q, 1, "buffer initialization failed\n");
   1337			goto err;
   1338		}
   1339	}
   1340
   1341	ret = call_vb_qop(vb, buf_prepare, vb);
   1342	if (ret) {
   1343		dprintk(q, 1, "buffer preparation failed\n");
   1344		call_void_vb_qop(vb, buf_cleanup, vb);
   1345		goto err;
   1346	}
   1347
   1348	return 0;
   1349err:
   1350	/* In case of errors, release planes that were already acquired */
   1351	__vb2_buf_dmabuf_put(vb);
   1352
   1353	return ret;
   1354}
   1355
   1356/*
   1357 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
   1358 */
   1359static void __enqueue_in_driver(struct vb2_buffer *vb)
   1360{
   1361	struct vb2_queue *q = vb->vb2_queue;
   1362
   1363	vb->state = VB2_BUF_STATE_ACTIVE;
   1364	atomic_inc(&q->owned_by_drv_count);
   1365
   1366	trace_vb2_buf_queue(q, vb);
   1367
   1368	call_void_vb_qop(vb, buf_queue, vb);
   1369}
   1370
   1371static int __buf_prepare(struct vb2_buffer *vb)
   1372{
   1373	struct vb2_queue *q = vb->vb2_queue;
   1374	enum vb2_buffer_state orig_state = vb->state;
   1375	int ret;
   1376
   1377	if (q->error) {
   1378		dprintk(q, 1, "fatal error occurred on queue\n");
   1379		return -EIO;
   1380	}
   1381
   1382	if (vb->prepared)
   1383		return 0;
   1384	WARN_ON(vb->synced);
   1385
   1386	if (q->is_output) {
   1387		ret = call_vb_qop(vb, buf_out_validate, vb);
   1388		if (ret) {
   1389			dprintk(q, 1, "buffer validation failed\n");
   1390			return ret;
   1391		}
   1392	}
   1393
   1394	vb->state = VB2_BUF_STATE_PREPARING;
   1395
   1396	switch (q->memory) {
   1397	case VB2_MEMORY_MMAP:
   1398		ret = __prepare_mmap(vb);
   1399		break;
   1400	case VB2_MEMORY_USERPTR:
   1401		ret = __prepare_userptr(vb);
   1402		break;
   1403	case VB2_MEMORY_DMABUF:
   1404		ret = __prepare_dmabuf(vb);
   1405		break;
   1406	default:
   1407		WARN(1, "Invalid queue type\n");
   1408		ret = -EINVAL;
   1409		break;
   1410	}
   1411
   1412	if (ret) {
   1413		dprintk(q, 1, "buffer preparation failed: %d\n", ret);
   1414		vb->state = orig_state;
   1415		return ret;
   1416	}
   1417
   1418	__vb2_buf_mem_prepare(vb);
   1419	vb->prepared = 1;
   1420	vb->state = orig_state;
   1421
   1422	return 0;
   1423}
   1424
   1425static int vb2_req_prepare(struct media_request_object *obj)
   1426{
   1427	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
   1428	int ret;
   1429
   1430	if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST))
   1431		return -EINVAL;
   1432
   1433	mutex_lock(vb->vb2_queue->lock);
   1434	ret = __buf_prepare(vb);
   1435	mutex_unlock(vb->vb2_queue->lock);
   1436	return ret;
   1437}
   1438
   1439static void __vb2_dqbuf(struct vb2_buffer *vb);
   1440
   1441static void vb2_req_unprepare(struct media_request_object *obj)
   1442{
   1443	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
   1444
   1445	mutex_lock(vb->vb2_queue->lock);
   1446	__vb2_dqbuf(vb);
   1447	vb->state = VB2_BUF_STATE_IN_REQUEST;
   1448	mutex_unlock(vb->vb2_queue->lock);
   1449	WARN_ON(!vb->req_obj.req);
   1450}
   1451
   1452int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
   1453		  struct media_request *req);
   1454
   1455static void vb2_req_queue(struct media_request_object *obj)
   1456{
   1457	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
   1458	int err;
   1459
   1460	mutex_lock(vb->vb2_queue->lock);
   1461	/*
   1462	 * There is no method to propagate an error from vb2_core_qbuf(),
   1463	 * so if this returns a non-0 value, then WARN.
   1464	 *
   1465	 * The only exception is -EIO which is returned if q->error is
   1466	 * set. We just ignore that, and expect this will be caught the
   1467	 * next time vb2_req_prepare() is called.
   1468	 */
   1469	err = vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL);
   1470	WARN_ON_ONCE(err && err != -EIO);
   1471	mutex_unlock(vb->vb2_queue->lock);
   1472}
   1473
   1474static void vb2_req_unbind(struct media_request_object *obj)
   1475{
   1476	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
   1477
   1478	if (vb->state == VB2_BUF_STATE_IN_REQUEST)
   1479		call_void_bufop(vb->vb2_queue, init_buffer, vb);
   1480}
   1481
   1482static void vb2_req_release(struct media_request_object *obj)
   1483{
   1484	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
   1485
   1486	if (vb->state == VB2_BUF_STATE_IN_REQUEST) {
   1487		vb->state = VB2_BUF_STATE_DEQUEUED;
   1488		if (vb->request)
   1489			media_request_put(vb->request);
   1490		vb->request = NULL;
   1491	}
   1492}
   1493
   1494static const struct media_request_object_ops vb2_core_req_ops = {
   1495	.prepare = vb2_req_prepare,
   1496	.unprepare = vb2_req_unprepare,
   1497	.queue = vb2_req_queue,
   1498	.unbind = vb2_req_unbind,
   1499	.release = vb2_req_release,
   1500};
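
/*
 * Rough lifecycle of a buffer bound to a request via the ops above:
 * qbuf with a request fd binds the object; validating the request on
 * MEDIA_REQUEST_IOC_QUEUE runs ->prepare (with ->unprepare rolling back
 * earlier objects if a later one fails); queueing the request runs
 * ->queue, which re-enters vb2_core_qbuf(); ->unbind and ->release run
 * once the buffer completes and the request reference is dropped.
 */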
   1501
   1502bool vb2_request_object_is_buffer(struct media_request_object *obj)
   1503{
   1504	return obj->ops == &vb2_core_req_ops;
   1505}
   1506EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer);
   1507
   1508unsigned int vb2_request_buffer_cnt(struct media_request *req)
   1509{
   1510	struct media_request_object *obj;
   1511	unsigned long flags;
   1512	unsigned int buffer_cnt = 0;
   1513
   1514	spin_lock_irqsave(&req->lock, flags);
   1515	list_for_each_entry(obj, &req->objects, list)
   1516		if (vb2_request_object_is_buffer(obj))
   1517			buffer_cnt++;
   1518	spin_unlock_irqrestore(&req->lock, flags);
   1519
   1520	return buffer_cnt;
   1521}
   1522EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt);
   1523
   1524int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
   1525{
   1526	struct vb2_buffer *vb;
   1527	int ret;
   1528
   1529	vb = q->bufs[index];
   1530	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
   1531		dprintk(q, 1, "invalid buffer state %s\n",
   1532			vb2_state_name(vb->state));
   1533		return -EINVAL;
   1534	}
   1535	if (vb->prepared) {
   1536		dprintk(q, 1, "buffer already prepared\n");
   1537		return -EINVAL;
   1538	}
   1539
   1540	ret = __buf_prepare(vb);
   1541	if (ret)
   1542		return ret;
   1543
   1544	/* Fill buffer information for the userspace */
   1545	call_void_bufop(q, fill_user_buffer, vb, pb);
   1546
   1547	dprintk(q, 2, "prepare of buffer %d succeeded\n", vb->index);
   1548
   1549	return 0;
   1550}
   1551EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
   1552
   1553/*
   1554 * vb2_start_streaming() - Attempt to start streaming.
   1555 * @q:		videobuf2 queue
   1556 *
   1557 * Attempt to start streaming. When this function is called there must be
   1558 * at least q->min_buffers_needed buffers queued up (i.e. the minimum
   1559 * number of buffers required for the DMA engine to function). If the
   1560 * @start_streaming op fails it is supposed to return all the driver-owned
   1561 * buffers back to vb2 in state QUEUED. Check if that happened and if
   1562 * not warn and reclaim them forcefully.
   1563 */
   1564static int vb2_start_streaming(struct vb2_queue *q)
   1565{
   1566	struct vb2_buffer *vb;
   1567	int ret;
   1568
   1569	/*
   1570	 * If any buffers were queued before streamon,
   1571	 * we can now pass them to driver for processing.
   1572	 */
   1573	list_for_each_entry(vb, &q->queued_list, queued_entry)
   1574		__enqueue_in_driver(vb);
   1575
   1576	/* Tell the driver to start streaming */
   1577	q->start_streaming_called = 1;
   1578	ret = call_qop(q, start_streaming, q,
   1579		       atomic_read(&q->owned_by_drv_count));
   1580	if (!ret)
   1581		return 0;
   1582
   1583	q->start_streaming_called = 0;
   1584
   1585	dprintk(q, 1, "driver refused to start streaming\n");
   1586	/*
   1587	 * If you see this warning, then the driver isn't cleaning up properly
   1588	 * after a failed start_streaming(). See the start_streaming()
    1589	 * documentation in videobuf2-core.h for more information on how buffers
   1590	 * should be returned to vb2 in start_streaming().
   1591	 */
   1592	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
   1593		unsigned i;
   1594
   1595		/*
   1596		 * Forcefully reclaim buffers if the driver did not
   1597		 * correctly return them to vb2.
   1598		 */
   1599		for (i = 0; i < q->num_buffers; ++i) {
   1600			vb = q->bufs[i];
   1601			if (vb->state == VB2_BUF_STATE_ACTIVE)
   1602				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
   1603		}
   1604		/* Must be zero now */
   1605		WARN_ON(atomic_read(&q->owned_by_drv_count));
   1606	}
   1607	/*
   1608	 * If done_list is not empty, then start_streaming() didn't call
   1609	 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
   1610	 * STATE_DONE.
   1611	 */
   1612	WARN_ON(!list_empty(&q->done_list));
   1613	return ret;
   1614}
   1615
   1616int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
   1617		  struct media_request *req)
   1618{
   1619	struct vb2_buffer *vb;
   1620	enum vb2_buffer_state orig_state;
   1621	int ret;
   1622
   1623	if (q->error) {
   1624		dprintk(q, 1, "fatal error occurred on queue\n");
   1625		return -EIO;
   1626	}
   1627
   1628	vb = q->bufs[index];
   1629
   1630	if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
   1631	    q->requires_requests) {
   1632		dprintk(q, 1, "qbuf requires a request\n");
   1633		return -EBADR;
   1634	}
   1635
   1636	if ((req && q->uses_qbuf) ||
   1637	    (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
   1638	     q->uses_requests)) {
   1639		dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n");
   1640		return -EBUSY;
   1641	}
   1642
   1643	if (req) {
   1644		int ret;
   1645
   1646		q->uses_requests = 1;
   1647		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
   1648			dprintk(q, 1, "buffer %d not in dequeued state\n",
   1649				vb->index);
   1650			return -EINVAL;
   1651		}
   1652
   1653		if (q->is_output && !vb->prepared) {
   1654			ret = call_vb_qop(vb, buf_out_validate, vb);
   1655			if (ret) {
   1656				dprintk(q, 1, "buffer validation failed\n");
   1657				return ret;
   1658			}
   1659		}
   1660
   1661		media_request_object_init(&vb->req_obj);
   1662
   1663		/* Make sure the request is in a safe state for updating. */
   1664		ret = media_request_lock_for_update(req);
   1665		if (ret)
   1666			return ret;
   1667		ret = media_request_object_bind(req, &vb2_core_req_ops,
   1668						q, true, &vb->req_obj);
   1669		media_request_unlock_for_update(req);
   1670		if (ret)
   1671			return ret;
   1672
   1673		vb->state = VB2_BUF_STATE_IN_REQUEST;
   1674
   1675		/*
   1676		 * Increment the refcount and store the request.
   1677		 * The request refcount is decremented again when the
   1678		 * buffer is dequeued. This is to prevent vb2_buffer_done()
   1679		 * from freeing the request from interrupt context, which can
   1680		 * happen if the application closed the request fd after
   1681		 * queueing the request.
   1682		 */
   1683		media_request_get(req);
   1684		vb->request = req;
   1685
   1686		/* Fill buffer information for the userspace */
   1687		if (pb) {
   1688			call_void_bufop(q, copy_timestamp, vb, pb);
   1689			call_void_bufop(q, fill_user_buffer, vb, pb);
   1690		}
   1691
   1692		dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
   1693		return 0;
   1694	}
   1695
   1696	if (vb->state != VB2_BUF_STATE_IN_REQUEST)
   1697		q->uses_qbuf = 1;
   1698
   1699	switch (vb->state) {
   1700	case VB2_BUF_STATE_DEQUEUED:
   1701	case VB2_BUF_STATE_IN_REQUEST:
   1702		if (!vb->prepared) {
   1703			ret = __buf_prepare(vb);
   1704			if (ret)
   1705				return ret;
   1706		}
   1707		break;
   1708	case VB2_BUF_STATE_PREPARING:
   1709		dprintk(q, 1, "buffer still being prepared\n");
   1710		return -EINVAL;
   1711	default:
   1712		dprintk(q, 1, "invalid buffer state %s\n",
   1713			vb2_state_name(vb->state));
   1714		return -EINVAL;
   1715	}
   1716
   1717	/*
    1718	 * Add to the queued buffers list; a buffer will stay on it until
   1719	 * dequeued in dqbuf.
   1720	 */
   1721	orig_state = vb->state;
   1722	list_add_tail(&vb->queued_entry, &q->queued_list);
   1723	q->queued_count++;
   1724	q->waiting_for_buffers = false;
   1725	vb->state = VB2_BUF_STATE_QUEUED;
   1726
   1727	if (pb)
   1728		call_void_bufop(q, copy_timestamp, vb, pb);
   1729
   1730	trace_vb2_qbuf(q, vb);
   1731
   1732	/*
    1733	 * If already streaming, give the buffer to the driver for processing.
    1734	 * If not, the buffer will be given to the driver on the next streamon.
   1735	 */
   1736	if (q->start_streaming_called)
   1737		__enqueue_in_driver(vb);
   1738
    1739	/* Fill buffer information for userspace */
   1740	if (pb)
   1741		call_void_bufop(q, fill_user_buffer, vb, pb);
   1742
   1743	/*
   1744	 * If streamon has been called, and we haven't yet called
   1745	 * start_streaming() since not enough buffers were queued, and
   1746	 * we now have reached the minimum number of queued buffers,
   1747	 * then we can finally call start_streaming().
   1748	 */
   1749	if (q->streaming && !q->start_streaming_called &&
   1750	    q->queued_count >= q->min_buffers_needed) {
   1751		ret = vb2_start_streaming(q);
   1752		if (ret) {
   1753			/*
   1754			 * Since vb2_core_qbuf will return with an error,
    1755			 * we should restore the buffer to its original
    1756			 * state, as the error indicates it wasn't queued.
   1757			 */
   1758			list_del(&vb->queued_entry);
   1759			q->queued_count--;
   1760			vb->state = orig_state;
   1761			return ret;
   1762		}
   1763	}
   1764
   1765	dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
   1766	return 0;
   1767}
   1768EXPORT_SYMBOL_GPL(vb2_core_qbuf);
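
/*
 * Editorial sketch, not part of this file: the calling order vb2_core_qbuf()
 * is designed for. Queueing before streamon is legal; the buffers are handed
 * to the driver once start_streaming() runs, which vb2_core_qbuf() itself
 * triggers when the min_buffers_needed threshold is reached.
 */
static int __maybe_unused example_queue_all_and_stream(struct vb2_queue *q,
						       unsigned int count)
{
	unsigned int i;
	int ret;

	for (i = 0; i < count; i++) {
		/* pb == NULL, req == NULL: no userspace info, no request */
		ret = vb2_core_qbuf(q, i, NULL, NULL);
		if (ret)
			return ret;
	}
	return vb2_core_streamon(q, q->type);
}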
   1769
   1770/*
   1771 * __vb2_wait_for_done_vb() - wait for a buffer to become available
   1772 * for dequeuing
   1773 *
    1774 * Will sleep, if required, when nonblocking == false.
   1775 */
   1776static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
   1777{
   1778	/*
    1779	 * All operations on the done_list are performed under done_lock
    1780	 * spinlock protection. However, buffers may be removed from it
    1781	 * and returned to userspace only while holding both the driver's
    1782	 * lock and the done_lock spinlock. Thus we can be sure that as
    1783	 * long as we hold the driver's lock, the list cannot become
    1784	 * empty once a list_empty() check has reported it non-empty.
   1785	 */
   1786
   1787	for (;;) {
   1788		int ret;
   1789
   1790		if (q->waiting_in_dqbuf) {
   1791			dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
   1792			return -EBUSY;
   1793		}
   1794
   1795		if (!q->streaming) {
   1796			dprintk(q, 1, "streaming off, will not wait for buffers\n");
   1797			return -EINVAL;
   1798		}
   1799
   1800		if (q->error) {
    1801			dprintk(q, 1, "queue in error state, will not wait for buffers\n");
   1802			return -EIO;
   1803		}
   1804
   1805		if (q->last_buffer_dequeued) {
   1806			dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n");
   1807			return -EPIPE;
   1808		}
   1809
   1810		if (!list_empty(&q->done_list)) {
   1811			/*
   1812			 * Found a buffer that we were waiting for.
   1813			 */
   1814			break;
   1815		}
   1816
   1817		if (nonblocking) {
   1818			dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n");
   1819			return -EAGAIN;
   1820		}
   1821
   1822		q->waiting_in_dqbuf = 1;
   1823		/*
   1824		 * We are streaming and blocking, wait for another buffer to
   1825		 * become ready or for streamoff. Driver's lock is released to
   1826		 * allow streamoff or qbuf to be called while waiting.
   1827		 */
   1828		call_void_qop(q, wait_prepare, q);
   1829
   1830		/*
   1831		 * All locks have been released, it is safe to sleep now.
   1832		 */
   1833		dprintk(q, 3, "will sleep waiting for buffers\n");
   1834		ret = wait_event_interruptible(q->done_wq,
   1835				!list_empty(&q->done_list) || !q->streaming ||
   1836				q->error);
   1837
   1838		/*
    1839		 * We need to re-evaluate the conditions after reacquiring
    1840		 * the locks, or return an error if one occurred while sleeping.
   1841		 */
   1842		call_void_qop(q, wait_finish, q);
   1843		q->waiting_in_dqbuf = 0;
   1844		if (ret) {
   1845			dprintk(q, 1, "sleep was interrupted\n");
   1846			return ret;
   1847		}
   1848	}
   1849	return 0;
   1850}
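
/*
 * Editorial sketch, not part of this file: what the wait_prepare()/
 * wait_finish() pair used above typically looks like on the driver side.
 * Most drivers simply drop and retake their serialization mutex so that
 * qbuf or streamoff can run while dqbuf sleeps; this hand-written
 * equivalent of the stock vb2_ops_wait_prepare()/vb2_ops_wait_finish()
 * helpers assumes the driver has pointed q->lock at that mutex.
 */
static void __maybe_unused example_wait_prepare(struct vb2_queue *q)
{
	mutex_unlock(q->lock);
}

static void __maybe_unused example_wait_finish(struct vb2_queue *q)
{
	mutex_lock(q->lock);
}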
   1851
   1852/*
   1853 * __vb2_get_done_vb() - get a buffer ready for dequeuing
   1854 *
    1855 * Will sleep, if required, when nonblocking == false.
   1856 */
   1857static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
   1858			     void *pb, int nonblocking)
   1859{
   1860	unsigned long flags;
   1861	int ret = 0;
   1862
   1863	/*
   1864	 * Wait for at least one buffer to become available on the done_list.
   1865	 */
   1866	ret = __vb2_wait_for_done_vb(q, nonblocking);
   1867	if (ret)
   1868		return ret;
   1869
   1870	/*
   1871	 * Driver's lock has been held since we last verified that done_list
   1872	 * is not empty, so no need for another list_empty(done_list) check.
   1873	 */
   1874	spin_lock_irqsave(&q->done_lock, flags);
   1875	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
   1876	/*
   1877	 * Only remove the buffer from done_list if all planes can be
   1878	 * handled. Some cases such as V4L2 file I/O and DVB have pb
   1879	 * == NULL; skip the check then as there's nothing to verify.
   1880	 */
   1881	if (pb)
   1882		ret = call_bufop(q, verify_planes_array, *vb, pb);
   1883	if (!ret)
   1884		list_del(&(*vb)->done_entry);
   1885	spin_unlock_irqrestore(&q->done_lock, flags);
   1886
   1887	return ret;
   1888}
   1889
   1890int vb2_wait_for_all_buffers(struct vb2_queue *q)
   1891{
   1892	if (!q->streaming) {
   1893		dprintk(q, 1, "streaming off, will not wait for buffers\n");
   1894		return -EINVAL;
   1895	}
   1896
   1897	if (q->start_streaming_called)
   1898		wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
   1899	return 0;
   1900}
   1901EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
   1902
   1903/*
    1904 * __vb2_dqbuf() - return the buffer to the DEQUEUED state
   1905 */
   1906static void __vb2_dqbuf(struct vb2_buffer *vb)
   1907{
   1908	struct vb2_queue *q = vb->vb2_queue;
   1909
   1910	/* nothing to do if the buffer is already dequeued */
   1911	if (vb->state == VB2_BUF_STATE_DEQUEUED)
   1912		return;
   1913
   1914	vb->state = VB2_BUF_STATE_DEQUEUED;
   1915
   1916	call_void_bufop(q, init_buffer, vb);
   1917}
   1918
   1919int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
   1920		   bool nonblocking)
   1921{
   1922	struct vb2_buffer *vb = NULL;
   1923	int ret;
   1924
   1925	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
   1926	if (ret < 0)
   1927		return ret;
   1928
   1929	switch (vb->state) {
   1930	case VB2_BUF_STATE_DONE:
   1931		dprintk(q, 3, "returning done buffer\n");
   1932		break;
   1933	case VB2_BUF_STATE_ERROR:
   1934		dprintk(q, 3, "returning done buffer with errors\n");
   1935		break;
   1936	default:
   1937		dprintk(q, 1, "invalid buffer state %s\n",
   1938			vb2_state_name(vb->state));
   1939		return -EINVAL;
   1940	}
   1941
   1942	call_void_vb_qop(vb, buf_finish, vb);
   1943	vb->prepared = 0;
   1944
   1945	if (pindex)
   1946		*pindex = vb->index;
   1947
    1948	/* Fill buffer information for userspace */
   1949	if (pb)
   1950		call_void_bufop(q, fill_user_buffer, vb, pb);
   1951
   1952	/* Remove from videobuf queue */
   1953	list_del(&vb->queued_entry);
   1954	q->queued_count--;
   1955
   1956	trace_vb2_dqbuf(q, vb);
   1957
   1958	/* go back to dequeued state */
   1959	__vb2_dqbuf(vb);
   1960
   1961	if (WARN_ON(vb->req_obj.req)) {
   1962		media_request_object_unbind(&vb->req_obj);
   1963		media_request_object_put(&vb->req_obj);
   1964	}
   1965	if (vb->request)
   1966		media_request_put(vb->request);
   1967	vb->request = NULL;
   1968
   1969	dprintk(q, 2, "dqbuf of buffer %d, state: %s\n",
   1970		vb->index, vb2_state_name(vb->state));
   1971
    1972	return 0;
    1974}
   1975EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
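
/*
 * Editorial sketch, not part of this file: a minimal blocking
 * dequeue/process/requeue cycle built on vb2_core_dqbuf() above, similar in
 * spirit to vb2_thread() near the end of this file. "process_one" is a
 * hypothetical consumer callback.
 */
static int __maybe_unused example_dq_cycle(struct vb2_queue *q,
					   int (*process_one)(struct vb2_buffer *vb))
{
	unsigned int index;
	int ret;

	ret = vb2_core_dqbuf(q, &index, NULL, false);
	if (ret)
		return ret;

	ret = process_one(q->bufs[index]);
	if (ret)
		return ret;

	return vb2_core_qbuf(q, index, NULL, NULL);
}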
   1976
   1977/*
   1978 * __vb2_queue_cancel() - cancel and stop (pause) streaming
   1979 *
    1980 * Removes all queued buffers from the driver's queue and all buffers queued
    1981 * by userspace from videobuf's queue; the queue returns to its post-reqbufs state.
   1982 */
   1983static void __vb2_queue_cancel(struct vb2_queue *q)
   1984{
   1985	unsigned int i;
   1986
   1987	/*
   1988	 * Tell driver to stop all transactions and release all queued
   1989	 * buffers.
   1990	 */
   1991	if (q->start_streaming_called)
   1992		call_void_qop(q, stop_streaming, q);
   1993
   1994	/*
    1995	 * If you see this warning, then the driver isn't cleaning up properly
    1996	 * in stop_streaming(). See the stop_streaming() documentation in
    1997	 * videobuf2-core.h for more information on how buffers should be
    1998	 * returned to vb2 in stop_streaming(); a sketch follows this function.
   1999	 */
   2000	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
   2001		for (i = 0; i < q->num_buffers; ++i)
   2002			if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
   2003				pr_warn("driver bug: stop_streaming operation is leaving buf %p in active state\n",
   2004					q->bufs[i]);
   2005				vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
   2006			}
   2007		/* Must be zero now */
   2008		WARN_ON(atomic_read(&q->owned_by_drv_count));
   2009	}
   2010
   2011	q->streaming = 0;
   2012	q->start_streaming_called = 0;
   2013	q->queued_count = 0;
   2014	q->error = 0;
   2015	q->uses_requests = 0;
   2016	q->uses_qbuf = 0;
   2017
   2018	/*
   2019	 * Remove all buffers from videobuf's list...
   2020	 */
   2021	INIT_LIST_HEAD(&q->queued_list);
   2022	/*
   2023	 * ...and done list; userspace will not receive any buffers it
   2024	 * has not already dequeued before initiating cancel.
   2025	 */
   2026	INIT_LIST_HEAD(&q->done_list);
   2027	atomic_set(&q->owned_by_drv_count, 0);
   2028	wake_up_all(&q->done_wq);
   2029
   2030	/*
   2031	 * Reinitialize all buffers for next use.
   2032	 * Make sure to call buf_finish for any queued buffers. Normally
   2033	 * that's done in dqbuf, but that's not going to happen when we
   2034	 * cancel the whole queue. Note: this code belongs here, not in
    2035	 * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical
    2036	 * call to fill_user_buffer() after buf_finish(). That order can't
    2037	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
   2038	 */
   2039	for (i = 0; i < q->num_buffers; ++i) {
   2040		struct vb2_buffer *vb = q->bufs[i];
   2041		struct media_request *req = vb->req_obj.req;
   2042
   2043		/*
    2044		 * If a request is associated with this buffer, then call
    2045		 * buf_request_complete() to give the driver a chance to
    2046		 * complete() the related request objects. Otherwise those
    2047		 * objects would never complete.
   2048		 */
   2049		if (req) {
   2050			enum media_request_state state;
   2051			unsigned long flags;
   2052
   2053			spin_lock_irqsave(&req->lock, flags);
   2054			state = req->state;
   2055			spin_unlock_irqrestore(&req->lock, flags);
   2056
   2057			if (state == MEDIA_REQUEST_STATE_QUEUED)
   2058				call_void_vb_qop(vb, buf_request_complete, vb);
   2059		}
   2060
   2061		__vb2_buf_mem_finish(vb);
   2062
   2063		if (vb->prepared) {
   2064			call_void_vb_qop(vb, buf_finish, vb);
   2065			vb->prepared = 0;
   2066		}
   2067		__vb2_dqbuf(vb);
   2068
   2069		if (vb->req_obj.req) {
   2070			media_request_object_unbind(&vb->req_obj);
   2071			media_request_object_put(&vb->req_obj);
   2072		}
   2073		if (vb->request)
   2074			media_request_put(vb->request);
   2075		vb->request = NULL;
   2076		vb->copied_timestamp = 0;
   2077	}
   2078}
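
/*
 * Editorial sketch, not part of this file: the stop_streaming() behaviour
 * __vb2_queue_cancel() warns about above. The driver must return every
 * buffer it still owns to vb2 before returning, marking undelivered data
 * as ERROR. This reuses the hypothetical "struct example_buf" and the
 * q->drv_priv in-flight list from the start_streaming() sketch above.
 */
static void __maybe_unused example_stop_streaming(struct vb2_queue *q)
{
	struct list_head *in_flight = q->drv_priv;
	struct example_buf *buf, *tmp;

	/* a real driver stops its DMA engine first, then releases buffers */
	list_for_each_entry_safe(buf, tmp, in_flight, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}
}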
   2079
   2080int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
   2081{
   2082	int ret;
   2083
   2084	if (type != q->type) {
   2085		dprintk(q, 1, "invalid stream type\n");
   2086		return -EINVAL;
   2087	}
   2088
   2089	if (q->streaming) {
   2090		dprintk(q, 3, "already streaming\n");
   2091		return 0;
   2092	}
   2093
   2094	if (!q->num_buffers) {
   2095		dprintk(q, 1, "no buffers have been allocated\n");
   2096		return -EINVAL;
   2097	}
   2098
   2099	if (q->num_buffers < q->min_buffers_needed) {
   2100		dprintk(q, 1, "need at least %u allocated buffers\n",
   2101				q->min_buffers_needed);
   2102		return -EINVAL;
   2103	}
   2104
   2105	/*
   2106	 * Tell driver to start streaming provided sufficient buffers
   2107	 * are available.
   2108	 */
   2109	if (q->queued_count >= q->min_buffers_needed) {
   2110		ret = v4l_vb2q_enable_media_source(q);
   2111		if (ret)
   2112			return ret;
   2113		ret = vb2_start_streaming(q);
   2114		if (ret)
   2115			return ret;
   2116	}
   2117
   2118	q->streaming = 1;
   2119
   2120	dprintk(q, 3, "successful\n");
   2121	return 0;
   2122}
   2123EXPORT_SYMBOL_GPL(vb2_core_streamon);
   2124
   2125void vb2_queue_error(struct vb2_queue *q)
   2126{
   2127	q->error = 1;
   2128
   2129	wake_up_all(&q->done_wq);
   2130}
   2131EXPORT_SYMBOL_GPL(vb2_queue_error);
   2132
   2133int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
   2134{
   2135	if (type != q->type) {
   2136		dprintk(q, 1, "invalid stream type\n");
   2137		return -EINVAL;
   2138	}
   2139
   2140	/*
   2141	 * Cancel will pause streaming and remove all buffers from the driver
   2142	 * and videobuf, effectively returning control over them to userspace.
   2143	 *
   2144	 * Note that we do this even if q->streaming == 0: if you prepare or
   2145	 * queue buffers, and then call streamoff without ever having called
   2146	 * streamon, you would still expect those buffers to be returned to
   2147	 * their normal dequeued state.
   2148	 */
   2149	__vb2_queue_cancel(q);
   2150	q->waiting_for_buffers = !q->is_output;
   2151	q->last_buffer_dequeued = false;
   2152
   2153	dprintk(q, 3, "successful\n");
   2154	return 0;
   2155}
   2156EXPORT_SYMBOL_GPL(vb2_core_streamoff);
   2157
   2158/*
   2159 * __find_plane_by_offset() - find plane associated with the given offset off
   2160 */
   2161static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
   2162			unsigned int *_buffer, unsigned int *_plane)
   2163{
   2164	struct vb2_buffer *vb;
   2165	unsigned int buffer, plane;
   2166
   2167	/*
   2168	 * Go over all buffers and their planes, comparing the given offset
   2169	 * with an offset assigned to each plane. If a match is found,
   2170	 * return its buffer and plane numbers.
   2171	 */
   2172	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
   2173		vb = q->bufs[buffer];
   2174
   2175		for (plane = 0; plane < vb->num_planes; ++plane) {
   2176			if (vb->planes[plane].m.offset == off) {
   2177				*_buffer = buffer;
   2178				*_plane = plane;
   2179				return 0;
   2180			}
   2181		}
   2182	}
   2183
   2184	return -EINVAL;
   2185}
   2186
   2187int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
   2188		unsigned int index, unsigned int plane, unsigned int flags)
   2189{
   2190	struct vb2_buffer *vb = NULL;
   2191	struct vb2_plane *vb_plane;
   2192	int ret;
   2193	struct dma_buf *dbuf;
   2194
   2195	if (q->memory != VB2_MEMORY_MMAP) {
   2196		dprintk(q, 1, "queue is not currently set up for mmap\n");
   2197		return -EINVAL;
   2198	}
   2199
   2200	if (!q->mem_ops->get_dmabuf) {
   2201		dprintk(q, 1, "queue does not support DMA buffer exporting\n");
   2202		return -EINVAL;
   2203	}
   2204
   2205	if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
    2206		dprintk(q, 1, "queue supports only O_CLOEXEC and access mode flags\n");
   2207		return -EINVAL;
   2208	}
   2209
   2210	if (type != q->type) {
   2211		dprintk(q, 1, "invalid buffer type\n");
   2212		return -EINVAL;
   2213	}
   2214
   2215	if (index >= q->num_buffers) {
   2216		dprintk(q, 1, "buffer index out of range\n");
   2217		return -EINVAL;
   2218	}
   2219
   2220	vb = q->bufs[index];
   2221
   2222	if (plane >= vb->num_planes) {
   2223		dprintk(q, 1, "buffer plane out of range\n");
   2224		return -EINVAL;
   2225	}
   2226
   2227	if (vb2_fileio_is_active(q)) {
   2228		dprintk(q, 1, "expbuf: file io in progress\n");
   2229		return -EBUSY;
   2230	}
   2231
   2232	vb_plane = &vb->planes[plane];
   2233
   2234	dbuf = call_ptr_memop(get_dmabuf,
   2235			      vb,
   2236			      vb_plane->mem_priv,
   2237			      flags & O_ACCMODE);
   2238	if (IS_ERR_OR_NULL(dbuf)) {
   2239		dprintk(q, 1, "failed to export buffer %d, plane %d\n",
   2240			index, plane);
   2241		return -EINVAL;
   2242	}
   2243
   2244	ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
   2245	if (ret < 0) {
   2246		dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n",
   2247			index, plane, ret);
   2248		dma_buf_put(dbuf);
   2249		return ret;
   2250	}
   2251
    2252	dprintk(q, 3, "buffer %d, plane %d exported as file descriptor %d\n",
   2253		index, plane, ret);
   2254	*fd = ret;
   2255
   2256	return 0;
   2257}
   2258EXPORT_SYMBOL_GPL(vb2_core_expbuf);
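
/*
 * Editorial sketch, not part of this file: the userspace call that lands in
 * vb2_core_expbuf() above (via VIDIOC_EXPBUF on a V4L2 node), exporting one
 * plane of an MMAP buffer as a DMABUF file descriptor:
 *
 *	struct v4l2_exportbuffer expbuf = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.index = 0,
 *		.plane = 0,
 *		.flags = O_CLOEXEC | O_RDONLY,
 *	};
 *
 *	if (ioctl(fd, VIDIOC_EXPBUF, &expbuf) == 0)
 *		use_dmabuf_fd(expbuf.fd);	// hypothetical consumer
 */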
   2259
   2260int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
   2261{
   2262	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
   2263	struct vb2_buffer *vb;
   2264	unsigned int buffer = 0, plane = 0;
   2265	int ret;
   2266	unsigned long length;
   2267
   2268	if (q->memory != VB2_MEMORY_MMAP) {
   2269		dprintk(q, 1, "queue is not currently set up for mmap\n");
   2270		return -EINVAL;
   2271	}
   2272
   2273	/*
   2274	 * Check memory area access mode.
   2275	 */
   2276	if (!(vma->vm_flags & VM_SHARED)) {
   2277		dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n");
   2278		return -EINVAL;
   2279	}
   2280	if (q->is_output) {
   2281		if (!(vma->vm_flags & VM_WRITE)) {
   2282			dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n");
   2283			return -EINVAL;
   2284		}
   2285	} else {
   2286		if (!(vma->vm_flags & VM_READ)) {
   2287			dprintk(q, 1, "invalid vma flags, VM_READ needed\n");
   2288			return -EINVAL;
   2289		}
   2290	}
   2291
   2292	mutex_lock(&q->mmap_lock);
   2293
   2294	if (vb2_fileio_is_active(q)) {
   2295		dprintk(q, 1, "mmap: file io in progress\n");
   2296		ret = -EBUSY;
   2297		goto unlock;
   2298	}
   2299
   2300	/*
   2301	 * Find the plane corresponding to the offset passed by userspace.
   2302	 */
   2303	ret = __find_plane_by_offset(q, off, &buffer, &plane);
   2304	if (ret)
   2305		goto unlock;
   2306
   2307	vb = q->bufs[buffer];
   2308
   2309	/*
    2310	 * MMAP requires page-aligned buffers.
    2311	 * The buffer length was page-aligned at __vb2_buf_mem_alloc(),
    2312	 * so we need to page-align it here as well.
   2313	 */
   2314	length = PAGE_ALIGN(vb->planes[plane].length);
   2315	if (length < (vma->vm_end - vma->vm_start)) {
   2316		dprintk(q, 1,
   2317			"MMAP invalid, as it would overflow buffer length\n");
   2318		ret = -EINVAL;
   2319		goto unlock;
   2320	}
   2321
   2322	/*
   2323	 * vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer,
    2324	 * not as an in-buffer offset. We always want to mmap a whole buffer
   2325	 * from its beginning.
   2326	 */
   2327	vma->vm_pgoff = 0;
   2328
   2329	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
   2330
   2331unlock:
   2332	mutex_unlock(&q->mmap_lock);
   2333	if (ret)
   2334		return ret;
   2335
   2336	dprintk(q, 3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
   2337	return 0;
   2338}
   2339EXPORT_SYMBOL_GPL(vb2_mmap);
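
/*
 * Editorial sketch, not part of this file: the userspace side of the mmap
 * 'cookie' handling above. V4L2 reports a per-plane offset via
 * VIDIOC_QUERYBUF and userspace passes it back verbatim as the mmap()
 * offset; vb2_mmap() resolves it with __find_plane_by_offset() and always
 * maps the whole plane. MAP_SHARED matches the VM_SHARED check above:
 *
 *	struct v4l2_buffer buf = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *		.index = 0,
 *	};
 *
 *	ioctl(fd, VIDIOC_QUERYBUF, &buf);
 *	void *mem = mmap(NULL, buf.length, PROT_READ, MAP_SHARED,
 *			 fd, buf.m.offset);
 */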
   2340
   2341#ifndef CONFIG_MMU
   2342unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
   2343				    unsigned long addr,
   2344				    unsigned long len,
   2345				    unsigned long pgoff,
   2346				    unsigned long flags)
   2347{
   2348	unsigned long off = pgoff << PAGE_SHIFT;
   2349	struct vb2_buffer *vb;
   2350	unsigned int buffer, plane;
   2351	void *vaddr;
   2352	int ret;
   2353
   2354	if (q->memory != VB2_MEMORY_MMAP) {
   2355		dprintk(q, 1, "queue is not currently set up for mmap\n");
   2356		return -EINVAL;
   2357	}
   2358
   2359	/*
   2360	 * Find the plane corresponding to the offset passed by userspace.
   2361	 */
   2362	ret = __find_plane_by_offset(q, off, &buffer, &plane);
   2363	if (ret)
   2364		return ret;
   2365
   2366	vb = q->bufs[buffer];
   2367
   2368	vaddr = vb2_plane_vaddr(vb, plane);
   2369	return vaddr ? (unsigned long)vaddr : -EINVAL;
   2370}
   2371EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
   2372#endif
   2373
   2374int vb2_core_queue_init(struct vb2_queue *q)
   2375{
   2376	/*
   2377	 * Sanity check
   2378	 */
   2379	if (WARN_ON(!q)			  ||
   2380	    WARN_ON(!q->ops)		  ||
   2381	    WARN_ON(!q->mem_ops)	  ||
   2382	    WARN_ON(!q->type)		  ||
   2383	    WARN_ON(!q->io_modes)	  ||
   2384	    WARN_ON(!q->ops->queue_setup) ||
   2385	    WARN_ON(!q->ops->buf_queue))
   2386		return -EINVAL;
   2387
   2388	if (WARN_ON(q->requires_requests && !q->supports_requests))
   2389		return -EINVAL;
   2390
   2391	/*
   2392	 * This combination is not allowed since a non-zero value of
   2393	 * q->min_buffers_needed can cause vb2_core_qbuf() to fail if
   2394	 * it has to call start_streaming(), and the Request API expects
   2395	 * that queueing a request (and thus queueing a buffer contained
   2396	 * in that request) will always succeed. There is no method of
   2397	 * propagating an error back to userspace.
   2398	 */
   2399	if (WARN_ON(q->supports_requests && q->min_buffers_needed))
   2400		return -EINVAL;
   2401
   2402	INIT_LIST_HEAD(&q->queued_list);
   2403	INIT_LIST_HEAD(&q->done_list);
   2404	spin_lock_init(&q->done_lock);
   2405	mutex_init(&q->mmap_lock);
   2406	init_waitqueue_head(&q->done_wq);
   2407
   2408	q->memory = VB2_MEMORY_UNKNOWN;
   2409
   2410	if (q->buf_struct_size == 0)
   2411		q->buf_struct_size = sizeof(struct vb2_buffer);
   2412
   2413	if (q->bidirectional)
   2414		q->dma_dir = DMA_BIDIRECTIONAL;
   2415	else
   2416		q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
   2417
   2418	if (q->name[0] == '\0')
   2419		snprintf(q->name, sizeof(q->name), "%s-%p",
   2420			 q->is_output ? "out" : "cap", q);
   2421
   2422	return 0;
   2423}
   2424EXPORT_SYMBOL_GPL(vb2_core_queue_init);
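
/*
 * Editorial sketch, not part of this file: the minimum a driver must fill in
 * before vb2_core_queue_init() will accept a queue. The stub ops below are
 * hypothetical; queue_setup and buf_queue are the only mandatory callbacks
 * checked above. vb2_vmalloc_memops is assumed to be available via
 * media/videobuf2-vmalloc.h.
 */
static int example_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
			       unsigned int *num_planes, unsigned int sizes[],
			       struct device *alloc_devs[])
{
	*num_planes = 1;
	sizes[0] = PAGE_SIZE;	/* one page per buffer, for illustration */
	return 0;
}

static void example_buf_queue(struct vb2_buffer *vb)
{
	/* a real driver would hand vb to its hardware queue here */
}

static const struct vb2_ops example_qops = {
	.queue_setup	= example_queue_setup,
	.buf_queue	= example_buf_queue,
};

static int __maybe_unused example_setup_queue(struct vb2_queue *q)
{
	q->type = 1;			/* any nonzero, driver-defined type */
	q->io_modes = VB2_MMAP;
	q->ops = &example_qops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->buf_struct_size = sizeof(struct vb2_buffer);
	return vb2_core_queue_init(q);
}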
   2425
   2426static int __vb2_init_fileio(struct vb2_queue *q, int read);
   2427static int __vb2_cleanup_fileio(struct vb2_queue *q);
   2428void vb2_core_queue_release(struct vb2_queue *q)
   2429{
   2430	__vb2_cleanup_fileio(q);
   2431	__vb2_queue_cancel(q);
   2432	mutex_lock(&q->mmap_lock);
   2433	__vb2_queue_free(q, q->num_buffers);
   2434	mutex_unlock(&q->mmap_lock);
   2435}
   2436EXPORT_SYMBOL_GPL(vb2_core_queue_release);
   2437
   2438__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
   2439		poll_table *wait)
   2440{
   2441	__poll_t req_events = poll_requested_events(wait);
   2442	struct vb2_buffer *vb = NULL;
   2443	unsigned long flags;
   2444
   2445	/*
   2446	 * poll_wait() MUST be called on the first invocation on all the
   2447	 * potential queues of interest, even if we are not interested in their
    2448	 * events during this first call. Failure to do so will result in the
    2449	 * queue's events being ignored, because the poll_table won't be
    2450	 * capable of adding new wait queues thereafter.
   2451	 */
   2452	poll_wait(file, &q->done_wq, wait);
   2453
   2454	if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
   2455		return 0;
   2456	if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
   2457		return 0;
   2458
   2459	/*
   2460	 * Start file I/O emulator only if streaming API has not been used yet.
   2461	 */
   2462	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
   2463		if (!q->is_output && (q->io_modes & VB2_READ) &&
   2464				(req_events & (EPOLLIN | EPOLLRDNORM))) {
   2465			if (__vb2_init_fileio(q, 1))
   2466				return EPOLLERR;
   2467		}
   2468		if (q->is_output && (q->io_modes & VB2_WRITE) &&
   2469				(req_events & (EPOLLOUT | EPOLLWRNORM))) {
   2470			if (__vb2_init_fileio(q, 0))
   2471				return EPOLLERR;
   2472			/*
   2473			 * Write to OUTPUT queue can be done immediately.
   2474			 */
   2475			return EPOLLOUT | EPOLLWRNORM;
   2476		}
   2477	}
   2478
   2479	/*
   2480	 * There is nothing to wait for if the queue isn't streaming, or if the
   2481	 * error flag is set.
   2482	 */
   2483	if (!vb2_is_streaming(q) || q->error)
   2484		return EPOLLERR;
   2485
   2486	/*
   2487	 * If this quirk is set and QBUF hasn't been called yet then
   2488	 * return EPOLLERR as well. This only affects capture queues, output
   2489	 * queues will always initialize waiting_for_buffers to false.
   2490	 * This quirk is set by V4L2 for backwards compatibility reasons.
   2491	 */
   2492	if (q->quirk_poll_must_check_waiting_for_buffers &&
   2493	    q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM)))
   2494		return EPOLLERR;
   2495
   2496	/*
   2497	 * For output streams you can call write() as long as there are fewer
   2498	 * buffers queued than there are buffers available.
   2499	 */
   2500	if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
   2501		return EPOLLOUT | EPOLLWRNORM;
   2502
   2503	if (list_empty(&q->done_list)) {
   2504		/*
   2505		 * If the last buffer was dequeued from a capture queue,
   2506		 * return immediately. DQBUF will return -EPIPE.
   2507		 */
   2508		if (q->last_buffer_dequeued)
   2509			return EPOLLIN | EPOLLRDNORM;
   2510	}
   2511
   2512	/*
   2513	 * Take first buffer available for dequeuing.
   2514	 */
   2515	spin_lock_irqsave(&q->done_lock, flags);
   2516	if (!list_empty(&q->done_list))
   2517		vb = list_first_entry(&q->done_list, struct vb2_buffer,
   2518					done_entry);
   2519	spin_unlock_irqrestore(&q->done_lock, flags);
   2520
   2521	if (vb && (vb->state == VB2_BUF_STATE_DONE
   2522			|| vb->state == VB2_BUF_STATE_ERROR)) {
   2523		return (q->is_output) ?
   2524				EPOLLOUT | EPOLLWRNORM :
   2525				EPOLLIN | EPOLLRDNORM;
   2526	}
   2527	return 0;
   2528}
   2529EXPORT_SYMBOL_GPL(vb2_core_poll);
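
/*
 * Editorial sketch, not part of this file: how the return values above map
 * onto a userspace event loop for a streaming capture node, with "buf" a
 * prepared struct v4l2_buffer:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	for (;;) {
 *		if (poll(&pfd, 1, -1) < 0)
 *			break;
 *		if (pfd.revents & POLLERR)
 *			break;	// not streaming, queue error, or quirk hit
 *		if (pfd.revents & POLLIN)
 *			ioctl(fd, VIDIOC_DQBUF, &buf);	// will not block now
 *	}
 */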
   2530
   2531/*
   2532 * struct vb2_fileio_buf - buffer context used by file io emulator
   2533 *
   2534 * vb2 provides a compatibility layer and emulator of file io (read and
    2535 * vb2 provides a compatibility layer and emulator of file io (read and
    2536 * write) calls on top of the streaming API. This structure is used for
   2537 */
   2538struct vb2_fileio_buf {
   2539	void *vaddr;
   2540	unsigned int size;
   2541	unsigned int pos;
   2542	unsigned int queued:1;
   2543};
   2544
   2545/*
   2546 * struct vb2_fileio_data - queue context used by file io emulator
   2547 *
   2548 * @cur_index:	the index of the buffer currently being read from or
   2549 *		written to. If equal to q->num_buffers then a new buffer
   2550 *		must be dequeued.
   2551 * @initial_index: in the read() case all buffers are queued up immediately
   2552 *		in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
   2553 *		buffers. However, in the write() case no buffers are initially
   2554 *		queued, instead whenever a buffer is full it is queued up by
   2555 *		__vb2_perform_fileio(). Only once all available buffers have
   2556 *		been queued up will __vb2_perform_fileio() start to dequeue
   2557 *		buffers. This means that initially __vb2_perform_fileio()
   2558 *		needs to know what buffer index to use when it is queuing up
   2559 *		the buffers for the first time. That initial index is stored
   2560 *		in this field. Once it is equal to q->num_buffers all
   2561 *		available buffers have been queued and __vb2_perform_fileio()
   2562 *		should start the normal dequeue/queue cycle.
   2563 *
   2564 * vb2 provides a compatibility layer and emulator of file io (read and
    2565 * write) calls on top of the streaming API. For proper operation it
    2566 * requires this structure to preserve driver state between calls to
    2567 * the read or write functions.
   2568 */
   2569struct vb2_fileio_data {
   2570	unsigned int count;
   2571	unsigned int type;
   2572	unsigned int memory;
   2573	struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
   2574	unsigned int cur_index;
   2575	unsigned int initial_index;
   2576	unsigned int q_count;
   2577	unsigned int dq_count;
   2578	unsigned read_once:1;
   2579	unsigned write_immediately:1;
   2580};
   2581
   2582/*
   2583 * __vb2_init_fileio() - initialize file io emulator
   2584 * @q:		videobuf2 queue
   2585 * @read:	mode selector (1 means read, 0 means write)
   2586 */
   2587static int __vb2_init_fileio(struct vb2_queue *q, int read)
   2588{
   2589	struct vb2_fileio_data *fileio;
   2590	int i, ret;
   2591	unsigned int count = 0;
   2592
   2593	/*
   2594	 * Sanity check
   2595	 */
   2596	if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
   2597		    (!read && !(q->io_modes & VB2_WRITE))))
   2598		return -EINVAL;
   2599
   2600	/*
   2601	 * Check if device supports mapping buffers to kernel virtual space.
   2602	 */
   2603	if (!q->mem_ops->vaddr)
   2604		return -EBUSY;
   2605
   2606	/*
    2607	 * Check that the streaming API has not already been activated.
   2608	 */
   2609	if (q->streaming || q->num_buffers > 0)
   2610		return -EBUSY;
   2611
   2612	/*
    2613	 * Start with count == 1; the driver can increase it in queue_setup().
   2614	 */
   2615	count = 1;
   2616
   2617	dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
   2618		(read) ? "read" : "write", count, q->fileio_read_once,
   2619		q->fileio_write_immediately);
   2620
   2621	fileio = kzalloc(sizeof(*fileio), GFP_KERNEL);
   2622	if (fileio == NULL)
   2623		return -ENOMEM;
   2624
   2625	fileio->read_once = q->fileio_read_once;
   2626	fileio->write_immediately = q->fileio_write_immediately;
   2627
   2628	/*
   2629	 * Request buffers and use MMAP type to force driver
   2630	 * to allocate buffers by itself.
   2631	 */
   2632	fileio->count = count;
   2633	fileio->memory = VB2_MEMORY_MMAP;
   2634	fileio->type = q->type;
   2635	q->fileio = fileio;
   2636	ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
   2637	if (ret)
   2638		goto err_kfree;
   2639
   2640	/*
   2641	 * Check if plane_count is correct
   2642	 * (multiplane buffers are not supported).
   2643	 */
   2644	if (q->bufs[0]->num_planes != 1) {
   2645		ret = -EBUSY;
   2646		goto err_reqbufs;
   2647	}
   2648
   2649	/*
   2650	 * Get kernel address of each buffer.
   2651	 */
   2652	for (i = 0; i < q->num_buffers; i++) {
   2653		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
   2654		if (fileio->bufs[i].vaddr == NULL) {
   2655			ret = -EINVAL;
   2656			goto err_reqbufs;
   2657		}
   2658		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
   2659	}
   2660
   2661	/*
    2662	 * Read mode requires pre-queuing all buffers.
   2663	 */
   2664	if (read) {
   2665		/*
   2666		 * Queue all buffers.
   2667		 */
   2668		for (i = 0; i < q->num_buffers; i++) {
   2669			ret = vb2_core_qbuf(q, i, NULL, NULL);
   2670			if (ret)
   2671				goto err_reqbufs;
   2672			fileio->bufs[i].queued = 1;
   2673		}
   2674		/*
   2675		 * All buffers have been queued, so mark that by setting
   2676		 * initial_index to q->num_buffers
   2677		 */
   2678		fileio->initial_index = q->num_buffers;
   2679		fileio->cur_index = q->num_buffers;
   2680	}
   2681
   2682	/*
   2683	 * Start streaming.
   2684	 */
   2685	ret = vb2_core_streamon(q, q->type);
   2686	if (ret)
   2687		goto err_reqbufs;
   2688
   2689	return ret;
   2690
   2691err_reqbufs:
   2692	fileio->count = 0;
   2693	vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
   2694
   2695err_kfree:
   2696	q->fileio = NULL;
   2697	kfree(fileio);
   2698	return ret;
   2699}
   2700
   2701/*
    2702 * __vb2_cleanup_fileio() - free resources used by the file io emulator
   2703 * @q:		videobuf2 queue
   2704 */
   2705static int __vb2_cleanup_fileio(struct vb2_queue *q)
   2706{
   2707	struct vb2_fileio_data *fileio = q->fileio;
   2708
   2709	if (fileio) {
   2710		vb2_core_streamoff(q, q->type);
   2711		q->fileio = NULL;
   2712		fileio->count = 0;
   2713		vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
   2714		kfree(fileio);
   2715		dprintk(q, 3, "file io emulator closed\n");
   2716	}
   2717	return 0;
   2718}
   2719
   2720/*
   2721 * __vb2_perform_fileio() - perform a single file io (read or write) operation
   2722 * @q:		videobuf2 queue
    2723 * @data:	pointer to the target userspace buffer
    2724 * @count:	number of bytes to read or write
    2725 * @ppos:	file handle position tracking pointer
    2726 * @nonblock:	mode selector (1 means nonblocking, 0 means blocking)
   2727 * @read:	access mode selector (1 means read, 0 means write)
   2728 */
   2729static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
   2730		loff_t *ppos, int nonblock, int read)
   2731{
   2732	struct vb2_fileio_data *fileio;
   2733	struct vb2_fileio_buf *buf;
   2734	bool is_multiplanar = q->is_multiplanar;
   2735	/*
   2736	 * When using write() to write data to an output video node the vb2 core
   2737	 * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
   2738	 * else is able to provide this information with the write() operation.
   2739	 */
   2740	bool copy_timestamp = !read && q->copy_timestamp;
   2741	unsigned index;
   2742	int ret;
   2743
   2744	dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n",
   2745		read ? "read" : "write", (long)*ppos, count,
   2746		nonblock ? "non" : "");
   2747
   2748	if (!data)
   2749		return -EINVAL;
   2750
   2751	if (q->waiting_in_dqbuf) {
   2752		dprintk(q, 3, "another dup()ped fd is %s\n",
   2753			read ? "reading" : "writing");
   2754		return -EBUSY;
   2755	}
   2756
   2757	/*
   2758	 * Initialize emulator on first call.
   2759	 */
   2760	if (!vb2_fileio_is_active(q)) {
   2761		ret = __vb2_init_fileio(q, read);
   2762		dprintk(q, 3, "vb2_init_fileio result: %d\n", ret);
   2763		if (ret)
   2764			return ret;
   2765	}
   2766	fileio = q->fileio;
   2767
   2768	/*
   2769	 * Check if we need to dequeue the buffer.
   2770	 */
   2771	index = fileio->cur_index;
   2772	if (index >= q->num_buffers) {
   2773		struct vb2_buffer *b;
   2774
   2775		/*
   2776		 * Call vb2_dqbuf to get buffer back.
   2777		 */
   2778		ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
   2779		dprintk(q, 5, "vb2_dqbuf result: %d\n", ret);
   2780		if (ret)
   2781			return ret;
   2782		fileio->dq_count += 1;
   2783
   2784		fileio->cur_index = index;
   2785		buf = &fileio->bufs[index];
   2786		b = q->bufs[index];
   2787
   2788		/*
   2789		 * Get number of bytes filled by the driver
   2790		 */
   2791		buf->pos = 0;
   2792		buf->queued = 0;
   2793		buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
   2794				 : vb2_plane_size(q->bufs[index], 0);
   2795		/* Compensate for data_offset on read in the multiplanar case. */
   2796		if (is_multiplanar && read &&
   2797				b->planes[0].data_offset < buf->size) {
   2798			buf->pos = b->planes[0].data_offset;
   2799			buf->size -= buf->pos;
   2800		}
   2801	} else {
   2802		buf = &fileio->bufs[index];
   2803	}
   2804
   2805	/*
    2806	 * Limit count to the bytes remaining in the buffer.
    2807	 */
    2808	if (buf->pos + count > buf->size) {
    2809		count = buf->size - buf->pos;
    2810		dprintk(q, 5, "reducing read/write count: %zd\n", count);
   2811	}
   2812
   2813	/*
    2814	 * Transfer data to or from userspace.
   2815	 */
   2816	dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n",
   2817		count, index, buf->pos);
   2818	if (read)
   2819		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
   2820	else
   2821		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
   2822	if (ret) {
   2823		dprintk(q, 3, "error copying data\n");
   2824		return -EFAULT;
   2825	}
   2826
   2827	/*
   2828	 * Update counters.
   2829	 */
   2830	buf->pos += count;
   2831	*ppos += count;
   2832
   2833	/*
   2834	 * Queue next buffer if required.
   2835	 */
   2836	if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
   2837		struct vb2_buffer *b = q->bufs[index];
   2838
   2839		/*
   2840		 * Check if this is the last buffer to read.
   2841		 */
   2842		if (read && fileio->read_once && fileio->dq_count == 1) {
   2843			dprintk(q, 3, "read limit reached\n");
   2844			return __vb2_cleanup_fileio(q);
   2845		}
   2846
   2847		/*
   2848		 * Call vb2_qbuf and give buffer to the driver.
   2849		 */
   2850		b->planes[0].bytesused = buf->pos;
   2851
   2852		if (copy_timestamp)
   2853			b->timestamp = ktime_get_ns();
   2854		ret = vb2_core_qbuf(q, index, NULL, NULL);
    2855		dprintk(q, 5, "vb2_qbuf result: %d\n", ret);
   2856		if (ret)
   2857			return ret;
   2858
   2859		/*
    2860		 * Buffer has been queued; update the status.
   2861		 */
   2862		buf->pos = 0;
   2863		buf->queued = 1;
   2864		buf->size = vb2_plane_size(q->bufs[index], 0);
   2865		fileio->q_count += 1;
   2866		/*
   2867		 * If we are queuing up buffers for the first time, then
   2868		 * increase initial_index by one.
   2869		 */
   2870		if (fileio->initial_index < q->num_buffers)
   2871			fileio->initial_index++;
   2872		/*
   2873		 * The next buffer to use is either a buffer that's going to be
   2874		 * queued for the first time (initial_index < q->num_buffers)
    2875		 * or it is equal to q->num_buffers, meaning that next time
    2876		 * we will need to dequeue a buffer, since we've now queued up
   2877		 * all the 'first time' buffers.
   2878		 */
   2879		fileio->cur_index = fileio->initial_index;
   2880	}
   2881
   2882	/*
    2883	 * Return the number of bytes processed.
   2884	 */
   2885	if (ret == 0)
   2886		ret = count;
   2887	return ret;
   2888}
   2889
   2890size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
   2891		loff_t *ppos, int nonblocking)
   2892{
   2893	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
   2894}
   2895EXPORT_SYMBOL_GPL(vb2_read);
   2896
   2897size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
   2898		loff_t *ppos, int nonblocking)
   2899{
   2900	return __vb2_perform_fileio(q, (char __user *) data, count,
   2901							ppos, nonblocking, 0);
   2902}
   2903EXPORT_SYMBOL_GPL(vb2_write);
   2904
   2905struct vb2_threadio_data {
   2906	struct task_struct *thread;
   2907	vb2_thread_fnc fnc;
   2908	void *priv;
   2909	bool stop;
   2910};
   2911
   2912static int vb2_thread(void *data)
   2913{
   2914	struct vb2_queue *q = data;
   2915	struct vb2_threadio_data *threadio = q->threadio;
   2916	bool copy_timestamp = false;
   2917	unsigned prequeue = 0;
   2918	unsigned index = 0;
   2919	int ret = 0;
   2920
   2921	if (q->is_output) {
   2922		prequeue = q->num_buffers;
   2923		copy_timestamp = q->copy_timestamp;
   2924	}
   2925
   2926	set_freezable();
   2927
   2928	for (;;) {
   2929		struct vb2_buffer *vb;
   2930
    2931		/*
    2932		 * Get the next buffer: prequeued first, else dequeue one.
    2933		 */
   2934		if (prequeue) {
   2935			vb = q->bufs[index++];
   2936			prequeue--;
   2937		} else {
   2938			call_void_qop(q, wait_finish, q);
   2939			if (!threadio->stop)
   2940				ret = vb2_core_dqbuf(q, &index, NULL, 0);
   2941			call_void_qop(q, wait_prepare, q);
   2942			dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret);
   2943			if (!ret)
   2944				vb = q->bufs[index];
   2945		}
   2946		if (ret || threadio->stop)
   2947			break;
   2948		try_to_freeze();
   2949
   2950		if (vb->state != VB2_BUF_STATE_ERROR)
   2951			if (threadio->fnc(vb, threadio->priv))
   2952				break;
   2953		call_void_qop(q, wait_finish, q);
   2954		if (copy_timestamp)
   2955			vb->timestamp = ktime_get_ns();
   2956		if (!threadio->stop)
   2957			ret = vb2_core_qbuf(q, vb->index, NULL, NULL);
   2958		call_void_qop(q, wait_prepare, q);
   2959		if (ret || threadio->stop)
   2960			break;
   2961	}
   2962
    2963	/* Stay alive until kthread_stop(); Linux becomes *very* unhappy otherwise. */
   2964	while (!kthread_should_stop()) {
   2965		set_current_state(TASK_INTERRUPTIBLE);
   2966		schedule();
   2967	}
   2968	return 0;
   2969}
   2970
   2971/*
    2972 * This function should not be used for anything other than the videobuf2-dvb
    2973 * support. If you think you have another good use case for it, then please
    2974 * contact the linux-media mailing list first.
   2975 */
   2976int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
   2977		     const char *thread_name)
   2978{
   2979	struct vb2_threadio_data *threadio;
   2980	int ret = 0;
   2981
   2982	if (q->threadio)
   2983		return -EBUSY;
   2984	if (vb2_is_busy(q))
   2985		return -EBUSY;
   2986	if (WARN_ON(q->fileio))
   2987		return -EBUSY;
   2988
   2989	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
   2990	if (threadio == NULL)
   2991		return -ENOMEM;
   2992	threadio->fnc = fnc;
   2993	threadio->priv = priv;
   2994
   2995	ret = __vb2_init_fileio(q, !q->is_output);
   2996	dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret);
   2997	if (ret)
   2998		goto nomem;
   2999	q->threadio = threadio;
   3000	threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
   3001	if (IS_ERR(threadio->thread)) {
   3002		ret = PTR_ERR(threadio->thread);
   3003		threadio->thread = NULL;
   3004		goto nothread;
   3005	}
   3006	return 0;
   3007
   3008nothread:
   3009	__vb2_cleanup_fileio(q);
   3010nomem:
   3011	kfree(threadio);
   3012	return ret;
   3013}
   3014EXPORT_SYMBOL_GPL(vb2_thread_start);
   3015
   3016int vb2_thread_stop(struct vb2_queue *q)
   3017{
   3018	struct vb2_threadio_data *threadio = q->threadio;
   3019	int err;
   3020
   3021	if (threadio == NULL)
   3022		return 0;
   3023	threadio->stop = true;
   3024	/* Wake up all pending sleeps in the thread */
   3025	vb2_queue_error(q);
   3026	err = kthread_stop(threadio->thread);
   3027	__vb2_cleanup_fileio(q);
   3028	threadio->thread = NULL;
   3029	kfree(threadio);
   3030	q->threadio = NULL;
   3031	return err;
   3032}
   3033EXPORT_SYMBOL_GPL(vb2_thread_stop);
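
/*
 * Editorial sketch, not part of this file: the intended usage pattern for
 * the two thread helpers above, as in videobuf2-dvb. "example_feed" is a
 * hypothetical callback; a nonzero return from it stops the thread.
 */
static int example_feed(struct vb2_buffer *vb, void *priv)
{
	/* consume the payload, e.g. via vb2_plane_vaddr(vb, 0) */
	return 0;
}

static int __maybe_unused example_start_feed(struct vb2_queue *q)
{
	return vb2_thread_start(q, example_feed, NULL, "feed");
}

static void __maybe_unused example_stop_feed(struct vb2_queue *q)
{
	vb2_thread_stop(q);
}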
   3034
   3035MODULE_DESCRIPTION("Media buffer core framework");
   3036MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
   3037MODULE_LICENSE("GPL");
   3038MODULE_IMPORT_NS(DMA_BUF);