cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xilinx-dma.c (21148B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Video DMA
 *
 * Copyright (C) 2013-2015 Ideas on Board
 * Copyright (C) 2013-2015 Xilinx, Inc.
 *
 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */

#include <linux/dma/xilinx_dma.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "xilinx-dma.h"
#include "xilinx-vip.h"
#include "xilinx-vipp.h"

#define XVIP_DMA_DEF_WIDTH		1920
#define XVIP_DMA_DEF_HEIGHT		1080

/* Minimum and maximum widths are expressed in bytes */
#define XVIP_DMA_MIN_WIDTH		1U
#define XVIP_DMA_MAX_WIDTH		65535U
#define XVIP_DMA_MIN_HEIGHT		1U
#define XVIP_DMA_MAX_HEIGHT		8191U

/* -----------------------------------------------------------------------------
 * Helper functions
 */

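/**
 * xvip_dma_remote_subdev - Find the remote subdev connected to a pad
 * @local: local media pad at the DMA video node
 * @pad: filled with the index of the remote pad (may be NULL)
 *
 * Return: the V4L2 subdevice at the other end of the link, or NULL if the pad
 * is not linked to a subdevice.
 */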
static struct v4l2_subdev *
xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_pad(local);
	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

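/**
 * xvip_dma_verify_format - Check that the video node and subdev formats match
 * @dma: the DMA engine whose format is being verified
 *
 * Compare the active format on the connected subdevice pad with the format
 * configured on the video node.
 *
 * Return: 0 when the media bus code, width, height and colorspace all match,
 * -EPIPE when no subdevice is connected, or another negative error code.
 */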
static int xvip_dma_verify_format(struct xvip_dma *dma)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	int ret;

	subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
	if (subdev == NULL)
		return -EPIPE;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	if (dma->fmtinfo->code != fmt.format.code ||
	    dma->format.height != fmt.format.height ||
	    dma->format.width != fmt.format.width ||
	    dma->format.colorspace != fmt.format.colorspace)
		return -EINVAL;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Pipeline Stream Management
 */

/**
 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
 * @pipe: The pipeline
 * @start: Start (when true) or stop (when false) the pipeline
 *
 * Walk the entity chain starting at the pipeline output video node and start
 * or stop all of them.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise.
 */
static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
{
	struct xvip_dma *dma = pipe->output;
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int ret;

	entity = &dma->video.entity;
	while (1) {
		pad = &entity->pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		pad = media_entity_remote_pad(pad);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			break;

		entity = pad->entity;
		subdev = media_entity_to_v4l2_subdev(entity);

		ret = v4l2_subdev_call(subdev, video, s_stream, start);
		if (start && ret < 0 && ret != -ENOIOCTLCMD)
			return ret;
	}

	return 0;
}

/**
 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
 * @pipe: The pipeline
 * @on: Turn the stream on when true or off when false
 *
 * The pipeline is shared between all DMA engines connected to its input and
 * output. While the stream state of DMA engines can be controlled
 * independently, pipelines have a shared stream state that enables or disables
 * all entities in the pipeline. For this reason the pipeline uses a streaming
 * counter that tracks the number of DMA engines that have requested the stream
 * to be enabled.
 *
 * When called with the @on argument set to true, this function will increment
 * the pipeline streaming count. If the streaming count reaches the number of
 * DMA engines in the pipeline it will enable all entities that belong to the
 * pipeline.
 *
 * Similarly, when called with the @on argument set to false, this function will
 * decrement the pipeline streaming count and disable all entities in the
 * pipeline when the streaming count reaches zero.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
 * not updated when the operation fails.
 */
static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
{
	int ret = 0;

	mutex_lock(&pipe->lock);

	if (on) {
		if (pipe->stream_count == pipe->num_dmas - 1) {
			ret = xvip_pipeline_start_stop(pipe, true);
			if (ret < 0)
				goto done;
		}
		pipe->stream_count++;
	} else {
		if (--pipe->stream_count == 0)
			xvip_pipeline_start_stop(pipe, false);
	}

done:
	mutex_unlock(&pipe->lock);
	return ret;
}

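/**
 * xvip_pipeline_validate - Find and count the video nodes in a pipeline
 * @pipe: the pipeline
 * @start: DMA engine at one end of the pipeline
 *
 * Walk the media graph connected to @start, record the output video node in
 * @pipe and count the connected DMA engines.
 *
 * Return: 0 on success, -EPIPE if the pipeline doesn't contain exactly one
 * output and at most one input, or a negative error code if the graph walk
 * fails.
 */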
static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
				  struct xvip_dma *start)
{
	struct media_graph graph;
	struct media_entity *entity = &start->video.entity;
	struct media_device *mdev = entity->graph_obj.mdev;
	unsigned int num_inputs = 0;
	unsigned int num_outputs = 0;
	int ret;

	mutex_lock(&mdev->graph_mutex);

	/* Walk the graph to locate the video nodes. */
	ret = media_graph_walk_init(&graph, mdev);
	if (ret) {
		mutex_unlock(&mdev->graph_mutex);
		return ret;
	}

	media_graph_walk_start(&graph, entity);

	while ((entity = media_graph_walk_next(&graph))) {
		struct xvip_dma *dma;

		if (entity->function != MEDIA_ENT_F_IO_V4L)
			continue;

		dma = to_xvip_dma(media_entity_to_video_device(entity));

		if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
			pipe->output = dma;
			num_outputs++;
		} else {
			num_inputs++;
		}
	}

	mutex_unlock(&mdev->graph_mutex);

	media_graph_walk_cleanup(&graph);

	/* We need exactly one output and zero or one input. */
	if (num_outputs != 1 || num_inputs > 1)
		return -EPIPE;

	pipe->num_dmas = num_inputs + num_outputs;

	return 0;
}

static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	pipe->num_dmas = 0;
	pipe->output = NULL;
}

/**
 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
 * @pipe: the pipeline
 *
 * Decrease the pipeline use count and clean it up if we were the last user.
 */
static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	mutex_lock(&pipe->lock);

	/* If we're the last user clean up the pipeline. */
	if (--pipe->use_count == 0)
		__xvip_pipeline_cleanup(pipe);

	mutex_unlock(&pipe->lock);
}

/**
 * xvip_pipeline_prepare - Prepare the pipeline for streaming
 * @pipe: the pipeline
 * @dma: DMA engine at one end of the pipeline
 *
 * Validate the pipeline if no user exists yet, otherwise just increase the use
 * count.
 *
 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
 */
static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
				 struct xvip_dma *dma)
{
	int ret;

	mutex_lock(&pipe->lock);

	/* If we're the first user validate and initialize the pipeline. */
	if (pipe->use_count == 0) {
		ret = xvip_pipeline_validate(pipe, dma);
		if (ret < 0) {
			__xvip_pipeline_cleanup(pipe);
			goto done;
		}
	}

	pipe->use_count++;
	ret = 0;

done:
	mutex_unlock(&pipe->lock);
	return ret;
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

/**
 * struct xvip_dma_buffer - Video DMA buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the DMA engine queued buffers list
 * @dma: DMA channel that uses the buffer
 */
struct xvip_dma_buffer {
	struct vb2_v4l2_buffer buf;
	struct list_head queue;
	struct xvip_dma *dma;
};

#define to_xvip_dma_buffer(vb)	container_of(vb, struct xvip_dma_buffer, buf)

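/**
 * xvip_dma_complete - DMA transfer completion callback
 * @param: the &struct xvip_dma_buffer that has just been filled or consumed
 *
 * Called by the DMA engine when a frame transfer completes. Remove the buffer
 * from the queued list, fill in the buffer metadata (field, sequence number,
 * timestamp and payload size) and hand it back to videobuf2.
 */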
static void xvip_dma_complete(void *param)
{
	struct xvip_dma_buffer *buf = param;
	struct xvip_dma *dma = buf->dma;

	spin_lock(&dma->queued_lock);
	list_del(&buf->queue);
	spin_unlock(&dma->queued_lock);

	buf->buf.field = V4L2_FIELD_NONE;
	buf->buf.sequence = dma->sequence++;
	buf->buf.vb2_buf.timestamp = ktime_get_ns();
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

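/*
 * vb2 queue_setup operation: report a single plane of at least sizeimage
 * bytes, or validate the plane size supplied by the caller.
 */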
static int
xvip_dma_queue_setup(struct vb2_queue *vq,
		     unsigned int *nbuffers, unsigned int *nplanes,
		     unsigned int sizes[], struct device *alloc_devs[])
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);

	/* Make sure the image size is large enough. */
	if (*nplanes)
		return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = dma->format.sizeimage;

	return 0;
}

static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);

	buf->dma = dma;

	return 0;
}

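/*
 * vb2 buf_queue operation: describe the frame as an interleaved DMA transfer
 * (one line per chunk, with the inter-chunk gap covering the line padding),
 * prepare and submit the descriptor, and add the buffer to the queued list.
 * The transfer direction depends on whether the queue is a capture or an
 * output queue.
 */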
static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
	struct dma_async_tx_descriptor *desc;
	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	u32 flags;

	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_DEV_TO_MEM;
		dma->xt.src_sgl = false;
		dma->xt.dst_sgl = true;
		dma->xt.dst_start = addr;
	} else {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_MEM_TO_DEV;
		dma->xt.src_sgl = true;
		dma->xt.dst_sgl = false;
		dma->xt.src_start = addr;
	}

	dma->xt.frame_size = 1;
	dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
	dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
	dma->xt.numf = dma->format.height;

	desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
	if (!desc) {
		dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		return;
	}
	desc->callback = xvip_dma_complete;
	desc->callback_param = buf;

	spin_lock_irq(&dma->queued_lock);
	list_add_tail(&buf->queue, &dma->queued_bufs);
	spin_unlock_irq(&dma->queued_lock);

	dmaengine_submit(desc);

	if (vb2_is_streaming(&dma->queue))
		dma_async_issue_pending(dma->dma);
}

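/*
 * vb2 start_streaming operation: start the media pipeline, verify that the
 * video node format matches the connected subdev, kick the DMA engine and
 * enable streaming on all entities in the pipeline. On failure all queued
 * buffers are returned to videobuf2 in the QUEUED state.
 */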
static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_dma_buffer *buf, *nbuf;
	struct xvip_pipeline *pipe;
	int ret;

	dma->sequence = 0;

	/*
	 * Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 *
	 * Use the pipeline object embedded in the first DMA object that starts
	 * streaming.
	 */
	pipe = dma->video.entity.pipe
	     ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;

	ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
	if (ret < 0)
		goto error;

	/* Verify that the configured format matches the output of the
	 * connected subdev.
	 */
	ret = xvip_dma_verify_format(dma);
	if (ret < 0)
		goto error_stop;

	ret = xvip_pipeline_prepare(pipe, dma);
	if (ret < 0)
		goto error_stop;

	/* Start the DMA engine. This must be done before starting the blocks
	 * in the pipeline to avoid DMA synchronization issues.
	 */
	dma_async_issue_pending(dma->dma);

	/* Start the pipeline. */
	xvip_pipeline_set_stream(pipe, true);

	return 0;

error_stop:
	media_pipeline_stop(&dma->video.entity);

error:
	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);

	return ret;
}

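/*
 * vb2 stop_streaming operation: stop the pipeline entities, terminate the DMA
 * engine, clean up the pipeline state and return all still-queued buffers to
 * videobuf2 in the ERROR state.
 */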
static void xvip_dma_stop_streaming(struct vb2_queue *vq)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
	struct xvip_dma_buffer *buf, *nbuf;

	/* Stop the pipeline. */
	xvip_pipeline_set_stream(pipe, false);

	/* Stop and reset the DMA engine. */
	dmaengine_terminate_all(dma->dma);

	/* Cleanup the pipeline and mark it as being stopped. */
	xvip_pipeline_cleanup(pipe);
	media_pipeline_stop(&dma->video.entity);

	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);
}

static const struct vb2_ops xvip_dma_queue_qops = {
	.queue_setup = xvip_dma_queue_setup,
	.buf_prepare = xvip_dma_buffer_prepare,
	.buf_queue = xvip_dma_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = xvip_dma_start_streaming,
	.stop_streaming = xvip_dma_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	cap->capabilities = dma->xdev->v4l2_caps | V4L2_CAP_STREAMING |
			    V4L2_CAP_DEVICE_CAPS;

	strscpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
	strscpy(cap->card, dma->video.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%pOFn:%u",
		 dma->xdev->dev->of_node, dma->port);

	return 0;
}

/* FIXME: without this callback function, some applications are not configured
 * with the correct formats and end up producing frames in the wrong format.
 * Whether this callback needs to be required is not clearly defined, so it
 * should be clarified through the mailing list.
 */
static int
xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	if (f->index > 0)
		return -EINVAL;

	f->pixelformat = dma->format.pixelformat;

	return 0;
}

static int
xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	format->fmt.pix = dma->format;

	return 0;
}

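/*
 * Adjust the requested pixel format to the hardware constraints: fall back to
 * the default format if the fourcc is not supported, clamp the width (in
 * bytes) and height to the DMA limits, and derive bytesperline and sizeimage.
 *
 * Example (not from the driver, just illustrating the arithmetic): with an
 * 8-bit single-plane format (bpp = 1), a DMA alignment of 8 bytes and a
 * requested 1921x1080 frame, align = lcm(8, 1) = 8, so the width is rounded
 * down to 1920 pixels, bytesperline is clamped to at least 1920 bytes and
 * sizeimage becomes bytesperline * 1080.
 */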
static void
__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
		      const struct xvip_video_format **fmtinfo)
{
	const struct xvip_video_format *info;
	unsigned int min_width;
	unsigned int max_width;
	unsigned int min_bpl;
	unsigned int max_bpl;
	unsigned int width;
	unsigned int align;
	unsigned int bpl;

	/* Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	info = xvip_get_format_by_fourcc(pix->pixelformat);

	pix->pixelformat = info->fourcc;
	pix->field = V4L2_FIELD_NONE;

	/* The transfer alignment requirements are expressed in bytes. Compute
	 * the minimum and maximum values, clamp the requested width and convert
	 * it back to pixels.
	 */
	align = lcm(dma->align, info->bpp);
	min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
	max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
	width = rounddown(pix->width * info->bpp, align);

	pix->width = clamp(width, min_width, max_width) / info->bpp;
	pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
			    XVIP_DMA_MAX_HEIGHT);

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable line
	 * sizes. Override the requested value with the minimum in that case.
	 */
	min_bpl = pix->width * info->bpp;
	max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
	bpl = rounddown(pix->bytesperline, dma->align);

	pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
	pix->sizeimage = pix->bytesperline * pix->height;

	if (fmtinfo)
		*fmtinfo = info;
}

static int
xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	__xvip_dma_try_format(dma, &format->fmt.pix, NULL);
	return 0;
}

static int
xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
	const struct xvip_video_format *info;

	__xvip_dma_try_format(dma, &format->fmt.pix, &info);

	if (vb2_is_busy(&dma->queue))
		return -EBUSY;

	dma->format = format->fmt.pix;
	dma->fmtinfo = info;

	return 0;
}

static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
	.vidioc_querycap		= xvip_dma_querycap,
	.vidioc_enum_fmt_vid_cap	= xvip_dma_enum_format,
	.vidioc_g_fmt_vid_cap		= xvip_dma_get_format,
	.vidioc_g_fmt_vid_out		= xvip_dma_get_format,
	.vidioc_s_fmt_vid_cap		= xvip_dma_set_format,
	.vidioc_s_fmt_vid_out		= xvip_dma_set_format,
	.vidioc_try_fmt_vid_cap		= xvip_dma_try_format,
	.vidioc_try_fmt_vid_out		= xvip_dma_try_format,
	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static const struct v4l2_file_operations xvip_dma_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Xilinx Video DMA Core
 */

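/**
 * xvip_dma_init - Initialize a DMA engine and register its video node
 * @xdev: the composite video device
 * @dma: the DMA engine to initialize
 * @type: V4L2 buffer type (capture or output)
 * @port: composite device port number, also used to build the "port<n>"
 *	  DMA channel name
 *
 * Set up the default format, the media pad, the video device, the vb2 queue
 * and the DMA channel for one port of the composite device.
 *
 * Return: 0 on success or a negative error code, in which case the partially
 * initialized resources are released through xvip_dma_cleanup().
 */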
int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
		  enum v4l2_buf_type type, unsigned int port)
{
	char name[16];
	int ret;

	dma->xdev = xdev;
	dma->port = port;
	mutex_init(&dma->lock);
	mutex_init(&dma->pipe.lock);
	INIT_LIST_HEAD(&dma->queued_bufs);
	spin_lock_init(&dma->queued_lock);

	dma->fmtinfo = xvip_get_format_by_fourcc(V4L2_PIX_FMT_YUYV);
	dma->format.pixelformat = dma->fmtinfo->fourcc;
	dma->format.colorspace = V4L2_COLORSPACE_SRGB;
	dma->format.field = V4L2_FIELD_NONE;
	dma->format.width = XVIP_DMA_DEF_WIDTH;
	dma->format.height = XVIP_DMA_DEF_HEIGHT;
	dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
	dma->format.sizeimage = dma->format.bytesperline * dma->format.height;

	/* Initialize the media entity... */
	dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
		       ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
	if (ret < 0)
		goto error;

	/* ... and the video node... */
	dma->video.fops = &xvip_dma_fops;
	dma->video.v4l2_dev = &xdev->v4l2_dev;
	dma->video.queue = &dma->queue;
	snprintf(dma->video.name, sizeof(dma->video.name), "%pOFn %s %u",
		 xdev->dev->of_node,
		 type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
		 port);
	dma->video.vfl_type = VFL_TYPE_VIDEO;
	dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			   ? VFL_DIR_RX : VFL_DIR_TX;
	dma->video.release = video_device_release_empty;
	dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
	dma->video.lock = &dma->lock;
	dma->video.device_caps = V4L2_CAP_STREAMING;
	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		dma->video.device_caps |= V4L2_CAP_VIDEO_CAPTURE;
	else
		dma->video.device_caps |= V4L2_CAP_VIDEO_OUTPUT;

	video_set_drvdata(&dma->video, dma);

	/* ... and the buffers queue... */
	/* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
	 * V4L2 APIs would be inefficient. Testing on the command line with
	 * 'cat /dev/video?' thus won't be possible, but given that the driver
	 * anyway requires a test tool to set up the pipeline before any video
	 * stream can be started, requiring a specific V4L2 test tool as well
	 * instead of 'cat' isn't really a drawback.
	 */
	dma->queue.type = type;
	dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	dma->queue.lock = &dma->lock;
	dma->queue.drv_priv = dma;
	dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
	dma->queue.ops = &xvip_dma_queue_qops;
	dma->queue.mem_ops = &vb2_dma_contig_memops;
	dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
				   | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
	dma->queue.dev = dma->xdev->dev;
	ret = vb2_queue_init(&dma->queue);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
		goto error;
	}

	/* ... and the DMA channel. */
	snprintf(name, sizeof(name), "port%u", port);
	dma->dma = dma_request_chan(dma->xdev->dev, name);
	if (IS_ERR(dma->dma)) {
		ret = PTR_ERR(dma->dma);
		if (ret != -EPROBE_DEFER)
			dev_err(dma->xdev->dev, "no VDMA channel found\n");
		goto error;
	}

	dma->align = 1 << dma->dma->device->copy_align;

	ret = video_register_device(&dma->video, VFL_TYPE_VIDEO, -1);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to register video device\n");
		goto error;
	}

	return 0;

error:
	xvip_dma_cleanup(dma);
	return ret;
}

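/**
 * xvip_dma_cleanup - Release the resources acquired by xvip_dma_init()
 * @dma: the DMA engine to clean up
 *
 * Safe to call on a partially initialized DMA engine: the video device and the
 * DMA channel are only released if they were successfully set up.
 */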
void xvip_dma_cleanup(struct xvip_dma *dma)
{
	if (video_is_registered(&dma->video))
		video_unregister_device(&dma->video);

	if (!IS_ERR_OR_NULL(dma->dma))
		dma_release_channel(dma->dma);

	media_entity_cleanup(&dma->video.entity);

	mutex_destroy(&dma->lock);
	mutex_destroy(&dma->pipe.lock);
}