cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cx231xx-vbi.c (16448B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
   cx231xx_vbi.c - driver for Conexant Cx23100/101/102 USB video capture devices

   Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
	Based on cx88 driver

 */

#include "cx231xx.h"
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/i2c.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/drv-intf/msp3400.h>
#include <media/tuner.h>

#include "cx231xx-vbi.h"

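/*
 * VBI data flow in this file: each completed bulk URB is handed to
 * cx231xx_isoc_vbi_copy(), which scans the transfer buffer for
 * BT.656-style SAV/EAV codes, copies every VBI line into the current
 * videobuf2 buffer via cx231xx_copy_vbi_line(), and completes the
 * buffer once both fields have been captured.
 */
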
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
	char *errmsg = "Unknown";

	switch (status) {
	case -ENOENT:
		errmsg = "unlinked synchronously";
		break;
	case -ECONNRESET:
		errmsg = "unlinked asynchronously";
		break;
	case -ENOSR:
		errmsg = "Buffer error (overrun)";
		break;
	case -EPIPE:
		errmsg = "Stalled (device not responding)";
		break;
	case -EOVERFLOW:
		errmsg = "Babble (bad cable?)";
		break;
	case -EPROTO:
		errmsg = "Bit-stuff error (bad cable?)";
		break;
	case -EILSEQ:
		errmsg = "CRC/Timeout (could be anything)";
		break;
	case -ETIME:
		errmsg = "Device does not respond";
		break;
	}
	if (packet < 0) {
		dev_err(dev->dev,
			"URB status %d [%s].\n", status, errmsg);
	} else {
		dev_err(dev->dev,
			"URB packet %d, status %d [%s].\n",
			packet, status, errmsg);
	}
}

/*
 * Controls the isoc copy of each urb packet
 */
static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	int rc = 1;
	unsigned char *p_buffer;
	u32 bytes_parsed = 0, buffer_size = 0;
	u8 sav_eav = 0;

	if (!dev)
		return 0;

	if (dev->state & DEV_DISCONNECTED)
		return 0;

	if (urb->status < 0) {
		print_err_status(dev, -1, urb->status);
		if (urb->status == -ENOENT)
			return 0;
	}

	/* get buffer pointer and length */
	p_buffer = urb->transfer_buffer;
	buffer_size = urb->actual_length;

	if (buffer_size > 0) {
		bytes_parsed = 0;

		if (dma_q->is_partial_line) {
			/* Handle the case where we were working on a partial
			   line */
			sav_eav = dma_q->last_sav;
		} else {
			/* Check for a SAV/EAV overlapping the
			   buffer boundary */

			sav_eav = cx231xx_find_boundary_SAV_EAV(p_buffer,
							  dma_q->partial_buf,
							  &bytes_parsed);
		}

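		/* In BT.656-style SAV/EAV codes the upper nibble of the
		   status byte carries the F/V/H flags while the lower
		   nibble holds only protection bits, so mask it off */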
		sav_eav &= 0xF0;
		/* Get the first line if we have some portion of an SAV/EAV from
		   the last buffer or a partial line */
		if (sav_eav) {
			bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
				sav_eav,		       /* SAV/EAV */
				p_buffer + bytes_parsed,       /* p_buffer */
				buffer_size - bytes_parsed);   /* buffer size */
		}

		/* Now parse data that is completely in this buffer */
		dma_q->is_partial_line = 0;

		while (bytes_parsed < buffer_size) {
			u32 bytes_used = 0;

			sav_eav = cx231xx_find_next_SAV_EAV(
				p_buffer + bytes_parsed,	/* p_buffer */
				buffer_size - bytes_parsed, /* buffer size */
				&bytes_used);	/* bytes used to get SAV/EAV */

			bytes_parsed += bytes_used;

			sav_eav &= 0xF0;
			if (sav_eav && (bytes_parsed < buffer_size)) {
				bytes_parsed += cx231xx_get_vbi_line(dev,
					dma_q, sav_eav,	/* SAV/EAV */
					p_buffer+bytes_parsed, /* p_buffer */
					buffer_size-bytes_parsed);/*buf size*/
			}
		}

		/* Save the last four bytes of the buffer so we can
		check the buffer boundary condition next time */
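		/* (An SAV/EAV code is the four-byte sequence ff 00 00 XY,
		   so up to three of its bytes can spill into the next URB;
		   cx231xx_find_boundary_SAV_EAV() stitches the halves
		   back together.) */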
		memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
		bytes_parsed = 0;
	}

	return rc;
}

/* ------------------------------------------------------------------
	Vbi buf operations
   ------------------------------------------------------------------*/

static int vbi_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);

	*nplanes = 1;
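	/* one plane: 2 bytes per sample, 'height' VBI lines per field,
	   two fields per frame */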
	sizes[0] = (dev->width * height * 2 * 2);
	return 0;
}

/* This is called *without* dev->slock held; please keep it that way */
static int vbi_buf_prepare(struct vb2_buffer *vb)
{
	struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
	u32 height = 0;
	u32 size;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	size = ((dev->width << 1) * height * 2);

	if (vb2_plane_size(vb, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(vb, 0, size);
	return 0;
}

static void vbi_buf_queue(struct vb2_buffer *vb)
{
	struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct cx231xx_buffer *buf =
	    container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	unsigned long flags;

	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	list_add_tail(&buf->list, &vidq->active);
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}

static void return_all_buffers(struct cx231xx *dev,
			       enum vb2_buffer_state state)
{
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	struct cx231xx_buffer *buf, *node;
	unsigned long flags;

	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	dev->vbi_mode.bulk_ctl.buf = NULL;
	list_for_each_entry_safe(buf, node, &vidq->active, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}

static int vbi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	int ret;

	vidq->sequence = 0;
	ret = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
				    CX231XX_NUM_VBI_BUFS,
				    dev->vbi_mode.alt_max_pkt_size[0],
				    cx231xx_isoc_vbi_copy);
	if (ret)
		return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
	return ret;
}

static void vbi_stop_streaming(struct vb2_queue *vq)
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);

	return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}

struct vb2_ops cx231xx_vbi_qops = {
	.queue_setup = vbi_queue_setup,
	.buf_prepare = vbi_buf_prepare,
	.buf_queue = vbi_buf_queue,
	.start_streaming = vbi_start_streaming,
	.stop_streaming = vbi_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

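/*
 * A minimal sketch of how these ops are typically wired up to a
 * vb2_queue (illustrative only; the actual queue setup lives elsewhere
 * in the driver, e.g. cx231xx-video.c):
 *
 *	q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
 *	q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_READ;
 *	q->drv_priv = dev;
 *	q->buf_struct_size = sizeof(struct cx231xx_buffer);
 *	q->ops = &cx231xx_vbi_qops;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	err = vb2_queue_init(q);
 */
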
/* ------------------------------------------------------------------
	URB control
   ------------------------------------------------------------------*/

/*
 * IRQ callback, called by URB callback
 */
static void cx231xx_irq_vbi_callback(struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
	unsigned long flags;

	switch (urb->status) {
	case 0:		/* success */
	case -ETIMEDOUT:	/* NAK */
		break;
	case -ECONNRESET:	/* kill */
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	default:		/* error */
		dev_err(dev->dev,
			"urb completion error %d.\n", urb->status);
		break;
	}

	/* Copy data from URB */
	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);

	/* Reset status */
	urb->status = 0;

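	/* resubmit from the completion handler (interrupt context),
	   hence GFP_ATOMIC */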
	urb->status = usb_submit_urb(urb, GFP_ATOMIC);
	if (urb->status) {
		dev_err(dev->dev, "urb resubmit failed (error=%i)\n",
			urb->status);
	}
}

/*
 * Stop and Deallocate URBs
 */
void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
{
	struct urb *urb;
	int i;

	dev_dbg(dev->dev, "called cx231xx_uninit_vbi_isoc\n");

	dev->vbi_mode.bulk_ctl.nfields = -1;
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		urb = dev->vbi_mode.bulk_ctl.urb[i];
		if (urb) {
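			/* usb_kill_urb() may sleep, so fall back to the
			   non-blocking usb_unlink_urb() when interrupts
			   are disabled */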
			if (!irqs_disabled())
				usb_kill_urb(urb);
			else
				usb_unlink_urb(urb);

			if (dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {

				kfree(dev->vbi_mode.bulk_ctl.
				      transfer_buffer[i]);
				dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
				    NULL;
			}
			usb_free_urb(urb);
			dev->vbi_mode.bulk_ctl.urb[i] = NULL;
		}
		dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL;
	}

	kfree(dev->vbi_mode.bulk_ctl.urb);
	kfree(dev->vbi_mode.bulk_ctl.transfer_buffer);

	dev->vbi_mode.bulk_ctl.urb = NULL;
	dev->vbi_mode.bulk_ctl.transfer_buffer = NULL;
	dev->vbi_mode.bulk_ctl.num_bufs = 0;

	cx231xx_capture_start(dev, 0, Vbi);
}
EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc);

/*
 * Allocate URBs and start IRQ
 */
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
			  int num_bufs, int max_pkt_size,
			  int (*bulk_copy) (struct cx231xx *dev,
					    struct urb *urb))
{
	struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;
	int i;
	int sb_size, pipe;
	struct urb *urb;
	int rc;

	dev_dbg(dev->dev, "called cx231xx_init_vbi_isoc\n");

	/* De-allocates all pending stuff */
	cx231xx_uninit_vbi_isoc(dev);

	/* clear if any halt */
	usb_clear_halt(dev->udev,
		       usb_rcvbulkpipe(dev->udev,
				       dev->vbi_mode.end_point_addr));

	dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy;
	dev->vbi_mode.bulk_ctl.num_bufs = num_bufs;
	dma_q->pos = 0;
	dma_q->is_partial_line = 0;
	dma_q->last_sav = 0;
	dma_q->current_field = -1;
	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_per_field = ((dev->norm & V4L2_STD_625_50) ?
				  PAL_VBI_LINES : NTSC_VBI_LINES);
	dma_q->lines_completed = 0;
	for (i = 0; i < 8; i++)
		dma_q->partial_buf[i] = 0;

	dev->vbi_mode.bulk_ctl.urb = kcalloc(num_bufs, sizeof(void *),
					     GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.urb) {
		dev_err(dev->dev,
			"cannot alloc memory for usb buffers\n");
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.transfer_buffer =
	    kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
		dev_err(dev->dev,
			"cannot allocate memory for usb transfer buffers\n");
		kfree(dev->vbi_mode.bulk_ctl.urb);
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size;
	dev->vbi_mode.bulk_ctl.buf = NULL;

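	/* each transfer buffer holds max_packets packets of
	   max_pkt_size bytes */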
	sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size;

	/* allocate urbs and transfer buffers */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}
		dev->vbi_mode.bulk_ctl.urb[i] = urb;
		urb->transfer_flags = 0;

		dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
		    kzalloc(sb_size, GFP_KERNEL);
		if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
			dev_err(dev->dev,
				"unable to allocate %i bytes for transfer buffer %i\n",
				sb_size, i);
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}

		pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);
		usb_fill_bulk_urb(urb, dev->udev, pipe,
				  dev->vbi_mode.bulk_ctl.transfer_buffer[i],
				  sb_size, cx231xx_irq_vbi_callback, dma_q);
	}

	init_waitqueue_head(&dma_q->wq);

	/* submit URBs and enable IRQ */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
		if (rc) {
			dev_err(dev->dev,
				"submit of urb %i failed (error=%i)\n", i, rc);
			cx231xx_uninit_vbi_isoc(dev);
			return rc;
		}
	}

	cx231xx_capture_start(dev, 1, Vbi);

	return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_init_vbi_isoc);

u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			 u8 sav_eav, u8 *p_buffer, u32 buffer_size)
{
	u32 bytes_copied = 0;
	int current_field = -1;

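	/* map the SAV code to the VBI field it introduces; any other
	   code is ignored */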
	switch (sav_eav) {

	case SAV_VBI_FIELD1:
		current_field = 1;
		break;

	case SAV_VBI_FIELD2:
		current_field = 2;
		break;
	default:
		break;
	}

	if (current_field < 0)
		return bytes_copied;

	dma_q->last_sav = sav_eav;

	bytes_copied =
	    cx231xx_copy_vbi_line(dev, dma_q, p_buffer, buffer_size,
				  current_field);

	return bytes_copied;
}

/*
 * Announces that a buffer was filled and requests the next one
 */
static inline void vbi_buffer_filled(struct cx231xx *dev,
				     struct cx231xx_dmaqueue *dma_q,
				     struct cx231xx_buffer *buf)
{
	/* Advise that the buffer was filled */
	/* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.index); */

	buf->vb.sequence = dma_q->sequence++;
	buf->vb.vb2_buf.timestamp = ktime_get_ns();

	dev->vbi_mode.bulk_ctl.buf = NULL;

	list_del(&buf->list);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}

u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			  u8 *p_line, u32 length, int field_number)
{
	u32 bytes_to_copy;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width * 2;

	if (dma_q->current_field == -1) {
		/* Just starting up */
		cx231xx_reset_vbi_buffer(dev, dma_q);
	}

	if (dma_q->current_field != field_number)
		dma_q->lines_completed = 0;

	/* get the buffer pointer */
	buf = dev->vbi_mode.bulk_ctl.buf;

	/* Remember the field number for next time */
	dma_q->current_field = field_number;

	bytes_to_copy = dma_q->bytes_left_in_line;
	if (bytes_to_copy > length)
		bytes_to_copy = length;

	if (dma_q->lines_completed >= dma_q->lines_per_field) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return 0;
	}

	dma_q->is_partial_line = 1;

	/* If we don't have a buffer, just return the number of bytes we would
	   have copied if we had a buffer. */
	if (!buf) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return bytes_to_copy;
	}

	/* copy the data to video buffer */
	cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy);

	dma_q->pos += bytes_to_copy;
	dma_q->bytes_left_in_line -= bytes_to_copy;

	if (dma_q->bytes_left_in_line == 0) {

		dma_q->bytes_left_in_line = _line_size;
		dma_q->lines_completed++;
		dma_q->is_partial_line = 0;

		if (cx231xx_is_vbi_buffer_done(dev, dma_q) && buf) {

			vbi_buffer_filled(dev, dma_q, buf);

			dma_q->pos = 0;
			dma_q->lines_completed = 0;
			cx231xx_reset_vbi_buffer(dev, dma_q);
		}
	}

	return bytes_to_copy;
}

/*
 * video-buf generic routine to get the next available buffer
 */
static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
				    struct cx231xx_buffer **buf)
{
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
	char *outp;

	if (list_empty(&dma_q->active)) {
		dev_err(dev->dev, "No active queue to serve\n");
		dev->vbi_mode.bulk_ctl.buf = NULL;
		*buf = NULL;
		return;
	}

	/* Get the next buffer */
	*buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);

	/* Clean up the buffer - useful for testing for frame/URB loss */
	outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
	memset(outp, 0, vb2_plane_size(&(*buf)->vb.vb2_buf, 0));

	dev->vbi_mode.bulk_ctl.buf = *buf;
}

void cx231xx_reset_vbi_buffer(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	struct cx231xx_buffer *buf;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL) {
		/* first try to get the buffer */
		get_next_vbi_buf(dma_q, &buf);

		dma_q->pos = 0;
		dma_q->current_field = -1;
	}

	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_completed = 0;
}

int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			u8 *p_buffer, u32 bytes_to_copy)
{
	u8 *p_out_buffer = NULL;
	u32 current_line_bytes_copied = 0;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width << 1;
	void *startwrite;
	int offset, lencopy;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL)
		return -EINVAL;

	p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);

	if (dma_q->bytes_left_in_line != _line_size) {
		current_line_bytes_copied =
		    _line_size - dma_q->bytes_left_in_line;
	}

	offset = (dma_q->lines_completed * _line_size) +
		 current_line_bytes_copied;

	if (dma_q->current_field == 2) {
		/* Populate the second half of the frame */
		offset += (dev->width * 2 * dma_q->lines_per_field);
	}

	/* prepare destination address */
	startwrite = p_out_buffer + offset;

	lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
		  bytes_to_copy : dma_q->bytes_left_in_line;

	memcpy(startwrite, p_buffer, lencopy);

	return 0;
}

u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	if (dma_q->lines_completed == height && dma_q->current_field == 2)
		return 1;
	else
		return 0;
}