cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

netup_unidvb_core.c


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * netup_unidvb_core.c
 *
 * Main module for NetUP Universal Dual DVB-CI
 *
 * Copyright (C) 2014 NetUP Inc.
 * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
 * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "netup_unidvb.h"
#include "cxd2841er.h"
#include "horus3a.h"
#include "ascot2e.h"
#include "helene.h"
#include "lnbh25.h"

static int spi_enable;
module_param(spi_enable, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
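
/*
 * spi_enable exposes the on-board SPI flash controller (see
 * netup_spi_init() in the probe path); it is also forced on when old
 * firmware is detected in netup_unidvb_initdev(). Presumably set at
 * load time, e.g. "modprobe netup-unidvb spi_enable=1" (module name
 * assumed, not confirmed by this file).
 */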

MODULE_DESCRIPTION("Driver for NetUP Dual Universal DVB CI PCIe card");
MODULE_AUTHOR("info@netup.ru");
MODULE_VERSION(NETUP_UNIDVB_VERSION);
MODULE_LICENSE("GPL");

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/* Avalon-MM PCI-E registers */
#define	AVL_PCIE_IENR		0x50
#define AVL_PCIE_ISR		0x40
#define AVL_IRQ_ENABLE		0x80
#define AVL_IRQ_ASSERTED	0x80
/* GPIO registers */
#define GPIO_REG_IO		0x4880
#define GPIO_REG_IO_TOGGLE	0x4882
#define GPIO_REG_IO_SET		0x4884
#define GPIO_REG_IO_CLEAR	0x4886
/* GPIO bits */
#define GPIO_FEA_RESET		(1 << 0)
#define GPIO_FEB_RESET		(1 << 1)
#define GPIO_RFA_CTL		(1 << 2)
#define GPIO_RFB_CTL		(1 << 3)
#define GPIO_FEA_TU_RESET	(1 << 4)
#define GPIO_FEB_TU_RESET	(1 << 5)
/* DMA base address */
#define NETUP_DMA0_ADDR		0x4900
#define NETUP_DMA1_ADDR		0x4940
/* 8 DMA blocks * 128 packets * 188 bytes */
#define NETUP_DMA_BLOCKS_COUNT	8
#define NETUP_DMA_PACKETS_COUNT	128
/* DMA status bits */
#define BIT_DMA_RUN		1
#define BIT_DMA_ERROR		2
#define BIT_DMA_IRQ		0x200

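/*
 * Ring buffer arithmetic implied by the constants above: one DMA
 * channel spans NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT * 188
 * = 8 * 128 * 188 = 192512 bytes; the two channels share a single
 * coherent allocation of 2 * 192512 = 385024 bytes (ndev->dma_size in
 * netup_unidvb_initdev()).
 */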

/**
 * struct netup_dma_regs - the map of DMA module registers
 * @ctrlstat_set:	Control register, write to set control bits
 * @ctrlstat_clear:	Control register, write to clear control bits
 * @start_addr_lo:	DMA ring buffer start address, lower part
 * @start_addr_hi:	DMA ring buffer start address, higher part
 * @size:		DMA ring buffer size register
 *			* Bits [0-7]:	DMA packet size, 188 bytes
 *			* Bits [16-23]:	packets count in block, 128 packets
 *			* Bits [24-31]:	blocks count, 8 blocks
 * @timeout:		DMA timeout in units of 8ns
 *			For example, a value of 375000000 equals 3 sec
 * @curr_addr_lo:	Current ring buffer head address, lower part
 * @curr_addr_hi:	Current ring buffer head address, higher part
 * @stat_pkt_received:	Statistic register, not tested
 * @stat_pkt_accepted:	Statistic register, not tested
 * @stat_pkt_overruns:	Statistic register, not tested
 * @stat_pkt_underruns:	Statistic register, not tested
 * @stat_fifo_overruns:	Statistic register, not tested
 */
struct netup_dma_regs {
	__le32	ctrlstat_set;
	__le32	ctrlstat_clear;
	__le32	start_addr_lo;
	__le32	start_addr_hi;
	__le32	size;
	__le32	timeout;
	__le32	curr_addr_lo;
	__le32	curr_addr_hi;
	__le32	stat_pkt_received;
	__le32	stat_pkt_accepted;
	__le32	stat_pkt_overruns;
	__le32	stat_pkt_underruns;
	__le32	stat_fifo_overruns;
} __packed __aligned(1);
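
/*
 * The struct above mirrors the per-channel DMA register file exposed at
 * NETUP_DMA0_ADDR/NETUP_DMA1_ADDR in BAR0 (see netup_unidvb_dma_init());
 * fields are little-endian and accessed with readl()/writel(), and
 * __packed/__aligned(1) keeps the layout free of compiler padding so
 * field offsets match the hardware.
 */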

struct netup_unidvb_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head	list;
	u32			size;
};

static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc);
static void netup_unidvb_queue_cleanup(struct netup_dma *dma);

static struct cxd2841er_config demod_config = {
	.i2c_addr = 0xc8,
	.xtal = SONY_XTAL_24000,
	.flags = CXD2841ER_USE_GATECTRL | CXD2841ER_ASCOT
};

static struct horus3a_config horus3a_conf = {
	.i2c_address = 0xc0,
	.xtal_freq_mhz = 16,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct ascot2e_config ascot2e_conf = {
	.i2c_address = 0xc2,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct helene_config helene_conf = {
	.i2c_address = 0xc0,
	.xtal = SONY_HELENE_XTAL_24000,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct lnbh25_config lnbh25_conf = {
	.i2c_address = 0x10,
	.data2_config = LNBH25_TEN | LNBH25_EXTM
};

static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc)
{
	u8 reg, mask;
	struct netup_dma *dma = priv;
	struct netup_unidvb_dev *ndev;

	if (!priv)
		return -EINVAL;
	ndev = dma->ndev;
	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d is_dvb_tc %d\n",
		__func__, dma->num, is_dvb_tc);
	reg = readb(ndev->bmmio0 + GPIO_REG_IO);
	mask = (dma->num == 0) ? GPIO_RFA_CTL : GPIO_RFB_CTL;

	/* inverted tuner control in hw rev. 1.4 */
	if (ndev->rev == NETUP_HW_REV_1_4)
		is_dvb_tc = !is_dvb_tc;

	if (!is_dvb_tc)
		reg |= mask;
	else
		reg &= ~mask;
	writeb(reg, ndev->bmmio0 + GPIO_REG_IO);
	return 0;
}
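
/*
 * netup_unidvb_tuner_ctrl() is the set_tuner_callback wired into the
 * Sony tuner configs above. The is_dvb_tc flag presumably selects the
 * terrestrial/cable RF input by toggling the channel's
 * GPIO_RFA_CTL/GPIO_RFB_CTL line (inverted on hw rev. 1.4, per the
 * comment in the function body).
 */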

static void netup_unidvb_dev_enable(struct netup_unidvb_dev *ndev)
{
	u16 gpio_reg;

	/* enable PCI-E interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	/* unreset frontends bits[0:1] */
	writeb(0x00, ndev->bmmio0 + GPIO_REG_IO);
	msleep(100);
	gpio_reg =
		GPIO_FEA_RESET | GPIO_FEB_RESET |
		GPIO_FEA_TU_RESET | GPIO_FEB_TU_RESET |
		GPIO_RFA_CTL | GPIO_RFB_CTL;
	writeb(gpio_reg, ndev->bmmio0 + GPIO_REG_IO);
	dev_dbg(&ndev->pci_dev->dev,
		"%s(): AVL_PCIE_IENR 0x%x GPIO_REG_IO 0x%x\n",
		__func__, readl(ndev->bmmio0 + AVL_PCIE_IENR),
		(int)readb(ndev->bmmio0 + GPIO_REG_IO));
}

static void netup_unidvb_dma_enable(struct netup_dma *dma, int enable)
{
	u32 irq_mask = (dma->num == 0 ?
		NETUP_UNIDVB_IRQ_DMA1 : NETUP_UNIDVB_IRQ_DMA2);

	dev_dbg(&dma->ndev->pci_dev->dev,
		"%s(): DMA%d enable %d\n", __func__, dma->num, enable);
	if (enable) {
		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_set);
		writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_SET);
	} else {
		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_clear);
		writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_CLEAR);
	}
}

static irqreturn_t netup_dma_interrupt(struct netup_dma *dma)
{
	u64 addr_curr;
	u32 size;
	unsigned long flags;
	struct device *dev = &dma->ndev->pci_dev->dev;

	spin_lock_irqsave(&dma->lock, flags);
	addr_curr = ((u64)readl(&dma->regs->curr_addr_hi) << 32) |
		(u64)readl(&dma->regs->curr_addr_lo) | dma->high_addr;
	/* clear IRQ */
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	/* sanity check */
	if (addr_curr < dma->addr_phys ||
			addr_curr > dma->addr_phys + dma->ring_buffer_size) {
		if (addr_curr != 0) {
			dev_err(dev,
				"%s(): addr 0x%llx not from 0x%llx:0x%llx\n",
				__func__, addr_curr, (u64)dma->addr_phys,
				(u64)(dma->addr_phys + dma->ring_buffer_size));
		}
		goto irq_handled;
	}
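	/*
	 * The hardware write pointer advances through the ring, so the
	 * amount of new data is either a plain difference or, if the
	 * pointer wrapped, the rest of the ring. E.g. with a 192512-byte
	 * ring, addr_last at offset 190000 and addr_curr wrapped to
	 * offset 1000: size = 192512 - (190000 - 1000) = 3512 bytes.
	 */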
	size = (addr_curr >= dma->addr_last) ?
		(u32)(addr_curr - dma->addr_last) :
		(u32)(dma->ring_buffer_size - (dma->addr_last - addr_curr));
	if (dma->data_size != 0) {
		printk_ratelimited("%s(): lost interrupt, data size %d\n",
			__func__, dma->data_size);
		dma->data_size += size;
	}
	if (dma->data_size == 0 || dma->data_size > dma->ring_buffer_size) {
		dma->data_size = size;
		dma->data_offset = (u32)(dma->addr_last - dma->addr_phys);
	}
	dma->addr_last = addr_curr;
	queue_work(dma->ndev->wq, &dma->work);
irq_handled:
	spin_unlock_irqrestore(&dma->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
{
	struct pci_dev *pci_dev = (struct pci_dev *)dev_id;
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);
	u32 reg40, reg_isr;
	irqreturn_t iret = IRQ_NONE;

	/* disable interrupts */
	writel(0, ndev->bmmio0 + AVL_PCIE_IENR);
	/* check IRQ source */
	reg40 = readl(ndev->bmmio0 + AVL_PCIE_ISR);
	if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
		/* IRQ is being signaled */
		reg_isr = readw(ndev->bmmio0 + REG_ISR);
		if (reg_isr & NETUP_UNIDVB_IRQ_SPI)
			iret = netup_spi_interrupt(ndev->spi);
		else if (!ndev->old_fw) {
			if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
				iret = netup_i2c_interrupt(&ndev->i2c[0]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
				iret = netup_i2c_interrupt(&ndev->i2c[1]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
				iret = netup_dma_interrupt(&ndev->dma[0]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
				iret = netup_dma_interrupt(&ndev->dma[1]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
				iret = netup_ci_interrupt(ndev);
			} else {
				goto err;
			}
		} else {
err:
			dev_err(&pci_dev->dev,
				"%s(): unknown interrupt 0x%x\n",
				__func__, reg_isr);
		}
	}
	/* re-enable interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	return iret;
}
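
/*
 * The ISR masks interrupts at the Avalon-MM bridge, then reads REG_ISR
 * to demultiplex the internal source (SPI, I2C, DMA, CI). iret stays
 * IRQ_NONE when the bridge did not assert the line, which lets the
 * kernel arbitrate the line registered with IRQF_SHARED in
 * netup_unidvb_initdev().
 */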

static int netup_unidvb_queue_setup(struct vb2_queue *vq,
				    unsigned int *nbuffers,
				    unsigned int *nplanes,
				    unsigned int sizes[],
				    struct device *alloc_devs[])
{
	struct netup_dma *dma = vb2_get_drv_priv(vq);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);

	*nplanes = 1;
	if (vq->num_buffers + *nbuffers < VIDEO_MAX_FRAME)
		*nbuffers = VIDEO_MAX_FRAME - vq->num_buffers;
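	/*
	 * One plane per buffer, sized for a full DMA block: 128 packets *
	 * 188 bytes = 24064 bytes, which PAGE_ALIGN() rounds up to 24576
	 * bytes (assuming 4 KiB pages).
	 */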
	sizes[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT * 188);
	dev_dbg(&dma->ndev->pci_dev->dev, "%s() nbuffers=%d sizes[0]=%d\n",
		__func__, *nbuffers, sizes[0]);
	return 0;
}

static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
{
	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct netup_unidvb_buffer *buf = container_of(vbuf,
				struct netup_unidvb_buffer, vb);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
	buf->size = 0;
	return 0;
}

static void netup_unidvb_buf_queue(struct vb2_buffer *vb)
{
	unsigned long flags;
	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct netup_unidvb_buffer *buf = container_of(vbuf,
				struct netup_unidvb_buffer, vb);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
	spin_lock_irqsave(&dma->lock, flags);
	list_add_tail(&buf->list, &dma->free_buffers);
	spin_unlock_irqrestore(&dma->lock, flags);
	mod_timer(&dma->timeout, jiffies + msecs_to_jiffies(1000));
}

static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct netup_dma *dma = vb2_get_drv_priv(q);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_dma_enable(dma, 1);
	return 0;
}

static void netup_unidvb_stop_streaming(struct vb2_queue *q)
{
	struct netup_dma *dma = vb2_get_drv_priv(q);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_dma_enable(dma, 0);
	netup_unidvb_queue_cleanup(dma);
}

static const struct vb2_ops dvb_qops = {
	.queue_setup		= netup_unidvb_queue_setup,
	.buf_prepare		= netup_unidvb_buf_prepare,
	.buf_queue		= netup_unidvb_buf_queue,
	.start_streaming	= netup_unidvb_start_streaming,
	.stop_streaming		= netup_unidvb_stop_streaming,
};

static int netup_unidvb_queue_init(struct netup_dma *dma,
				   struct vb2_queue *vb_queue)
{
	int res;

	/* Init videobuf2 queue structure */
	vb_queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vb_queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	vb_queue->drv_priv = dma;
	vb_queue->buf_struct_size = sizeof(struct netup_unidvb_buffer);
	vb_queue->ops = &dvb_qops;
	vb_queue->mem_ops = &vb2_vmalloc_memops;
	vb_queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	res = vb2_queue_init(vb_queue);
	if (res != 0) {
		dev_err(&dma->ndev->pci_dev->dev,
			"%s(): vb2_queue_init failed (%d)\n", __func__, res);
	}
	return res;
}

static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
				 int num)
{
	int fe_count = 2;
	int i = 0;
	struct vb2_dvb_frontend *fes[2];
	u8 fe_name[32];

	if (ndev->rev == NETUP_HW_REV_1_3)
		demod_config.xtal = SONY_XTAL_20500;
	else
		demod_config.xtal = SONY_XTAL_24000;

	if (num < 0 || num > 1) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to init DVB bus %d\n", __func__, num);
		return -ENODEV;
	}
	mutex_init(&ndev->frontends[num].lock);
	INIT_LIST_HEAD(&ndev->frontends[num].felist);

	for (i = 0; i < fe_count; i++) {
		if (vb2_dvb_alloc_frontend(&ndev->frontends[num], i+1)
				== NULL) {
			dev_err(&ndev->pci_dev->dev,
					"%s(): unable to allocate vb2_dvb_frontend\n",
					__func__);
			return -ENOMEM;
		}
	}

	for (i = 0; i < fe_count; i++) {
		fes[i] = vb2_dvb_get_frontend(&ndev->frontends[num], i+1);
		if (fes[i] == NULL) {
			dev_err(&ndev->pci_dev->dev,
				"%s(): frontend has not been allocated\n",
				__func__);
			return -EINVAL;
		}
	}

	for (i = 0; i < fe_count; i++) {
		netup_unidvb_queue_init(&ndev->dma[num], &fes[i]->dvb.dvbq);
		snprintf(fe_name, sizeof(fe_name), "netup_fe%d", i);
		fes[i]->dvb.name = fe_name;
	}

	fes[0]->dvb.frontend = dvb_attach(cxd2841er_attach_s,
		&demod_config, &ndev->i2c[num].adap);
	if (fes[0]->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-S/S2 frontend\n",
			__func__);
		goto frontend_detach;
	}

	if (ndev->rev == NETUP_HW_REV_1_3) {
		horus3a_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(horus3a_attach, fes[0]->dvb.frontend,
					&horus3a_conf, &ndev->i2c[num].adap)) {
			dev_dbg(&ndev->pci_dev->dev,
					"%s(): unable to attach HORUS3A DVB-S/S2 tuner frontend\n",
					__func__);
			goto frontend_detach;
		}
	} else {
		helene_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(helene_attach_s, fes[0]->dvb.frontend,
					&helene_conf, &ndev->i2c[num].adap)) {
			dev_err(&ndev->pci_dev->dev,
					"%s(): unable to attach HELENE DVB-S/S2 tuner frontend\n",
					__func__);
			goto frontend_detach;
		}
	}

	if (!dvb_attach(lnbh25_attach, fes[0]->dvb.frontend,
			&lnbh25_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach SEC frontend\n", __func__);
		goto frontend_detach;
	}

	/* DVB-T/T2 frontend */
	fes[1]->dvb.frontend = dvb_attach(cxd2841er_attach_t_c,
		&demod_config, &ndev->i2c[num].adap);
	if (fes[1]->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach Ter frontend\n", __func__);
		goto frontend_detach;
	}
	fes[1]->dvb.frontend->id = 1;
	if (ndev->rev == NETUP_HW_REV_1_3) {
		ascot2e_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(ascot2e_attach, fes[1]->dvb.frontend,
					&ascot2e_conf, &ndev->i2c[num].adap)) {
			dev_dbg(&ndev->pci_dev->dev,
					"%s(): unable to attach Ter tuner frontend\n",
					__func__);
			goto frontend_detach;
		}
	} else {
		helene_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(helene_attach, fes[1]->dvb.frontend,
					&helene_conf, &ndev->i2c[num].adap)) {
			dev_err(&ndev->pci_dev->dev,
					"%s(): unable to attach HELENE Ter tuner frontend\n",
					__func__);
			goto frontend_detach;
		}
	}

	if (vb2_dvb_register_bus(&ndev->frontends[num],
				 THIS_MODULE, NULL,
				 &ndev->pci_dev->dev, NULL, adapter_nr, 1)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to register DVB bus %d\n",
			__func__, num);
		goto frontend_detach;
	}
	dev_info(&ndev->pci_dev->dev, "DVB init done, num=%d\n", num);
	return 0;
frontend_detach:
	vb2_dvb_dealloc_frontends(&ndev->frontends[num]);
	return -EINVAL;
}

static void netup_unidvb_dvb_fini(struct netup_unidvb_dev *ndev, int num)
{
	if (num < 0 || num > 1) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): unable to unregister DVB bus %d\n",
			__func__, num);
		return;
	}
	vb2_dvb_unregister_bus(&ndev->frontends[num]);
	dev_info(&ndev->pci_dev->dev,
		"%s(): DVB bus %d unregistered\n", __func__, num);
}

static int netup_unidvb_dvb_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dvb_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dvb_init(ndev, 1);
	if (res) {
		netup_unidvb_dvb_fini(ndev, 0);
		return res;
	}
	return 0;
}

static int netup_unidvb_ring_copy(struct netup_dma *dma,
				  struct netup_unidvb_buffer *buf)
{
	u32 copy_bytes, ring_bytes;
	u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
	u8 *p = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
	struct netup_unidvb_dev *ndev = dma->ndev;

	if (p == NULL) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
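	/*
	 * Copy in up to two steps: first the tail of the ring, from
	 * data_offset up to the end of the ring buffer, then (once the
	 * offset wraps to 0) the remaining head, so at most two
	 * memcpy_fromio() calls are needed to fill the vb2 buffer.
	 */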
	p += buf->size;
	if (dma->data_offset + dma->data_size > dma->ring_buffer_size) {
		ring_bytes = dma->ring_buffer_size - dma->data_offset;
		copy_bytes = (ring_bytes > buff_bytes) ?
			buff_bytes : ring_bytes;
		memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt + dma->data_offset), copy_bytes);
		p += copy_bytes;
		buf->size += copy_bytes;
		buff_bytes -= copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	if (buff_bytes > 0) {
		ring_bytes = dma->data_size;
		copy_bytes = (ring_bytes > buff_bytes) ?
				buff_bytes : ring_bytes;
		memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt + dma->data_offset), copy_bytes);
		buf->size += copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	return 0;
}

static void netup_unidvb_dma_worker(struct work_struct *work)
{
	struct netup_dma *dma = container_of(work, struct netup_dma, work);
	struct netup_unidvb_dev *ndev = dma->ndev;
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	if (dma->data_size == 0) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): data_size == 0\n", __func__);
		goto work_done;
	}
	while (dma->data_size > 0) {
		if (list_empty(&dma->free_buffers)) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): no free buffers\n", __func__);
			goto work_done;
		}
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		if (buf->size >= NETUP_DMA_PACKETS_COUNT * 188) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer overflow, size %d\n",
				__func__, buf->size);
			goto work_done;
		}
		if (netup_unidvb_ring_copy(dma, buf))
			goto work_done;
		if (buf->size == NETUP_DMA_PACKETS_COUNT * 188) {
			list_del(&buf->list);
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer %p done, size %d\n",
				__func__, buf, buf->size);
			buf->vb.vb2_buf.timestamp = ktime_get_ns();
			vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		}
	}
work_done:
	dma->data_size = 0;
	spin_unlock_irqrestore(&dma->lock, flags);
}

static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
{
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	while (!list_empty(&dma->free_buffers)) {
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dma->lock, flags);
}

static void netup_unidvb_dma_timeout(struct timer_list *t)
{
	struct netup_dma *dma = from_timer(dma, t, timeout);
	struct netup_unidvb_dev *ndev = dma->ndev;

	dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_queue_cleanup(dma);
}

static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;
	struct device *dev = &ndev->pci_dev->dev;

	if (num < 0 || num > 1) {
		dev_err(dev, "%s(): unable to register DMA%d\n",
			__func__, num);
		return -ENODEV;
	}
	dma = &ndev->dma[num];
	dev_info(dev, "%s(): starting DMA%d\n", __func__, num);
	dma->num = num;
	dma->ndev = ndev;
	spin_lock_init(&dma->lock);
	INIT_WORK(&dma->work, netup_unidvb_dma_worker);
	INIT_LIST_HEAD(&dma->free_buffers);
	timer_setup(&dma->timeout, netup_unidvb_dma_timeout, 0);
	dma->ring_buffer_size = ndev->dma_size / 2;
	dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
	dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
		dma->ring_buffer_size * num);
	dev_info(dev, "%s(): DMA%d buffer virt/phys 0x%p/0x%llx size %d\n",
		__func__, num, dma->addr_virt,
		(unsigned long long)dma->addr_phys,
		dma->ring_buffer_size);
	memset_io((u8 __iomem *)dma->addr_virt, 0, dma->ring_buffer_size);
	dma->addr_last = dma->addr_phys;
	dma->high_addr = (u32)(dma->addr_phys & 0xC0000000);
	dma->regs = (struct netup_dma_regs __iomem *)(num == 0 ?
		ndev->bmmio0 + NETUP_DMA0_ADDR :
		ndev->bmmio0 + NETUP_DMA1_ADDR);
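	/*
	 * Program the channel's register file. The size word packs block
	 * count, packet count and packet size: (8 << 24) | (128 << 8) | 188
	 * = 0x080080bc. The 32-bit bus address is split: the low 30 bits go
	 * into start_addr_lo, while the top two bits (high_addr) are written
	 * to the register at offset 0x1000, presumably an address
	 * translation window shared by both channels. The timeout value,
	 * 375000000 * 8 ns = 3 s, matches the kernel-doc above.
	 */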
	writel((NETUP_DMA_BLOCKS_COUNT << 24) |
		(NETUP_DMA_PACKETS_COUNT << 8) | 188, &dma->regs->size);
	writel((u32)(dma->addr_phys & 0x3FFFFFFF), &dma->regs->start_addr_lo);
	writel(0, &dma->regs->start_addr_hi);
	writel(dma->high_addr, ndev->bmmio0 + 0x1000);
	writel(375000000, &dma->regs->timeout);
	msleep(1000);
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	return 0;
}

static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;

	if (num < 0 || num > 1)
		return;
	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d\n", __func__, num);
	dma = &ndev->dma[num];
	netup_unidvb_dma_enable(dma, 0);
	msleep(50);
	cancel_work_sync(&dma->work);
	/* use del_timer_sync() so a timeout handler still running on
	 * another CPU has finished before the DMA context goes away */
	del_timer_sync(&dma->timeout);
}

static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dma_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dma_init(ndev, 1);
	if (res) {
		netup_unidvb_dma_fini(ndev, 0);
		return res;
	}
	netup_unidvb_dma_enable(&ndev->dma[0], 0);
	netup_unidvb_dma_enable(&ndev->dma[1], 0);
	return 0;
}

static int netup_unidvb_ci_setup(struct netup_unidvb_dev *ndev,
				 struct pci_dev *pci_dev)
{
	int res;

	writew(NETUP_UNIDVB_IRQ_CI, ndev->bmmio0 + REG_IMASK_SET);
	res = netup_unidvb_ci_register(ndev, 0, pci_dev);
	if (res)
		return res;
	res = netup_unidvb_ci_register(ndev, 1, pci_dev);
	if (res)
		netup_unidvb_ci_unregister(ndev, 0);
	return res;
}

static int netup_unidvb_request_mmio(struct pci_dev *pci_dev)
{
	if (!request_mem_region(pci_resource_start(pci_dev, 0),
			pci_resource_len(pci_dev, 0), NETUP_UNIDVB_NAME)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO bar 0 at 0x%llx\n",
			__func__,
			(unsigned long long)pci_resource_start(pci_dev, 0));
		return -EBUSY;
	}
	if (!request_mem_region(pci_resource_start(pci_dev, 1),
			pci_resource_len(pci_dev, 1), NETUP_UNIDVB_NAME)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO bar 1 at 0x%llx\n",
			__func__,
			(unsigned long long)pci_resource_start(pci_dev, 1));
		release_mem_region(pci_resource_start(pci_dev, 0),
			pci_resource_len(pci_dev, 0));
		return -EBUSY;
	}
	return 0;
}

static int netup_unidvb_request_modules(struct device *dev)
{
	static const char * const modules[] = {
		"lnbh25", "ascot2e", "horus3a", "cxd2841er", "helene", NULL
	};
	const char * const *curr_mod = modules;
	int err;

	while (*curr_mod != NULL) {
		err = request_module(*curr_mod);
		if (err) {
			dev_warn(dev, "request_module(%s) failed: %d\n",
				*curr_mod, err);
		}
		++curr_mod;
	}
	return 0;
}
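
/*
 * The demodulator/tuner modules are preloaded so that the dvb_attach()
 * calls in netup_unidvb_dvb_init() can bind their symbols; a
 * request_module() failure is non-fatal here, since the corresponding
 * attach will simply fail (and be reported) later.
 */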

static int netup_unidvb_initdev(struct pci_dev *pci_dev,
				const struct pci_device_id *pci_id)
{
	u8 board_revision;
	u16 board_vendor;
	struct netup_unidvb_dev *ndev;
	int old_firmware = 0;

	netup_unidvb_request_modules(&pci_dev->dev);

	/* Check card revision */
	if (pci_dev->revision != NETUP_PCI_DEV_REVISION) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: expected card revision %d, got %d\n",
			NETUP_PCI_DEV_REVISION, pci_dev->revision);
		dev_err(&pci_dev->dev,
			"Please upgrade firmware!\n");
		dev_err(&pci_dev->dev,
			"Instructions on http://www.netup.tv\n");
		old_firmware = 1;
		spi_enable = 1;
	}

	/* allocate device context */
	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto dev_alloc_err;

	/* detect hardware revision */
	if (pci_dev->device == NETUP_HW_REV_1_3)
		ndev->rev = NETUP_HW_REV_1_3;
	else
		ndev->rev = NETUP_HW_REV_1_4;

	dev_info(&pci_dev->dev,
		"%s(): board (0x%x) hardware revision 0x%x\n",
		__func__, pci_dev->device, ndev->rev);

	ndev->old_fw = old_firmware;
	ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
	if (!ndev->wq) {
		dev_err(&pci_dev->dev,
			"%s(): unable to create workqueue\n", __func__);
		goto wq_create_err;
	}
	ndev->pci_dev = pci_dev;
	ndev->pci_bus = pci_dev->bus->number;
	ndev->pci_slot = PCI_SLOT(pci_dev->devfn);
	ndev->pci_func = PCI_FUNC(pci_dev->devfn);
	ndev->board_num = ndev->pci_bus*10 + ndev->pci_slot;
	pci_set_drvdata(pci_dev, ndev);
	/* PCI init */
	dev_info(&pci_dev->dev, "%s(): PCI device (%d). Bus:0x%x Slot:0x%x\n",
		__func__, ndev->board_num, ndev->pci_bus, ndev->pci_slot);

	if (pci_enable_device(pci_dev)) {
		dev_err(&pci_dev->dev, "%s(): pci_enable_device failed\n",
			__func__);
		goto pci_enable_err;
	}
	/* read PCI info */
	pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &board_revision);
	pci_read_config_word(pci_dev, PCI_VENDOR_ID, &board_vendor);
	if (board_vendor != NETUP_VENDOR_ID) {
		dev_err(&pci_dev->dev, "%s(): unknown board vendor 0x%x",
			__func__, board_vendor);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev,
		"%s(): board vendor 0x%x, revision 0x%x\n",
		__func__, board_vendor, board_revision);
	pci_set_master(pci_dev);
	if (dma_set_mask(&pci_dev->dev, 0xffffffff) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): 32bit PCI DMA is not supported\n", __func__);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev, "%s(): using 32bit PCI DMA\n", __func__);
	/* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
	pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL,
		PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
		PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
	/* Adjust PCIe completion timeout. */
	pcie_capability_clear_and_set_word(pci_dev,
		PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0x2);

	if (netup_unidvb_request_mmio(pci_dev)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO regions\n", __func__);
		goto pci_detect_err;
	}
	ndev->lmmio0 = ioremap(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	if (!ndev->lmmio0) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 0\n", __func__);
		goto pci_bar0_error;
	}
	ndev->lmmio1 = ioremap(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	if (!ndev->lmmio1) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 1\n", __func__);
		goto pci_bar1_error;
	}
	ndev->bmmio0 = (u8 __iomem *)ndev->lmmio0;
	ndev->bmmio1 = (u8 __iomem *)ndev->lmmio1;
	dev_info(&pci_dev->dev,
		"%s(): PCI MMIO at 0x%p (%d); 0x%p (%d); IRQ %d",
		__func__,
		ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
		ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
		pci_dev->irq);
	if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
			"netup_unidvb", pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
		goto irq_request_err;
	}
	ndev->dma_size = 2 * 188 *
		NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
	ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
		ndev->dma_size, &ndev->dma_phys, GFP_KERNEL);
	if (!ndev->dma_virt) {
		dev_err(&pci_dev->dev, "%s(): unable to allocate DMA buffer\n",
			__func__);
		goto dma_alloc_err;
	}
	netup_unidvb_dev_enable(ndev);
	if (spi_enable && netup_spi_init(ndev)) {
		dev_warn(&pci_dev->dev,
			"netup_unidvb: SPI flash setup failed\n");
		goto spi_setup_err;
	}
	if (old_firmware) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: card initialization was incomplete\n");
		return 0;
	}
	if (netup_i2c_register(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: I2C setup failed\n");
		goto i2c_setup_err;
	}
	/* enable I2C IRQs */
	writew(NETUP_UNIDVB_IRQ_I2C0 | NETUP_UNIDVB_IRQ_I2C1,
		ndev->bmmio0 + REG_IMASK_SET);
	usleep_range(5000, 10000);
	if (netup_unidvb_dvb_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DVB setup failed\n");
		goto dvb_setup_err;
	}
	if (netup_unidvb_ci_setup(ndev, pci_dev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: CI setup failed\n");
		goto ci_setup_err;
	}
	if (netup_unidvb_dma_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
		goto dma_setup_err;
	}
	dev_info(&pci_dev->dev,
		"netup_unidvb: device has been initialized\n");
	return 0;
dma_setup_err:
	netup_unidvb_ci_unregister(ndev, 0);
	netup_unidvb_ci_unregister(ndev, 1);
ci_setup_err:
	netup_unidvb_dvb_fini(ndev, 0);
	netup_unidvb_dvb_fini(ndev, 1);
dvb_setup_err:
	netup_i2c_unregister(ndev);
i2c_setup_err:
	if (ndev->spi)
		netup_spi_release(ndev);
spi_setup_err:
	dma_free_coherent(&pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
dma_alloc_err:
	free_irq(pci_dev->irq, pci_dev);
irq_request_err:
	iounmap(ndev->lmmio1);
pci_bar1_error:
	iounmap(ndev->lmmio0);
pci_bar0_error:
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
pci_detect_err:
	pci_disable_device(pci_dev);
pci_enable_err:
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
wq_create_err:
	kfree(ndev);
dev_alloc_err:
	dev_err(&pci_dev->dev,
		"%s(): failed to initialize device\n", __func__);
	return -EIO;
}

static void netup_unidvb_finidev(struct pci_dev *pci_dev)
{
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);

	dev_info(&pci_dev->dev, "%s(): trying to stop device\n", __func__);
	if (!ndev->old_fw) {
		netup_unidvb_dma_fini(ndev, 0);
		netup_unidvb_dma_fini(ndev, 1);
		netup_unidvb_ci_unregister(ndev, 0);
		netup_unidvb_ci_unregister(ndev, 1);
		netup_unidvb_dvb_fini(ndev, 0);
		netup_unidvb_dvb_fini(ndev, 1);
		netup_i2c_unregister(ndev);
	}
	if (ndev->spi)
		netup_spi_release(ndev);
	writew(0xffff, ndev->bmmio0 + REG_IMASK_CLEAR);
	dma_free_coherent(&ndev->pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
	free_irq(pci_dev->irq, pci_dev);
	iounmap(ndev->lmmio0);
	iounmap(ndev->lmmio1);
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	pci_disable_device(pci_dev);
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
	kfree(ndev);
	dev_info(&pci_dev->dev,
		"%s(): device has been successfully stopped\n", __func__);
}

static const struct pci_device_id netup_unidvb_pci_tbl[] = {
	{ PCI_DEVICE(0x1b55, 0x18f6) }, /* hw rev. 1.3 */
	{ PCI_DEVICE(0x1b55, 0x18f7) }, /* hw rev. 1.4 */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, netup_unidvb_pci_tbl);

static struct pci_driver netup_unidvb_pci_driver = {
	.name     = "netup_unidvb",
	.id_table = netup_unidvb_pci_tbl,
	.probe    = netup_unidvb_initdev,
	.remove   = netup_unidvb_finidev,
};

module_pci_driver(netup_unidvb_pci_driver);
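
/*
 * module_pci_driver() expands to the usual module_init()/module_exit()
 * boilerplate around pci_register_driver()/pci_unregister_driver(), so
 * the probe/remove callbacks above cover the whole driver life cycle.
 */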