cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

c8sectpfe-core.c (30504B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * c8sectpfe-core.c - C8SECTPFE STi DVB driver
      4 *
      5 * Copyright (c) STMicroelectronics 2015
      6 *
      7 *   Author:Peter Bennett <peter.bennett@st.com>
      8 *	    Peter Griffin <peter.griffin@linaro.org>
      9 *
     10 */
     11#include <linux/atomic.h>
     12#include <linux/clk.h>
     13#include <linux/completion.h>
     14#include <linux/delay.h>
     15#include <linux/device.h>
     16#include <linux/dma-mapping.h>
     17#include <linux/dvb/dmx.h>
     18#include <linux/dvb/frontend.h>
     19#include <linux/errno.h>
     20#include <linux/firmware.h>
     21#include <linux/init.h>
     22#include <linux/interrupt.h>
     23#include <linux/io.h>
     24#include <linux/module.h>
     25#include <linux/of_gpio.h>
     26#include <linux/of_platform.h>
     27#include <linux/platform_device.h>
     28#include <linux/usb.h>
     29#include <linux/slab.h>
     30#include <linux/time.h>
     31#include <linux/wait.h>
     32#include <linux/pinctrl/pinctrl.h>
     33
     34#include "c8sectpfe-core.h"
     35#include "c8sectpfe-common.h"
     36#include "c8sectpfe-debugfs.h"
     37#include <media/dmxdev.h>
     38#include <media/dvb_demux.h>
     39#include <media/dvb_frontend.h>
     40#include <media/dvb_net.h>
     41
     42#define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
     43MODULE_FIRMWARE(FIRMWARE_MEMDMA);
     44
     45#define PID_TABLE_SIZE 1024
     46#define POLL_MSECS 50
     47
     48static int load_c8sectpfe_fw(struct c8sectpfei *fei);
     49
     50#define TS_PKT_SIZE 188
     51#define HEADER_SIZE (4)
     52#define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)
     53
     54#define FEI_ALIGNMENT (32)
     55/* hw requires minimum of 8*PACKET_SIZE and padded to 8byte boundary */
     56#define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)
     57
     58#define FIFO_LEN 1024
     59
     60static void c8sectpfe_timer_interrupt(struct timer_list *t)
     61{
     62	struct c8sectpfei *fei = from_timer(fei, t, timer);
     63	struct channel_info *channel;
     64	int chan_num;
     65
     66	/* iterate through input block channels */
     67	for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
     68		channel = fei->channel_data[chan_num];
     69
     70		/* is this descriptor initialised and TP enabled */
     71		if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
     72			tasklet_schedule(&channel->tsklet);
     73	}
     74
     75	fei->timer.expires = jiffies +	msecs_to_jiffies(POLL_MSECS);
     76	add_timer(&fei->timer);
     77}
     78
/*
 * Software demux tasklet for one input channel.
 *
 * Snapshots the memdma hardware write pointer and our last read pointer
 * (both bus addresses within the channel back buffer), feeds every complete
 * 192-byte packet in between to the DVB software demux, and then advances
 * the read pointer register so the hardware can reuse that space.
 */
static void channel_swdemux_tsklet(struct tasklet_struct *t)
{
	struct channel_info *channel = from_tasklet(channel, t, tsklet);
	struct c8sectpfei *fei;
	unsigned long wp, rp;
	int pos, num_packets, n, size;
	u8 *buf;

	/* defensive: the tasklet may fire after the channel is torn down */
	if (unlikely(!channel || !channel->irec))
		return;

	fei = channel->fei;

	/* bus-address write pointer (hw) and read pointer (ours) */
	wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
	rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));

	/* byte offset of the read pointer within the back buffer */
	pos = rp - channel->back_buffer_busaddr;

	/* has it wrapped */
	if (wp < rp)
		wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;

	size = wp - rp;
	num_packets = size / PACKET_SIZE;

	/* manage cache so data is visible to CPU */
	dma_sync_single_for_cpu(fei->dev,
				rp,
				size,
				DMA_FROM_DEVICE);

	buf = (u8 *) channel->back_buffer_aligned;

	dev_dbg(fei->dev,
		"chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\trp=0x%lx, wp=0x%lx\n",
		channel->tsin_id, channel, num_packets, buf, pos, rp, wp);

	/* hand each whole packet to the software demux mapped to this tsin */
	for (n = 0; n < num_packets; n++) {
		dvb_dmx_swfilter_packets(
			&fei->c8sectpfe[0]->
				demux[channel->demux_mapping].dvb_demux,
			&buf[pos], 1);

		pos += PACKET_SIZE;
	}

	/* advance the read pointer */
	if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
		/* consumed up to the end: wrap back to the buffer base */
		writel(channel->back_buffer_busaddr, channel->irec +
			DMA_PRDS_BUSRP_TP(0));
	else
		writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
}
    132
    133static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
    134{
    135	struct dvb_demux *demux = dvbdmxfeed->demux;
    136	struct stdemux *stdemux = (struct stdemux *)demux->priv;
    137	struct c8sectpfei *fei = stdemux->c8sectpfei;
    138	struct channel_info *channel;
    139	u32 tmp;
    140	unsigned long *bitmap;
    141	int ret;
    142
    143	switch (dvbdmxfeed->type) {
    144	case DMX_TYPE_TS:
    145		break;
    146	case DMX_TYPE_SEC:
    147		break;
    148	default:
    149		dev_err(fei->dev, "%s:%d Error bailing\n"
    150			, __func__, __LINE__);
    151		return -EINVAL;
    152	}
    153
    154	if (dvbdmxfeed->type == DMX_TYPE_TS) {
    155		switch (dvbdmxfeed->pes_type) {
    156		case DMX_PES_VIDEO:
    157		case DMX_PES_AUDIO:
    158		case DMX_PES_TELETEXT:
    159		case DMX_PES_PCR:
    160		case DMX_PES_OTHER:
    161			break;
    162		default:
    163			dev_err(fei->dev, "%s:%d Error bailing\n"
    164				, __func__, __LINE__);
    165			return -EINVAL;
    166		}
    167	}
    168
    169	if (!atomic_read(&fei->fw_loaded)) {
    170		ret = load_c8sectpfe_fw(fei);
    171		if (ret)
    172			return ret;
    173	}
    174
    175	mutex_lock(&fei->lock);
    176
    177	channel = fei->channel_data[stdemux->tsin_index];
    178
    179	bitmap = (unsigned long *) channel->pid_buffer_aligned;
    180
    181	/* 8192 is a special PID */
    182	if (dvbdmxfeed->pid == 8192) {
    183		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
    184		tmp &= ~C8SECTPFE_PID_ENABLE;
    185		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
    186
    187	} else {
    188		bitmap_set(bitmap, dvbdmxfeed->pid, 1);
    189	}
    190
    191	/* manage cache so PID bitmap is visible to HW */
    192	dma_sync_single_for_device(fei->dev,
    193					channel->pid_buffer_busaddr,
    194					PID_TABLE_SIZE,
    195					DMA_TO_DEVICE);
    196
    197	channel->active = 1;
    198
    199	if (fei->global_feed_count == 0) {
    200		fei->timer.expires = jiffies +
    201			msecs_to_jiffies(msecs_to_jiffies(POLL_MSECS));
    202
    203		add_timer(&fei->timer);
    204	}
    205
    206	if (stdemux->running_feed_count == 0) {
    207
    208		dev_dbg(fei->dev, "Starting channel=%p\n", channel);
    209
    210		tasklet_setup(&channel->tsklet, channel_swdemux_tsklet);
    211
    212		/* Reset the internal inputblock sram pointers */
    213		writel(channel->fifo,
    214			fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
    215		writel(channel->fifo + FIFO_LEN - 1,
    216			fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
    217
    218		writel(channel->fifo,
    219			fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
    220		writel(channel->fifo,
    221			fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
    222
    223
    224		/* reset read / write memdma ptrs for this channel */
    225		writel(channel->back_buffer_busaddr, channel->irec +
    226			DMA_PRDS_BUSBASE_TP(0));
    227
    228		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
    229		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
    230
    231		writel(channel->back_buffer_busaddr, channel->irec +
    232			DMA_PRDS_BUSWP_TP(0));
    233
    234		/* Issue a reset and enable InputBlock */
    235		writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
    236			, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
    237
    238		/* and enable the tp */
    239		writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
    240
    241		dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
    242			, __func__, __LINE__, stdemux);
    243	}
    244
    245	stdemux->running_feed_count++;
    246	fei->global_feed_count++;
    247
    248	mutex_unlock(&fei->lock);
    249
    250	return 0;
    251}
    252
/*
 * Stop a demux feed on one tsin channel.
 *
 * Removes the PID from the hardware filter, and — when the last feed on a
 * channel stops — idles the memdma transport processor following the
 * "TP re-configuration" sequence from the functional spec, resets the
 * channel pointers and clears the whole PID bitmap. The global poll timer
 * is deleted with the last feed overall.
 */
static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{

	struct dvb_demux *demux = dvbdmxfeed->demux;
	struct stdemux *stdemux = (struct stdemux *)demux->priv;
	struct c8sectpfei *fei = stdemux->c8sectpfei;
	struct channel_info *channel;
	int idlereq;
	u32 tmp;
	int ret;
	unsigned long *bitmap;

	/* NOTE(review): loading fw on *stop* looks odd but mirrors start_feed */
	if (!atomic_read(&fei->fw_loaded)) {
		ret = load_c8sectpfe_fw(fei);
		if (ret)
			return ret;
	}

	mutex_lock(&fei->lock);

	channel = fei->channel_data[stdemux->tsin_index];

	/* bit-per-pid filter table shared with the hardware */
	bitmap = (unsigned long *) channel->pid_buffer_aligned;

	/* PID 8192 meant "all PIDs": re-enable hw PID filtering again */
	if (dvbdmxfeed->pid == 8192) {
		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
		tmp |= C8SECTPFE_PID_ENABLE;
		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
	} else {
		bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
	}

	/* manage cache so data is visible to HW */
	dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

	/* last feed on this channel: shut the channel down */
	if (--stdemux->running_feed_count == 0) {

		channel = fei->channel_data[stdemux->tsin_index];

		/* TP re-configuration on page 168 of functional spec */

		/* disable IB (prevents more TS data going to memdma) */
		writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));

		/* disable this channels descriptor */
		writel(0,  channel->irec + DMA_PRDS_TPENABLE);

		tasklet_disable(&channel->tsklet);

		/* now request memdma channel goes idle */
		idlereq = (1 << channel->tsin_id) | IDLEREQ;
		writel(idlereq, fei->io + DMA_IDLE_REQ);

		/* wait for idle irq handler to signal completion */
		ret = wait_for_completion_timeout(&channel->idle_completion,
						msecs_to_jiffies(100));

		/* timeout is logged but shutdown continues regardless */
		if (ret == 0)
			dev_warn(fei->dev,
				"Timeout waiting for idle irq on tsin%d\n",
				channel->tsin_id);

		reinit_completion(&channel->idle_completion);

		/* reset read / write ptrs for this channel */

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSBASE_TP(0));

		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSWP_TP(0));

		dev_dbg(fei->dev,
			"%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
			__func__, __LINE__, stdemux, channel->tsin_id);

		/* turn off all PIDS in the bitmap */
		memset((void *)channel->pid_buffer_aligned
			, 0x00, PID_TABLE_SIZE);

		/* manage cache so data is visible to HW */
		dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

		channel->active = 0;
	}

	/* last feed overall: stop the polling timer */
	if (--fei->global_feed_count == 0) {
		dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
			, __func__, __LINE__, fei->global_feed_count);

		del_timer(&fei->timer);
	}

	mutex_unlock(&fei->lock);

	return 0;
}
    359
    360static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
    361{
    362	int i;
    363
    364	for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
    365		if (!fei->channel_data[i])
    366			continue;
    367
    368		if (fei->channel_data[i]->tsin_id == tsin_num)
    369			return fei->channel_data[i];
    370	}
    371
    372	return NULL;
    373}
    374
    375static void c8sectpfe_getconfig(struct c8sectpfei *fei)
    376{
    377	struct c8sectpfe_hw *hw = &fei->hw_stats;
    378
    379	hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
    380	hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
    381	hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
    382	hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
    383	hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
    384	hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
    385	hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
    386
    387	dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
    388	dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
    389	dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
    390	dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
    391				, hw->num_swts);
    392	dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
    393	dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
    394	dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
    395	dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
    396			, hw->num_tp);
    397}
    398
/*
 * Idle-request IRQ handler: fires when the memdma has gone idle for the
 * channels requested via DMA_IDLE_REQ. Completes each waiting channel's
 * idle_completion (c8sectpfe_stop_feed() blocks on it) and then clears
 * the idle request register.
 */
static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
{
	struct c8sectpfei *fei = priv;
	struct channel_info *chan;
	int bit;
	unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);

	/* page 168 of functional spec: Clear the idle request
	   by writing 0 to the C8SECTPFE_DMA_IDLE_REQ register. */

	/* signal idle completion */
	for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {

		chan = find_channel(fei, bit);

		if (chan)
			complete(&chan->idle_completion);
	}

	writel(0, fei->io + DMA_IDLE_REQ);

	return IRQ_HANDLED;
}
    422
    423
    424static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
    425{
    426	if (!fei || !tsin)
    427		return;
    428
    429	if (tsin->back_buffer_busaddr)
    430		if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
    431			dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
    432				FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
    433
    434	kfree(tsin->back_buffer_start);
    435
    436	if (tsin->pid_buffer_busaddr)
    437		if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
    438			dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
    439				PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
    440
    441	kfree(tsin->pid_buffer_start);
    442}
    443
    444#define MAX_NAME 20
    445
/*
 * Allocate the DMA buffers for one tsin channel and program its input
 * block and memdma pointer record.
 *
 * Allocates a 32-byte-aligned back buffer and a 1024-byte-aligned PID
 * filter bitmap, maps both for DMA, selects the tsin pinctrl state, then
 * writes the input block format/sync/FIFO registers and the memdma
 * pointer record for this channel.
 *
 * Returns 0 on success or a negative errno; on failure any resources
 * already acquired are released via free_input_block().
 */
static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
				struct channel_info *tsin)
{
	int ret;
	u32 tmp;
	char tsin_pin_name[MAX_NAME];

	if (!fei || !tsin)
		return -EINVAL;

	dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
		, __func__, __LINE__, tsin, tsin->tsin_id);

	init_completion(&tsin->idle_completion);

	/* over-allocate so the buffer can be aligned down to 32 bytes */
	tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE +
					FEI_ALIGNMENT, GFP_KERNEL);

	if (!tsin->back_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/* Ensure backbuffer is 32byte aligned */
	tsin->back_buffer_aligned = tsin->back_buffer_start
		+ FEI_ALIGNMENT;

	tsin->back_buffer_aligned = (void *)
		(((uintptr_t) tsin->back_buffer_aligned) & ~0x1F);

	tsin->back_buffer_busaddr = dma_map_single(fei->dev,
					(void *)tsin->back_buffer_aligned,
					FEI_BUFFER_SIZE,
					DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map back_buffer\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/*
	 * The pid buffer can be configured (in hw) for byte or bit
	 * per pid. By powers of deduction we conclude stih407 family
	 * is configured (at SoC design stage) for bit per pid.
	 */
	/* 2048 = PID_TABLE_SIZE * 2, so a 1024-aligned table always fits */
	tsin->pid_buffer_start = kzalloc(2048, GFP_KERNEL);

	if (!tsin->pid_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * PID buffer needs to be aligned to size of the pid table
	 * which at bit per pid is 1024 bytes (8192 pids / 8).
	 * PIDF_BASE register enforces this alignment when writing
	 * the register.
	 */

	tsin->pid_buffer_aligned = tsin->pid_buffer_start +
		PID_TABLE_SIZE;

	tsin->pid_buffer_aligned = (void *)
		(((uintptr_t) tsin->pid_buffer_aligned) & ~0x3ff);

	tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
						tsin->pid_buffer_aligned,
						PID_TABLE_SIZE,
						DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map pid_bitmap\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/* manage cache so pid bitmap is visible to HW */
	dma_sync_single_for_device(fei->dev,
				tsin->pid_buffer_busaddr,
				PID_TABLE_SIZE,
				DMA_TO_DEVICE);

	/* pin states are named e.g. "tsin0-serial" / "tsin0-parallel" */
	snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
		(tsin->serial_not_parallel ? "serial" : "parallel"));

	tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
	if (IS_ERR(tsin->pstate)) {
		dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
			, __func__, tsin_pin_name);
		ret = PTR_ERR(tsin->pstate);
		goto err_unmap;
	}

	ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);

	if (ret) {
		dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
			, __func__);
		goto err_unmap;
	}

	/* Enable this input block */
	tmp = readl(fei->io + SYS_INPUT_CLKEN);
	tmp |= BIT(tsin->tsin_id);
	writel(tmp, fei->io + SYS_INPUT_CLKEN);

	/* build the input format config word from the DT properties */
	if (tsin->serial_not_parallel)
		tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;

	if (tsin->invert_ts_clk)
		tmp |= C8SECTPFE_INVERT_TSCLK;

	if (tsin->async_not_sync)
		tmp |= C8SECTPFE_ASYNC_NOT_SYNC;

	tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;

	writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));

	/* sync/drop thresholds and the TS sync byte (0x47) */
	writel(C8SECTPFE_SYNC(0x9) |
		C8SECTPFE_DROP(0x9) |
		C8SECTPFE_TOKEN(0x47),
		fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));

	writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));

	/* Place the FIFO's at the end of the irec descriptors */

	tsin->fifo = (tsin->tsin_id * FIFO_LEN);

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
	writel(tsin->fifo + FIFO_LEN - 1,
		fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
	writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));

	writel(tsin->pid_buffer_busaddr,
		fei->io + PIDF_BASE(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
		tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
		&tsin->pid_buffer_busaddr);

	/* Configure and enable HW PID filtering */

	/*
	 * The PID value is created by assembling the first 8 bytes of
	 * the TS packet into a 64-bit word in big-endian format. A
	 * slice of that 64-bit word is taken from
	 * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
	 */
	tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
		| C8SECTPFE_PID_OFFSET(40));

	writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
		tsin->tsin_id,
		readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));

	/* Get base addpress of pointer record block from DMEM */
	tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
			readl(fei->io + DMA_PTRREC_BASE);

	/* fill out pointer record data structure */

	/* advance pointer record block to our channel */
	tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);

	writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);

	writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);

	/* TS packet length rounded up to an 8-byte boundary (188 -> 192) */
	writel((188 + 7)&~7, tsin->irec + DMA_PRDS_PKTSIZE);

	writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);

	/* read/write pointers with physical bus address */

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));

	tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
	writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));

	/* initialize tasklet */
	tasklet_setup(&tsin->tsklet, channel_swdemux_tsklet);

	return 0;

err_unmap:
	free_input_block(fei, tsin);
	return ret;
}
    647
    648static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
    649{
    650	struct c8sectpfei *fei = priv;
    651
    652	dev_err(fei->dev, "%s: error handling not yet implemented\n"
    653		, __func__);
    654
    655	/*
    656	 * TODO FIXME we should detect some error conditions here
    657	 * and ideally do something about them!
    658	 */
    659
    660	return IRQ_HANDLED;
    661}
    662
/*
 * Platform probe: map register/SRAM resources, enable the clock, register
 * the idle/error IRQs, parse one DT child node per tsin channel
 * (PID-filter config, i2c bus, reset gpio), set up the poll timer and
 * register the DVB frontends.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released (devm_* resources automatically, the clock
 * explicitly).
 */
static int c8sectpfe_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;
	struct c8sectpfei *fei;
	struct resource *res;
	int ret, index = 0;
	struct channel_info *tsin;

	/* Allocate the c8sectpfei structure */
	fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
	if (!fei)
		return -ENOMEM;

	fei->dev = dev;

	/* main register block */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
	fei->io = devm_ioremap_resource(dev, res);
	if (IS_ERR(fei->io))
		return PTR_ERR(fei->io);

	/* internal SRAM, used for firmware DMEM/IMEM segments */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					"c8sectpfe-ram");
	fei->sram = devm_ioremap_resource(dev, res);
	if (IS_ERR(fei->sram))
		return PTR_ERR(fei->sram);

	fei->sram_size = resource_size(res);

	fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
	if (fei->idle_irq < 0)
		return fei->idle_irq;

	fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
	if (fei->error_irq < 0)
		return fei->error_irq;

	platform_set_drvdata(pdev, fei);

	fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
	if (IS_ERR(fei->c8sectpfeclk)) {
		dev_err(dev, "c8sectpfe clk not found\n");
		return PTR_ERR(fei->c8sectpfeclk);
	}

	ret = clk_prepare_enable(fei->c8sectpfeclk);
	if (ret) {
		dev_err(dev, "Failed to enable c8sectpfe clock\n");
		return ret;
	}

	/* to save power disable all IP's (on by default) */
	writel(0, fei->io + SYS_INPUT_CLKEN);

	/* Enable memdma clock */
	writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);

	/* clear internal sram */
	memset_io(fei->sram, 0x0, fei->sram_size);

	/* populates fei->hw_stats, consulted by the sanity checks below */
	c8sectpfe_getconfig(fei);

	ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
			0, "c8sectpfe-idle-irq", fei);
	if (ret) {
		dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
		goto err_clk_disable;
	}

	ret = devm_request_irq(dev, fei->error_irq,
				c8sectpfe_error_irq_handler, 0,
				"c8sectpfe-error-irq", fei);
	if (ret) {
		dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
		goto err_clk_disable;
	}

	/* one DT child node per tsin channel */
	fei->tsin_count = of_get_child_count(np);

	if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
		fei->tsin_count > fei->hw_stats.num_ib) {

		dev_err(dev, "More tsin declared than exist on SoC!\n");
		ret = -EINVAL;
		goto err_clk_disable;
	}

	fei->pinctrl = devm_pinctrl_get(dev);

	if (IS_ERR(fei->pinctrl)) {
		dev_err(dev, "Error getting tsin pins\n");
		ret = PTR_ERR(fei->pinctrl);
		goto err_clk_disable;
	}

	/* parse per-channel properties from each DT child node */
	for_each_child_of_node(np, child) {
		struct device_node *i2c_bus;

		fei->channel_data[index] = devm_kzalloc(dev,
						sizeof(struct channel_info),
						GFP_KERNEL);

		if (!fei->channel_data[index]) {
			ret = -ENOMEM;
			goto err_node_put;
		}

		tsin = fei->channel_data[index];

		tsin->fei = fei;

		ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
		if (ret) {
			dev_err(&pdev->dev, "No tsin_num found\n");
			goto err_node_put;
		}

		/* sanity check value */
		if (tsin->tsin_id > fei->hw_stats.num_ib) {
			dev_err(&pdev->dev,
				"tsin-num %d specified greater than number\n\tof input block hw in SoC! (%d)",
				tsin->tsin_id, fei->hw_stats.num_ib);
			ret = -EINVAL;
			goto err_node_put;
		}

		tsin->invert_ts_clk = of_property_read_bool(child,
							"invert-ts-clk");

		tsin->serial_not_parallel = of_property_read_bool(child,
							"serial-not-parallel");

		tsin->async_not_sync = of_property_read_bool(child,
							"async-not-sync");

		ret = of_property_read_u32(child, "dvb-card",
					&tsin->dvb_card);
		if (ret) {
			dev_err(&pdev->dev, "No dvb-card found\n");
			goto err_node_put;
		}

		i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
		if (!i2c_bus) {
			dev_err(&pdev->dev, "No i2c-bus found\n");
			ret = -ENODEV;
			goto err_node_put;
		}
		tsin->i2c_adapter =
			of_find_i2c_adapter_by_node(i2c_bus);
		if (!tsin->i2c_adapter) {
			dev_err(&pdev->dev, "No i2c adapter found\n");
			of_node_put(i2c_bus);
			ret = -ENODEV;
			goto err_node_put;
		}
		of_node_put(i2c_bus);

		tsin->rst_gpio = of_get_named_gpio(child, "reset-gpios", 0);

		ret = gpio_is_valid(tsin->rst_gpio);
		if (!ret) {
			dev_err(dev,
				"reset gpio for tsin%d not valid (gpio=%d)\n",
				tsin->tsin_id, tsin->rst_gpio);
			ret = -EINVAL;
			goto err_node_put;
		}

		/* -EBUSY tolerated: gpio may be shared between tsin nodes */
		ret = devm_gpio_request_one(dev, tsin->rst_gpio,
					GPIOF_OUT_INIT_LOW, "NIM reset");
		if (ret && ret != -EBUSY) {
			dev_err(dev, "Can't request tsin%d reset gpio\n"
				, fei->channel_data[index]->tsin_id);
			goto err_node_put;
		}

		if (!ret) {
			/* toggle reset lines */
			gpio_direction_output(tsin->rst_gpio, 0);
			usleep_range(3500, 5000);
			gpio_direction_output(tsin->rst_gpio, 1);
			usleep_range(3000, 5000);
		}

		tsin->demux_mapping = index;

		dev_dbg(fei->dev,
			"channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\tserial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
			fei->channel_data[index], index,
			tsin->tsin_id, tsin->invert_ts_clk,
			tsin->serial_not_parallel, tsin->async_not_sync,
			tsin->dvb_card);

		index++;
	}

	/* Setup timer interrupt */
	timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);

	mutex_init(&fei->lock);

	/* Get the configuration information about the tuners */
	ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
					(void *)fei,
					c8sectpfe_start_feed,
					c8sectpfe_stop_feed);
	if (ret) {
		dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
			ret);
		goto err_clk_disable;
	}

	c8sectpfe_debugfs_init(fei);

	return 0;

err_node_put:
	/* drop the child reference held by for_each_child_of_node() */
	of_node_put(child);
err_clk_disable:
	clk_disable_unprepare(fei->c8sectpfeclk);
	return ret;
}
    886
/*
 * Platform remove: wait for the firmware-load acknowledgement, unregister
 * the frontends, free every input block's buffers, stop the memdma SLIM
 * core, gate the internal IP clocks and disable the main clock.
 */
static int c8sectpfe_remove(struct platform_device *pdev)
{
	struct c8sectpfei *fei = platform_get_drvdata(pdev);
	struct channel_info *channel;
	int i;

	/* don't tear down while an async firmware load is in flight */
	wait_for_completion(&fei->fw_ack);

	c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);

	/*
	 * Now loop through and un-configure each of the InputBlock resources
	 */
	for (i = 0; i < fei->tsin_count; i++) {
		channel = fei->channel_data[i];
		free_input_block(fei, channel);
	}

	c8sectpfe_debugfs_exit(fei);

	dev_info(fei->dev, "Stopping memdma SLIM core\n");
	if (readl(fei->io + DMA_CPU_RUN))
		writel(0x0,  fei->io + DMA_CPU_RUN);

	/* unclock all internal IP's */
	if (readl(fei->io + SYS_INPUT_CLKEN))
		writel(0, fei->io + SYS_INPUT_CLKEN);

	if (readl(fei->io + SYS_OTHER_CLKEN))
		writel(0, fei->io + SYS_OTHER_CLKEN);

	if (fei->c8sectpfeclk)
		clk_disable_unprepare(fei->c8sectpfeclk);

	return 0;
}
    923
    924
    925static int configure_channels(struct c8sectpfei *fei)
    926{
    927	int index = 0, ret;
    928	struct device_node *child, *np = fei->dev->of_node;
    929
    930	/* iterate round each tsin and configure memdma descriptor and IB hw */
    931	for_each_child_of_node(np, child) {
    932		ret = configure_memdma_and_inputblock(fei,
    933						fei->channel_data[index]);
    934		if (ret) {
    935			dev_err(fei->dev,
    936				"configure_memdma_and_inputblock failed\n");
    937			goto err_unmap;
    938		}
    939		index++;
    940	}
    941
    942	return 0;
    943
    944err_unmap:
    945	while (--index >= 0)
    946		free_input_block(fei, fei->channel_data[index]);
    947
    948	return ret;
    949}
    950
    951static int
    952c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
    953{
    954	struct elf32_hdr *ehdr;
    955	char class;
    956
    957	if (!fw) {
    958		dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
    959		return -EINVAL;
    960	}
    961
    962	if (fw->size < sizeof(struct elf32_hdr)) {
    963		dev_err(fei->dev, "Image is too small\n");
    964		return -EINVAL;
    965	}
    966
    967	ehdr = (struct elf32_hdr *)fw->data;
    968
    969	/* We only support ELF32 at this point */
    970	class = ehdr->e_ident[EI_CLASS];
    971	if (class != ELFCLASS32) {
    972		dev_err(fei->dev, "Unsupported class: %d\n", class);
    973		return -EINVAL;
    974	}
    975
    976	if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
    977		dev_err(fei->dev, "Unsupported firmware endianness\n");
    978		return -EINVAL;
    979	}
    980
    981	if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
    982		dev_err(fei->dev, "Image is too small\n");
    983		return -EINVAL;
    984	}
    985
    986	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
    987		dev_err(fei->dev, "Image is corrupted (bad magic)\n");
    988		return -EINVAL;
    989	}
    990
    991	/* Check ELF magic */
    992	ehdr = (Elf32_Ehdr *)fw->data;
    993	if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
    994	    ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
    995	    ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
    996	    ehdr->e_ident[EI_MAG3] != ELFMAG3) {
    997		dev_err(fei->dev, "Invalid ELF magic\n");
    998		return -EINVAL;
    999	}
   1000
   1001	if (ehdr->e_type != ET_EXEC) {
   1002		dev_err(fei->dev, "Unsupported ELF header type\n");
   1003		return -EINVAL;
   1004	}
   1005
   1006	if (ehdr->e_phoff > fw->size) {
   1007		dev_err(fei->dev, "Firmware size is too small\n");
   1008		return -EINVAL;
   1009	}
   1010
   1011	return 0;
   1012}
   1013
   1014
   1015static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
   1016			const struct firmware *fw, u8 __iomem *dest,
   1017			int seg_num)
   1018{
   1019	const u8 *imem_src = fw->data + phdr->p_offset;
   1020	int i;
   1021
   1022	/*
   1023	 * For IMEM segments, the segment contains 24-bit
   1024	 * instructions which must be padded to 32-bit
   1025	 * instructions before being written. The written
   1026	 * segment is padded with NOP instructions.
   1027	 */
   1028
   1029	dev_dbg(fei->dev,
   1030		"Loading IMEM segment %d 0x%08x\n\t (0x%x bytes) -> 0x%p (0x%x bytes)\n",
   1031		seg_num, phdr->p_paddr, phdr->p_filesz, dest,
   1032		phdr->p_memsz + phdr->p_memsz / 3);
   1033
   1034	for (i = 0; i < phdr->p_filesz; i++) {
   1035
   1036		writeb(readb((void __iomem *)imem_src), (void __iomem *)dest);
   1037
   1038		/* Every 3 bytes, add an additional
   1039		 * padding zero in destination */
   1040		if (i % 3 == 2) {
   1041			dest++;
   1042			writeb(0x00, (void __iomem *)dest);
   1043		}
   1044
   1045		dest++;
   1046		imem_src++;
   1047	}
   1048}
   1049
   1050static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
   1051			const struct firmware *fw, u8 __iomem *dst, int seg_num)
   1052{
   1053	/*
   1054	 * For DMEM segments copy the segment data from the ELF
   1055	 * file and pad segment with zeroes
   1056	 */
   1057
   1058	dev_dbg(fei->dev,
   1059		"Loading DMEM segment %d 0x%08x\n\t(0x%x bytes) -> 0x%p (0x%x bytes)\n",
   1060		seg_num, phdr->p_paddr, phdr->p_filesz,
   1061		dst, phdr->p_memsz);
   1062
   1063	memcpy((void __force *)dst, (void *)fw->data + phdr->p_offset,
   1064		phdr->p_filesz);
   1065
   1066	memset((void __force *)dst + phdr->p_filesz, 0,
   1067		phdr->p_memsz - phdr->p_filesz);
   1068}
   1069
   1070static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
   1071{
   1072	Elf32_Ehdr *ehdr;
   1073	Elf32_Phdr *phdr;
   1074	u8 __iomem *dst;
   1075	int err = 0, i;
   1076
   1077	if (!fw || !fei)
   1078		return -EINVAL;
   1079
   1080	ehdr = (Elf32_Ehdr *)fw->data;
   1081	phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);
   1082
   1083	/* go through the available ELF segments */
   1084	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
   1085
   1086		/* Only consider LOAD segments */
   1087		if (phdr->p_type != PT_LOAD)
   1088			continue;
   1089
   1090		/*
   1091		 * Check segment is contained within the fw->data buffer
   1092		 */
   1093		if (phdr->p_offset + phdr->p_filesz > fw->size) {
   1094			dev_err(fei->dev,
   1095				"Segment %d is outside of firmware file\n", i);
   1096			err = -EINVAL;
   1097			break;
   1098		}
   1099
   1100		/*
   1101		 * MEMDMA IMEM has executable flag set, otherwise load
   1102		 * this segment into DMEM.
   1103		 *
   1104		 */
   1105
   1106		if (phdr->p_flags & PF_X) {
   1107			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
   1108			/*
   1109			 * The Slim ELF file uses 32-bit word addressing for
   1110			 * load offsets.
   1111			 */
   1112			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
   1113			load_imem_segment(fei, phdr, fw, dst, i);
   1114		} else {
   1115			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
   1116			/*
   1117			 * The Slim ELF file uses 32-bit word addressing for
   1118			 * load offsets.
   1119			 */
   1120			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
   1121			load_dmem_segment(fei, phdr, fw, dst, i);
   1122		}
   1123	}
   1124
   1125	release_firmware(fw);
   1126	return err;
   1127}
   1128
   1129static int load_c8sectpfe_fw(struct c8sectpfei *fei)
   1130{
   1131	const struct firmware *fw;
   1132	int err;
   1133
   1134	dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
   1135
   1136	err = request_firmware(&fw, FIRMWARE_MEMDMA, fei->dev);
   1137	if (err)
   1138		return err;
   1139
   1140	err = c8sectpfe_elf_sanity_check(fei, fw);
   1141	if (err) {
   1142		dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
   1143			, err);
   1144		release_firmware(fw);
   1145		return err;
   1146	}
   1147
   1148	err = load_slim_core_fw(fw, fei);
   1149	if (err) {
   1150		dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
   1151		return err;
   1152	}
   1153
   1154	/* now the firmware is loaded configure the input blocks */
   1155	err = configure_channels(fei);
   1156	if (err) {
   1157		dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
   1158		return err;
   1159	}
   1160
   1161	/*
   1162	 * STBus target port can access IMEM and DMEM ports
   1163	 * without waiting for CPU
   1164	 */
   1165	writel(0x1, fei->io + DMA_PER_STBUS_SYNC);
   1166
   1167	dev_info(fei->dev, "Boot the memdma SLIM core\n");
   1168	writel(0x1,  fei->io + DMA_CPU_RUN);
   1169
   1170	atomic_set(&fei->fw_loaded, 1);
   1171
   1172	return 0;
   1173}
   1174
/* Device-tree compatible strings handled by this driver */
static const struct of_device_id c8sectpfe_match[] = {
	{ .compatible = "st,stih407-c8sectpfe" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, c8sectpfe_match);
   1180
/*
 * Platform driver registration; probe/remove callbacks are declared
 * elsewhere (presumably in c8sectpfe-core.h — not visible here).
 */
static struct platform_driver c8sectpfe_driver = {
	.driver = {
		.name = "c8sectpfe",
		.of_match_table = of_match_ptr(c8sectpfe_match),
	},
	.probe	= c8sectpfe_probe,
	.remove	= c8sectpfe_remove,
};

module_platform_driver(c8sectpfe_driver);
   1191
   1192MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
   1193MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
   1194MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
   1195MODULE_LICENSE("GPL");