cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ipu-common.c (36485B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
      4 * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
      5 */
      6#include <linux/module.h>
      7#include <linux/export.h>
      8#include <linux/types.h>
      9#include <linux/reset.h>
     10#include <linux/platform_device.h>
     11#include <linux/err.h>
     12#include <linux/spinlock.h>
     13#include <linux/delay.h>
     14#include <linux/interrupt.h>
     15#include <linux/io.h>
     16#include <linux/clk.h>
     17#include <linux/list.h>
     18#include <linux/irq.h>
     19#include <linux/irqchip/chained_irq.h>
     20#include <linux/irqdomain.h>
     21#include <linux/of_device.h>
     22#include <linux/of_graph.h>
     23
     24#include <drm/drm_fourcc.h>
     25
     26#include <video/imx-ipu-v3.h>
     27#include "ipu-prv.h"
     28
     29static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
     30{
     31	return readl(ipu->cm_reg + offset);
     32}
     33
     34static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
     35{
     36	writel(value, ipu->cm_reg + offset);
     37}
     38
     39int ipu_get_num(struct ipu_soc *ipu)
     40{
     41	return ipu->id;
     42}
     43EXPORT_SYMBOL_GPL(ipu_get_num);
     44
     45void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync)
     46{
     47	u32 val;
     48
     49	val = ipu_cm_read(ipu, IPU_SRM_PRI2);
     50	val &= ~DP_S_SRM_MODE_MASK;
     51	val |= sync ? DP_S_SRM_MODE_NEXT_FRAME :
     52		      DP_S_SRM_MODE_NOW;
     53	ipu_cm_write(ipu, val, IPU_SRM_PRI2);
     54}
     55EXPORT_SYMBOL_GPL(ipu_srm_dp_update);
     56
     57enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
     58{
     59	switch (drm_fourcc) {
     60	case DRM_FORMAT_ARGB1555:
     61	case DRM_FORMAT_ABGR1555:
     62	case DRM_FORMAT_RGBA5551:
     63	case DRM_FORMAT_BGRA5551:
     64	case DRM_FORMAT_RGB565:
     65	case DRM_FORMAT_BGR565:
     66	case DRM_FORMAT_RGB888:
     67	case DRM_FORMAT_BGR888:
     68	case DRM_FORMAT_ARGB4444:
     69	case DRM_FORMAT_XRGB8888:
     70	case DRM_FORMAT_XBGR8888:
     71	case DRM_FORMAT_RGBX8888:
     72	case DRM_FORMAT_BGRX8888:
     73	case DRM_FORMAT_ARGB8888:
     74	case DRM_FORMAT_ABGR8888:
     75	case DRM_FORMAT_RGBA8888:
     76	case DRM_FORMAT_BGRA8888:
     77	case DRM_FORMAT_RGB565_A8:
     78	case DRM_FORMAT_BGR565_A8:
     79	case DRM_FORMAT_RGB888_A8:
     80	case DRM_FORMAT_BGR888_A8:
     81	case DRM_FORMAT_RGBX8888_A8:
     82	case DRM_FORMAT_BGRX8888_A8:
     83		return IPUV3_COLORSPACE_RGB;
     84	case DRM_FORMAT_YUYV:
     85	case DRM_FORMAT_UYVY:
     86	case DRM_FORMAT_YUV420:
     87	case DRM_FORMAT_YVU420:
     88	case DRM_FORMAT_YUV422:
     89	case DRM_FORMAT_YVU422:
     90	case DRM_FORMAT_YUV444:
     91	case DRM_FORMAT_YVU444:
     92	case DRM_FORMAT_NV12:
     93	case DRM_FORMAT_NV21:
     94	case DRM_FORMAT_NV16:
     95	case DRM_FORMAT_NV61:
     96		return IPUV3_COLORSPACE_YUV;
     97	default:
     98		return IPUV3_COLORSPACE_UNKNOWN;
     99	}
    100}
    101EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);
    102
    103enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
    104{
    105	switch (pixelformat) {
    106	case V4L2_PIX_FMT_YUV420:
    107	case V4L2_PIX_FMT_YVU420:
    108	case V4L2_PIX_FMT_YUV422P:
    109	case V4L2_PIX_FMT_UYVY:
    110	case V4L2_PIX_FMT_YUYV:
    111	case V4L2_PIX_FMT_NV12:
    112	case V4L2_PIX_FMT_NV21:
    113	case V4L2_PIX_FMT_NV16:
    114	case V4L2_PIX_FMT_NV61:
    115		return IPUV3_COLORSPACE_YUV;
    116	case V4L2_PIX_FMT_RGB565:
    117	case V4L2_PIX_FMT_BGR24:
    118	case V4L2_PIX_FMT_RGB24:
    119	case V4L2_PIX_FMT_ABGR32:
    120	case V4L2_PIX_FMT_XBGR32:
    121	case V4L2_PIX_FMT_BGRA32:
    122	case V4L2_PIX_FMT_BGRX32:
    123	case V4L2_PIX_FMT_RGBA32:
    124	case V4L2_PIX_FMT_RGBX32:
    125	case V4L2_PIX_FMT_ARGB32:
    126	case V4L2_PIX_FMT_XRGB32:
    127	case V4L2_PIX_FMT_RGB32:
    128	case V4L2_PIX_FMT_BGR32:
    129		return IPUV3_COLORSPACE_RGB;
    130	default:
    131		return IPUV3_COLORSPACE_UNKNOWN;
    132	}
    133}
    134EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
    135
    136int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
    137			    bool hflip, bool vflip)
    138{
    139	u32 r90, vf, hf;
    140
    141	switch (degrees) {
    142	case 0:
    143		vf = hf = r90 = 0;
    144		break;
    145	case 90:
    146		vf = hf = 0;
    147		r90 = 1;
    148		break;
    149	case 180:
    150		vf = hf = 1;
    151		r90 = 0;
    152		break;
    153	case 270:
    154		vf = hf = r90 = 1;
    155		break;
    156	default:
    157		return -EINVAL;
    158	}
    159
    160	hf ^= (u32)hflip;
    161	vf ^= (u32)vflip;
    162
    163	*mode = (enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf);
    164	return 0;
    165}
    166EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode);
    167
    168int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
    169			    bool hflip, bool vflip)
    170{
    171	u32 r90, vf, hf;
    172
    173	r90 = ((u32)mode >> 2) & 0x1;
    174	hf = ((u32)mode >> 1) & 0x1;
    175	vf = ((u32)mode >> 0) & 0x1;
    176	hf ^= (u32)hflip;
    177	vf ^= (u32)vflip;
    178
    179	switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) {
    180	case IPU_ROTATE_NONE:
    181		*degrees = 0;
    182		break;
    183	case IPU_ROTATE_90_RIGHT:
    184		*degrees = 90;
    185		break;
    186	case IPU_ROTATE_180:
    187		*degrees = 180;
    188		break;
    189	case IPU_ROTATE_90_LEFT:
    190		*degrees = 270;
    191		break;
    192	default:
    193		return -EINVAL;
    194	}
    195
    196	return 0;
    197}
    198EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees);
    199
    200struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
    201{
    202	struct ipuv3_channel *channel;
    203
    204	dev_dbg(ipu->dev, "%s %d\n", __func__, num);
    205
    206	if (num > 63)
    207		return ERR_PTR(-ENODEV);
    208
    209	mutex_lock(&ipu->channel_lock);
    210
    211	list_for_each_entry(channel, &ipu->channels, list) {
    212		if (channel->num == num) {
    213			channel = ERR_PTR(-EBUSY);
    214			goto out;
    215		}
    216	}
    217
    218	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
    219	if (!channel) {
    220		channel = ERR_PTR(-ENOMEM);
    221		goto out;
    222	}
    223
    224	channel->num = num;
    225	channel->ipu = ipu;
    226	list_add(&channel->list, &ipu->channels);
    227
    228out:
    229	mutex_unlock(&ipu->channel_lock);
    230
    231	return channel;
    232}
    233EXPORT_SYMBOL_GPL(ipu_idmac_get);
    234
    235void ipu_idmac_put(struct ipuv3_channel *channel)
    236{
    237	struct ipu_soc *ipu = channel->ipu;
    238
    239	dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);
    240
    241	mutex_lock(&ipu->channel_lock);
    242
    243	list_del(&channel->list);
    244	kfree(channel);
    245
    246	mutex_unlock(&ipu->channel_lock);
    247}
    248EXPORT_SYMBOL_GPL(ipu_idmac_put);
    249
    250#define idma_mask(ch)			(1 << ((ch) & 0x1f))
    251
    252/*
    253 * This is an undocumented feature, a write one to a channel bit in
    254 * IPU_CHA_CUR_BUF and IPU_CHA_TRIPLE_CUR_BUF will reset the channel's
    255 * internal current buffer pointer so that transfers start from buffer
    256 * 0 on the next channel enable (that's the theory anyway, the imx6 TRM
    257 * only says these are read-only registers). This operation is required
    258 * for channel linking to work correctly, for instance video capture
    259 * pipelines that carry out image rotations will fail after the first
    260 * streaming unless this function is called for each channel before
    261 * re-enabling the channels.
    262 */
    263static void __ipu_idmac_reset_current_buffer(struct ipuv3_channel *channel)
    264{
    265	struct ipu_soc *ipu = channel->ipu;
    266	unsigned int chno = channel->num;
    267
    268	ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_CUR_BUF(chno));
    269}
    270
    271void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
    272		bool doublebuffer)
    273{
    274	struct ipu_soc *ipu = channel->ipu;
    275	unsigned long flags;
    276	u32 reg;
    277
    278	spin_lock_irqsave(&ipu->lock, flags);
    279
    280	reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
    281	if (doublebuffer)
    282		reg |= idma_mask(channel->num);
    283	else
    284		reg &= ~idma_mask(channel->num);
    285	ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));
    286
    287	__ipu_idmac_reset_current_buffer(channel);
    288
    289	spin_unlock_irqrestore(&ipu->lock, flags);
    290}
    291EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);
    292
    293static const struct {
    294	int chnum;
    295	u32 reg;
    296	int shift;
    297} idmac_lock_en_info[] = {
    298	{ .chnum =  5, .reg = IDMAC_CH_LOCK_EN_1, .shift =  0, },
    299	{ .chnum = 11, .reg = IDMAC_CH_LOCK_EN_1, .shift =  2, },
    300	{ .chnum = 12, .reg = IDMAC_CH_LOCK_EN_1, .shift =  4, },
    301	{ .chnum = 14, .reg = IDMAC_CH_LOCK_EN_1, .shift =  6, },
    302	{ .chnum = 15, .reg = IDMAC_CH_LOCK_EN_1, .shift =  8, },
    303	{ .chnum = 20, .reg = IDMAC_CH_LOCK_EN_1, .shift = 10, },
    304	{ .chnum = 21, .reg = IDMAC_CH_LOCK_EN_1, .shift = 12, },
    305	{ .chnum = 22, .reg = IDMAC_CH_LOCK_EN_1, .shift = 14, },
    306	{ .chnum = 23, .reg = IDMAC_CH_LOCK_EN_1, .shift = 16, },
    307	{ .chnum = 27, .reg = IDMAC_CH_LOCK_EN_1, .shift = 18, },
    308	{ .chnum = 28, .reg = IDMAC_CH_LOCK_EN_1, .shift = 20, },
    309	{ .chnum = 45, .reg = IDMAC_CH_LOCK_EN_2, .shift =  0, },
    310	{ .chnum = 46, .reg = IDMAC_CH_LOCK_EN_2, .shift =  2, },
    311	{ .chnum = 47, .reg = IDMAC_CH_LOCK_EN_2, .shift =  4, },
    312	{ .chnum = 48, .reg = IDMAC_CH_LOCK_EN_2, .shift =  6, },
    313	{ .chnum = 49, .reg = IDMAC_CH_LOCK_EN_2, .shift =  8, },
    314	{ .chnum = 50, .reg = IDMAC_CH_LOCK_EN_2, .shift = 10, },
    315};
    316
    317int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
    318{
    319	struct ipu_soc *ipu = channel->ipu;
    320	unsigned long flags;
    321	u32 bursts, regval;
    322	int i;
    323
    324	switch (num_bursts) {
    325	case 0:
    326	case 1:
    327		bursts = 0x00; /* locking disabled */
    328		break;
    329	case 2:
    330		bursts = 0x01;
    331		break;
    332	case 4:
    333		bursts = 0x02;
    334		break;
    335	case 8:
    336		bursts = 0x03;
    337		break;
    338	default:
    339		return -EINVAL;
    340	}
    341
    342	/*
    343	 * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
    344	 * i.MX53 channel arbitration locking doesn't seem to work properly.
    345	 * Allow enabling the lock feature on IPUv3H / i.MX6 only.
    346	 */
    347	if (bursts && ipu->ipu_type != IPUV3H)
    348		return -EINVAL;
    349
    350	for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
    351		if (channel->num == idmac_lock_en_info[i].chnum)
    352			break;
    353	}
    354	if (i >= ARRAY_SIZE(idmac_lock_en_info))
    355		return -EINVAL;
    356
    357	spin_lock_irqsave(&ipu->lock, flags);
    358
    359	regval = ipu_idmac_read(ipu, idmac_lock_en_info[i].reg);
    360	regval &= ~(0x03 << idmac_lock_en_info[i].shift);
    361	regval |= (bursts << idmac_lock_en_info[i].shift);
    362	ipu_idmac_write(ipu, regval, idmac_lock_en_info[i].reg);
    363
    364	spin_unlock_irqrestore(&ipu->lock, flags);
    365
    366	return 0;
    367}
    368EXPORT_SYMBOL_GPL(ipu_idmac_lock_enable);
    369
    370int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
    371{
    372	unsigned long lock_flags;
    373	u32 val;
    374
    375	spin_lock_irqsave(&ipu->lock, lock_flags);
    376
    377	val = ipu_cm_read(ipu, IPU_DISP_GEN);
    378
    379	if (mask & IPU_CONF_DI0_EN)
    380		val |= IPU_DI0_COUNTER_RELEASE;
    381	if (mask & IPU_CONF_DI1_EN)
    382		val |= IPU_DI1_COUNTER_RELEASE;
    383
    384	ipu_cm_write(ipu, val, IPU_DISP_GEN);
    385
    386	val = ipu_cm_read(ipu, IPU_CONF);
    387	val |= mask;
    388	ipu_cm_write(ipu, val, IPU_CONF);
    389
    390	spin_unlock_irqrestore(&ipu->lock, lock_flags);
    391
    392	return 0;
    393}
    394EXPORT_SYMBOL_GPL(ipu_module_enable);
    395
    396int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
    397{
    398	unsigned long lock_flags;
    399	u32 val;
    400
    401	spin_lock_irqsave(&ipu->lock, lock_flags);
    402
    403	val = ipu_cm_read(ipu, IPU_CONF);
    404	val &= ~mask;
    405	ipu_cm_write(ipu, val, IPU_CONF);
    406
    407	val = ipu_cm_read(ipu, IPU_DISP_GEN);
    408
    409	if (mask & IPU_CONF_DI0_EN)
    410		val &= ~IPU_DI0_COUNTER_RELEASE;
    411	if (mask & IPU_CONF_DI1_EN)
    412		val &= ~IPU_DI1_COUNTER_RELEASE;
    413
    414	ipu_cm_write(ipu, val, IPU_DISP_GEN);
    415
    416	spin_unlock_irqrestore(&ipu->lock, lock_flags);
    417
    418	return 0;
    419}
    420EXPORT_SYMBOL_GPL(ipu_module_disable);
    421
    422int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
    423{
    424	struct ipu_soc *ipu = channel->ipu;
    425	unsigned int chno = channel->num;
    426
    427	return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
    428}
    429EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);
    430
    431bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num)
    432{
    433	struct ipu_soc *ipu = channel->ipu;
    434	unsigned long flags;
    435	u32 reg = 0;
    436
    437	spin_lock_irqsave(&ipu->lock, flags);
    438	switch (buf_num) {
    439	case 0:
    440		reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num));
    441		break;
    442	case 1:
    443		reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num));
    444		break;
    445	case 2:
    446		reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(channel->num));
    447		break;
    448	}
    449	spin_unlock_irqrestore(&ipu->lock, flags);
    450
    451	return ((reg & idma_mask(channel->num)) != 0);
    452}
    453EXPORT_SYMBOL_GPL(ipu_idmac_buffer_is_ready);
    454
    455void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
    456{
    457	struct ipu_soc *ipu = channel->ipu;
    458	unsigned int chno = channel->num;
    459	unsigned long flags;
    460
    461	spin_lock_irqsave(&ipu->lock, flags);
    462
    463	/* Mark buffer as ready. */
    464	if (buf_num == 0)
    465		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
    466	else
    467		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
    468
    469	spin_unlock_irqrestore(&ipu->lock, flags);
    470}
    471EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);
    472
    473void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num)
    474{
    475	struct ipu_soc *ipu = channel->ipu;
    476	unsigned int chno = channel->num;
    477	unsigned long flags;
    478
    479	spin_lock_irqsave(&ipu->lock, flags);
    480
    481	ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
    482	switch (buf_num) {
    483	case 0:
    484		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
    485		break;
    486	case 1:
    487		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
    488		break;
    489	case 2:
    490		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF2_RDY(chno));
    491		break;
    492	default:
    493		break;
    494	}
    495	ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
    496
    497	spin_unlock_irqrestore(&ipu->lock, flags);
    498}
    499EXPORT_SYMBOL_GPL(ipu_idmac_clear_buffer);
    500
    501int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
    502{
    503	struct ipu_soc *ipu = channel->ipu;
    504	u32 val;
    505	unsigned long flags;
    506
    507	spin_lock_irqsave(&ipu->lock, flags);
    508
    509	val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
    510	val |= idma_mask(channel->num);
    511	ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
    512
    513	spin_unlock_irqrestore(&ipu->lock, flags);
    514
    515	return 0;
    516}
    517EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
    518
    519bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
    520{
    521	return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
    522}
    523EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
    524
    525int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
    526{
    527	struct ipu_soc *ipu = channel->ipu;
    528	unsigned long timeout;
    529
    530	timeout = jiffies + msecs_to_jiffies(ms);
    531	while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
    532			idma_mask(channel->num)) {
    533		if (time_after(jiffies, timeout))
    534			return -ETIMEDOUT;
    535		cpu_relax();
    536	}
    537
    538	return 0;
    539}
    540EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
    541
    542int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
    543{
    544	struct ipu_soc *ipu = channel->ipu;
    545	u32 val;
    546	unsigned long flags;
    547
    548	spin_lock_irqsave(&ipu->lock, flags);
    549
    550	/* Disable DMA channel(s) */
    551	val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
    552	val &= ~idma_mask(channel->num);
    553	ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
    554
    555	__ipu_idmac_reset_current_buffer(channel);
    556
    557	/* Set channel buffers NOT to be ready */
    558	ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */
    559
    560	if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
    561			idma_mask(channel->num)) {
    562		ipu_cm_write(ipu, idma_mask(channel->num),
    563			     IPU_CHA_BUF0_RDY(channel->num));
    564	}
    565
    566	if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
    567			idma_mask(channel->num)) {
    568		ipu_cm_write(ipu, idma_mask(channel->num),
    569			     IPU_CHA_BUF1_RDY(channel->num));
    570	}
    571
    572	ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
    573
    574	/* Reset the double buffer */
    575	val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
    576	val &= ~idma_mask(channel->num);
    577	ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));
    578
    579	spin_unlock_irqrestore(&ipu->lock, flags);
    580
    581	return 0;
    582}
    583EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);
    584
    585/*
    586 * The imx6 rev. D TRM says that enabling the WM feature will increase
    587 * a channel's priority. Refer to Table 36-8 Calculated priority value.
    588 * The sub-module that is the sink or source for the channel must enable
    589 * watermark signal for this to take effect (SMFC_WM for instance).
    590 */
    591void ipu_idmac_enable_watermark(struct ipuv3_channel *channel, bool enable)
    592{
    593	struct ipu_soc *ipu = channel->ipu;
    594	unsigned long flags;
    595	u32 val;
    596
    597	spin_lock_irqsave(&ipu->lock, flags);
    598
    599	val = ipu_idmac_read(ipu, IDMAC_WM_EN(channel->num));
    600	if (enable)
    601		val |= 1 << (channel->num % 32);
    602	else
    603		val &= ~(1 << (channel->num % 32));
    604	ipu_idmac_write(ipu, val, IDMAC_WM_EN(channel->num));
    605
    606	spin_unlock_irqrestore(&ipu->lock, flags);
    607}
    608EXPORT_SYMBOL_GPL(ipu_idmac_enable_watermark);
    609
    610static int ipu_memory_reset(struct ipu_soc *ipu)
    611{
    612	unsigned long timeout;
    613
    614	ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
    615
    616	timeout = jiffies + msecs_to_jiffies(1000);
    617	while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
    618		if (time_after(jiffies, timeout))
    619			return -ETIME;
    620		cpu_relax();
    621	}
    622
    623	return 0;
    624}
    625
    626/*
    627 * Set the source mux for the given CSI. Selects either parallel or
    628 * MIPI CSI2 sources.
    629 */
    630void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2)
    631{
    632	unsigned long flags;
    633	u32 val, mask;
    634
    635	mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE :
    636		IPU_CONF_CSI0_DATA_SOURCE;
    637
    638	spin_lock_irqsave(&ipu->lock, flags);
    639
    640	val = ipu_cm_read(ipu, IPU_CONF);
    641	if (mipi_csi2)
    642		val |= mask;
    643	else
    644		val &= ~mask;
    645	ipu_cm_write(ipu, val, IPU_CONF);
    646
    647	spin_unlock_irqrestore(&ipu->lock, flags);
    648}
    649EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux);
    650
    651/*
    652 * Set the source mux for the IC. Selects either CSI[01] or the VDI.
    653 */
    654void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
    655{
    656	unsigned long flags;
    657	u32 val;
    658
    659	spin_lock_irqsave(&ipu->lock, flags);
    660
    661	val = ipu_cm_read(ipu, IPU_CONF);
    662	if (vdi)
    663		val |= IPU_CONF_IC_INPUT;
    664	else
    665		val &= ~IPU_CONF_IC_INPUT;
    666
    667	if (csi_id == 1)
    668		val |= IPU_CONF_CSI_SEL;
    669	else
    670		val &= ~IPU_CONF_CSI_SEL;
    671
    672	ipu_cm_write(ipu, val, IPU_CONF);
    673
    674	spin_unlock_irqrestore(&ipu->lock, flags);
    675}
    676EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
    677
    678
    679/* Frame Synchronization Unit Channel Linking */
    680
    681struct fsu_link_reg_info {
    682	int chno;
    683	u32 reg;
    684	u32 mask;
    685	u32 val;
    686};
    687
    688struct fsu_link_info {
    689	struct fsu_link_reg_info src;
    690	struct fsu_link_reg_info sink;
    691};
    692
    693static const struct fsu_link_info fsu_link_info[] = {
    694	{
    695		.src  = { IPUV3_CHANNEL_IC_PRP_ENC_MEM, IPU_FS_PROC_FLOW2,
    696			  FS_PRP_ENC_DEST_SEL_MASK, FS_PRP_ENC_DEST_SEL_IRT_ENC },
    697		.sink = { IPUV3_CHANNEL_MEM_ROT_ENC, IPU_FS_PROC_FLOW1,
    698			  FS_PRPENC_ROT_SRC_SEL_MASK, FS_PRPENC_ROT_SRC_SEL_ENC },
    699	}, {
    700		.src =  { IPUV3_CHANNEL_IC_PRP_VF_MEM, IPU_FS_PROC_FLOW2,
    701			  FS_PRPVF_DEST_SEL_MASK, FS_PRPVF_DEST_SEL_IRT_VF },
    702		.sink = { IPUV3_CHANNEL_MEM_ROT_VF, IPU_FS_PROC_FLOW1,
    703			  FS_PRPVF_ROT_SRC_SEL_MASK, FS_PRPVF_ROT_SRC_SEL_VF },
    704	}, {
    705		.src =  { IPUV3_CHANNEL_IC_PP_MEM, IPU_FS_PROC_FLOW2,
    706			  FS_PP_DEST_SEL_MASK, FS_PP_DEST_SEL_IRT_PP },
    707		.sink = { IPUV3_CHANNEL_MEM_ROT_PP, IPU_FS_PROC_FLOW1,
    708			  FS_PP_ROT_SRC_SEL_MASK, FS_PP_ROT_SRC_SEL_PP },
    709	}, {
    710		.src =  { IPUV3_CHANNEL_CSI_DIRECT, 0 },
    711		.sink = { IPUV3_CHANNEL_CSI_VDI_PREV, IPU_FS_PROC_FLOW1,
    712			  FS_VDI_SRC_SEL_MASK, FS_VDI_SRC_SEL_CSI_DIRECT },
    713	},
    714};
    715
    716static const struct fsu_link_info *find_fsu_link_info(int src, int sink)
    717{
    718	int i;
    719
    720	for (i = 0; i < ARRAY_SIZE(fsu_link_info); i++) {
    721		if (src == fsu_link_info[i].src.chno &&
    722		    sink == fsu_link_info[i].sink.chno)
    723			return &fsu_link_info[i];
    724	}
    725
    726	return NULL;
    727}
    728
    729/*
    730 * Links a source channel to a sink channel in the FSU.
    731 */
    732int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch)
    733{
    734	const struct fsu_link_info *link;
    735	u32 src_reg, sink_reg;
    736	unsigned long flags;
    737
    738	link = find_fsu_link_info(src_ch, sink_ch);
    739	if (!link)
    740		return -EINVAL;
    741
    742	spin_lock_irqsave(&ipu->lock, flags);
    743
    744	if (link->src.mask) {
    745		src_reg = ipu_cm_read(ipu, link->src.reg);
    746		src_reg &= ~link->src.mask;
    747		src_reg |= link->src.val;
    748		ipu_cm_write(ipu, src_reg, link->src.reg);
    749	}
    750
    751	if (link->sink.mask) {
    752		sink_reg = ipu_cm_read(ipu, link->sink.reg);
    753		sink_reg &= ~link->sink.mask;
    754		sink_reg |= link->sink.val;
    755		ipu_cm_write(ipu, sink_reg, link->sink.reg);
    756	}
    757
    758	spin_unlock_irqrestore(&ipu->lock, flags);
    759	return 0;
    760}
    761EXPORT_SYMBOL_GPL(ipu_fsu_link);
    762
    763/*
    764 * Unlinks source and sink channels in the FSU.
    765 */
    766int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch)
    767{
    768	const struct fsu_link_info *link;
    769	u32 src_reg, sink_reg;
    770	unsigned long flags;
    771
    772	link = find_fsu_link_info(src_ch, sink_ch);
    773	if (!link)
    774		return -EINVAL;
    775
    776	spin_lock_irqsave(&ipu->lock, flags);
    777
    778	if (link->src.mask) {
    779		src_reg = ipu_cm_read(ipu, link->src.reg);
    780		src_reg &= ~link->src.mask;
    781		ipu_cm_write(ipu, src_reg, link->src.reg);
    782	}
    783
    784	if (link->sink.mask) {
    785		sink_reg = ipu_cm_read(ipu, link->sink.reg);
    786		sink_reg &= ~link->sink.mask;
    787		ipu_cm_write(ipu, sink_reg, link->sink.reg);
    788	}
    789
    790	spin_unlock_irqrestore(&ipu->lock, flags);
    791	return 0;
    792}
    793EXPORT_SYMBOL_GPL(ipu_fsu_unlink);
    794
    795/* Link IDMAC channels in the FSU */
    796int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink)
    797{
    798	return ipu_fsu_link(src->ipu, src->num, sink->num);
    799}
    800EXPORT_SYMBOL_GPL(ipu_idmac_link);
    801
    802/* Unlink IDMAC channels in the FSU */
    803int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink)
    804{
    805	return ipu_fsu_unlink(src->ipu, src->num, sink->num);
    806}
    807EXPORT_SYMBOL_GPL(ipu_idmac_unlink);
    808
    809struct ipu_devtype {
    810	const char *name;
    811	unsigned long cm_ofs;
    812	unsigned long cpmem_ofs;
    813	unsigned long srm_ofs;
    814	unsigned long tpm_ofs;
    815	unsigned long csi0_ofs;
    816	unsigned long csi1_ofs;
    817	unsigned long ic_ofs;
    818	unsigned long disp0_ofs;
    819	unsigned long disp1_ofs;
    820	unsigned long dc_tmpl_ofs;
    821	unsigned long vdi_ofs;
    822	enum ipuv3_type type;
    823};
    824
    825static struct ipu_devtype ipu_type_imx51 = {
    826	.name = "IPUv3EX",
    827	.cm_ofs = 0x1e000000,
    828	.cpmem_ofs = 0x1f000000,
    829	.srm_ofs = 0x1f040000,
    830	.tpm_ofs = 0x1f060000,
    831	.csi0_ofs = 0x1e030000,
    832	.csi1_ofs = 0x1e038000,
    833	.ic_ofs = 0x1e020000,
    834	.disp0_ofs = 0x1e040000,
    835	.disp1_ofs = 0x1e048000,
    836	.dc_tmpl_ofs = 0x1f080000,
    837	.vdi_ofs = 0x1e068000,
    838	.type = IPUV3EX,
    839};
    840
    841static struct ipu_devtype ipu_type_imx53 = {
    842	.name = "IPUv3M",
    843	.cm_ofs = 0x06000000,
    844	.cpmem_ofs = 0x07000000,
    845	.srm_ofs = 0x07040000,
    846	.tpm_ofs = 0x07060000,
    847	.csi0_ofs = 0x06030000,
    848	.csi1_ofs = 0x06038000,
    849	.ic_ofs = 0x06020000,
    850	.disp0_ofs = 0x06040000,
    851	.disp1_ofs = 0x06048000,
    852	.dc_tmpl_ofs = 0x07080000,
    853	.vdi_ofs = 0x06068000,
    854	.type = IPUV3M,
    855};
    856
    857static struct ipu_devtype ipu_type_imx6q = {
    858	.name = "IPUv3H",
    859	.cm_ofs = 0x00200000,
    860	.cpmem_ofs = 0x00300000,
    861	.srm_ofs = 0x00340000,
    862	.tpm_ofs = 0x00360000,
    863	.csi0_ofs = 0x00230000,
    864	.csi1_ofs = 0x00238000,
    865	.ic_ofs = 0x00220000,
    866	.disp0_ofs = 0x00240000,
    867	.disp1_ofs = 0x00248000,
    868	.dc_tmpl_ofs = 0x00380000,
    869	.vdi_ofs = 0x00268000,
    870	.type = IPUV3H,
    871};
    872
    873static const struct of_device_id imx_ipu_dt_ids[] = {
    874	{ .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
    875	{ .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
    876	{ .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
    877	{ .compatible = "fsl,imx6qp-ipu", .data = &ipu_type_imx6q, },
    878	{ /* sentinel */ }
    879};
    880MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);
    881
    882static int ipu_submodules_init(struct ipu_soc *ipu,
    883		struct platform_device *pdev, unsigned long ipu_base,
    884		struct clk *ipu_clk)
    885{
    886	char *unit;
    887	int ret;
    888	struct device *dev = &pdev->dev;
    889	const struct ipu_devtype *devtype = ipu->devtype;
    890
    891	ret = ipu_cpmem_init(ipu, dev, ipu_base + devtype->cpmem_ofs);
    892	if (ret) {
    893		unit = "cpmem";
    894		goto err_cpmem;
    895	}
    896
    897	ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs,
    898			   IPU_CONF_CSI0_EN, ipu_clk);
    899	if (ret) {
    900		unit = "csi0";
    901		goto err_csi_0;
    902	}
    903
    904	ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs,
    905			   IPU_CONF_CSI1_EN, ipu_clk);
    906	if (ret) {
    907		unit = "csi1";
    908		goto err_csi_1;
    909	}
    910
    911	ret = ipu_ic_init(ipu, dev,
    912			  ipu_base + devtype->ic_ofs,
    913			  ipu_base + devtype->tpm_ofs);
    914	if (ret) {
    915		unit = "ic";
    916		goto err_ic;
    917	}
    918
    919	ret = ipu_vdi_init(ipu, dev, ipu_base + devtype->vdi_ofs,
    920			   IPU_CONF_VDI_EN | IPU_CONF_ISP_EN |
    921			   IPU_CONF_IC_INPUT);
    922	if (ret) {
    923		unit = "vdi";
    924		goto err_vdi;
    925	}
    926
    927	ret = ipu_image_convert_init(ipu, dev);
    928	if (ret) {
    929		unit = "image_convert";
    930		goto err_image_convert;
    931	}
    932
    933	ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
    934			  IPU_CONF_DI0_EN, ipu_clk);
    935	if (ret) {
    936		unit = "di0";
    937		goto err_di_0;
    938	}
    939
    940	ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
    941			IPU_CONF_DI1_EN, ipu_clk);
    942	if (ret) {
    943		unit = "di1";
    944		goto err_di_1;
    945	}
    946
    947	ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
    948			IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
    949	if (ret) {
    950		unit = "dc_template";
    951		goto err_dc;
    952	}
    953
    954	ret = ipu_dmfc_init(ipu, dev, ipu_base +
    955			devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
    956	if (ret) {
    957		unit = "dmfc";
    958		goto err_dmfc;
    959	}
    960
    961	ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
    962	if (ret) {
    963		unit = "dp";
    964		goto err_dp;
    965	}
    966
    967	ret = ipu_smfc_init(ipu, dev, ipu_base +
    968			devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
    969	if (ret) {
    970		unit = "smfc";
    971		goto err_smfc;
    972	}
    973
    974	return 0;
    975
    976err_smfc:
    977	ipu_dp_exit(ipu);
    978err_dp:
    979	ipu_dmfc_exit(ipu);
    980err_dmfc:
    981	ipu_dc_exit(ipu);
    982err_dc:
    983	ipu_di_exit(ipu, 1);
    984err_di_1:
    985	ipu_di_exit(ipu, 0);
    986err_di_0:
    987	ipu_image_convert_exit(ipu);
    988err_image_convert:
    989	ipu_vdi_exit(ipu);
    990err_vdi:
    991	ipu_ic_exit(ipu);
    992err_ic:
    993	ipu_csi_exit(ipu, 1);
    994err_csi_1:
    995	ipu_csi_exit(ipu, 0);
    996err_csi_0:
    997	ipu_cpmem_exit(ipu);
    998err_cpmem:
    999	dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
   1000	return ret;
   1001}
   1002
   1003static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
   1004{
   1005	unsigned long status;
   1006	int i, bit;
   1007
   1008	for (i = 0; i < num_regs; i++) {
   1009
   1010		status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
   1011		status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
   1012
   1013		for_each_set_bit(bit, &status, 32)
   1014			generic_handle_domain_irq(ipu->domain,
   1015						  regs[i] * 32 + bit);
   1016	}
   1017}
   1018
   1019static void ipu_irq_handler(struct irq_desc *desc)
   1020{
   1021	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
   1022	struct irq_chip *chip = irq_desc_get_chip(desc);
   1023	static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
   1024
   1025	chained_irq_enter(chip, desc);
   1026
   1027	ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
   1028
   1029	chained_irq_exit(chip, desc);
   1030}
   1031
   1032static void ipu_err_irq_handler(struct irq_desc *desc)
   1033{
   1034	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
   1035	struct irq_chip *chip = irq_desc_get_chip(desc);
   1036	static const int int_reg[] = { 4, 5, 8, 9};
   1037
   1038	chained_irq_enter(chip, desc);
   1039
   1040	ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
   1041
   1042	chained_irq_exit(chip, desc);
   1043}
   1044
   1045int ipu_map_irq(struct ipu_soc *ipu, int irq)
   1046{
   1047	int virq;
   1048
   1049	virq = irq_linear_revmap(ipu->domain, irq);
   1050	if (!virq)
   1051		virq = irq_create_mapping(ipu->domain, irq);
   1052
   1053	return virq;
   1054}
   1055EXPORT_SYMBOL_GPL(ipu_map_irq);
   1056
   1057int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
   1058		enum ipu_channel_irq irq_type)
   1059{
   1060	return ipu_map_irq(ipu, irq_type + channel->num);
   1061}
   1062EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
   1063
   1064static void ipu_submodules_exit(struct ipu_soc *ipu)
   1065{
   1066	ipu_smfc_exit(ipu);
   1067	ipu_dp_exit(ipu);
   1068	ipu_dmfc_exit(ipu);
   1069	ipu_dc_exit(ipu);
   1070	ipu_di_exit(ipu, 1);
   1071	ipu_di_exit(ipu, 0);
   1072	ipu_image_convert_exit(ipu);
   1073	ipu_vdi_exit(ipu);
   1074	ipu_ic_exit(ipu);
   1075	ipu_csi_exit(ipu, 1);
   1076	ipu_csi_exit(ipu, 0);
   1077	ipu_cpmem_exit(ipu);
   1078}
   1079
   1080static int platform_remove_devices_fn(struct device *dev, void *unused)
   1081{
   1082	struct platform_device *pdev = to_platform_device(dev);
   1083
   1084	platform_device_unregister(pdev);
   1085
   1086	return 0;
   1087}
   1088
   1089static void platform_device_unregister_children(struct platform_device *pdev)
   1090{
   1091	device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
   1092}
   1093
   1094struct ipu_platform_reg {
   1095	struct ipu_client_platformdata pdata;
   1096	const char *name;
   1097};
   1098
   1099/* These must be in the order of the corresponding device tree port nodes */
   1100static struct ipu_platform_reg client_reg[] = {
   1101	{
   1102		.pdata = {
   1103			.csi = 0,
   1104			.dma[0] = IPUV3_CHANNEL_CSI0,
   1105			.dma[1] = -EINVAL,
   1106		},
   1107		.name = "imx-ipuv3-csi",
   1108	}, {
   1109		.pdata = {
   1110			.csi = 1,
   1111			.dma[0] = IPUV3_CHANNEL_CSI1,
   1112			.dma[1] = -EINVAL,
   1113		},
   1114		.name = "imx-ipuv3-csi",
   1115	}, {
   1116		.pdata = {
   1117			.di = 0,
   1118			.dc = 5,
   1119			.dp = IPU_DP_FLOW_SYNC_BG,
   1120			.dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
   1121			.dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
   1122		},
   1123		.name = "imx-ipuv3-crtc",
   1124	}, {
   1125		.pdata = {
   1126			.di = 1,
   1127			.dc = 1,
   1128			.dp = -EINVAL,
   1129			.dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
   1130			.dma[1] = -EINVAL,
   1131		},
   1132		.name = "imx-ipuv3-crtc",
   1133	},
   1134};
   1135
   1136static DEFINE_MUTEX(ipu_client_id_mutex);
   1137static int ipu_client_id;
   1138
   1139static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
   1140{
   1141	struct device *dev = ipu->dev;
   1142	unsigned i;
   1143	int id, ret;
   1144
   1145	mutex_lock(&ipu_client_id_mutex);
   1146	id = ipu_client_id;
   1147	ipu_client_id += ARRAY_SIZE(client_reg);
   1148	mutex_unlock(&ipu_client_id_mutex);
   1149
   1150	for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
   1151		struct ipu_platform_reg *reg = &client_reg[i];
   1152		struct platform_device *pdev;
   1153		struct device_node *of_node;
   1154
   1155		/* Associate subdevice with the corresponding port node */
   1156		of_node = of_graph_get_port_by_id(dev->of_node, i);
   1157		if (!of_node) {
   1158			dev_info(dev,
   1159				 "no port@%d node in %pOF, not using %s%d\n",
   1160				 i, dev->of_node,
   1161				 (i / 2) ? "DI" : "CSI", i % 2);
   1162			continue;
   1163		}
   1164
   1165		pdev = platform_device_alloc(reg->name, id++);
   1166		if (!pdev) {
   1167			ret = -ENOMEM;
   1168			goto err_register;
   1169		}
   1170
   1171		pdev->dev.parent = dev;
   1172
   1173		reg->pdata.of_node = of_node;
   1174		ret = platform_device_add_data(pdev, &reg->pdata,
   1175					       sizeof(reg->pdata));
   1176		if (!ret)
   1177			ret = platform_device_add(pdev);
   1178		if (ret) {
   1179			platform_device_put(pdev);
   1180			goto err_register;
   1181		}
   1182	}
   1183
   1184	return 0;
   1185
   1186err_register:
   1187	platform_device_unregister_children(to_platform_device(dev));
   1188
   1189	return ret;
   1190}
   1191
   1192
   1193static int ipu_irq_init(struct ipu_soc *ipu)
   1194{
   1195	struct irq_chip_generic *gc;
   1196	struct irq_chip_type *ct;
   1197	unsigned long unused[IPU_NUM_IRQS / 32] = {
   1198		0x400100d0, 0xffe000fd,
   1199		0x400100d0, 0xffe000fd,
   1200		0x400100d0, 0xffe000fd,
   1201		0x4077ffff, 0xffe7e1fd,
   1202		0x23fffffe, 0x8880fff0,
   1203		0xf98fe7d0, 0xfff81fff,
   1204		0x400100d0, 0xffe000fd,
   1205		0x00000000,
   1206	};
   1207	int ret, i;
   1208
   1209	ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
   1210					    &irq_generic_chip_ops, ipu);
   1211	if (!ipu->domain) {
   1212		dev_err(ipu->dev, "failed to add irq domain\n");
   1213		return -ENODEV;
   1214	}
   1215
   1216	ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
   1217					     handle_level_irq, 0, 0, 0);
   1218	if (ret < 0) {
   1219		dev_err(ipu->dev, "failed to alloc generic irq chips\n");
   1220		irq_domain_remove(ipu->domain);
   1221		return ret;
   1222	}
   1223
   1224	/* Mask and clear all interrupts */
   1225	for (i = 0; i < IPU_NUM_IRQS; i += 32) {
   1226		ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
   1227		ipu_cm_write(ipu, ~unused[i / 32], IPU_INT_STAT(i / 32));
   1228	}
   1229
   1230	for (i = 0; i < IPU_NUM_IRQS; i += 32) {
   1231		gc = irq_get_domain_generic_chip(ipu->domain, i);
   1232		gc->reg_base = ipu->cm_reg;
   1233		gc->unused = unused[i / 32];
   1234		ct = gc->chip_types;
   1235		ct->chip.irq_ack = irq_gc_ack_set_bit;
   1236		ct->chip.irq_mask = irq_gc_mask_clr_bit;
   1237		ct->chip.irq_unmask = irq_gc_mask_set_bit;
   1238		ct->regs.ack = IPU_INT_STAT(i / 32);
   1239		ct->regs.mask = IPU_INT_CTRL(i / 32);
   1240	}
   1241
   1242	irq_set_chained_handler_and_data(ipu->irq_sync, ipu_irq_handler, ipu);
   1243	irq_set_chained_handler_and_data(ipu->irq_err, ipu_err_irq_handler,
   1244					 ipu);
   1245
   1246	return 0;
   1247}
   1248
   1249static void ipu_irq_exit(struct ipu_soc *ipu)
   1250{
   1251	int i, irq;
   1252
   1253	irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
   1254	irq_set_chained_handler_and_data(ipu->irq_sync, NULL, NULL);
   1255
   1256	/* TODO: remove irq_domain_generic_chips */
   1257
   1258	for (i = 0; i < IPU_NUM_IRQS; i++) {
   1259		irq = irq_linear_revmap(ipu->domain, i);
   1260		if (irq)
   1261			irq_dispose_mapping(irq);
   1262	}
   1263
   1264	irq_domain_remove(ipu->domain);
   1265}
   1266
   1267void ipu_dump(struct ipu_soc *ipu)
   1268{
   1269	int i;
   1270
   1271	dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n",
   1272		ipu_cm_read(ipu, IPU_CONF));
   1273	dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n",
   1274		ipu_idmac_read(ipu, IDMAC_CONF));
   1275	dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
   1276		ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
   1277	dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
   1278		ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
   1279	dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
   1280		ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
   1281	dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
   1282		ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
   1283	dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
   1284		ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
   1285	dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
   1286		ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
   1287	dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
   1288		ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
   1289	dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
   1290		ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
   1291	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
   1292		ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
   1293	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
   1294		ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
   1295	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
   1296		ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
   1297	dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
   1298		ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
   1299	for (i = 0; i < 15; i++)
   1300		dev_dbg(ipu->dev, "IPU_INT_CTRL(%d) = \t%08X\n", i,
   1301			ipu_cm_read(ipu, IPU_INT_CTRL(i)));
   1302}
   1303EXPORT_SYMBOL_GPL(ipu_dump);
   1304
   1305static int ipu_probe(struct platform_device *pdev)
   1306{
   1307	struct device_node *np = pdev->dev.of_node;
   1308	struct ipu_soc *ipu;
   1309	struct resource *res;
   1310	unsigned long ipu_base;
   1311	int ret, irq_sync, irq_err;
   1312	const struct ipu_devtype *devtype;
   1313
   1314	devtype = of_device_get_match_data(&pdev->dev);
   1315	if (!devtype)
   1316		return -EINVAL;
   1317
   1318	irq_sync = platform_get_irq(pdev, 0);
   1319	irq_err = platform_get_irq(pdev, 1);
   1320	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   1321
   1322	dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
   1323			irq_sync, irq_err);
   1324
   1325	if (!res || irq_sync < 0 || irq_err < 0)
   1326		return -ENODEV;
   1327
   1328	ipu_base = res->start;
   1329
   1330	ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
   1331	if (!ipu)
   1332		return -ENODEV;
   1333
   1334	ipu->id = of_alias_get_id(np, "ipu");
   1335	if (ipu->id < 0)
   1336		ipu->id = 0;
   1337
   1338	if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
   1339	    IS_ENABLED(CONFIG_DRM)) {
   1340		ipu->prg_priv = ipu_prg_lookup_by_phandle(&pdev->dev,
   1341							  "fsl,prg", ipu->id);
   1342		if (!ipu->prg_priv)
   1343			return -EPROBE_DEFER;
   1344	}
   1345
   1346	ipu->devtype = devtype;
   1347	ipu->ipu_type = devtype->type;
   1348
   1349	spin_lock_init(&ipu->lock);
   1350	mutex_init(&ipu->channel_lock);
   1351	INIT_LIST_HEAD(&ipu->channels);
   1352
   1353	dev_dbg(&pdev->dev, "cm_reg:   0x%08lx\n",
   1354			ipu_base + devtype->cm_ofs);
   1355	dev_dbg(&pdev->dev, "idmac:    0x%08lx\n",
   1356			ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
   1357	dev_dbg(&pdev->dev, "cpmem:    0x%08lx\n",
   1358			ipu_base + devtype->cpmem_ofs);
   1359	dev_dbg(&pdev->dev, "csi0:    0x%08lx\n",
   1360			ipu_base + devtype->csi0_ofs);
   1361	dev_dbg(&pdev->dev, "csi1:    0x%08lx\n",
   1362			ipu_base + devtype->csi1_ofs);
   1363	dev_dbg(&pdev->dev, "ic:      0x%08lx\n",
   1364			ipu_base + devtype->ic_ofs);
   1365	dev_dbg(&pdev->dev, "disp0:    0x%08lx\n",
   1366			ipu_base + devtype->disp0_ofs);
   1367	dev_dbg(&pdev->dev, "disp1:    0x%08lx\n",
   1368			ipu_base + devtype->disp1_ofs);
   1369	dev_dbg(&pdev->dev, "srm:      0x%08lx\n",
   1370			ipu_base + devtype->srm_ofs);
   1371	dev_dbg(&pdev->dev, "tpm:      0x%08lx\n",
   1372			ipu_base + devtype->tpm_ofs);
   1373	dev_dbg(&pdev->dev, "dc:       0x%08lx\n",
   1374			ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
   1375	dev_dbg(&pdev->dev, "ic:       0x%08lx\n",
   1376			ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
   1377	dev_dbg(&pdev->dev, "dmfc:     0x%08lx\n",
   1378			ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
   1379	dev_dbg(&pdev->dev, "vdi:      0x%08lx\n",
   1380			ipu_base + devtype->vdi_ofs);
   1381
   1382	ipu->cm_reg = devm_ioremap(&pdev->dev,
   1383			ipu_base + devtype->cm_ofs, PAGE_SIZE);
   1384	ipu->idmac_reg = devm_ioremap(&pdev->dev,
   1385			ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
   1386			PAGE_SIZE);
   1387
   1388	if (!ipu->cm_reg || !ipu->idmac_reg)
   1389		return -ENOMEM;
   1390
   1391	ipu->clk = devm_clk_get(&pdev->dev, "bus");
   1392	if (IS_ERR(ipu->clk)) {
   1393		ret = PTR_ERR(ipu->clk);
   1394		dev_err(&pdev->dev, "clk_get failed with %d", ret);
   1395		return ret;
   1396	}
   1397
   1398	platform_set_drvdata(pdev, ipu);
   1399
   1400	ret = clk_prepare_enable(ipu->clk);
   1401	if (ret) {
   1402		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
   1403		return ret;
   1404	}
   1405
   1406	ipu->dev = &pdev->dev;
   1407	ipu->irq_sync = irq_sync;
   1408	ipu->irq_err = irq_err;
   1409
   1410	ret = device_reset(&pdev->dev);
   1411	if (ret) {
   1412		dev_err(&pdev->dev, "failed to reset: %d\n", ret);
   1413		goto out_failed_reset;
   1414	}
   1415	ret = ipu_memory_reset(ipu);
   1416	if (ret)
   1417		goto out_failed_reset;
   1418
   1419	ret = ipu_irq_init(ipu);
   1420	if (ret)
   1421		goto out_failed_irq;
   1422
   1423	/* Set MCU_T to divide MCU access window into 2 */
   1424	ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
   1425			IPU_DISP_GEN);
   1426
   1427	ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
   1428	if (ret)
   1429		goto failed_submodules_init;
   1430
   1431	ret = ipu_add_client_devices(ipu, ipu_base);
   1432	if (ret) {
   1433		dev_err(&pdev->dev, "adding client devices failed with %d\n",
   1434				ret);
   1435		goto failed_add_clients;
   1436	}
   1437
   1438	dev_info(&pdev->dev, "%s probed\n", devtype->name);
   1439
   1440	return 0;
   1441
   1442failed_add_clients:
   1443	ipu_submodules_exit(ipu);
   1444failed_submodules_init:
   1445	ipu_irq_exit(ipu);
   1446out_failed_irq:
   1447out_failed_reset:
   1448	clk_disable_unprepare(ipu->clk);
   1449	return ret;
   1450}
   1451
   1452static int ipu_remove(struct platform_device *pdev)
   1453{
   1454	struct ipu_soc *ipu = platform_get_drvdata(pdev);
   1455
   1456	platform_device_unregister_children(pdev);
   1457	ipu_submodules_exit(ipu);
   1458	ipu_irq_exit(ipu);
   1459
   1460	clk_disable_unprepare(ipu->clk);
   1461
   1462	return 0;
   1463}
   1464
   1465static struct platform_driver imx_ipu_driver = {
   1466	.driver = {
   1467		.name = "imx-ipuv3",
   1468		.of_match_table = imx_ipu_dt_ids,
   1469	},
   1470	.probe = ipu_probe,
   1471	.remove = ipu_remove,
   1472};
   1473
   1474static struct platform_driver * const drivers[] = {
   1475#if IS_ENABLED(CONFIG_DRM)
   1476	&ipu_pre_drv,
   1477	&ipu_prg_drv,
   1478#endif
   1479	&imx_ipu_driver,
   1480};
   1481
   1482static int __init imx_ipu_init(void)
   1483{
   1484	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
   1485}
   1486module_init(imx_ipu_init);
   1487
   1488static void __exit imx_ipu_exit(void)
   1489{
   1490	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
   1491}
   1492module_exit(imx_ipu_exit);
   1493
   1494MODULE_ALIAS("platform:imx-ipuv3");
   1495MODULE_DESCRIPTION("i.MX IPU v3 driver");
   1496MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
   1497MODULE_LICENSE("GPL");
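
For reference, below is a minimal usage sketch of the IDMAC channel API exported by this file (ipu_idmac_get, ipu_idmac_set_double_buffer, ipu_idmac_enable_channel, ipu_idmac_select_buffer, ipu_idmac_wait_busy, ipu_idmac_disable_channel, ipu_idmac_put). It is not part of ipu-common.c: the helper name example_start_channel is hypothetical, it assumes the caller already holds a valid struct ipu_soc pointer from the ipu-v3 client platform data, and channel parameter (CPMEM) setup is left as a placeholder comment.

#include <linux/err.h>
#include <linux/errno.h>
#include <video/imx-ipu-v3.h>

/*
 * Hypothetical helper: acquire the MEM->DP background sync channel,
 * run it double-buffered, then tear it down again.
 */
static int example_start_channel(struct ipu_soc *ipu)
{
	struct ipuv3_channel *ch;
	int ret;

	/* Claim the channel; ipu_idmac_get() returns -EBUSY if it is taken. */
	ch = ipu_idmac_get(ipu, IPUV3_CHANNEL_MEM_BG_SYNC);
	if (IS_ERR(ch))
		return PTR_ERR(ch);

	/* CPMEM (buffer addresses, format, stride) would be set up here. */

	ipu_idmac_set_double_buffer(ch, true);

	ret = ipu_idmac_enable_channel(ch);
	if (ret)
		goto out_put;

	/* Mark buffer 0 ready so the DMA starts fetching from it. */
	ipu_idmac_select_buffer(ch, 0);

	/* ... later, tear down: wait up to 50 ms for the channel to idle. */
	ret = ipu_idmac_wait_busy(ch, 50);
	ipu_idmac_disable_channel(ch);

out_put:
	ipu_idmac_put(ch);
	return ret;
}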