cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

imx-sdma.c (63259B)


      1// SPDX-License-Identifier: GPL-2.0+
      2//
      3// drivers/dma/imx-sdma.c
      4//
      5// This file contains a driver for the Freescale Smart DMA engine
      6//
      7// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
      8//
      9// Based on code from Freescale:
     10//
     11// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
     12
     13#include <linux/init.h>
     14#include <linux/iopoll.h>
     15#include <linux/module.h>
     16#include <linux/types.h>
     17#include <linux/bitfield.h>
     18#include <linux/bitops.h>
     19#include <linux/mm.h>
     20#include <linux/interrupt.h>
     21#include <linux/clk.h>
     22#include <linux/delay.h>
     23#include <linux/sched.h>
     24#include <linux/semaphore.h>
     25#include <linux/spinlock.h>
     26#include <linux/device.h>
     27#include <linux/dma-mapping.h>
     28#include <linux/firmware.h>
     29#include <linux/slab.h>
     30#include <linux/platform_device.h>
     31#include <linux/dmaengine.h>
     32#include <linux/of.h>
     33#include <linux/of_address.h>
     34#include <linux/of_device.h>
     35#include <linux/of_dma.h>
     36#include <linux/workqueue.h>
     37
     38#include <asm/irq.h>
     39#include <linux/dma/imx-dma.h>
     40#include <linux/regmap.h>
     41#include <linux/mfd/syscon.h>
     42#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
     43
     44#include "dmaengine.h"
     45#include "virt-dma.h"
     46
     47/* SDMA registers */
     48#define SDMA_H_C0PTR		0x000
     49#define SDMA_H_INTR		0x004
     50#define SDMA_H_STATSTOP		0x008
     51#define SDMA_H_START		0x00c
     52#define SDMA_H_EVTOVR		0x010
     53#define SDMA_H_DSPOVR		0x014
     54#define SDMA_H_HOSTOVR		0x018
     55#define SDMA_H_EVTPEND		0x01c
     56#define SDMA_H_DSPENBL		0x020
     57#define SDMA_H_RESET		0x024
     58#define SDMA_H_EVTERR		0x028
     59#define SDMA_H_INTRMSK		0x02c
     60#define SDMA_H_PSW		0x030
     61#define SDMA_H_EVTERRDBG	0x034
     62#define SDMA_H_CONFIG		0x038
     63#define SDMA_ONCE_ENB		0x040
     64#define SDMA_ONCE_DATA		0x044
     65#define SDMA_ONCE_INSTR		0x048
     66#define SDMA_ONCE_STAT		0x04c
     67#define SDMA_ONCE_CMD		0x050
     68#define SDMA_EVT_MIRROR		0x054
     69#define SDMA_ILLINSTADDR	0x058
     70#define SDMA_CHN0ADDR		0x05c
     71#define SDMA_ONCE_RTB		0x060
     72#define SDMA_XTRIG_CONF1	0x070
     73#define SDMA_XTRIG_CONF2	0x074
     74#define SDMA_CHNENBL0_IMX35	0x200
     75#define SDMA_CHNENBL0_IMX31	0x080
     76#define SDMA_CHNPRI_0		0x100
     77#define SDMA_DONE0_CONFIG	0x1000
     78
     79/*
     80 * Buffer descriptor status values.
     81 */
     82#define BD_DONE  0x01
     83#define BD_WRAP  0x02
     84#define BD_CONT  0x04
     85#define BD_INTR  0x08
     86#define BD_RROR  0x10
     87#define BD_LAST  0x20
     88#define BD_EXTD  0x80
     89
     90/*
     91 * Data Node descriptor status values.
     92 */
     93#define DND_END_OF_FRAME  0x80
     94#define DND_END_OF_XFER   0x40
     95#define DND_DONE          0x20
     96#define DND_UNUSED        0x01
     97
     98/*
     99 * IPCV2 descriptor status values.
    100 */
    101#define BD_IPCV2_END_OF_FRAME  0x40
    102
    103#define IPCV2_MAX_NODES        50
    104/*
    105 * Error bit set in the CCB status field by the SDMA,
    106 * in setbd routine, in case of a transfer error
    107 */
    108#define DATA_ERROR  0x10000000
    109
    110/*
    111 * Buffer descriptor commands.
    112 */
    113#define C0_ADDR             0x01
    114#define C0_LOAD             0x02
    115#define C0_DUMP             0x03
    116#define C0_SETCTX           0x07
    117#define C0_GETCTX           0x03
    118#define C0_SETDM            0x01
    119#define C0_SETPM            0x04
    120#define C0_GETDM            0x02
    121#define C0_GETPM            0x08
    122/*
    123 * Change endianness indicator in the BD command field
    124 */
    125#define CHANGE_ENDIANNESS   0x80
    126
    127/*
    128 *  p_2_p watermark_level description
    129 *	Bits		Name			Description
    130 *	0-7		Lower WML		Lower watermark level
    131 *	8		PS			1: Pad Swallowing
    132 *						0: No Pad Swallowing
    133 *	9		PA			1: Pad Adding
    134 *						0: No Pad Adding
    135 *	10		SPDIF			If this bit is set both source
    136 *						and destination are on SPBA
    137 *	11		Source Bit(SP)		1: Source on SPBA
    138 *						0: Source on AIPS
    139 *	12		Destination Bit(DP)	1: Destination on SPBA
    140 *						0: Destination on AIPS
    141 *	13-15		---------		MUST BE 0
    142 *	16-23		Higher WML		HWML
    143 *	24-27		N			Total number of samples after
    144 *						which Pad adding/Swallowing
    145 *						must be done. It must be odd.
    146 *	28		Lower WML Event(LWE)	SDMA events reg to check for
    147 *						LWML event mask
    148 *						0: LWE in EVENTS register
    149 *						1: LWE in EVENTS2 register
    150 *	29		Higher WML Event(HWE)	SDMA events reg to check for
    151 *						HWML event mask
    152 *						0: HWE in EVENTS register
    153 *						1: HWE in EVENTS2 register
    154 *	30		---------		MUST BE 0
    155 *	31		CONT			1: Amount of samples to be
    156 *						transferred is unknown and
    157 *						script will keep on
    158 *						transferring samples as long as
    159 *						both events are detected and
    160 *						script must be manually stopped
    161 *						by the application
    162 *						0: The amount of samples to be
    163 *						transferred is equal to the
    164 *						count field of mode word
    165 */
    166#define SDMA_WATERMARK_LEVEL_LWML	0xFF
    167#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
    168#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
    169#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
    170#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
    171#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
    172#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
    173#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
    174#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
    175#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
    176
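/*
 * Illustrative sketch only (not part of the driver): composing a p_2_p
 * watermark word from the bit layout documented above. The helper name and
 * its parameters are made up; the real driver assembles this value in
 * sdma_set_watermarklevel_for_p2p() further below.
 */
static inline u32 sdma_p2p_watermark_sketch(u32 lwml, u32 hwml)
{
	u32 wml = 0;

	wml |= FIELD_PREP(SDMA_WATERMARK_LEVEL_LWML, lwml);	/* bits 0-7 */
	wml |= FIELD_PREP(SDMA_WATERMARK_LEVEL_HWML, hwml);	/* bits 16-23 */
	wml |= SDMA_WATERMARK_LEVEL_CONT;	/* transfer until manually stopped */

	return wml;
}
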
    177#define SDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
    178				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
    179				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
    180
    181#define SDMA_DMA_DIRECTIONS	(BIT(DMA_DEV_TO_MEM) | \
    182				 BIT(DMA_MEM_TO_DEV) | \
    183				 BIT(DMA_DEV_TO_DEV))
    184
    185#define SDMA_WATERMARK_LEVEL_N_FIFOS	GENMASK(15, 12)
    186#define SDMA_WATERMARK_LEVEL_SW_DONE	BIT(23)
    187
    188#define SDMA_DONE0_CONFIG_DONE_SEL	BIT(7)
    189#define SDMA_DONE0_CONFIG_DONE_DIS	BIT(6)
    190
    191/**
    192 * struct sdma_script_start_addrs - SDMA script start pointers
    193 *
    194 * start addresses of the different functions in the physical
    195 * address space of the SDMA engine.
    196 */
    197struct sdma_script_start_addrs {
    198	s32 ap_2_ap_addr;
    199	s32 ap_2_bp_addr;
    200	s32 ap_2_ap_fixed_addr;
    201	s32 bp_2_ap_addr;
    202	s32 loopback_on_dsp_side_addr;
    203	s32 mcu_interrupt_only_addr;
    204	s32 firi_2_per_addr;
    205	s32 firi_2_mcu_addr;
    206	s32 per_2_firi_addr;
    207	s32 mcu_2_firi_addr;
    208	s32 uart_2_per_addr;
    209	s32 uart_2_mcu_addr;
    210	s32 per_2_app_addr;
    211	s32 mcu_2_app_addr;
    212	s32 per_2_per_addr;
    213	s32 uartsh_2_per_addr;
    214	s32 uartsh_2_mcu_addr;
    215	s32 per_2_shp_addr;
    216	s32 mcu_2_shp_addr;
    217	s32 ata_2_mcu_addr;
    218	s32 mcu_2_ata_addr;
    219	s32 app_2_per_addr;
    220	s32 app_2_mcu_addr;
    221	s32 shp_2_per_addr;
    222	s32 shp_2_mcu_addr;
    223	s32 mshc_2_mcu_addr;
    224	s32 mcu_2_mshc_addr;
    225	s32 spdif_2_mcu_addr;
    226	s32 mcu_2_spdif_addr;
    227	s32 asrc_2_mcu_addr;
    228	s32 ext_mem_2_ipu_addr;
    229	s32 descrambler_addr;
    230	s32 dptc_dvfs_addr;
    231	s32 utra_addr;
    232	s32 ram_code_start_addr;
    233	/* End of v1 array */
    234	s32 mcu_2_ssish_addr;
    235	s32 ssish_2_mcu_addr;
    236	s32 hdmi_dma_addr;
    237	/* End of v2 array */
    238	s32 zcanfd_2_mcu_addr;
    239	s32 zqspi_2_mcu_addr;
    240	s32 mcu_2_ecspi_addr;
    241	s32 mcu_2_sai_addr;
    242	s32 sai_2_mcu_addr;
    243	s32 uart_2_mcu_rom_addr;
    244	s32 uartsh_2_mcu_rom_addr;
    245	/* End of v3 array */
    246	s32 mcu_2_zqspi_addr;
    247	/* End of v4 array */
    248};
    249
    250/*
    251 * Mode/Count of data node descriptors - IPCv2
    252 */
    253struct sdma_mode_count {
    254#define SDMA_BD_MAX_CNT	0xffff
    255	u32 count   : 16; /* size of the buffer pointed by this BD */
    256	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
    257	u32 command :  8; /* command mostly used for channel 0 */
    258};
    259
    260/*
    261 * Buffer descriptor
    262 */
    263struct sdma_buffer_descriptor {
    264	struct sdma_mode_count  mode;
    265	u32 buffer_addr;	/* address of the buffer described */
    266	u32 ext_buffer_addr;	/* extended buffer address */
    267} __attribute__ ((packed));
    268
    269/**
    270 * struct sdma_channel_control - Channel control Block
    271 *
    272 * @current_bd_ptr:	current buffer descriptor processed
    273 * @base_bd_ptr:	first element of buffer descriptor array
    274 * @unused:		padding. The SDMA engine expects an array of 128 byte
    275 *			control blocks
    276 */
    277struct sdma_channel_control {
    278	u32 current_bd_ptr;
    279	u32 base_bd_ptr;
    280	u32 unused[2];
    281} __attribute__ ((packed));
    282
    283/**
    284 * struct sdma_state_registers - SDMA context for a channel
    285 *
    286 * @pc:		program counter
    287 * @unused1:	unused
    288 * @t:		test bit: status of arithmetic & test instruction
    289 * @rpc:	return program counter
    290 * @unused0:	unused
    291 * @sf:		source fault while loading data
    292 * @spc:	loop start program counter
    293 * @unused2:	unused
    294 * @df:		destination fault while storing data
    295 * @epc:	loop end program counter
    296 * @lm:		loop mode
    297 */
    298struct sdma_state_registers {
    299	u32 pc     :14;
    300	u32 unused1: 1;
    301	u32 t      : 1;
    302	u32 rpc    :14;
    303	u32 unused0: 1;
    304	u32 sf     : 1;
    305	u32 spc    :14;
    306	u32 unused2: 1;
    307	u32 df     : 1;
    308	u32 epc    :14;
    309	u32 lm     : 2;
    310} __attribute__ ((packed));
    311
    312/**
    313 * struct sdma_context_data - sdma context specific to a channel
    314 *
    315 * @channel_state:	channel state bits
    316 * @gReg:		general registers
    317 * @mda:		burst dma destination address register
    318 * @msa:		burst dma source address register
    319 * @ms:			burst dma status register
    320 * @md:			burst dma data register
    321 * @pda:		peripheral dma destination address register
    322 * @psa:		peripheral dma source address register
    323 * @ps:			peripheral dma status register
    324 * @pd:			peripheral dma data register
    325 * @ca:			CRC polynomial register
    326 * @cs:			CRC accumulator register
    327 * @dda:		dedicated core destination address register
    328 * @dsa:		dedicated core source address register
    329 * @ds:			dedicated core status register
    330 * @dd:			dedicated core data register
    331 * @scratch0:		1st word of dedicated ram for context switch
    332 * @scratch1:		2nd word of dedicated ram for context switch
    333 * @scratch2:		3rd word of dedicated ram for context switch
    334 * @scratch3:		4th word of dedicated ram for context switch
    335 * @scratch4:		5th word of dedicated ram for context switch
    336 * @scratch5:		6th word of dedicated ram for context switch
    337 * @scratch6:		7th word of dedicated ram for context switch
    338 * @scratch7:		8th word of dedicated ram for context switch
    339 */
    340struct sdma_context_data {
    341	struct sdma_state_registers  channel_state;
    342	u32  gReg[8];
    343	u32  mda;
    344	u32  msa;
    345	u32  ms;
    346	u32  md;
    347	u32  pda;
    348	u32  psa;
    349	u32  ps;
    350	u32  pd;
    351	u32  ca;
    352	u32  cs;
    353	u32  dda;
    354	u32  dsa;
    355	u32  ds;
    356	u32  dd;
    357	u32  scratch0;
    358	u32  scratch1;
    359	u32  scratch2;
    360	u32  scratch3;
    361	u32  scratch4;
    362	u32  scratch5;
    363	u32  scratch6;
    364	u32  scratch7;
    365} __attribute__ ((packed));
    366
    367
    368struct sdma_engine;
    369
    370/**
     371 * struct sdma_desc - descriptor structure for one transfer
    372 * @vd:			descriptor for virt dma
     373 * @num_bd:		number of buffer descriptors currently being handled
    374 * @bd_phys:		physical address of bd
    375 * @buf_tail:		ID of the buffer that was processed
    376 * @buf_ptail:		ID of the previous buffer that was processed
    377 * @period_len:		period length, used in cyclic.
    378 * @chn_real_count:	the real count updated from bd->mode.count
    379 * @chn_count:		the transfer count set
    380 * @sdmac:		sdma_channel pointer
     381 * @bd:			pointer to the allocated buffer descriptors
    382 */
    383struct sdma_desc {
    384	struct virt_dma_desc	vd;
    385	unsigned int		num_bd;
    386	dma_addr_t		bd_phys;
    387	unsigned int		buf_tail;
    388	unsigned int		buf_ptail;
    389	unsigned int		period_len;
    390	unsigned int		chn_real_count;
    391	unsigned int		chn_count;
    392	struct sdma_channel	*sdmac;
    393	struct sdma_buffer_descriptor *bd;
    394};
    395
    396/**
    397 * struct sdma_channel - housekeeping for a SDMA channel
    398 *
    399 * @vc:			virt_dma base structure
     400 * @desc:		sdma description including vd and other special members
    401 * @sdma:		pointer to the SDMA engine for this channel
    402 * @channel:		the channel number, matches dmaengine chan_id + 1
    403 * @direction:		transfer type. Needed for setting SDMA script
    404 * @slave_config:	Slave configuration
    405 * @peripheral_type:	Peripheral type. Needed for setting SDMA script
    406 * @event_id0:		aka dma request line
    407 * @event_id1:		for channels that use 2 events
    408 * @word_size:		peripheral access size
     409 * @pc_from_device:	script address for device-to-memory transfers
     410 * @pc_to_device:	script address for memory-to-device transfers
     411 * @device_to_device:	script address for device-to-device transfers
     412 * @pc_to_pc:		script address for memory-to-memory transfers
    413 * @flags:		loop mode or not
    414 * @per_address:	peripheral source or destination address in common case
    415 *                      destination address in p_2_p case
    416 * @per_address2:	peripheral source address in p_2_p case
    417 * @event_mask:		event mask used in p_2_p script
    418 * @watermark_level:	value for gReg[7], some script will extend it from
    419 *			basic watermark such as p_2_p
    420 * @shp_addr:		value for gReg[6]
    421 * @per_addr:		value for gReg[2]
    422 * @status:		status of dma channel
     423 * @terminated:		list of descriptors freed by the terminate worker
    424 * @data:		specific sdma interface structure
     425 * @is_ram_script:	true if the channel script is in RAM (needs firmware)
    426 * @terminate_worker:	used to call back into terminate work function
    427 */
    428struct sdma_channel {
    429	struct virt_dma_chan		vc;
    430	struct sdma_desc		*desc;
    431	struct sdma_engine		*sdma;
    432	unsigned int			channel;
    433	enum dma_transfer_direction		direction;
    434	struct dma_slave_config		slave_config;
    435	enum sdma_peripheral_type	peripheral_type;
    436	unsigned int			event_id0;
    437	unsigned int			event_id1;
    438	enum dma_slave_buswidth		word_size;
    439	unsigned int			pc_from_device, pc_to_device;
    440	unsigned int			device_to_device;
    441	unsigned int                    pc_to_pc;
    442	unsigned long			flags;
    443	dma_addr_t			per_address, per_address2;
    444	unsigned long			event_mask[2];
    445	unsigned long			watermark_level;
    446	u32				shp_addr, per_addr;
    447	enum dma_status			status;
    448	struct imx_dma_data		data;
    449	struct work_struct		terminate_worker;
    450	struct list_head                terminated;
    451	bool				is_ram_script;
    452	unsigned int			n_fifos_src;
    453	unsigned int			n_fifos_dst;
    454	bool				sw_done;
    455};
    456
    457#define IMX_DMA_SG_LOOP		BIT(0)
    458
    459#define MAX_DMA_CHANNELS 32
    460#define MXC_SDMA_DEFAULT_PRIORITY 1
    461#define MXC_SDMA_MIN_PRIORITY 1
    462#define MXC_SDMA_MAX_PRIORITY 7
    463
    464#define SDMA_FIRMWARE_MAGIC 0x414d4453
    465
    466/**
    467 * struct sdma_firmware_header - Layout of the firmware image
    468 *
    469 * @magic:		"SDMA"
    470 * @version_major:	increased whenever layout of struct
    471 *			sdma_script_start_addrs changes.
    472 * @version_minor:	firmware minor version (for binary compatible changes)
    473 * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
    474 * @num_script_addrs:	Number of script addresses in this image
    475 * @ram_code_start:	offset of SDMA ram image in this firmware image
    476 * @ram_code_size:	size of SDMA ram image
    477 * @script_addrs:	Stores the start address of the SDMA scripts
    478 *			(in SDMA memory space)
    479 */
    480struct sdma_firmware_header {
    481	u32	magic;
    482	u32	version_major;
    483	u32	version_minor;
    484	u32	script_addrs_start;
    485	u32	num_script_addrs;
    486	u32	ram_code_start;
    487	u32	ram_code_size;
    488};
    489
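/*
 * Illustrative sketch only (names made up): how the layout described by
 * struct sdma_firmware_header can be used to locate the script address table
 * and the RAM image inside a firmware blob. The actual firmware loading is
 * done elsewhere in this driver.
 */
static inline int sdma_fw_header_sketch(const u8 *fw, size_t fw_size)
{
	const struct sdma_firmware_header *header = (const void *)fw;
	const struct sdma_script_start_addrs *addr;

	if (fw_size < sizeof(*header) || header->magic != SDMA_FIRMWARE_MAGIC)
		return -EINVAL;
	if (header->ram_code_start + header->ram_code_size > fw_size)
		return -EINVAL;

	/* table of script entry points; offsets are relative to the header */
	addr = (const void *)(fw + header->script_addrs_start);

	/* the RAM image at fw + ram_code_start is loaded to this SDMA address */
	return addr->ram_code_start_addr;
}
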
    490struct sdma_driver_data {
    491	int chnenbl0;
    492	int num_events;
    493	struct sdma_script_start_addrs	*script_addrs;
    494	bool check_ratio;
    495	/*
     496	 * The ECSPI erratum ERR009165 workaround must be done in the SDMA
     497	 * RAM script; the erratum is fixed in silicon from i.MX6UL onwards.
     498	 * See the link below for more information:
    499	 * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
    500	 */
    501	bool ecspi_fixed;
    502};
    503
    504struct sdma_engine {
    505	struct device			*dev;
    506	struct sdma_channel		channel[MAX_DMA_CHANNELS];
    507	struct sdma_channel_control	*channel_control;
    508	void __iomem			*regs;
    509	struct sdma_context_data	*context;
    510	dma_addr_t			context_phys;
    511	struct dma_device		dma_device;
    512	struct clk			*clk_ipg;
    513	struct clk			*clk_ahb;
    514	spinlock_t			channel_0_lock;
    515	u32				script_number;
    516	struct sdma_script_start_addrs	*script_addrs;
    517	const struct sdma_driver_data	*drvdata;
    518	u32				spba_start_addr;
    519	u32				spba_end_addr;
    520	unsigned int			irq;
    521	dma_addr_t			bd0_phys;
    522	struct sdma_buffer_descriptor	*bd0;
     523	/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0 */
    524	bool				clk_ratio;
    525	bool                            fw_loaded;
    526};
    527
    528static int sdma_config_write(struct dma_chan *chan,
    529		       struct dma_slave_config *dmaengine_cfg,
    530		       enum dma_transfer_direction direction);
    531
    532static struct sdma_driver_data sdma_imx31 = {
    533	.chnenbl0 = SDMA_CHNENBL0_IMX31,
    534	.num_events = 32,
    535};
    536
    537static struct sdma_script_start_addrs sdma_script_imx25 = {
    538	.ap_2_ap_addr = 729,
    539	.uart_2_mcu_addr = 904,
    540	.per_2_app_addr = 1255,
    541	.mcu_2_app_addr = 834,
    542	.uartsh_2_mcu_addr = 1120,
    543	.per_2_shp_addr = 1329,
    544	.mcu_2_shp_addr = 1048,
    545	.ata_2_mcu_addr = 1560,
    546	.mcu_2_ata_addr = 1479,
    547	.app_2_per_addr = 1189,
    548	.app_2_mcu_addr = 770,
    549	.shp_2_per_addr = 1407,
    550	.shp_2_mcu_addr = 979,
    551};
    552
    553static struct sdma_driver_data sdma_imx25 = {
    554	.chnenbl0 = SDMA_CHNENBL0_IMX35,
    555	.num_events = 48,
    556	.script_addrs = &sdma_script_imx25,
    557};
    558
    559static struct sdma_driver_data sdma_imx35 = {
    560	.chnenbl0 = SDMA_CHNENBL0_IMX35,
    561	.num_events = 48,
    562};
    563
    564static struct sdma_script_start_addrs sdma_script_imx51 = {
    565	.ap_2_ap_addr = 642,
    566	.uart_2_mcu_addr = 817,
    567	.mcu_2_app_addr = 747,
    568	.mcu_2_shp_addr = 961,
    569	.ata_2_mcu_addr = 1473,
    570	.mcu_2_ata_addr = 1392,
    571	.app_2_per_addr = 1033,
    572	.app_2_mcu_addr = 683,
    573	.shp_2_per_addr = 1251,
    574	.shp_2_mcu_addr = 892,
    575};
    576
    577static struct sdma_driver_data sdma_imx51 = {
    578	.chnenbl0 = SDMA_CHNENBL0_IMX35,
    579	.num_events = 48,
    580	.script_addrs = &sdma_script_imx51,
    581};
    582
    583static struct sdma_script_start_addrs sdma_script_imx53 = {
    584	.ap_2_ap_addr = 642,
    585	.app_2_mcu_addr = 683,
    586	.mcu_2_app_addr = 747,
    587	.uart_2_mcu_addr = 817,
    588	.shp_2_mcu_addr = 891,
    589	.mcu_2_shp_addr = 960,
    590	.uartsh_2_mcu_addr = 1032,
    591	.spdif_2_mcu_addr = 1100,
    592	.mcu_2_spdif_addr = 1134,
    593	.firi_2_mcu_addr = 1193,
    594	.mcu_2_firi_addr = 1290,
    595};
    596
    597static struct sdma_driver_data sdma_imx53 = {
    598	.chnenbl0 = SDMA_CHNENBL0_IMX35,
    599	.num_events = 48,
    600	.script_addrs = &sdma_script_imx53,
    601};
    602
    603static struct sdma_script_start_addrs sdma_script_imx6q = {
    604	.ap_2_ap_addr = 642,
    605	.uart_2_mcu_addr = 817,
    606	.mcu_2_app_addr = 747,
    607	.per_2_per_addr = 6331,
    608	.uartsh_2_mcu_addr = 1032,
    609	.mcu_2_shp_addr = 960,
    610	.app_2_mcu_addr = 683,
    611	.shp_2_mcu_addr = 891,
    612	.spdif_2_mcu_addr = 1100,
    613	.mcu_2_spdif_addr = 1134,
    614};
    615
    616static struct sdma_driver_data sdma_imx6q = {
    617	.chnenbl0 = SDMA_CHNENBL0_IMX35,
    618	.num_events = 48,
    619	.script_addrs = &sdma_script_imx6q,
    620};
    621
    622static struct sdma_driver_data sdma_imx6ul = {
    623	.chnenbl0 = SDMA_CHNENBL0_IMX35,
    624	.num_events = 48,
    625	.script_addrs = &sdma_script_imx6q,
    626	.ecspi_fixed = true,
    627};
    628
    629static struct sdma_script_start_addrs sdma_script_imx7d = {
    630	.ap_2_ap_addr = 644,
    631	.uart_2_mcu_addr = 819,
    632	.mcu_2_app_addr = 749,
    633	.uartsh_2_mcu_addr = 1034,
    634	.mcu_2_shp_addr = 962,
    635	.app_2_mcu_addr = 685,
    636	.shp_2_mcu_addr = 893,
    637	.spdif_2_mcu_addr = 1102,
    638	.mcu_2_spdif_addr = 1136,
    639};
    640
    641static struct sdma_driver_data sdma_imx7d = {
    642	.chnenbl0 = SDMA_CHNENBL0_IMX35,
    643	.num_events = 48,
    644	.script_addrs = &sdma_script_imx7d,
    645};
    646
    647static struct sdma_driver_data sdma_imx8mq = {
    648	.chnenbl0 = SDMA_CHNENBL0_IMX35,
    649	.num_events = 48,
    650	.script_addrs = &sdma_script_imx7d,
    651	.check_ratio = 1,
    652};
    653
    654static const struct of_device_id sdma_dt_ids[] = {
    655	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
    656	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
    657	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
    658	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
    659	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
    660	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
    661	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
    662	{ .compatible = "fsl,imx6ul-sdma", .data = &sdma_imx6ul, },
    663	{ .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
    664	{ /* sentinel */ }
    665};
    666MODULE_DEVICE_TABLE(of, sdma_dt_ids);
    667
    668#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
    669#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
    670#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
     671#define SDMA_H_CONFIG_CSM	(3)       /* indicates which context switch mode is selected */
    672
    673static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
    674{
    675	u32 chnenbl0 = sdma->drvdata->chnenbl0;
    676	return chnenbl0 + event * 4;
    677}
    678
    679static int sdma_config_ownership(struct sdma_channel *sdmac,
    680		bool event_override, bool mcu_override, bool dsp_override)
    681{
    682	struct sdma_engine *sdma = sdmac->sdma;
    683	int channel = sdmac->channel;
    684	unsigned long evt, mcu, dsp;
    685
    686	if (event_override && mcu_override && dsp_override)
    687		return -EINVAL;
    688
    689	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
    690	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
    691	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
    692
    693	if (dsp_override)
    694		__clear_bit(channel, &dsp);
    695	else
    696		__set_bit(channel, &dsp);
    697
    698	if (event_override)
    699		__clear_bit(channel, &evt);
    700	else
    701		__set_bit(channel, &evt);
    702
    703	if (mcu_override)
    704		__clear_bit(channel, &mcu);
    705	else
    706		__set_bit(channel, &mcu);
    707
    708	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
    709	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
    710	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
    711
    712	return 0;
    713}
    714
    715static int is_sdma_channel_enabled(struct sdma_engine *sdma, int channel)
    716{
    717	return !!(readl(sdma->regs + SDMA_H_STATSTOP) & BIT(channel));
    718}
    719
    720static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
    721{
    722	writel(BIT(channel), sdma->regs + SDMA_H_START);
    723}
    724
    725/*
    726 * sdma_run_channel0 - run a channel and wait till it's done
    727 */
    728static int sdma_run_channel0(struct sdma_engine *sdma)
    729{
    730	int ret;
    731	u32 reg;
    732
    733	sdma_enable_channel(sdma, 0);
    734
    735	ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
    736						reg, !(reg & 1), 1, 500);
    737	if (ret)
    738		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
    739
    740	/* Set bits of CONFIG register with dynamic context switching */
    741	reg = readl(sdma->regs + SDMA_H_CONFIG);
    742	if ((reg & SDMA_H_CONFIG_CSM) == 0) {
    743		reg |= SDMA_H_CONFIG_CSM;
    744		writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
    745	}
    746
    747	return ret;
    748}
    749
    750static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
    751		u32 address)
    752{
    753	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
    754	void *buf_virt;
    755	dma_addr_t buf_phys;
    756	int ret;
    757	unsigned long flags;
    758
    759	buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
    760	if (!buf_virt)
    761		return -ENOMEM;
    762
    763	spin_lock_irqsave(&sdma->channel_0_lock, flags);
    764
    765	bd0->mode.command = C0_SETPM;
    766	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
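	/* SDMA program memory is 16 bits wide, so the count is in halfwords */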
    767	bd0->mode.count = size / 2;
    768	bd0->buffer_addr = buf_phys;
    769	bd0->ext_buffer_addr = address;
    770
    771	memcpy(buf_virt, buf, size);
    772
    773	ret = sdma_run_channel0(sdma);
    774
    775	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
    776
    777	dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);
    778
    779	return ret;
    780}
    781
    782static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
    783{
    784	struct sdma_engine *sdma = sdmac->sdma;
    785	int channel = sdmac->channel;
    786	unsigned long val;
    787	u32 chnenbl = chnenbl_ofs(sdma, event);
    788
    789	val = readl_relaxed(sdma->regs + chnenbl);
    790	__set_bit(channel, &val);
    791	writel_relaxed(val, sdma->regs + chnenbl);
    792
     793	/* Set SDMA_DONEx_CONFIG if sw_done is enabled */
    794	if (sdmac->sw_done) {
    795		val = readl_relaxed(sdma->regs + SDMA_DONE0_CONFIG);
    796		val |= SDMA_DONE0_CONFIG_DONE_SEL;
    797		val &= ~SDMA_DONE0_CONFIG_DONE_DIS;
    798		writel_relaxed(val, sdma->regs + SDMA_DONE0_CONFIG);
    799	}
    800}
    801
    802static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
    803{
    804	struct sdma_engine *sdma = sdmac->sdma;
    805	int channel = sdmac->channel;
    806	u32 chnenbl = chnenbl_ofs(sdma, event);
    807	unsigned long val;
    808
    809	val = readl_relaxed(sdma->regs + chnenbl);
    810	__clear_bit(channel, &val);
    811	writel_relaxed(val, sdma->regs + chnenbl);
    812}
    813
    814static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
    815{
    816	return container_of(t, struct sdma_desc, vd.tx);
    817}
    818
    819static void sdma_start_desc(struct sdma_channel *sdmac)
    820{
    821	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
    822	struct sdma_desc *desc;
    823	struct sdma_engine *sdma = sdmac->sdma;
    824	int channel = sdmac->channel;
    825
    826	if (!vd) {
    827		sdmac->desc = NULL;
    828		return;
    829	}
    830	sdmac->desc = desc = to_sdma_desc(&vd->tx);
    831
    832	list_del(&vd->node);
    833
    834	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
    835	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
    836	sdma_enable_channel(sdma, sdmac->channel);
    837}
    838
    839static void sdma_update_channel_loop(struct sdma_channel *sdmac)
    840{
    841	struct sdma_buffer_descriptor *bd;
    842	int error = 0;
    843	enum dma_status	old_status = sdmac->status;
    844
    845	/*
    846	 * loop mode. Iterate over descriptors, re-setup them and
    847	 * call callback function.
    848	 */
    849	while (sdmac->desc) {
    850		struct sdma_desc *desc = sdmac->desc;
    851
    852		bd = &desc->bd[desc->buf_tail];
    853
    854		if (bd->mode.status & BD_DONE)
    855			break;
    856
    857		if (bd->mode.status & BD_RROR) {
    858			bd->mode.status &= ~BD_RROR;
    859			sdmac->status = DMA_ERROR;
    860			error = -EIO;
    861		}
    862
    863	       /*
     864		* We use bd->mode.count to calculate the residue, since it contains
    865		* the number of bytes present in the current buffer descriptor.
    866		*/
    867
    868		desc->chn_real_count = bd->mode.count;
    869		bd->mode.count = desc->period_len;
    870		desc->buf_ptail = desc->buf_tail;
    871		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
    872
    873		/*
    874		 * The callback is called from the interrupt context in order
    875		 * to reduce latency and to avoid the risk of altering the
    876		 * SDMA transaction status by the time the client tasklet is
    877		 * executed.
    878		 */
    879		spin_unlock(&sdmac->vc.lock);
    880		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
    881		spin_lock(&sdmac->vc.lock);
    882
    883		/* Assign buffer ownership to SDMA */
    884		bd->mode.status |= BD_DONE;
    885
    886		if (error)
    887			sdmac->status = old_status;
    888	}
    889
    890	/*
    891	 * SDMA stops cyclic channel when DMA request triggers a channel and no SDMA
    892	 * owned buffer is available (i.e. BD_DONE was set too late).
    893	 */
    894	if (sdmac->desc && !is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
    895		dev_warn(sdmac->sdma->dev, "restart cyclic channel %d\n", sdmac->channel);
    896		sdma_enable_channel(sdmac->sdma, sdmac->channel);
    897	}
    898}
    899
    900static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
    901{
    902	struct sdma_channel *sdmac = (struct sdma_channel *) data;
    903	struct sdma_buffer_descriptor *bd;
    904	int i, error = 0;
    905
    906	sdmac->desc->chn_real_count = 0;
    907	/*
    908	 * non loop mode. Iterate over all descriptors, collect
    909	 * errors and call callback function
    910	 */
    911	for (i = 0; i < sdmac->desc->num_bd; i++) {
    912		bd = &sdmac->desc->bd[i];
    913
    914		if (bd->mode.status & (BD_DONE | BD_RROR))
    915			error = -EIO;
    916		sdmac->desc->chn_real_count += bd->mode.count;
    917	}
    918
    919	if (error)
    920		sdmac->status = DMA_ERROR;
    921	else
    922		sdmac->status = DMA_COMPLETE;
    923}
    924
    925static irqreturn_t sdma_int_handler(int irq, void *dev_id)
    926{
    927	struct sdma_engine *sdma = dev_id;
    928	unsigned long stat;
    929
    930	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
    931	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
     932	/* channel 0 is special and not handled here, see sdma_run_channel0() */
    933	stat &= ~1;
    934
    935	while (stat) {
    936		int channel = fls(stat) - 1;
    937		struct sdma_channel *sdmac = &sdma->channel[channel];
    938		struct sdma_desc *desc;
    939
    940		spin_lock(&sdmac->vc.lock);
    941		desc = sdmac->desc;
    942		if (desc) {
    943			if (sdmac->flags & IMX_DMA_SG_LOOP) {
    944				sdma_update_channel_loop(sdmac);
    945			} else {
    946				mxc_sdma_handle_channel_normal(sdmac);
    947				vchan_cookie_complete(&desc->vd);
    948				sdma_start_desc(sdmac);
    949			}
    950		}
    951
    952		spin_unlock(&sdmac->vc.lock);
    953		__clear_bit(channel, &stat);
    954	}
    955
    956	return IRQ_HANDLED;
    957}
    958
    959/*
    960 * sets the pc of SDMA script according to the peripheral type
    961 */
    962static int sdma_get_pc(struct sdma_channel *sdmac,
    963		enum sdma_peripheral_type peripheral_type)
    964{
    965	struct sdma_engine *sdma = sdmac->sdma;
    966	int per_2_emi = 0, emi_2_per = 0;
    967	/*
    968	 * These are needed once we start to support transfers between
    969	 * two peripherals or memory-to-memory transfers
    970	 */
    971	int per_2_per = 0, emi_2_emi = 0;
    972
    973	sdmac->pc_from_device = 0;
    974	sdmac->pc_to_device = 0;
    975	sdmac->device_to_device = 0;
    976	sdmac->pc_to_pc = 0;
    977	sdmac->is_ram_script = false;
    978
    979	switch (peripheral_type) {
    980	case IMX_DMATYPE_MEMORY:
    981		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
    982		break;
    983	case IMX_DMATYPE_DSP:
    984		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
    985		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
    986		break;
    987	case IMX_DMATYPE_FIRI:
    988		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
    989		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
    990		break;
    991	case IMX_DMATYPE_UART:
    992		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
    993		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
    994		break;
    995	case IMX_DMATYPE_UART_SP:
    996		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
    997		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
    998		break;
    999	case IMX_DMATYPE_ATA:
   1000		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
   1001		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
   1002		break;
   1003	case IMX_DMATYPE_CSPI:
   1004		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
   1005
   1006		/* Use rom script mcu_2_app if ERR009165 fixed */
   1007		if (sdmac->sdma->drvdata->ecspi_fixed) {
   1008			emi_2_per = sdma->script_addrs->mcu_2_app_addr;
   1009		} else {
   1010			emi_2_per = sdma->script_addrs->mcu_2_ecspi_addr;
   1011			sdmac->is_ram_script = true;
   1012		}
   1013
   1014		break;
   1015	case IMX_DMATYPE_EXT:
   1016	case IMX_DMATYPE_SSI:
   1017	case IMX_DMATYPE_SAI:
   1018		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
   1019		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
   1020		break;
   1021	case IMX_DMATYPE_SSI_DUAL:
   1022		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
   1023		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
   1024		sdmac->is_ram_script = true;
   1025		break;
   1026	case IMX_DMATYPE_SSI_SP:
   1027	case IMX_DMATYPE_MMC:
   1028	case IMX_DMATYPE_SDHC:
   1029	case IMX_DMATYPE_CSPI_SP:
   1030	case IMX_DMATYPE_ESAI:
   1031	case IMX_DMATYPE_MSHC_SP:
   1032		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
   1033		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
   1034		break;
   1035	case IMX_DMATYPE_ASRC:
   1036		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
   1037		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
   1038		per_2_per = sdma->script_addrs->per_2_per_addr;
   1039		sdmac->is_ram_script = true;
   1040		break;
   1041	case IMX_DMATYPE_ASRC_SP:
   1042		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
   1043		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
   1044		per_2_per = sdma->script_addrs->per_2_per_addr;
   1045		break;
   1046	case IMX_DMATYPE_MSHC:
   1047		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
   1048		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
   1049		break;
   1050	case IMX_DMATYPE_CCM:
   1051		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
   1052		break;
   1053	case IMX_DMATYPE_SPDIF:
   1054		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
   1055		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
   1056		break;
   1057	case IMX_DMATYPE_IPU_MEMORY:
   1058		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
   1059		break;
   1060	case IMX_DMATYPE_MULTI_SAI:
   1061		per_2_emi = sdma->script_addrs->sai_2_mcu_addr;
   1062		emi_2_per = sdma->script_addrs->mcu_2_sai_addr;
   1063		break;
   1064	default:
   1065		dev_err(sdma->dev, "Unsupported transfer type %d\n",
   1066			peripheral_type);
   1067		return -EINVAL;
   1068	}
   1069
   1070	sdmac->pc_from_device = per_2_emi;
   1071	sdmac->pc_to_device = emi_2_per;
   1072	sdmac->device_to_device = per_2_per;
   1073	sdmac->pc_to_pc = emi_2_emi;
   1074
   1075	return 0;
   1076}
   1077
   1078static int sdma_load_context(struct sdma_channel *sdmac)
   1079{
   1080	struct sdma_engine *sdma = sdmac->sdma;
   1081	int channel = sdmac->channel;
   1082	int load_address;
   1083	struct sdma_context_data *context = sdma->context;
   1084	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
   1085	int ret;
   1086	unsigned long flags;
   1087
   1088	if (sdmac->direction == DMA_DEV_TO_MEM)
   1089		load_address = sdmac->pc_from_device;
   1090	else if (sdmac->direction == DMA_DEV_TO_DEV)
   1091		load_address = sdmac->device_to_device;
   1092	else if (sdmac->direction == DMA_MEM_TO_MEM)
   1093		load_address = sdmac->pc_to_pc;
   1094	else
   1095		load_address = sdmac->pc_to_device;
   1096
   1097	if (load_address < 0)
   1098		return load_address;
   1099
   1100	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
   1101	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
   1102	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
   1103	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
   1104	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
   1105	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
   1106
   1107	spin_lock_irqsave(&sdma->channel_0_lock, flags);
   1108
   1109	memset(context, 0, sizeof(*context));
   1110	context->channel_state.pc = load_address;
   1111
    1112	/* Send the event mask, peripheral base address and watermark level
    1113	 * to the script via the channel context
   1114	 */
   1115	context->gReg[0] = sdmac->event_mask[1];
   1116	context->gReg[1] = sdmac->event_mask[0];
   1117	context->gReg[2] = sdmac->per_addr;
   1118	context->gReg[6] = sdmac->shp_addr;
   1119	context->gReg[7] = sdmac->watermark_level;
   1120
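	/*
	 * Write the context into SDMA data memory with a C0_SETDM command:
	 * contexts are stored from word offset 2048 onwards, one
	 * sizeof(*context)/4 sized slot per channel.
	 */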
   1121	bd0->mode.command = C0_SETDM;
   1122	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
   1123	bd0->mode.count = sizeof(*context) / 4;
   1124	bd0->buffer_addr = sdma->context_phys;
   1125	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
   1126	ret = sdma_run_channel0(sdma);
   1127
   1128	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
   1129
   1130	return ret;
   1131}
   1132
   1133static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
   1134{
   1135	return container_of(chan, struct sdma_channel, vc.chan);
   1136}
   1137
   1138static int sdma_disable_channel(struct dma_chan *chan)
   1139{
   1140	struct sdma_channel *sdmac = to_sdma_chan(chan);
   1141	struct sdma_engine *sdma = sdmac->sdma;
   1142	int channel = sdmac->channel;
   1143
   1144	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
   1145	sdmac->status = DMA_ERROR;
   1146
   1147	return 0;
   1148}
   1149static void sdma_channel_terminate_work(struct work_struct *work)
   1150{
   1151	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
   1152						  terminate_worker);
   1153	/*
    1154	 * According to the NXP R&D team, a delay of one BD SDMA cost time
    1155	 * (at most 1 ms) should be added after disabling the channel bit,
    1156	 * to ensure the SDMA core has really stopped after SDMA clients
    1157	 * call .device_terminate_all.
   1158	 */
   1159	usleep_range(1000, 2000);
   1160
   1161	vchan_dma_desc_free_list(&sdmac->vc, &sdmac->terminated);
   1162}
   1163
   1164static int sdma_terminate_all(struct dma_chan *chan)
   1165{
   1166	struct sdma_channel *sdmac = to_sdma_chan(chan);
   1167	unsigned long flags;
   1168
   1169	spin_lock_irqsave(&sdmac->vc.lock, flags);
   1170
   1171	sdma_disable_channel(chan);
   1172
   1173	if (sdmac->desc) {
   1174		vchan_terminate_vdesc(&sdmac->desc->vd);
   1175		/*
    1176		 * Move the current descriptor into the terminated list so that
    1177		 * it can be freed later in sdma_channel_terminate_work() alone,
    1178		 * without the risk of touching a next descriptor queued up
    1179		 * before the last one has terminated.
   1180		 */
   1181		vchan_get_all_descriptors(&sdmac->vc, &sdmac->terminated);
   1182		sdmac->desc = NULL;
   1183		schedule_work(&sdmac->terminate_worker);
   1184	}
   1185
   1186	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
   1187
   1188	return 0;
   1189}
   1190
   1191static void sdma_channel_synchronize(struct dma_chan *chan)
   1192{
   1193	struct sdma_channel *sdmac = to_sdma_chan(chan);
   1194
   1195	vchan_synchronize(&sdmac->vc);
   1196
   1197	flush_work(&sdmac->terminate_worker);
   1198}
   1199
   1200static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
   1201{
   1202	struct sdma_engine *sdma = sdmac->sdma;
   1203
   1204	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
   1205	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
   1206
   1207	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
   1208	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
   1209
   1210	if (sdmac->event_id0 > 31)
   1211		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
   1212
   1213	if (sdmac->event_id1 > 31)
   1214		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
   1215
   1216	/*
   1217	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need
   1218	 * swap LWML and HWML of INFO(A.3.2.5.1), also need swap
   1219	 * r0(event_mask[1]) and r1(event_mask[0]).
   1220	 */
   1221	if (lwml > hwml) {
   1222		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
   1223						SDMA_WATERMARK_LEVEL_HWML);
   1224		sdmac->watermark_level |= hwml;
   1225		sdmac->watermark_level |= lwml << 16;
   1226		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
   1227	}
   1228
   1229	if (sdmac->per_address2 >= sdma->spba_start_addr &&
   1230			sdmac->per_address2 <= sdma->spba_end_addr)
   1231		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
   1232
   1233	if (sdmac->per_address >= sdma->spba_start_addr &&
   1234			sdmac->per_address <= sdma->spba_end_addr)
   1235		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
   1236
   1237	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
   1238}
   1239
   1240static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
   1241{
   1242	unsigned int n_fifos;
   1243
   1244	if (sdmac->sw_done)
   1245		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SW_DONE;
   1246
   1247	if (sdmac->direction == DMA_DEV_TO_MEM)
   1248		n_fifos = sdmac->n_fifos_src;
   1249	else
   1250		n_fifos = sdmac->n_fifos_dst;
   1251
   1252	sdmac->watermark_level |=
   1253			FIELD_PREP(SDMA_WATERMARK_LEVEL_N_FIFOS, n_fifos);
   1254}
   1255
   1256static int sdma_config_channel(struct dma_chan *chan)
   1257{
   1258	struct sdma_channel *sdmac = to_sdma_chan(chan);
   1259	int ret;
   1260
   1261	sdma_disable_channel(chan);
   1262
   1263	sdmac->event_mask[0] = 0;
   1264	sdmac->event_mask[1] = 0;
   1265	sdmac->shp_addr = 0;
   1266	sdmac->per_addr = 0;
   1267
   1268	switch (sdmac->peripheral_type) {
   1269	case IMX_DMATYPE_DSP:
   1270		sdma_config_ownership(sdmac, false, true, true);
   1271		break;
   1272	case IMX_DMATYPE_MEMORY:
   1273		sdma_config_ownership(sdmac, false, true, false);
   1274		break;
   1275	default:
   1276		sdma_config_ownership(sdmac, true, true, false);
   1277		break;
   1278	}
   1279
   1280	ret = sdma_get_pc(sdmac, sdmac->peripheral_type);
   1281	if (ret)
   1282		return ret;
   1283
   1284	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
   1285			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
   1286		/* Handle multiple event channels differently */
   1287		if (sdmac->event_id1) {
   1288			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
   1289			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
   1290				sdma_set_watermarklevel_for_p2p(sdmac);
   1291		} else {
   1292			if (sdmac->peripheral_type ==
   1293					IMX_DMATYPE_MULTI_SAI)
   1294				sdma_set_watermarklevel_for_sais(sdmac);
   1295
   1296			__set_bit(sdmac->event_id0, sdmac->event_mask);
   1297		}
   1298
   1299		/* Address */
   1300		sdmac->shp_addr = sdmac->per_address;
   1301		sdmac->per_addr = sdmac->per_address2;
   1302	} else {
   1303		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
   1304	}
   1305
   1306	return 0;
   1307}
   1308
   1309static int sdma_set_channel_priority(struct sdma_channel *sdmac,
   1310				     unsigned int priority)
   1311{
   1312	struct sdma_engine *sdma = sdmac->sdma;
   1313	int channel = sdmac->channel;
   1314
   1315	if (priority < MXC_SDMA_MIN_PRIORITY
   1316	    || priority > MXC_SDMA_MAX_PRIORITY) {
   1317		return -EINVAL;
   1318	}
   1319
   1320	writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
   1321
   1322	return 0;
   1323}
   1324
   1325static int sdma_request_channel0(struct sdma_engine *sdma)
   1326{
   1327	int ret = -EBUSY;
   1328
   1329	sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
   1330				       GFP_NOWAIT);
   1331	if (!sdma->bd0) {
   1332		ret = -ENOMEM;
   1333		goto out;
   1334	}
   1335
   1336	sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
   1337	sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
   1338
   1339	sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
   1340	return 0;
   1341out:
   1342
   1343	return ret;
   1344}
   1345
   1346
   1347static int sdma_alloc_bd(struct sdma_desc *desc)
   1348{
   1349	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
   1350	int ret = 0;
   1351
   1352	desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
   1353				      &desc->bd_phys, GFP_NOWAIT);
   1354	if (!desc->bd) {
   1355		ret = -ENOMEM;
   1356		goto out;
   1357	}
   1358out:
   1359	return ret;
   1360}
   1361
   1362static void sdma_free_bd(struct sdma_desc *desc)
   1363{
   1364	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
   1365
   1366	dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
   1367			  desc->bd_phys);
   1368}
   1369
   1370static void sdma_desc_free(struct virt_dma_desc *vd)
   1371{
   1372	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
   1373
   1374	sdma_free_bd(desc);
   1375	kfree(desc);
   1376}
   1377
   1378static int sdma_alloc_chan_resources(struct dma_chan *chan)
   1379{
   1380	struct sdma_channel *sdmac = to_sdma_chan(chan);
   1381	struct imx_dma_data *data = chan->private;
   1382	struct imx_dma_data mem_data;
   1383	int prio, ret;
   1384
   1385	/*
    1386	 * MEMCPY may never set up chan->private via a filter function (e.g.
    1387	 * dmatest), so create 'struct imx_dma_data mem_data' for this case.
    1388	 * Note that in any other slave case you must set chan->private to a
    1389	 * 'struct imx_dma_data' in your own filter function if you request
    1390	 * the dma channel with dma_request_channel() rather than
    1391	 * dma_request_slave_channel(); see the illustrative filter sketch
    1392	 * after this function. Otherwise 'MEMCPY in case?' warns you to fix it.
   1393	 */
   1394	if (!data) {
   1395		dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
   1396		mem_data.priority = 2;
   1397		mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
   1398		mem_data.dma_request = 0;
   1399		mem_data.dma_request2 = 0;
   1400		data = &mem_data;
   1401
   1402		ret = sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
   1403		if (ret)
   1404			return ret;
   1405	}
   1406
   1407	switch (data->priority) {
   1408	case DMA_PRIO_HIGH:
   1409		prio = 3;
   1410		break;
   1411	case DMA_PRIO_MEDIUM:
   1412		prio = 2;
   1413		break;
   1414	case DMA_PRIO_LOW:
   1415	default:
   1416		prio = 1;
   1417		break;
   1418	}
   1419
   1420	sdmac->peripheral_type = data->peripheral_type;
   1421	sdmac->event_id0 = data->dma_request;
   1422	sdmac->event_id1 = data->dma_request2;
   1423
   1424	ret = clk_enable(sdmac->sdma->clk_ipg);
   1425	if (ret)
   1426		return ret;
   1427	ret = clk_enable(sdmac->sdma->clk_ahb);
   1428	if (ret)
   1429		goto disable_clk_ipg;
   1430
   1431	ret = sdma_set_channel_priority(sdmac, prio);
   1432	if (ret)
   1433		goto disable_clk_ahb;
   1434
   1435	return 0;
   1436
   1437disable_clk_ahb:
   1438	clk_disable(sdmac->sdma->clk_ahb);
   1439disable_clk_ipg:
   1440	clk_disable(sdmac->sdma->clk_ipg);
   1441	return ret;
   1442}
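
/*
 * Illustrative sketch only (client side, not part of this driver): a filter
 * function for dma_request_channel() that hands a struct imx_dma_data to the
 * channel via chan->private, as described in the comment in
 * sdma_alloc_chan_resources() above. The function name is made up.
 */
static bool sdma_client_filter_sketch(struct dma_chan *chan, void *param)
{
	if (!imx_dma_is_general_purpose(chan))
		return false;

	chan->private = param;	/* points to a struct imx_dma_data */
	return true;
}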
   1443
   1444static void sdma_free_chan_resources(struct dma_chan *chan)
   1445{
   1446	struct sdma_channel *sdmac = to_sdma_chan(chan);
   1447	struct sdma_engine *sdma = sdmac->sdma;
   1448
   1449	sdma_terminate_all(chan);
   1450
   1451	sdma_channel_synchronize(chan);
   1452
   1453	sdma_event_disable(sdmac, sdmac->event_id0);
   1454	if (sdmac->event_id1)
   1455		sdma_event_disable(sdmac, sdmac->event_id1);
   1456
   1457	sdmac->event_id0 = 0;
   1458	sdmac->event_id1 = 0;
   1459
   1460	sdma_set_channel_priority(sdmac, 0);
   1461
   1462	clk_disable(sdma->clk_ipg);
   1463	clk_disable(sdma->clk_ahb);
   1464}
   1465
   1466static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
   1467				enum dma_transfer_direction direction, u32 bds)
   1468{
   1469	struct sdma_desc *desc;
   1470
   1471	if (!sdmac->sdma->fw_loaded && sdmac->is_ram_script) {
   1472		dev_warn_once(sdmac->sdma->dev, "sdma firmware not ready!\n");
   1473		goto err_out;
   1474	}
   1475
   1476	desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
   1477	if (!desc)
   1478		goto err_out;
   1479
   1480	sdmac->status = DMA_IN_PROGRESS;
   1481	sdmac->direction = direction;
   1482	sdmac->flags = 0;
   1483
   1484	desc->chn_count = 0;
   1485	desc->chn_real_count = 0;
   1486	desc->buf_tail = 0;
   1487	desc->buf_ptail = 0;
   1488	desc->sdmac = sdmac;
   1489	desc->num_bd = bds;
   1490
   1491	if (sdma_alloc_bd(desc))
   1492		goto err_desc_out;
   1493
   1494	/* No slave_config called in MEMCPY case, so do here */
   1495	if (direction == DMA_MEM_TO_MEM)
   1496		sdma_config_ownership(sdmac, false, true, false);
   1497
   1498	if (sdma_load_context(sdmac))
   1499		goto err_desc_out;
   1500
   1501	return desc;
   1502
   1503err_desc_out:
   1504	kfree(desc);
   1505err_out:
   1506	return NULL;
   1507}
   1508
   1509static struct dma_async_tx_descriptor *sdma_prep_memcpy(
   1510		struct dma_chan *chan, dma_addr_t dma_dst,
   1511		dma_addr_t dma_src, size_t len, unsigned long flags)
   1512{
   1513	struct sdma_channel *sdmac = to_sdma_chan(chan);
   1514	struct sdma_engine *sdma = sdmac->sdma;
   1515	int channel = sdmac->channel;
   1516	size_t count;
   1517	int i = 0, param;
   1518	struct sdma_buffer_descriptor *bd;
   1519	struct sdma_desc *desc;
   1520
   1521	if (!chan || !len)
   1522		return NULL;
   1523
   1524	dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
   1525		&dma_src, &dma_dst, len, channel);
   1526
   1527	desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
   1528					len / SDMA_BD_MAX_CNT + 1);
   1529	if (!desc)
   1530		return NULL;
   1531
   1532	do {
   1533		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
   1534		bd = &desc->bd[i];
   1535		bd->buffer_addr = dma_src;
   1536		bd->ext_buffer_addr = dma_dst;
   1537		bd->mode.count = count;
   1538		desc->chn_count += count;
   1539		bd->mode.command = 0;
   1540
   1541		dma_src += count;
   1542		dma_dst += count;
   1543		len -= count;
   1544		i++;
   1545
   1546		param = BD_DONE | BD_EXTD | BD_CONT;
   1547		/* last bd */
   1548		if (!len) {
   1549			param |= BD_INTR;
   1550			param |= BD_LAST;
   1551			param &= ~BD_CONT;
   1552		}
   1553
   1554		dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
   1555				i, count, bd->buffer_addr,
   1556				param & BD_WRAP ? "wrap" : "",
   1557				param & BD_INTR ? " intr" : "");
   1558
   1559		bd->mode.status = param;
   1560	} while (len);
   1561
   1562	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
   1563}
   1564
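/*
 * Illustrative sketch only (client side, not part of this driver): issuing a
 * memcpy through the generic dmaengine API, which ends up in
 * sdma_prep_memcpy() above when an SDMA channel is selected. Error handling
 * is trimmed and the function name is made up.
 */
static int sdma_memcpy_usage_sketch(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);	/* completion raises BD_INTR above */

	return 0;
}
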
   1565static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
   1566		struct dma_chan *chan, struct scatterlist *sgl,
   1567		unsigned int sg_len, enum dma_transfer_direction direction,
   1568		unsigned long flags, void *context)
   1569{
   1570	struct sdma_channel *sdmac = to_sdma_chan(chan);
   1571	struct sdma_engine *sdma = sdmac->sdma;
   1572	int i, count;
   1573	int channel = sdmac->channel;
   1574	struct scatterlist *sg;
   1575	struct sdma_desc *desc;
   1576
   1577	sdma_config_write(chan, &sdmac->slave_config, direction);
   1578
   1579	desc = sdma_transfer_init(sdmac, direction, sg_len);
   1580	if (!desc)
   1581		goto err_out;
   1582
   1583	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
   1584			sg_len, channel);
   1585
   1586	for_each_sg(sgl, sg, sg_len, i) {
   1587		struct sdma_buffer_descriptor *bd = &desc->bd[i];
   1588		int param;
   1589
   1590		bd->buffer_addr = sg->dma_address;
   1591
   1592		count = sg_dma_len(sg);
   1593
   1594		if (count > SDMA_BD_MAX_CNT) {
   1595			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
   1596					channel, count, SDMA_BD_MAX_CNT);
   1597			goto err_bd_out;
   1598		}
   1599
   1600		bd->mode.count = count;
   1601		desc->chn_count += count;
   1602
   1603		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
   1604			goto err_bd_out;
   1605
   1606		switch (sdmac->word_size) {
   1607		case DMA_SLAVE_BUSWIDTH_4_BYTES:
   1608			bd->mode.command = 0;
   1609			if (count & 3 || sg->dma_address & 3)
   1610				goto err_bd_out;
   1611			break;
   1612		case DMA_SLAVE_BUSWIDTH_2_BYTES:
   1613			bd->mode.command = 2;
   1614			if (count & 1 || sg->dma_address & 1)
   1615				goto err_bd_out;
   1616			break;
   1617		case DMA_SLAVE_BUSWIDTH_1_BYTE:
   1618			bd->mode.command = 1;
   1619			break;
   1620		default:
   1621			goto err_bd_out;
   1622		}
   1623
   1624		param = BD_DONE | BD_EXTD | BD_CONT;
   1625
   1626		if (i + 1 == sg_len) {
   1627			param |= BD_INTR;
   1628			param |= BD_LAST;
   1629			param &= ~BD_CONT;
   1630		}
   1631
   1632		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
   1633				i, count, (u64)sg->dma_address,
   1634				param & BD_WRAP ? "wrap" : "",
   1635				param & BD_INTR ? " intr" : "");
   1636
   1637		bd->mode.status = param;
   1638	}
   1639
   1640	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
   1641err_bd_out:
   1642	sdma_free_bd(desc);
   1643	kfree(desc);
   1644err_out:
   1645	sdmac->status = DMA_ERROR;
   1646	return NULL;
   1647}
   1648
   1649static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
   1650		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
   1651		size_t period_len, enum dma_transfer_direction direction,
   1652		unsigned long flags)
   1653{
   1654	struct sdma_channel *sdmac = to_sdma_chan(chan);
   1655	struct sdma_engine *sdma = sdmac->sdma;
   1656	int num_periods = buf_len / period_len;
   1657	int channel = sdmac->channel;
   1658	int i = 0, buf = 0;
   1659	struct sdma_desc *desc;
   1660
   1661	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
   1662
   1663	sdma_config_write(chan, &sdmac->slave_config, direction);
   1664
   1665	desc = sdma_transfer_init(sdmac, direction, num_periods);
   1666	if (!desc)
   1667		goto err_out;
   1668
   1669	desc->period_len = period_len;
   1670
   1671	sdmac->flags |= IMX_DMA_SG_LOOP;
   1672
   1673	if (period_len > SDMA_BD_MAX_CNT) {
   1674		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
   1675				channel, period_len, SDMA_BD_MAX_CNT);
   1676		goto err_bd_out;
   1677	}
   1678
   1679	while (buf < buf_len) {
   1680		struct sdma_buffer_descriptor *bd = &desc->bd[i];
   1681		int param;
   1682
   1683		bd->buffer_addr = dma_addr;
   1684
   1685		bd->mode.count = period_len;
   1686
   1687		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
   1688			goto err_bd_out;
   1689		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
   1690			bd->mode.command = 0;
   1691		else
   1692			bd->mode.command = sdmac->word_size;
   1693
   1694		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
   1695		if (i + 1 == num_periods)
   1696			param |= BD_WRAP;
   1697
   1698		dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
   1699				i, period_len, (u64)dma_addr,
   1700				param & BD_WRAP ? "wrap" : "",
   1701				param & BD_INTR ? " intr" : "");
   1702
   1703		bd->mode.status = param;
   1704
   1705		dma_addr += period_len;
   1706		buf += period_len;
   1707
   1708		i++;
   1709	}
   1710
   1711	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
   1712err_bd_out:
   1713	sdma_free_bd(desc);
   1714	kfree(desc);
   1715err_out:
   1716	sdmac->status = DMA_ERROR;
   1717	return NULL;
   1718}
   1719
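        /*
         * Apply the cached dma_slave_config for the given direction:
         * DEV_TO_MEM uses the source address/width/burst, MEM_TO_DEV the
         * destination ones, and DEV_TO_DEV packs the source burst into the
         * LWML field and the destination burst into the HWML field of the
         * watermark level before reprogramming the channel.
         */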
   1720static int sdma_config_write(struct dma_chan *chan,
   1721		       struct dma_slave_config *dmaengine_cfg,
   1722		       enum dma_transfer_direction direction)
   1723{
   1724	struct sdma_channel *sdmac = to_sdma_chan(chan);
   1725
   1726	if (direction == DMA_DEV_TO_MEM) {
   1727		sdmac->per_address = dmaengine_cfg->src_addr;
   1728		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
   1729			dmaengine_cfg->src_addr_width;
   1730		sdmac->word_size = dmaengine_cfg->src_addr_width;
   1731	} else if (direction == DMA_DEV_TO_DEV) {
   1732		sdmac->per_address2 = dmaengine_cfg->src_addr;
   1733		sdmac->per_address = dmaengine_cfg->dst_addr;
   1734		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
   1735			SDMA_WATERMARK_LEVEL_LWML;
   1736		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
   1737			SDMA_WATERMARK_LEVEL_HWML;
   1738		sdmac->word_size = dmaengine_cfg->dst_addr_width;
   1739	} else {
   1740		sdmac->per_address = dmaengine_cfg->dst_addr;
   1741		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
   1742			dmaengine_cfg->dst_addr_width;
   1743		sdmac->word_size = dmaengine_cfg->dst_addr_width;
   1744	}
   1745	sdmac->direction = direction;
   1746	return sdma_config_channel(chan);
   1747}
   1748
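        /*
         * dmaengine device_config hook: cache the slave config, validate the
         * optional struct sdma_peripheral_config, and enable the DMA request
         * event(s) for event_id0/event_id1 up front.
         */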
   1749static int sdma_config(struct dma_chan *chan,
   1750		       struct dma_slave_config *dmaengine_cfg)
   1751{
   1752	struct sdma_channel *sdmac = to_sdma_chan(chan);
   1753	struct sdma_engine *sdma = sdmac->sdma;
   1754
   1755	memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
   1756
   1757	if (dmaengine_cfg->peripheral_config) {
   1758		struct sdma_peripheral_config *sdmacfg = dmaengine_cfg->peripheral_config;
   1759		if (dmaengine_cfg->peripheral_size != sizeof(struct sdma_peripheral_config)) {
   1760			dev_err(sdma->dev, "Invalid peripheral size %zu, expected %zu\n",
   1761				dmaengine_cfg->peripheral_size,
   1762				sizeof(struct sdma_peripheral_config));
   1763			return -EINVAL;
   1764		}
   1765		sdmac->n_fifos_src = sdmacfg->n_fifos_src;
   1766		sdmac->n_fifos_dst = sdmacfg->n_fifos_dst;
   1767		sdmac->sw_done = sdmacfg->sw_done;
   1768	}
   1769
    1770	/* Set ENBLn earlier to make sure the DMA request is triggered after it */
   1771	if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
   1772		return -EINVAL;
   1773	sdma_event_enable(sdmac, sdmac->event_id0);
   1774
   1775	if (sdmac->event_id1) {
   1776		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
   1777			return -EINVAL;
   1778		sdma_event_enable(sdmac, sdmac->event_id1);
   1779	}
   1780
   1781	return 0;
   1782}
   1783
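        /*
         * Report transfer status and residue. For cyclic channels the
         * residue is derived from the periods not yet consumed
         * ((num_bd - buf_ptail) * period_len - chn_real_count); for
         * scatter-gather it is chn_count - chn_real_count.
         */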
   1784static enum dma_status sdma_tx_status(struct dma_chan *chan,
   1785				      dma_cookie_t cookie,
   1786				      struct dma_tx_state *txstate)
   1787{
   1788	struct sdma_channel *sdmac = to_sdma_chan(chan);
   1789	struct sdma_desc *desc = NULL;
   1790	u32 residue;
   1791	struct virt_dma_desc *vd;
   1792	enum dma_status ret;
   1793	unsigned long flags;
   1794
   1795	ret = dma_cookie_status(chan, cookie, txstate);
   1796	if (ret == DMA_COMPLETE || !txstate)
   1797		return ret;
   1798
   1799	spin_lock_irqsave(&sdmac->vc.lock, flags);
   1800
   1801	vd = vchan_find_desc(&sdmac->vc, cookie);
   1802	if (vd)
   1803		desc = to_sdma_desc(&vd->tx);
   1804	else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
   1805		desc = sdmac->desc;
   1806
   1807	if (desc) {
   1808		if (sdmac->flags & IMX_DMA_SG_LOOP)
   1809			residue = (desc->num_bd - desc->buf_ptail) *
   1810				desc->period_len - desc->chn_real_count;
   1811		else
   1812			residue = desc->chn_count - desc->chn_real_count;
   1813	} else {
   1814		residue = 0;
   1815	}
   1816
   1817	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
   1818
   1819	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
   1820			 residue);
   1821
   1822	return sdmac->status;
   1823}
   1824
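        /*
         * Move issued descriptors towards the hardware: if the channel is
         * idle, start the next pending descriptor under the vchan lock.
         */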
   1825static void sdma_issue_pending(struct dma_chan *chan)
   1826{
   1827	struct sdma_channel *sdmac = to_sdma_chan(chan);
   1828	unsigned long flags;
   1829
   1830	spin_lock_irqsave(&sdmac->vc.lock, flags);
   1831	if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
   1832		sdma_start_desc(sdmac);
   1833	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
   1834}
   1835
   1836#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
   1837#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
   1838#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	45
   1839#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4	46
   1840
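        /*
         * Merge the script start addresses provided by the firmware into
         * sdma->script_addrs: only positive entries overwrite the defaults,
         * so scripts missing from the firmware keep their ROM addresses.
         */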
   1841static void sdma_add_scripts(struct sdma_engine *sdma,
   1842			     const struct sdma_script_start_addrs *addr)
   1843{
    1844	const s32 *addr_arr = (const s32 *)addr;
    1845	s32 *saddr_arr = (s32 *)sdma->script_addrs;
   1846	int i;
   1847
    1848	/* use the default ROM firmware if no external firmware is provided */
   1849	if (!sdma->script_number)
   1850		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
   1851
   1852	if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
   1853				  / sizeof(s32)) {
   1854		dev_err(sdma->dev,
    1855			"SDMA script number %d does not match the firmware.\n",
   1856			sdma->script_number);
   1857		return;
   1858	}
   1859
   1860	for (i = 0; i < sdma->script_number; i++)
   1861		if (addr_arr[i] > 0)
   1862			saddr_arr[i] = addr_arr[i];
   1863
    1864	/*
    1865	 * Newer SDMA firmware contains both the UART RAM and UART ROM
    1866	 * scripts: NXP's internal legacy kernels (before 4.19) used the RAM
    1867	 * script, while mainline uses the ROM script. Use the ROM versions
    1868	 * when the firmware provides them (V3 or newer).
    1869	 */
   1870	if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
   1871		if (addr->uart_2_mcu_rom_addr)
   1872			sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
   1873		if (addr->uartsh_2_mcu_rom_addr)
   1874			sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
   1875	}
   1876}
   1877
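        /*
         * Completion callback for request_firmware_nowait(): validate the
         * firmware header (magic, size, RAM code bounds), pick the script
         * address table size from the major version, download the RAM image
         * through channel 0 and merge the new script addresses.
         */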
   1878static void sdma_load_firmware(const struct firmware *fw, void *context)
   1879{
   1880	struct sdma_engine *sdma = context;
   1881	const struct sdma_firmware_header *header;
   1882	const struct sdma_script_start_addrs *addr;
   1883	unsigned short *ram_code;
   1884
   1885	if (!fw) {
   1886		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
   1887		/* In this case we just use the ROM firmware. */
   1888		return;
   1889	}
   1890
   1891	if (fw->size < sizeof(*header))
   1892		goto err_firmware;
   1893
   1894	header = (struct sdma_firmware_header *)fw->data;
   1895
   1896	if (header->magic != SDMA_FIRMWARE_MAGIC)
   1897		goto err_firmware;
   1898	if (header->ram_code_start + header->ram_code_size > fw->size)
   1899		goto err_firmware;
   1900	switch (header->version_major) {
   1901	case 1:
   1902		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
   1903		break;
   1904	case 2:
   1905		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
   1906		break;
   1907	case 3:
   1908		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
   1909		break;
   1910	case 4:
   1911		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
   1912		break;
   1913	default:
   1914		dev_err(sdma->dev, "unknown firmware version\n");
   1915		goto err_firmware;
   1916	}
   1917
   1918	addr = (void *)header + header->script_addrs_start;
   1919	ram_code = (void *)header + header->ram_code_start;
   1920
   1921	clk_enable(sdma->clk_ipg);
   1922	clk_enable(sdma->clk_ahb);
   1923	/* download the RAM image for SDMA */
   1924	sdma_load_script(sdma, ram_code,
   1925			 header->ram_code_size,
   1926			 addr->ram_code_start_addr);
   1927	clk_disable(sdma->clk_ipg);
   1928	clk_disable(sdma->clk_ahb);
   1929
   1930	sdma_add_scripts(sdma, addr);
   1931
   1932	sdma->fw_loaded = true;
   1933
   1934	dev_info(sdma->dev, "loaded firmware %d.%d\n",
   1935		 header->version_major,
   1936		 header->version_minor);
   1937
   1938err_firmware:
   1939	release_firmware(fw);
   1940}
   1941
   1942#define EVENT_REMAP_CELLS 3
   1943
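        /*
         * Apply the optional "fsl,sdma-event-remap" property: triplets of
         * <reg shift value> select a bit in the GPR block (referenced by the
         * "gpr" phandle) that routes a peripheral event to the SDMA, e.g.
         * fsl,sdma-event-remap = <0 15 1>, <0 16 1>; (illustrative values).
         */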
   1944static int sdma_event_remap(struct sdma_engine *sdma)
   1945{
   1946	struct device_node *np = sdma->dev->of_node;
   1947	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
   1948	struct property *event_remap;
   1949	struct regmap *gpr;
   1950	char propname[] = "fsl,sdma-event-remap";
   1951	u32 reg, val, shift, num_map, i;
   1952	int ret = 0;
   1953
    1954	if (!np || !gpr_np)
   1955		goto out;
   1956
   1957	event_remap = of_find_property(np, propname, NULL);
   1958	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
   1959	if (!num_map) {
   1960		dev_dbg(sdma->dev, "no event needs to be remapped\n");
   1961		goto out;
   1962	} else if (num_map % EVENT_REMAP_CELLS) {
    1963		dev_err(sdma->dev, "the length of property %s must be a multiple of %d\n",
   1964				propname, EVENT_REMAP_CELLS);
   1965		ret = -EINVAL;
   1966		goto out;
   1967	}
   1968
   1969	gpr = syscon_node_to_regmap(gpr_np);
   1970	if (IS_ERR(gpr)) {
   1971		dev_err(sdma->dev, "failed to get gpr regmap\n");
   1972		ret = PTR_ERR(gpr);
   1973		goto out;
   1974	}
   1975
   1976	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
   1977		ret = of_property_read_u32_index(np, propname, i, &reg);
   1978		if (ret) {
   1979			dev_err(sdma->dev, "failed to read property %s index %d\n",
   1980					propname, i);
   1981			goto out;
   1982		}
   1983
   1984		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
   1985		if (ret) {
   1986			dev_err(sdma->dev, "failed to read property %s index %d\n",
   1987					propname, i + 1);
   1988			goto out;
   1989		}
   1990
   1991		ret = of_property_read_u32_index(np, propname, i + 2, &val);
   1992		if (ret) {
   1993			dev_err(sdma->dev, "failed to read property %s index %d\n",
   1994					propname, i + 2);
   1995			goto out;
   1996		}
   1997
   1998		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
   1999	}
   2000
   2001out:
   2002	if (gpr_np)
   2003		of_node_put(gpr_np);
   2004
   2005	return ret;
   2006}
   2007
   2008static int sdma_get_firmware(struct sdma_engine *sdma,
   2009		const char *fw_name)
   2010{
   2011	int ret;
   2012
   2013	ret = request_firmware_nowait(THIS_MODULE,
   2014			FW_ACTION_UEVENT, fw_name, sdma->dev,
   2015			GFP_KERNEL, sdma, sdma_load_firmware);
   2016
   2017	return ret;
   2018}
   2019
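        /*
         * One-time controller initialisation: allocate the channel control
         * blocks plus the context area in coherent memory, clear all event
         * enables and channel priorities, set up command channel 0, program
         * SDMA_H_CONFIG (static context switching, ACR if the AHB and IPG
         * clocks run at the same rate) and finally point SDMA_H_C0PTR at
         * the control block array.
         */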
   2020static int sdma_init(struct sdma_engine *sdma)
   2021{
   2022	int i, ret;
   2023	dma_addr_t ccb_phys;
   2024
   2025	ret = clk_enable(sdma->clk_ipg);
   2026	if (ret)
   2027		return ret;
   2028	ret = clk_enable(sdma->clk_ahb);
   2029	if (ret)
   2030		goto disable_clk_ipg;
   2031
   2032	if (sdma->drvdata->check_ratio &&
   2033	    (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
   2034		sdma->clk_ratio = 1;
   2035
   2036	/* Be sure SDMA has not started yet */
   2037	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
   2038
   2039	sdma->channel_control = dma_alloc_coherent(sdma->dev,
   2040			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
   2041			sizeof(struct sdma_context_data),
   2042			&ccb_phys, GFP_KERNEL);
   2043
   2044	if (!sdma->channel_control) {
   2045		ret = -ENOMEM;
   2046		goto err_dma_alloc;
   2047	}
   2048
   2049	sdma->context = (void *)sdma->channel_control +
   2050		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
   2051	sdma->context_phys = ccb_phys +
   2052		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
   2053
   2054	/* disable all channels */
   2055	for (i = 0; i < sdma->drvdata->num_events; i++)
   2056		writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
   2057
   2058	/* All channels have priority 0 */
   2059	for (i = 0; i < MAX_DMA_CHANNELS; i++)
   2060		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
   2061
   2062	ret = sdma_request_channel0(sdma);
   2063	if (ret)
   2064		goto err_dma_alloc;
   2065
   2066	sdma_config_ownership(&sdma->channel[0], false, true, false);
   2067
   2068	/* Set Command Channel (Channel Zero) */
   2069	writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
   2070
   2071	/* Set bits of CONFIG register but with static context switching */
   2072	if (sdma->clk_ratio)
   2073		writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
   2074	else
   2075		writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
   2076
   2077	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
   2078
    2079	/* Initialize channel 0 priority */
   2080	sdma_set_channel_priority(&sdma->channel[0], 7);
   2081
   2082	clk_disable(sdma->clk_ipg);
   2083	clk_disable(sdma->clk_ahb);
   2084
   2085	return 0;
   2086
   2087err_dma_alloc:
   2088	clk_disable(sdma->clk_ahb);
   2089disable_clk_ipg:
   2090	clk_disable(sdma->clk_ipg);
   2091	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
   2092	return ret;
   2093}
   2094
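        /*
         * Legacy dmaengine filter used by sdma_xlate(): accept only general
         * purpose channels and stash the imx_dma_data in chan->private for
         * later use by device_alloc_chan_resources().
         */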
   2095static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
   2096{
   2097	struct sdma_channel *sdmac = to_sdma_chan(chan);
   2098	struct imx_dma_data *data = fn_param;
   2099
   2100	if (!imx_dma_is_general_purpose(chan))
   2101		return false;
   2102
   2103	sdmac->data = *data;
   2104	chan->private = &sdmac->data;
   2105
   2106	return true;
   2107}
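        /*
         * Translate a device tree DMA specifier into a channel. The three
         * cells are <dma_request peripheral_type priority>, e.g. a consumer
         * node could use "dmas = <&sdma 25 4 0>;" (illustrative values).
         */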
   2108
   2109static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
   2110				   struct of_dma *ofdma)
   2111{
   2112	struct sdma_engine *sdma = ofdma->of_dma_data;
   2113	dma_cap_mask_t mask = sdma->dma_device.cap_mask;
   2114	struct imx_dma_data data;
   2115
   2116	if (dma_spec->args_count != 3)
   2117		return NULL;
   2118
   2119	data.dma_request = dma_spec->args[0];
   2120	data.peripheral_type = dma_spec->args[1];
   2121	data.priority = dma_spec->args[2];
    2122	/*
    2123	 * Initialize dma_request2 to zero; it is not used by the dts.
    2124	 * For P2P, dma_request2 is initialized through dma_request_channel():
    2125	 * chan->private will point to the imx_dma_data, and in
    2126	 * device_alloc_chan_resources() imx_dma_data.dma_request2 will
    2127	 * be used as sdmac->event_id1.
    2128	 */
   2129	data.dma_request2 = 0;
   2130
   2131	return __dma_request_channel(&mask, sdma_filter_fn, &data,
   2132				     ofdma->of_node);
   2133}
   2134
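        /*
         * Probe: map registers, get the ipg/ahb clocks and the interrupt,
         * initialise the per-channel virt-dma state (channel 0 stays
         * internal), bring up the controller, register with dmaengine and
         * the OF DMA framework, then asynchronously request the optional
         * RAM script firmware named in "fsl,sdma-ram-script-name".
         */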
   2135static int sdma_probe(struct platform_device *pdev)
   2136{
   2137	struct device_node *np = pdev->dev.of_node;
   2138	struct device_node *spba_bus;
   2139	const char *fw_name;
   2140	int ret;
   2141	int irq;
   2142	struct resource *iores;
   2143	struct resource spba_res;
   2144	int i;
   2145	struct sdma_engine *sdma;
   2146	s32 *saddr_arr;
   2147
   2148	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
   2149	if (ret)
   2150		return ret;
   2151
   2152	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
   2153	if (!sdma)
   2154		return -ENOMEM;
   2155
   2156	spin_lock_init(&sdma->channel_0_lock);
   2157
   2158	sdma->dev = &pdev->dev;
   2159	sdma->drvdata = of_device_get_match_data(sdma->dev);
   2160
   2161	irq = platform_get_irq(pdev, 0);
   2162	if (irq < 0)
   2163		return irq;
   2164
   2165	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   2166	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
   2167	if (IS_ERR(sdma->regs))
   2168		return PTR_ERR(sdma->regs);
   2169
   2170	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
   2171	if (IS_ERR(sdma->clk_ipg))
   2172		return PTR_ERR(sdma->clk_ipg);
   2173
   2174	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
   2175	if (IS_ERR(sdma->clk_ahb))
   2176		return PTR_ERR(sdma->clk_ahb);
   2177
   2178	ret = clk_prepare(sdma->clk_ipg);
   2179	if (ret)
   2180		return ret;
   2181
   2182	ret = clk_prepare(sdma->clk_ahb);
   2183	if (ret)
   2184		goto err_clk;
   2185
   2186	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
   2187			       sdma);
   2188	if (ret)
   2189		goto err_irq;
   2190
   2191	sdma->irq = irq;
   2192
   2193	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
   2194	if (!sdma->script_addrs) {
   2195		ret = -ENOMEM;
   2196		goto err_irq;
   2197	}
   2198
   2199	/* initially no scripts available */
   2200	saddr_arr = (s32 *)sdma->script_addrs;
   2201	for (i = 0; i < sizeof(*sdma->script_addrs) / sizeof(s32); i++)
   2202		saddr_arr[i] = -EINVAL;
   2203
   2204	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
   2205	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
   2206	dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
   2207
   2208	INIT_LIST_HEAD(&sdma->dma_device.channels);
   2209	/* Initialize channel parameters */
   2210	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
   2211		struct sdma_channel *sdmac = &sdma->channel[i];
   2212
   2213		sdmac->sdma = sdma;
   2214
   2215		sdmac->channel = i;
   2216		sdmac->vc.desc_free = sdma_desc_free;
   2217		INIT_LIST_HEAD(&sdmac->terminated);
   2218		INIT_WORK(&sdmac->terminate_worker,
   2219				sdma_channel_terminate_work);
   2220		/*
   2221		 * Add the channel to the DMAC list. Do not add channel 0 though
   2222		 * because we need it internally in the SDMA driver. This also means
   2223		 * that channel 0 in dmaengine counting matches sdma channel 1.
   2224		 */
   2225		if (i)
   2226			vchan_init(&sdmac->vc, &sdma->dma_device);
   2227	}
   2228
   2229	ret = sdma_init(sdma);
   2230	if (ret)
   2231		goto err_init;
   2232
   2233	ret = sdma_event_remap(sdma);
   2234	if (ret)
   2235		goto err_init;
   2236
   2237	if (sdma->drvdata->script_addrs)
   2238		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
   2239
   2240	sdma->dma_device.dev = &pdev->dev;
   2241
   2242	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
   2243	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
   2244	sdma->dma_device.device_tx_status = sdma_tx_status;
   2245	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
   2246	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
   2247	sdma->dma_device.device_config = sdma_config;
   2248	sdma->dma_device.device_terminate_all = sdma_terminate_all;
   2249	sdma->dma_device.device_synchronize = sdma_channel_synchronize;
   2250	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
   2251	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
   2252	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
   2253	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
   2254	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
   2255	sdma->dma_device.device_issue_pending = sdma_issue_pending;
   2256	sdma->dma_device.copy_align = 2;
   2257	dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
   2258
   2259	platform_set_drvdata(pdev, sdma);
   2260
   2261	ret = dma_async_device_register(&sdma->dma_device);
   2262	if (ret) {
   2263		dev_err(&pdev->dev, "unable to register\n");
   2264		goto err_init;
   2265	}
   2266
   2267	if (np) {
   2268		ret = of_dma_controller_register(np, sdma_xlate, sdma);
   2269		if (ret) {
   2270			dev_err(&pdev->dev, "failed to register controller\n");
   2271			goto err_register;
   2272		}
   2273
   2274		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
   2275		ret = of_address_to_resource(spba_bus, 0, &spba_res);
   2276		if (!ret) {
   2277			sdma->spba_start_addr = spba_res.start;
   2278			sdma->spba_end_addr = spba_res.end;
   2279		}
   2280		of_node_put(spba_bus);
   2281	}
   2282
    2283	/*
    2284	 * Because the device tree does not encode the ROM script addresses,
    2285	 * the RAM script in the firmware is mandatory for a device tree
    2286	 * probe; otherwise it fails.
    2287	 */
   2288	ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
   2289				      &fw_name);
   2290	if (ret) {
   2291		dev_warn(&pdev->dev, "failed to get firmware name\n");
   2292	} else {
   2293		ret = sdma_get_firmware(sdma, fw_name);
   2294		if (ret)
   2295			dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
   2296	}
   2297
   2298	return 0;
   2299
   2300err_register:
   2301	dma_async_device_unregister(&sdma->dma_device);
   2302err_init:
   2303	kfree(sdma->script_addrs);
   2304err_irq:
   2305	clk_unprepare(sdma->clk_ahb);
   2306err_clk:
   2307	clk_unprepare(sdma->clk_ipg);
   2308	return ret;
   2309}
   2310
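        /*
         * Teardown mirrors probe: release the interrupt, unregister from
         * dmaengine, free the script address table, unprepare the clocks
         * and kill each channel's vchan tasklet.
         */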
   2311static int sdma_remove(struct platform_device *pdev)
   2312{
   2313	struct sdma_engine *sdma = platform_get_drvdata(pdev);
   2314	int i;
   2315
   2316	devm_free_irq(&pdev->dev, sdma->irq, sdma);
   2317	dma_async_device_unregister(&sdma->dma_device);
   2318	kfree(sdma->script_addrs);
   2319	clk_unprepare(sdma->clk_ahb);
   2320	clk_unprepare(sdma->clk_ipg);
   2321	/* Kill the tasklet */
   2322	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
   2323		struct sdma_channel *sdmac = &sdma->channel[i];
   2324
   2325		tasklet_kill(&sdmac->vc.task);
   2326		sdma_free_chan_resources(&sdmac->vc.chan);
   2327	}
   2328
   2329	platform_set_drvdata(pdev, NULL);
   2330	return 0;
   2331}
   2332
   2333static struct platform_driver sdma_driver = {
   2334	.driver		= {
   2335		.name	= "imx-sdma",
   2336		.of_match_table = sdma_dt_ids,
   2337	},
   2338	.remove		= sdma_remove,
   2339	.probe		= sdma_probe,
   2340};
   2341
   2342module_platform_driver(sdma_driver);
   2343
   2344MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
   2345MODULE_DESCRIPTION("i.MX SDMA driver");
   2346#if IS_ENABLED(CONFIG_SOC_IMX6Q)
   2347MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
   2348#endif
   2349#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M)
   2350MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
   2351#endif
   2352MODULE_LICENSE("GPL");