cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

musb_cppi41.c (21930B)


// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "cppi_dma.h"
#include "musb_core.h"
#include "musb_trace.h"

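/*
 * RNDIS_REG(port) addresses the per-port RNDIS size register written in
 * cppi41_configure_channel() below: the registers start at offset 0x80 and
 * are spaced 4 bytes apart, so port 1 maps to 0x80, port 2 to 0x84, etc.
 */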
#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREQ_NONE		0
#define EP_MODE_AUTOREQ_ALL_NEOP	1
#define EP_MODE_AUTOREQ_ALWAYS		3

#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

#define MUSB_DMA_NUM_CHANNELS 15

#define DA8XX_USB_MODE		0x10
#define DA8XX_USB_AUTOREQ	0x14
#define DA8XX_USB_TEARDOWN	0x1c

#define DA8XX_DMA_NUM_CHANNELS 4

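/*
 * Controller state shared by all channels. The register offsets and the
 * set_dma_mode callback differ between the AM335x and DA8xx glue layers,
 * so they are selected once in cppi41_dma_controller_create().
 */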
struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel *rx_channel;
	struct cppi41_dma_channel *tx_channel;
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;

	u32 tdown_reg;
	u32 autoreq_reg;

	void (*set_dma_mode)(struct cppi41_dma_channel *cppi41_channel,
			     unsigned int mode);
	u8 num_channels;
};

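/*
 * Snapshot the RX data toggle before a transfer is issued so that
 * update_rx_toggle() can later detect the erratum-triggered DATA1 -> DATA0
 * reset and restore the toggle.
 */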
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->controller.musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(musb))
		return;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: due to an internal synchronisation error,
	 * the data toggle may reset from DATA1 to DATA0 while receiving data
	 * from more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		musb_dbg(musb, "Restoring DATA1 toggle.");
	}

	cppi41_channel->usb_toggle = toggle;
}

static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8		epnum = hw_ep->epnum;
	struct musb	*musb = hw_ep->musb;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	u16		csr;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}

static void cppi41_dma_callback(void *private_data,
				const struct dmaengine_result *result);

static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	u16 csr;

	if (!cppi41_channel->prog_len ||
	    (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		cppi41_channel->channel.rx_packet_done = true;

		/*
		 * Transmit a ZLP using PIO mode for transfers whose size is
		 * a multiple of the EP packet size.
		 */
		if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
					cppi41_channel->packet_sz) == 0) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
		}

		trace_musb_cppi41_done(cppi41_channel);
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u32 remain_bytes;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback_result = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		trace_musb_cppi41_cont(cppi41_channel);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}

static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->controller.musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list) &&
	    !hrtimer_is_queued(&controller->early_tx)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

static void cppi41_dma_callback(void *private_data,
				const struct dmaengine_result *result)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct cppi41_dma_controller *controller;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	int is_hs = 0;
	bool empty;

	controller = cppi41_channel->controller;
	if (controller->controller.dma_callback)
		controller->controller.dma_callback(&controller->controller);

	if (result->result == DMA_TRANS_ABORTED)
		return;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	trace_musb_cppi41_gb(cppi41_channel);
	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	if (cppi41_channel->is_tx) {
		u8 type;

		if (is_host_active(musb))
			type = hw_ep->out_qh->type;
		else
			type = hw_ep->ep_in.type;

		if (type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Don't use the early-TX-interrupt workaround below
			 * for isochronous transfers. Since they are periodic,
			 * by the time the next transfer is scheduled, the
			 * current one should be done already.
			 *
			 * This avoids audio playback underrun issues.
			 */
			empty = true;
		else
			empty = musb_is_tx_fifo_empty(hw_ep);
	}

	if (!cppi41_channel->is_tx || empty) {
		cppi41_trans_done(cppi41_channel);
		goto out;
	}

	/*
	 * On AM335x it has been observed that the TX interrupt fires too
	 * early, i.e. the TXFIFO is not yet empty but the DMA engine says
	 * that it is done with the transfer. We don't receive a FIFO-empty
	 * interrupt, so the only thing we can do is poll for the bit. On HS
	 * it usually takes 2us, on FS around 110us - 150us depending on the
	 * transfer size. We spin on HS (no longer than 25us) and set up a
	 * timer on FS to check for the bit and complete the transfer.
	 */
	if (is_host_active(musb)) {
		if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
			is_hs = 1;
	} else {
		if (musb->g.speed == USB_SPEED_HIGH)
			is_hs = 1;
	}
	if (is_hs) {
		unsigned wait = 25;

		do {
			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
			wait--;
			if (!wait)
				break;
			cpu_relax();
		} while (1);
	}
	list_add_tail(&cppi41_channel->tx_check,
			&controller->early_tx_list);
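	/*
	 * Scale the first timer expiry with the transfer size (roughly
	 * total_len / 10 microseconds) and allow 20us of slack; the timer
	 * handler re-arms itself every 20us until the FIFO drains.
	 */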
	if (!hrtimer_is_queued(&controller->early_tx)) {
		unsigned long usecs = cppi41_channel->total_len / 10;

		hrtimer_start_range_ns(&controller->early_tx,
				       usecs * NSEC_PER_USEC,
				       20 * NSEC_PER_USEC,
				       HRTIMER_MODE_REL);
	}

out:
	spin_unlock_irqrestore(&musb->lock, flags);
}

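/*
 * Each endpoint owns a 2-bit mode field in the TX/RX mode registers:
 * endpoint 1 occupies bits 1:0, endpoint 2 bits 3:2, and so on.
 */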
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;
	return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->controller.musb;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(musb->ctrl_base, USB_CTRL_TX_MODE, new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(musb->ctrl_base, USB_CTRL_RX_MODE, new_mode);
	}
}

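/*
 * On DA8xx a single register holds both directions: each port has a 4-bit
 * mode field starting at bit (port - 1) * 4, with the RX fields living in
 * the upper 16 bits. The cached value is kept in tx_mode for both.
 */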
static void da8xx_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned int mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->controller.musb;
	unsigned int shift;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->tx_mode;
	port = cppi41_channel->port_num;

	shift = (port - 1) * 4;
	if (!cppi41_channel->is_tx)
		shift += 16;
	new_mode = old_mode & ~(3 << shift);
	new_mode |= mode << shift;

	if (new_mode == old_mode)
		return;
	controller->tx_mode = new_mode;
	musb_writel(musb->ctrl_base, DA8XX_USB_MODE, new_mode);
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->controller.musb->ctrl_base,
		    controller->autoreq_reg, new_mode);
}

static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->controller.musb;
	unsigned use_gen_rndis = 0;

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;
	cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

	/*
	 * Due to AM335x Advisory 1.0.13 we are not allowed to transfer more
	 * than the max packet size at a time on RX, so multi-packet generic
	 * RNDIS mode is only used for TX.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			controller->set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			controller->set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_NONE);
		}
	} else {
		/* fallback mode */
		controller->set_dma_mode(cppi41_channel,
				EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback_result = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
	cppi41_channel->channel.rx_packet_done = false;

	trace_musb_cppi41_config(cppi41_channel);

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}

static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= controller->num_channels)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	trace_musb_cppi41_alloc(cppi41_channel);
	return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	trace_musb_cppi41_free(cppi41_channel);
	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	int hb_mult = 0;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (is_host_active(cppi41_channel->controller->controller.musb)) {
		if (cppi41_channel->is_tx)
			hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
		else
			hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;

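	/*
	 * For high-bandwidth endpoints, bits 10:0 of wMaxPacketSize carry
	 * the base payload size; scale it by the per-microframe transaction
	 * multiplier to get the effective DMA packet size.
	 */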
	if (hb_mult)
		packet_sz = hb_mult * (packet_sz & 0x7FF);

	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->controller.musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}

static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->controller.musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	trace_musb_cppi41_abort(cppi41_channel);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

		/* delay to drain the cppi dma pipeline for isoch */
		udelay(250);

		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		/* wait to drain the cppi dma pipeline */
		udelay(50);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* DA8xx Advisory 2.3.27: wait 250 ms before starting the teardown */
	if (musb->ops->quirks & MUSB_DA8XX)
		mdelay(250);

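	/*
	 * The teardown register has one bit per DMA port: RX ports use the
	 * low half-word and TX ports the same bits shifted up by 16.
	 */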
	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		if (is_tx)
			musb_writel(musb->ctrl_base, controller->tdown_reg,
				    tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	if (is_tx) {
		musb_writel(musb->ctrl_base, controller->tdown_reg, tdbit);

		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < ctrl->num_channels; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}

static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->controller.musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->parent->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

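	/*
	 * Each "dma-names" entry is expected to look like "tx1".."txN" or
	 * "rx1".."rxN": the prefix selects the direction and the digits
	 * select the 1-based port number.
	 */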
	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (strstarts(str, "tx"))
			is_tx = 1;
		else if (strstarts(str, "rx"))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			/* fail the error path instead of returning stale 0 */
			ret = -EINVAL;
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port > controller->num_channels || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_chan(dev->parent, str);
		if (IS_ERR(dc)) {
			ret = PTR_ERR(dc);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to request %s: %d.\n",
					str, ret);
			goto err;
		}

		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}

void cppi41_dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller->rx_channel);
	kfree(controller->tx_channel);
	kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);

struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int channel_size;
	int ret = 0;

	if (!musb->controller->parent->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;
	controller->controller.musb = musb;

	if (musb->ops->quirks & MUSB_DA8XX) {
		controller->tdown_reg = DA8XX_USB_TEARDOWN;
		controller->autoreq_reg = DA8XX_USB_AUTOREQ;
		controller->set_dma_mode = da8xx_set_dma_mode;
		controller->num_channels = DA8XX_DMA_NUM_CHANNELS;
	} else {
		controller->tdown_reg = USB_TDOWN;
		controller->autoreq_reg = USB_CTRL_AUTOREQ;
		controller->set_dma_mode = cppi41_set_dma_mode;
		controller->num_channels = MUSB_DMA_NUM_CHANNELS;
	}

	channel_size = controller->num_channels *
			sizeof(struct cppi41_dma_channel);
	controller->rx_channel = kzalloc(channel_size, GFP_KERNEL);
	if (!controller->rx_channel)
		goto rx_channel_alloc_fail;
	controller->tx_channel = kzalloc(channel_size, GFP_KERNEL);
	if (!controller->tx_channel)
		goto tx_channel_alloc_fail;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller->tx_channel);
tx_channel_alloc_fail:
	kfree(controller->rx_channel);
rx_channel_alloc_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);