cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

fsl_ucc_hdlc.c (31650B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/* Freescale QUICC Engine HDLC Device Driver
      3 *
      4 * Copyright 2016 Freescale Semiconductor Inc.
      5 */
      6
      7#include <linux/delay.h>
      8#include <linux/dma-mapping.h>
      9#include <linux/hdlc.h>
     10#include <linux/init.h>
     11#include <linux/interrupt.h>
     12#include <linux/io.h>
     13#include <linux/irq.h>
     14#include <linux/kernel.h>
     15#include <linux/module.h>
     16#include <linux/netdevice.h>
     17#include <linux/of_address.h>
     18#include <linux/of_irq.h>
     19#include <linux/of_platform.h>
     20#include <linux/platform_device.h>
     21#include <linux/sched.h>
     22#include <linux/skbuff.h>
     23#include <linux/slab.h>
     24#include <linux/spinlock.h>
     25#include <linux/stddef.h>
     26#include <soc/fsl/qe/qe_tdm.h>
     27#include <uapi/linux/if_arp.h>
     28
     29#include "fsl_ucc_hdlc.h"
     30
     31#define DRV_DESC "Freescale QE UCC HDLC Driver"
     32#define DRV_NAME "ucc_hdlc"
     33
     34#define TDM_PPPOHT_SLIC_MAXIN
     35#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
     36
     37static struct ucc_tdm_info utdm_primary_info = {
     38	.uf_info = {
     39		.tsa = 0,
     40		.cdp = 0,
     41		.cds = 1,
     42		.ctsp = 1,
     43		.ctss = 1,
     44		.revd = 0,
     45		.urfs = 256,
     46		.utfs = 256,
     47		.urfet = 128,
     48		.urfset = 192,
     49		.utfet = 128,
     50		.utftt = 0x40,
     51		.ufpt = 256,
     52		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
     53		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
     54		.tenc = UCC_FAST_TX_ENCODING_NRZ,
     55		.renc = UCC_FAST_RX_ENCODING_NRZ,
     56		.tcrc = UCC_FAST_16_BIT_CRC,
     57		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
     58	},
     59
     60	.si_info = {
     61#ifdef TDM_PPPOHT_SLIC_MAXIN
     62		.simr_rfsd = 1,
     63		.simr_tfsd = 2,
     64#else
     65		.simr_rfsd = 0,
     66		.simr_tfsd = 0,
     67#endif
     68		.simr_crt = 0,
     69		.simr_sl = 0,
     70		.simr_ce = 1,
     71		.simr_fe = 1,
     72		.simr_gm = 0,
     73	},
     74};
     75
     76static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
     77
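/* One-time controller setup: bring up the UCC fast block, optionally
 * configure loopback/TSA/HDLC-bus mode, allocate and program the MURAM
 * parameter RAM, and build the Rx/Tx buffer descriptor rings together
 * with their DMA data buffers.
 */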
     78static int uhdlc_init(struct ucc_hdlc_private *priv)
     79{
     80	struct ucc_tdm_info *ut_info;
     81	struct ucc_fast_info *uf_info;
     82	u32 cecr_subblock;
     83	u16 bd_status;
     84	int ret, i;
     85	void *bd_buffer;
     86	dma_addr_t bd_dma_addr;
     87	s32 riptr;
     88	s32 tiptr;
     89	u32 gumr;
     90
     91	ut_info = priv->ut_info;
     92	uf_info = &ut_info->uf_info;
     93
     94	if (priv->tsa) {
     95		uf_info->tsa = 1;
     96		uf_info->ctsp = 1;
     97		uf_info->cds = 1;
     98		uf_info->ctss = 1;
     99	} else {
    100		uf_info->cds = 0;
    101		uf_info->ctsp = 0;
    102		uf_info->ctss = 0;
    103	}
    104
     105	/* This sets the HPM register in the CMXUCR register, which configures
     106	 * an open-drain connected HDLC bus
     107	 */
    108	if (priv->hdlc_bus)
    109		uf_info->brkpt_support = 1;
    110
    111	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
    112				UCC_HDLC_UCCE_TXB) << 16);
    113
    114	ret = ucc_fast_init(uf_info, &priv->uccf);
    115	if (ret) {
    116		dev_err(priv->dev, "Failed to init uccf.");
    117		return ret;
    118	}
    119
    120	priv->uf_regs = priv->uccf->uf_regs;
    121	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
    122
    123	/* Loopback mode */
    124	if (priv->loopback) {
    125		dev_info(priv->dev, "Loopback Mode\n");
     126		/* use the same clock when working in loopback */
    127		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
    128
    129		gumr = ioread32be(&priv->uf_regs->gumr);
    130		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
    131			 UCC_FAST_GUMR_TCI);
    132		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
    133		iowrite32be(gumr, &priv->uf_regs->gumr);
    134	}
    135
    136	/* Initialize SI */
    137	if (priv->tsa)
    138		ucc_tdm_init(priv->utdm, priv->ut_info);
    139
    140	/* Write to QE CECR, UCCx channel to Stop Transmission */
    141	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
    142	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
    143			   QE_CR_PROTOCOL_UNSPECIFIED, 0);
    144
     145	/* Set UPSMR normal mode (needs fixing) */
    146	iowrite32be(0, &priv->uf_regs->upsmr);
    147
    148	/* hdlc_bus mode */
    149	if (priv->hdlc_bus) {
    150		u32 upsmr;
    151
    152		dev_info(priv->dev, "HDLC bus Mode\n");
    153		upsmr = ioread32be(&priv->uf_regs->upsmr);
    154
    155		/* bus mode and retransmit enable, with collision window
    156		 * set to 8 bytes
    157		 */
    158		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
    159				UCC_HDLC_UPSMR_CW8;
    160		iowrite32be(upsmr, &priv->uf_regs->upsmr);
    161
    162		/* explicitly disable CDS & CTSP */
    163		gumr = ioread32be(&priv->uf_regs->gumr);
    164		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
    165		/* set automatic sync to explicitly ignore CD signal */
    166		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
    167		iowrite32be(gumr, &priv->uf_regs->gumr);
    168	}
    169
    170	priv->rx_ring_size = RX_BD_RING_LEN;
    171	priv->tx_ring_size = TX_BD_RING_LEN;
    172	/* Alloc Rx BD */
    173	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
    174			RX_BD_RING_LEN * sizeof(struct qe_bd),
    175			&priv->dma_rx_bd, GFP_KERNEL);
    176
    177	if (!priv->rx_bd_base) {
    178		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
    179		ret = -ENOMEM;
    180		goto free_uccf;
    181	}
    182
    183	/* Alloc Tx BD */
    184	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
    185			TX_BD_RING_LEN * sizeof(struct qe_bd),
    186			&priv->dma_tx_bd, GFP_KERNEL);
    187
    188	if (!priv->tx_bd_base) {
    189		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
    190		ret = -ENOMEM;
    191		goto free_rx_bd;
    192	}
    193
    194	/* Alloc parameter ram for ucc hdlc */
    195	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
    196				ALIGNMENT_OF_UCC_HDLC_PRAM);
    197
    198	if (priv->ucc_pram_offset < 0) {
    199		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
    200		ret = -ENOMEM;
    201		goto free_tx_bd;
    202	}
    203
    204	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
    205				  sizeof(*priv->rx_skbuff),
    206				  GFP_KERNEL);
    207	if (!priv->rx_skbuff) {
    208		ret = -ENOMEM;
    209		goto free_ucc_pram;
    210	}
    211
    212	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
    213				  sizeof(*priv->tx_skbuff),
    214				  GFP_KERNEL);
    215	if (!priv->tx_skbuff) {
    216		ret = -ENOMEM;
    217		goto free_rx_skbuff;
    218	}
    219
    220	priv->skb_curtx = 0;
    221	priv->skb_dirtytx = 0;
    222	priv->curtx_bd = priv->tx_bd_base;
    223	priv->dirty_tx = priv->tx_bd_base;
    224	priv->currx_bd = priv->rx_bd_base;
    225	priv->currx_bdnum = 0;
    226
    227	/* init parameter base */
    228	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
    229	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
    230			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
    231
    232	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
    233					qe_muram_addr(priv->ucc_pram_offset);
    234
    235	/* Zero out parameter ram */
    236	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
    237
    238	/* Alloc riptr, tiptr */
    239	riptr = qe_muram_alloc(32, 32);
    240	if (riptr < 0) {
    241		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
    242		ret = -ENOMEM;
    243		goto free_tx_skbuff;
    244	}
    245
    246	tiptr = qe_muram_alloc(32, 32);
    247	if (tiptr < 0) {
    248		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
    249		ret = -ENOMEM;
    250		goto free_riptr;
    251	}
    252	if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
    253		dev_err(priv->dev, "MURAM allocation out of addressable range\n");
    254		ret = -ENOMEM;
    255		goto free_tiptr;
    256	}
    257
    258	/* Set RIPTR, TIPTR */
    259	iowrite16be(riptr, &priv->ucc_pram->riptr);
    260	iowrite16be(tiptr, &priv->ucc_pram->tiptr);
    261
    262	/* Set MRBLR */
    263	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);
    264
    265	/* Set RBASE, TBASE */
    266	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
    267	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);
    268
    269	/* Set RSTATE, TSTATE */
    270	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
    271	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);
    272
    273	/* Set C_MASK, C_PRES for 16bit CRC */
    274	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
    275	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
    276
    277	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
    278	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
    279	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
    280	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
    281	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
    282	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
    283	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
    284	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
    285
    286	/* Get BD buffer */
    287	bd_buffer = dma_alloc_coherent(priv->dev,
    288				       (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
    289				       &bd_dma_addr, GFP_KERNEL);
    290
    291	if (!bd_buffer) {
    292		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
    293		ret = -ENOMEM;
    294		goto free_tiptr;
    295	}
    296
    297	priv->rx_buffer = bd_buffer;
    298	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
    299
    300	priv->dma_rx_addr = bd_dma_addr;
    301	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
    302
    303	for (i = 0; i < RX_BD_RING_LEN; i++) {
    304		if (i < (RX_BD_RING_LEN - 1))
    305			bd_status = R_E_S | R_I_S;
    306		else
    307			bd_status = R_E_S | R_I_S | R_W_S;
    308
    309		priv->rx_bd_base[i].status = cpu_to_be16(bd_status);
    310		priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH);
    311	}
    312
    313	for (i = 0; i < TX_BD_RING_LEN; i++) {
    314		if (i < (TX_BD_RING_LEN - 1))
    315			bd_status =  T_I_S | T_TC_S;
    316		else
    317			bd_status =  T_I_S | T_TC_S | T_W_S;
    318
    319		priv->tx_bd_base[i].status = cpu_to_be16(bd_status);
    320		priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH);
    321	}
    322	dma_wmb();
    323
    324	return 0;
    325
    326free_tiptr:
    327	qe_muram_free(tiptr);
    328free_riptr:
    329	qe_muram_free(riptr);
    330free_tx_skbuff:
    331	kfree(priv->tx_skbuff);
    332free_rx_skbuff:
    333	kfree(priv->rx_skbuff);
    334free_ucc_pram:
    335	qe_muram_free(priv->ucc_pram_offset);
    336free_tx_bd:
    337	dma_free_coherent(priv->dev,
    338			  TX_BD_RING_LEN * sizeof(struct qe_bd),
    339			  priv->tx_bd_base, priv->dma_tx_bd);
    340free_rx_bd:
    341	dma_free_coherent(priv->dev,
    342			  RX_BD_RING_LEN * sizeof(struct qe_bd),
    343			  priv->rx_bd_base, priv->dma_rx_bd);
    344free_uccf:
    345	ucc_fast_free(priv->uccf);
    346
    347	return ret;
    348}
    349
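/* Transmit path: validate or prepend the protocol header according to
 * dev->type, copy the skb payload into the DMA buffer of the current Tx
 * BD, hand the BD to the QE, and stop the queue when the ring is full.
 */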
    350static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
    351{
    352	hdlc_device *hdlc = dev_to_hdlc(dev);
    353	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
    354	struct qe_bd *bd;
    355	u16 bd_status;
    356	unsigned long flags;
    357	__be16 *proto_head;
    358
    359	switch (dev->type) {
    360	case ARPHRD_RAWHDLC:
    361		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
    362			dev->stats.tx_dropped++;
    363			dev_kfree_skb(skb);
    364			netdev_err(dev, "No enough space for hdlc head\n");
    365			return -ENOMEM;
    366		}
    367
    368		skb_push(skb, HDLC_HEAD_LEN);
    369
    370		proto_head = (__be16 *)skb->data;
    371		*proto_head = htons(DEFAULT_HDLC_HEAD);
    372
    373		dev->stats.tx_bytes += skb->len;
    374		break;
    375
    376	case ARPHRD_PPP:
    377		proto_head = (__be16 *)skb->data;
    378		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
    379			dev->stats.tx_dropped++;
    380			dev_kfree_skb(skb);
    381			netdev_err(dev, "Wrong ppp header\n");
    382			return -ENOMEM;
    383		}
    384
    385		dev->stats.tx_bytes += skb->len;
    386		break;
    387
    388	case ARPHRD_ETHER:
    389		dev->stats.tx_bytes += skb->len;
    390		break;
    391
    392	default:
    393		dev->stats.tx_dropped++;
    394		dev_kfree_skb(skb);
    395		return -ENOMEM;
    396	}
    397	netdev_sent_queue(dev, skb->len);
    398	spin_lock_irqsave(&priv->lock, flags);
    399
    400	dma_rmb();
    401	/* Start from the next BD that should be filled */
    402	bd = priv->curtx_bd;
    403	bd_status = be16_to_cpu(bd->status);
    404	/* Save the skb pointer so we can free it later */
    405	priv->tx_skbuff[priv->skb_curtx] = skb;
    406
    407	/* Update the current skb pointer (wrapping if this was the last) */
    408	priv->skb_curtx =
    409	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
    410
    411	/* copy skb data to tx buffer for sdma processing */
    412	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
    413	       skb->data, skb->len);
    414
    415	/* set bd status and length */
    416	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
    417
    418	bd->length = cpu_to_be16(skb->len);
    419	bd->status = cpu_to_be16(bd_status);
    420
    421	/* Move to next BD in the ring */
    422	if (!(bd_status & T_W_S))
    423		bd += 1;
    424	else
    425		bd = priv->tx_bd_base;
    426
    427	if (bd == priv->dirty_tx) {
    428		if (!netif_queue_stopped(dev))
    429			netif_stop_queue(dev);
    430	}
    431
    432	priv->curtx_bd = bd;
    433
    434	spin_unlock_irqrestore(&priv->lock, flags);
    435
    436	return NETDEV_TX_OK;
    437}
    438
    439static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
    440{
    441	u32 cecr_subblock;
    442
    443	cecr_subblock =
    444		ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);
    445
    446	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
    447		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
    448	return 0;
    449}
    450
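/* Reclaim Tx BDs the hardware has released (T_R cleared): account for
 * underrun/carrier errors, free the corresponding skbs, wake the queue,
 * and request a Tx restart if needed.
 */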
    451static int hdlc_tx_done(struct ucc_hdlc_private *priv)
    452{
    453	/* Start from the next BD that should be filled */
    454	struct net_device *dev = priv->ndev;
    455	unsigned int bytes_sent = 0;
    456	int howmany = 0;
    457	struct qe_bd *bd;		/* BD pointer */
    458	u16 bd_status;
    459	int tx_restart = 0;
    460
    461	dma_rmb();
    462	bd = priv->dirty_tx;
    463	bd_status = be16_to_cpu(bd->status);
    464
    465	/* Normal processing. */
    466	while ((bd_status & T_R_S) == 0) {
    467		struct sk_buff *skb;
    468
    469		if (bd_status & T_UN_S) { /* Underrun */
    470			dev->stats.tx_fifo_errors++;
    471			tx_restart = 1;
    472		}
    473		if (bd_status & T_CT_S) { /* Carrier lost */
    474			dev->stats.tx_carrier_errors++;
    475			tx_restart = 1;
    476		}
    477
    478		/* BD contains already transmitted buffer.   */
    479		/* Handle the transmitted buffer and release */
    480		/* the BD to be used with the current frame  */
    481
    482		skb = priv->tx_skbuff[priv->skb_dirtytx];
    483		if (!skb)
    484			break;
    485		howmany++;
    486		bytes_sent += skb->len;
    487		dev->stats.tx_packets++;
    488		memset(priv->tx_buffer +
    489		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
    490		       0, skb->len);
    491		dev_consume_skb_irq(skb);
    492
    493		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
    494		priv->skb_dirtytx =
    495		    (priv->skb_dirtytx +
    496		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
    497
    498		/* We freed a buffer, so now we can restart transmission */
    499		if (netif_queue_stopped(dev))
    500			netif_wake_queue(dev);
    501
    502		/* Advance the confirmation BD pointer */
    503		if (!(bd_status & T_W_S))
    504			bd += 1;
    505		else
    506			bd = priv->tx_bd_base;
    507		bd_status = be16_to_cpu(bd->status);
    508	}
    509	priv->dirty_tx = bd;
    510
    511	if (tx_restart)
    512		hdlc_tx_restart(priv);
    513
    514	netdev_completed_queue(dev, howmany, bytes_sent);
    515	return 0;
    516}
    517
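/* NAPI receive: walk the Rx BD ring until an empty BD is found or the
 * budget is exhausted, record error statistics, copy each frame into a
 * freshly allocated skb, pass it up the stack, and recycle the BD.
 */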
    518static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
    519{
    520	struct net_device *dev = priv->ndev;
    521	struct sk_buff *skb = NULL;
    522	hdlc_device *hdlc = dev_to_hdlc(dev);
    523	struct qe_bd *bd;
    524	u16 bd_status;
    525	u16 length, howmany = 0;
    526	u8 *bdbuffer;
    527
    528	dma_rmb();
    529	bd = priv->currx_bd;
    530	bd_status = be16_to_cpu(bd->status);
    531
    532	/* while there are received buffers and BD is full (~R_E) */
    533	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
    534		if (bd_status & (RX_BD_ERRORS)) {
    535			dev->stats.rx_errors++;
    536
    537			if (bd_status & R_CD_S)
    538				dev->stats.collisions++;
    539			if (bd_status & R_OV_S)
    540				dev->stats.rx_fifo_errors++;
    541			if (bd_status & R_CR_S)
    542				dev->stats.rx_crc_errors++;
    543			if (bd_status & R_AB_S)
    544				dev->stats.rx_over_errors++;
    545			if (bd_status & R_NO_S)
    546				dev->stats.rx_frame_errors++;
    547			if (bd_status & R_LG_S)
    548				dev->stats.rx_length_errors++;
    549
    550			goto recycle;
    551		}
    552		bdbuffer = priv->rx_buffer +
    553			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
    554		length = be16_to_cpu(bd->length);
    555
    556		switch (dev->type) {
    557		case ARPHRD_RAWHDLC:
    558			bdbuffer += HDLC_HEAD_LEN;
    559			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);
    560
    561			skb = dev_alloc_skb(length);
    562			if (!skb) {
    563				dev->stats.rx_dropped++;
    564				return -ENOMEM;
    565			}
    566
    567			skb_put(skb, length);
    568			skb->len = length;
    569			skb->dev = dev;
    570			memcpy(skb->data, bdbuffer, length);
    571			break;
    572
    573		case ARPHRD_PPP:
    574		case ARPHRD_ETHER:
    575			length -= HDLC_CRC_SIZE;
    576
    577			skb = dev_alloc_skb(length);
    578			if (!skb) {
    579				dev->stats.rx_dropped++;
    580				return -ENOMEM;
    581			}
    582
    583			skb_put(skb, length);
    584			skb->len = length;
    585			skb->dev = dev;
    586			memcpy(skb->data, bdbuffer, length);
    587			break;
    588		}
    589
    590		dev->stats.rx_packets++;
    591		dev->stats.rx_bytes += skb->len;
    592		howmany++;
    593		if (hdlc->proto)
    594			skb->protocol = hdlc_type_trans(skb, dev);
    595		netif_receive_skb(skb);
    596
    597recycle:
    598		bd->status = cpu_to_be16((bd_status & R_W_S) | R_E_S | R_I_S);
    599
    600		/* update to point at the next bd */
    601		if (bd_status & R_W_S) {
    602			priv->currx_bdnum = 0;
    603			bd = priv->rx_bd_base;
    604		} else {
    605			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
    606				priv->currx_bdnum += 1;
    607			else
    608				priv->currx_bdnum = RX_BD_RING_LEN - 1;
    609
    610			bd += 1;
    611		}
    612
    613		bd_status = be16_to_cpu(bd->status);
    614	}
    615	dma_rmb();
    616
    617	priv->currx_bd = bd;
    618	return howmany;
    619}
    620
    621static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
    622{
    623	struct ucc_hdlc_private *priv = container_of(napi,
    624						     struct ucc_hdlc_private,
    625						     napi);
    626	int howmany;
    627
    628	/* Tx event processing */
    629	spin_lock(&priv->lock);
    630	hdlc_tx_done(priv);
    631	spin_unlock(&priv->lock);
    632
    633	howmany = 0;
    634	howmany += hdlc_rx_done(priv, budget - howmany);
    635
    636	if (howmany < budget) {
    637		napi_complete_done(napi, howmany);
    638		qe_setbits_be32(priv->uccf->p_uccm,
    639				(UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
    640	}
    641
    642	return howmany;
    643}
    644
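/* Interrupt handler: acknowledge pending UCCE events, mask Rx/Tx events
 * and schedule NAPI for them, and count busy/Tx-error conditions.
 */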
    645static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
    646{
    647	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
    648	struct net_device *dev = priv->ndev;
    649	struct ucc_fast_private *uccf;
    650	u32 ucce;
    651	u32 uccm;
    652
    653	uccf = priv->uccf;
    654
    655	ucce = ioread32be(uccf->p_ucce);
    656	uccm = ioread32be(uccf->p_uccm);
    657	ucce &= uccm;
    658	iowrite32be(ucce, uccf->p_ucce);
    659	if (!ucce)
    660		return IRQ_NONE;
    661
    662	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
    663		if (napi_schedule_prep(&priv->napi)) {
    664			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
    665				  << 16);
    666			iowrite32be(uccm, uccf->p_uccm);
    667			__napi_schedule(&priv->napi);
    668		}
    669	}
    670
    671	/* Errors and other events */
    672	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
    673		dev->stats.rx_missed_errors++;
    674	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
    675		dev->stats.tx_errors++;
    676
    677	return IRQ_HANDLED;
    678}
    679
    680static int uhdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
    681{
    682	const size_t size = sizeof(te1_settings);
    683	te1_settings line;
    684	struct ucc_hdlc_private *priv = netdev_priv(dev);
    685
    686	switch (ifs->type) {
    687	case IF_GET_IFACE:
    688		ifs->type = IF_IFACE_E1;
    689		if (ifs->size < size) {
    690			ifs->size = size; /* data size wanted */
    691			return -ENOBUFS;
    692		}
    693		memset(&line, 0, sizeof(line));
    694		line.clock_type = priv->clocking;
    695
    696		if (copy_to_user(ifs->ifs_ifsu.sync, &line, size))
    697			return -EFAULT;
    698		return 0;
    699
    700	default:
    701		return hdlc_ioctl(dev, ifs);
    702	}
    703}
    704
    705static int uhdlc_open(struct net_device *dev)
    706{
    707	u32 cecr_subblock;
    708	hdlc_device *hdlc = dev_to_hdlc(dev);
    709	struct ucc_hdlc_private *priv = hdlc->priv;
    710	struct ucc_tdm *utdm = priv->utdm;
    711
    712	if (priv->hdlc_busy != 1) {
    713		if (request_irq(priv->ut_info->uf_info.irq,
    714				ucc_hdlc_irq_handler, 0, "hdlc", priv))
    715			return -ENODEV;
    716
    717		cecr_subblock = ucc_fast_get_qe_cr_subblock(
    718					priv->ut_info->uf_info.ucc_num);
    719
    720		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
    721			     QE_CR_PROTOCOL_UNSPECIFIED, 0);
    722
    723		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
    724
    725		/* Enable the TDM port */
    726		if (priv->tsa)
    727			qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
    728
    729		priv->hdlc_busy = 1;
    730		netif_device_attach(priv->ndev);
    731		napi_enable(&priv->napi);
    732		netdev_reset_queue(dev);
    733		netif_start_queue(dev);
    734		hdlc_open(dev);
    735	}
    736
    737	return 0;
    738}
    739
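/* Undo everything uhdlc_init() allocated: MURAM regions, BD rings, DMA
 * data buffers, skb arrays, and the UCC fast private structure.
 */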
    740static void uhdlc_memclean(struct ucc_hdlc_private *priv)
    741{
    742	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
    743	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
    744
    745	if (priv->rx_bd_base) {
    746		dma_free_coherent(priv->dev,
    747				  RX_BD_RING_LEN * sizeof(struct qe_bd),
    748				  priv->rx_bd_base, priv->dma_rx_bd);
    749
    750		priv->rx_bd_base = NULL;
    751		priv->dma_rx_bd = 0;
    752	}
    753
    754	if (priv->tx_bd_base) {
    755		dma_free_coherent(priv->dev,
    756				  TX_BD_RING_LEN * sizeof(struct qe_bd),
    757				  priv->tx_bd_base, priv->dma_tx_bd);
    758
    759		priv->tx_bd_base = NULL;
    760		priv->dma_tx_bd = 0;
    761	}
    762
    763	if (priv->ucc_pram) {
    764		qe_muram_free(priv->ucc_pram_offset);
    765		priv->ucc_pram = NULL;
    766		priv->ucc_pram_offset = 0;
     767	}
    768
    769	kfree(priv->rx_skbuff);
    770	priv->rx_skbuff = NULL;
    771
    772	kfree(priv->tx_skbuff);
    773	priv->tx_skbuff = NULL;
    774
    775	if (priv->uf_regs) {
    776		iounmap(priv->uf_regs);
    777		priv->uf_regs = NULL;
    778	}
    779
    780	if (priv->uccf) {
    781		ucc_fast_free(priv->uccf);
    782		priv->uccf = NULL;
    783	}
    784
    785	if (priv->rx_buffer) {
    786		dma_free_coherent(priv->dev,
    787				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
    788				  priv->rx_buffer, priv->dma_rx_addr);
    789		priv->rx_buffer = NULL;
    790		priv->dma_rx_addr = 0;
    791	}
    792
    793	if (priv->tx_buffer) {
    794		dma_free_coherent(priv->dev,
    795				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
    796				  priv->tx_buffer, priv->dma_tx_addr);
    797		priv->tx_buffer = NULL;
    798		priv->dma_tx_addr = 0;
    799	}
    800}
    801
    802static int uhdlc_close(struct net_device *dev)
    803{
    804	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
    805	struct ucc_tdm *utdm = priv->utdm;
    806	u32 cecr_subblock;
    807
    808	napi_disable(&priv->napi);
    809	cecr_subblock = ucc_fast_get_qe_cr_subblock(
    810				priv->ut_info->uf_info.ucc_num);
    811
    812	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
    813		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
    814	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
    815		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
    816
    817	if (priv->tsa)
    818		qe_clrbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
    819
    820	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
    821
    822	free_irq(priv->ut_info->uf_info.irq, priv);
    823	netif_stop_queue(dev);
    824	netdev_reset_queue(dev);
    825	priv->hdlc_busy = 0;
    826
    827	return 0;
    828}
    829
    830static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
    831			   unsigned short parity)
    832{
    833	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
    834
    835	if (encoding != ENCODING_NRZ &&
    836	    encoding != ENCODING_NRZI)
    837		return -EINVAL;
    838
    839	if (parity != PARITY_NONE &&
    840	    parity != PARITY_CRC32_PR1_CCITT &&
    841	    parity != PARITY_CRC16_PR0_CCITT &&
    842	    parity != PARITY_CRC16_PR1_CCITT)
    843		return -EINVAL;
    844
    845	priv->encoding = encoding;
    846	priv->parity = parity;
    847
    848	return 0;
    849}
    850
    851#ifdef CONFIG_PM
    852static void store_clk_config(struct ucc_hdlc_private *priv)
    853{
    854	struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx;
    855
    856	/* store si clk */
    857	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
    858	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);
    859
    860	/* store si sync */
    861	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);
    862
    863	/* store ucc clk */
    864	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
    865}
    866
    867static void resume_clk_config(struct ucc_hdlc_private *priv)
    868{
    869	struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx;
    870
    871	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));
    872
    873	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
    874	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);
    875
    876	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
    877}
    878
    879static int uhdlc_suspend(struct device *dev)
    880{
    881	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
    882	struct ucc_fast __iomem *uf_regs;
    883
    884	if (!priv)
    885		return -EINVAL;
    886
    887	if (!netif_running(priv->ndev))
    888		return 0;
    889
    890	netif_device_detach(priv->ndev);
    891	napi_disable(&priv->napi);
    892
    893	uf_regs = priv->uf_regs;
    894
     895	/* backup gumr and guemr */
    896	priv->gumr = ioread32be(&uf_regs->gumr);
    897	priv->guemr = ioread8(&uf_regs->guemr);
    898
    899	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
    900					GFP_KERNEL);
    901	if (!priv->ucc_pram_bak)
    902		return -ENOMEM;
    903
    904	/* backup HDLC parameter */
    905	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
    906		      sizeof(struct ucc_hdlc_param));
    907
    908	/* store the clk configuration */
    909	store_clk_config(priv);
    910
    911	/* save power */
    912	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
    913
    914	return 0;
    915}
    916
    917static int uhdlc_resume(struct device *dev)
    918{
    919	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
    920	struct ucc_tdm *utdm;
    921	struct ucc_tdm_info *ut_info;
    922	struct ucc_fast __iomem *uf_regs;
    923	struct ucc_fast_private *uccf;
    924	struct ucc_fast_info *uf_info;
    925	int i;
    926	u32 cecr_subblock;
    927	u16 bd_status;
    928
    929	if (!priv)
    930		return -EINVAL;
    931
    932	if (!netif_running(priv->ndev))
    933		return 0;
    934
    935	utdm = priv->utdm;
    936	ut_info = priv->ut_info;
    937	uf_info = &ut_info->uf_info;
    938	uf_regs = priv->uf_regs;
    939	uccf = priv->uccf;
    940
    941	/* restore gumr guemr */
    942	iowrite8(priv->guemr, &uf_regs->guemr);
    943	iowrite32be(priv->gumr, &uf_regs->gumr);
    944
    945	/* Set Virtual Fifo registers */
    946	iowrite16be(uf_info->urfs, &uf_regs->urfs);
    947	iowrite16be(uf_info->urfet, &uf_regs->urfet);
    948	iowrite16be(uf_info->urfset, &uf_regs->urfset);
    949	iowrite16be(uf_info->utfs, &uf_regs->utfs);
    950	iowrite16be(uf_info->utfet, &uf_regs->utfet);
    951	iowrite16be(uf_info->utftt, &uf_regs->utftt);
    952	/* utfb, urfb are offsets from MURAM base */
    953	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
    954	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
    955
    956	/* Rx Tx and sync clock routing */
    957	resume_clk_config(priv);
    958
    959	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
    960	iowrite32be(0xffffffff, &uf_regs->ucce);
    961
    962	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
    963
    964	/* rebuild SIRAM */
    965	if (priv->tsa)
    966		ucc_tdm_init(priv->utdm, priv->ut_info);
    967
    968	/* Write to QE CECR, UCCx channel to Stop Transmission */
    969	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
    970	qe_issue_cmd(QE_STOP_TX, cecr_subblock,
    971		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
    972
    973	/* Set UPSMR normal mode */
    974	iowrite32be(0, &uf_regs->upsmr);
    975
    976	/* init parameter base */
    977	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
    978	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
    979		     QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
    980
    981	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
    982				qe_muram_addr(priv->ucc_pram_offset);
    983
    984	/* restore ucc parameter */
    985	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
    986		    sizeof(struct ucc_hdlc_param));
    987	kfree(priv->ucc_pram_bak);
    988
    989	/* rebuild BD entry */
    990	for (i = 0; i < RX_BD_RING_LEN; i++) {
    991		if (i < (RX_BD_RING_LEN - 1))
    992			bd_status = R_E_S | R_I_S;
    993		else
    994			bd_status = R_E_S | R_I_S | R_W_S;
    995
    996		priv->rx_bd_base[i].status = cpu_to_be16(bd_status);
    997		priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH);
    998	}
    999
   1000	for (i = 0; i < TX_BD_RING_LEN; i++) {
   1001		if (i < (TX_BD_RING_LEN - 1))
   1002			bd_status =  T_I_S | T_TC_S;
   1003		else
   1004			bd_status =  T_I_S | T_TC_S | T_W_S;
   1005
   1006		priv->tx_bd_base[i].status = cpu_to_be16(bd_status);
   1007		priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH);
   1008	}
   1009	dma_wmb();
   1010
   1011	/* if hdlc is busy enable TX and RX */
   1012	if (priv->hdlc_busy == 1) {
   1013		cecr_subblock = ucc_fast_get_qe_cr_subblock(
   1014					priv->ut_info->uf_info.ucc_num);
   1015
   1016		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
   1017			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
   1018
   1019		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
   1020
   1021		/* Enable the TDM port */
   1022		if (priv->tsa)
   1023			qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
   1024	}
   1025
   1026	napi_enable(&priv->napi);
   1027	netif_device_attach(priv->ndev);
   1028
   1029	return 0;
   1030}
   1031
   1032static const struct dev_pm_ops uhdlc_pm_ops = {
   1033	.suspend = uhdlc_suspend,
   1034	.resume = uhdlc_resume,
   1035	.freeze = uhdlc_suspend,
   1036	.thaw = uhdlc_resume,
   1037};
   1038
   1039#define HDLC_PM_OPS (&uhdlc_pm_ops)
   1040
   1041#else
   1042
   1043#define HDLC_PM_OPS NULL
   1044
   1045#endif
   1046static void uhdlc_tx_timeout(struct net_device *ndev, unsigned int txqueue)
   1047{
   1048	netdev_err(ndev, "%s\n", __func__);
   1049}
   1050
   1051static const struct net_device_ops uhdlc_ops = {
   1052	.ndo_open       = uhdlc_open,
   1053	.ndo_stop       = uhdlc_close,
   1054	.ndo_start_xmit = hdlc_start_xmit,
   1055	.ndo_siocwandev = uhdlc_ioctl,
   1056	.ndo_tx_timeout	= uhdlc_tx_timeout,
   1057};
   1058
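/* Look up a platform device by compatible string and ioremap its first
 * MEM resource; the SI RAM region is zeroed once when init_flag is set.
 */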
   1059static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
   1060{
   1061	struct device_node *np;
   1062	struct platform_device *pdev;
   1063	struct resource *res;
   1064	static int siram_init_flag;
   1065	int ret = 0;
   1066
   1067	np = of_find_compatible_node(NULL, NULL, name);
   1068	if (!np)
   1069		return -EINVAL;
   1070
   1071	pdev = of_find_device_by_node(np);
   1072	if (!pdev) {
   1073		pr_err("%pOFn: failed to lookup pdev\n", np);
   1074		of_node_put(np);
   1075		return -EINVAL;
   1076	}
   1077
   1078	of_node_put(np);
   1079	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   1080	if (!res) {
   1081		ret = -EINVAL;
   1082		goto error_put_device;
   1083	}
   1084	*ptr = ioremap(res->start, resource_size(res));
   1085	if (!*ptr) {
   1086		ret = -ENOMEM;
   1087		goto error_put_device;
   1088	}
   1089
   1090	/* We've remapped the addresses, and we don't need the device any
   1091	 * more, so we should release it.
   1092	 */
   1093	put_device(&pdev->dev);
   1094
   1095	if (init_flag && siram_init_flag == 0) {
   1096		memset_io(*ptr, 0, resource_size(res));
   1097		siram_init_flag = 1;
   1098	}
   1099	return  0;
   1100
   1101error_put_device:
   1102	put_device(&pdev->dev);
   1103
   1104	return ret;
   1105}
   1106
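/* Probe: parse the device-tree node (cell-index, clock names, TDM and
 * loopback options), map the SI/SIRAM regions when a TDM interface is
 * used, initialize the controller, and register the HDLC net device.
 */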
   1107static int ucc_hdlc_probe(struct platform_device *pdev)
   1108{
   1109	struct device_node *np = pdev->dev.of_node;
   1110	struct ucc_hdlc_private *uhdlc_priv = NULL;
   1111	struct ucc_tdm_info *ut_info;
   1112	struct ucc_tdm *utdm = NULL;
   1113	struct resource res;
   1114	struct net_device *dev;
   1115	hdlc_device *hdlc;
   1116	int ucc_num;
   1117	const char *sprop;
   1118	int ret;
   1119	u32 val;
   1120
   1121	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
   1122	if (ret) {
   1123		dev_err(&pdev->dev, "Invalid ucc property\n");
   1124		return -ENODEV;
   1125	}
   1126
   1127	ucc_num = val - 1;
   1128	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
   1129		dev_err(&pdev->dev, ": Invalid UCC num\n");
   1130		return -EINVAL;
   1131	}
   1132
   1133	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
   1134	       sizeof(utdm_primary_info));
   1135
   1136	ut_info = &utdm_info[ucc_num];
   1137	ut_info->uf_info.ucc_num = ucc_num;
   1138
   1139	sprop = of_get_property(np, "rx-clock-name", NULL);
   1140	if (sprop) {
   1141		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
   1142		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
   1143		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
   1144			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
   1145			return -EINVAL;
   1146		}
   1147	} else {
   1148		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
   1149		return -EINVAL;
   1150	}
   1151
   1152	sprop = of_get_property(np, "tx-clock-name", NULL);
   1153	if (sprop) {
   1154		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
   1155		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
   1156		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
   1157			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
   1158			return -EINVAL;
   1159		}
   1160	} else {
   1161		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
   1162		return -EINVAL;
   1163	}
   1164
   1165	ret = of_address_to_resource(np, 0, &res);
   1166	if (ret)
   1167		return -EINVAL;
   1168
   1169	ut_info->uf_info.regs = res.start;
   1170	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);
   1171
   1172	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
   1173	if (!uhdlc_priv)
   1174		return -ENOMEM;
   1175
   1176	dev_set_drvdata(&pdev->dev, uhdlc_priv);
   1177	uhdlc_priv->dev = &pdev->dev;
   1178	uhdlc_priv->ut_info = ut_info;
   1179
   1180	if (of_get_property(np, "fsl,tdm-interface", NULL))
   1181		uhdlc_priv->tsa = 1;
   1182
   1183	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
   1184		uhdlc_priv->loopback = 1;
   1185
   1186	if (of_get_property(np, "fsl,hdlc-bus", NULL))
   1187		uhdlc_priv->hdlc_bus = 1;
   1188
   1189	if (uhdlc_priv->tsa == 1) {
   1190		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
   1191		if (!utdm) {
   1192			ret = -ENOMEM;
   1193			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
   1194			goto free_uhdlc_priv;
   1195		}
   1196		uhdlc_priv->utdm = utdm;
   1197		ret = ucc_of_parse_tdm(np, utdm, ut_info);
   1198		if (ret)
   1199			goto free_utdm;
   1200
   1201		ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
   1202				     (void __iomem **)&utdm->si_regs);
   1203		if (ret)
   1204			goto free_utdm;
   1205		ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
   1206				     (void __iomem **)&utdm->siram);
   1207		if (ret)
   1208			goto unmap_si_regs;
   1209	}
   1210
   1211	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
   1212		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;
   1213
   1214	ret = uhdlc_init(uhdlc_priv);
   1215	if (ret) {
   1216		dev_err(&pdev->dev, "Failed to init uhdlc\n");
   1217		goto undo_uhdlc_init;
   1218	}
   1219
   1220	dev = alloc_hdlcdev(uhdlc_priv);
   1221	if (!dev) {
   1222		ret = -ENOMEM;
   1223		pr_err("ucc_hdlc: unable to allocate memory\n");
   1224		goto undo_uhdlc_init;
   1225	}
   1226
   1227	uhdlc_priv->ndev = dev;
   1228	hdlc = dev_to_hdlc(dev);
   1229	dev->tx_queue_len = 16;
   1230	dev->netdev_ops = &uhdlc_ops;
   1231	dev->watchdog_timeo = 2 * HZ;
   1232	hdlc->attach = ucc_hdlc_attach;
   1233	hdlc->xmit = ucc_hdlc_tx;
   1234	netif_napi_add_weight(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
   1235	if (register_hdlc_device(dev)) {
   1236		ret = -ENOBUFS;
   1237		pr_err("ucc_hdlc: unable to register hdlc device\n");
   1238		goto free_dev;
   1239	}
   1240
   1241	return 0;
   1242
   1243free_dev:
   1244	free_netdev(dev);
   1245undo_uhdlc_init:
   1246	iounmap(utdm->siram);
   1247unmap_si_regs:
   1248	iounmap(utdm->si_regs);
   1249free_utdm:
   1250	if (uhdlc_priv->tsa)
   1251		kfree(utdm);
   1252free_uhdlc_priv:
   1253	kfree(uhdlc_priv);
   1254	return ret;
   1255}
   1256
   1257static int ucc_hdlc_remove(struct platform_device *pdev)
   1258{
   1259	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
   1260
   1261	uhdlc_memclean(priv);
   1262
   1263	if (priv->utdm->si_regs) {
   1264		iounmap(priv->utdm->si_regs);
   1265		priv->utdm->si_regs = NULL;
   1266	}
   1267
   1268	if (priv->utdm->siram) {
   1269		iounmap(priv->utdm->siram);
   1270		priv->utdm->siram = NULL;
   1271	}
   1272	kfree(priv);
   1273
   1274	dev_info(&pdev->dev, "UCC based hdlc module removed\n");
   1275
   1276	return 0;
   1277}
   1278
   1279static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
   1280	{
   1281	.compatible = "fsl,ucc-hdlc",
   1282	},
   1283	{},
   1284};
   1285
   1286MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
   1287
   1288static struct platform_driver ucc_hdlc_driver = {
   1289	.probe	= ucc_hdlc_probe,
   1290	.remove	= ucc_hdlc_remove,
   1291	.driver	= {
   1292		.name		= DRV_NAME,
   1293		.pm		= HDLC_PM_OPS,
   1294		.of_match_table	= fsl_ucc_hdlc_of_match,
   1295	},
   1296};
   1297
   1298module_platform_driver(ucc_hdlc_driver);
   1299MODULE_LICENSE("GPL");
   1300MODULE_DESCRIPTION(DRV_DESC);