ucc_fast.c (11949B)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Authors: 	Shlomi Gridish <gridish@freescale.com>
 * 		Li Yang <leoli@freescale.com>
 *
 * Description:
 * QE UCC Fast API Set - UCC Fast specific routines implementations.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/export.h>

#include <asm/io.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>

#include <soc/fsl/qe/ucc.h>
#include <soc/fsl/qe/ucc_fast.h>

void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
{
	printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
	printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);

	printk(KERN_INFO "gumr  : addr=0x%p, val=0x%08x\n",
	       &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr));
	printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
	       &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr));
	printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr));
	printk(KERN_INFO "udsr  : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr));
	printk(KERN_INFO "ucce  : addr=0x%p, val=0x%08x\n",
	       &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce));
	printk(KERN_INFO "uccm  : addr=0x%p, val=0x%08x\n",
	       &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm));
	printk(KERN_INFO "uccs  : addr=0x%p, val=0x%02x\n",
	       &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs));
	printk(KERN_INFO "urfb  : addr=0x%p, val=0x%08x\n",
	       &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb));
	printk(KERN_INFO "urfs  : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs));
	printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet));
	printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->urfset,
	       ioread16be(&uccf->uf_regs->urfset));
	printk(KERN_INFO "utfb  : addr=0x%p, val=0x%08x\n",
	       &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb));
	printk(KERN_INFO "utfs  : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs));
	printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet));
	printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt));
	printk(KERN_INFO "utpt  : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->utpt, ioread16be(&uccf->uf_regs->utpt));
	printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
	       &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry));
	printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
	       &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr));
}
EXPORT_SYMBOL(ucc_fast_dump_regs);

u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
{
	switch (uccf_num) {
	case 0: return QE_CR_SUBBLOCK_UCCFAST1;
	case 1: return QE_CR_SUBBLOCK_UCCFAST2;
	case 2: return QE_CR_SUBBLOCK_UCCFAST3;
	case 3: return QE_CR_SUBBLOCK_UCCFAST4;
	case 4: return QE_CR_SUBBLOCK_UCCFAST5;
	case 5: return QE_CR_SUBBLOCK_UCCFAST6;
	case 6: return QE_CR_SUBBLOCK_UCCFAST7;
	case 7: return QE_CR_SUBBLOCK_UCCFAST8;
	default: return QE_CR_SUBBLOCK_INVALID;
	}
}
EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);

void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
{
	iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
}
EXPORT_SYMBOL(ucc_fast_transmit_on_demand);

void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
{
	struct ucc_fast __iomem *uf_regs;
	u32 gumr;

	uf_regs = uccf->uf_regs;

	/* Enable reception and/or transmission on this UCC. */
	gumr = ioread32be(&uf_regs->gumr);
	if (mode & COMM_DIR_TX) {
		gumr |= UCC_FAST_GUMR_ENT;
		uccf->enabled_tx = 1;
	}
	if (mode & COMM_DIR_RX) {
		gumr |= UCC_FAST_GUMR_ENR;
		uccf->enabled_rx = 1;
	}
	iowrite32be(gumr, &uf_regs->gumr);
}
EXPORT_SYMBOL(ucc_fast_enable);

void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
{
	struct ucc_fast __iomem *uf_regs;
	u32 gumr;

	uf_regs = uccf->uf_regs;

	/* Disable reception and/or transmission on this UCC. */
	gumr = ioread32be(&uf_regs->gumr);
	if (mode & COMM_DIR_TX) {
		gumr &= ~UCC_FAST_GUMR_ENT;
		uccf->enabled_tx = 0;
	}
	if (mode & COMM_DIR_RX) {
		gumr &= ~UCC_FAST_GUMR_ENR;
		uccf->enabled_rx = 0;
	}
	iowrite32be(gumr, &uf_regs->gumr);
}
EXPORT_SYMBOL(ucc_fast_disable);

int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret)
{
	struct ucc_fast_private *uccf;
	struct ucc_fast __iomem *uf_regs;
	u32 gumr;
	int ret;

	if (!uf_info)
		return -EINVAL;

	/* check if the UCC port number is in range. */
	if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
		printk(KERN_ERR "%s: illegal UCC number\n", __func__);
		return -EINVAL;
	}

	/* Check that 'max_rx_buf_length' is properly aligned (4). */
	if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
		printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
		       __func__);
		return -EINVAL;
	}

	/* Validate Virtual Fifo register values */
	if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
		printk(KERN_ERR "%s: urfs is too small\n", __func__);
		return -EINVAL;
	}

	if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
		printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
		return -EINVAL;
	}

	if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
		printk(KERN_ERR "%s: urfet is not aligned.\n", __func__);
		return -EINVAL;
	}

	if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
		printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
		return -EINVAL;
	}

	if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
		printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
		return -EINVAL;
	}

	if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
		printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
		return -EINVAL;
	}

	if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
		printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
		return -EINVAL;
	}

	uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
	if (!uccf) {
		printk(KERN_ERR "%s: Cannot allocate private data\n",
		       __func__);
		return -ENOMEM;
	}
	uccf->ucc_fast_tx_virtual_fifo_base_offset = -1;
	uccf->ucc_fast_rx_virtual_fifo_base_offset = -1;

	/* Fill fast UCC structure */
	uccf->uf_info = uf_info;
	/* Set the PHY base address */
	uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
	if (uccf->uf_regs == NULL) {
		printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
		kfree(uccf);
		return -ENOMEM;
	}

	uccf->enabled_tx = 0;
	uccf->enabled_rx = 0;
	uccf->stopped_tx = 0;
	uccf->stopped_rx = 0;
	uf_regs = uccf->uf_regs;
	uccf->p_ucce = &uf_regs->ucce;
	uccf->p_uccm = &uf_regs->uccm;
#ifdef CONFIG_UGETH_TX_ON_DEMAND
	uccf->p_utodr = &uf_regs->utodr;
#endif
#ifdef STATISTICS
	uccf->tx_frames = 0;
	uccf->rx_frames = 0;
	uccf->rx_discarded = 0;
#endif				/* STATISTICS */

	/* Set UCC to fast type */
	ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
	if (ret) {
		printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
		ucc_fast_free(uccf);
		return ret;
	}

	uccf->mrblr = uf_info->max_rx_buf_length;

	/* Set GUMR */
	/* For more details see the hardware spec. */
	gumr = uf_info->ttx_trx;
	if (uf_info->tci)
		gumr |= UCC_FAST_GUMR_TCI;
	if (uf_info->cdp)
		gumr |= UCC_FAST_GUMR_CDP;
	if (uf_info->ctsp)
		gumr |= UCC_FAST_GUMR_CTSP;
	if (uf_info->cds)
		gumr |= UCC_FAST_GUMR_CDS;
	if (uf_info->ctss)
		gumr |= UCC_FAST_GUMR_CTSS;
	if (uf_info->txsy)
		gumr |= UCC_FAST_GUMR_TXSY;
	if (uf_info->rsyn)
		gumr |= UCC_FAST_GUMR_RSYN;
	gumr |= uf_info->synl;
	if (uf_info->rtsm)
		gumr |= UCC_FAST_GUMR_RTSM;
	gumr |= uf_info->renc;
	if (uf_info->revd)
		gumr |= UCC_FAST_GUMR_REVD;
	gumr |= uf_info->tenc;
	gumr |= uf_info->tcrc;
	gumr |= uf_info->mode;
	iowrite32be(gumr, &uf_regs->gumr);

	/* Allocate memory for Tx Virtual Fifo */
	uccf->ucc_fast_tx_virtual_fifo_base_offset =
		qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
	if (uccf->ucc_fast_tx_virtual_fifo_base_offset < 0) {
		printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
		       __func__);
		ucc_fast_free(uccf);
		return -ENOMEM;
	}

	/* Allocate memory for Rx Virtual Fifo */
	uccf->ucc_fast_rx_virtual_fifo_base_offset =
		qe_muram_alloc(uf_info->urfs +
			       UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
			       UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
	if (uccf->ucc_fast_rx_virtual_fifo_base_offset < 0) {
		printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
		       __func__);
		ucc_fast_free(uccf);
		return -ENOMEM;
	}

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
		    &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
		    &uf_regs->urfb);

	/* Mux clocking */
	/* Grant Support */
	ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
	/* Breakpoint Support */
	ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
	/* Set Tsa or NMSI mode. */
	ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
	/* If NMSI (not Tsa), set Tx and Rx clock. */
	if (!uf_info->tsa) {
		/* Rx clock routing */
		if ((uf_info->rx_clock != QE_CLK_NONE) &&
		    ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
					COMM_DIR_RX)) {
			printk(KERN_ERR "%s: illegal value for RX clock\n",
			       __func__);
			ucc_fast_free(uccf);
			return -EINVAL;
		}
		/* Tx clock routing */
		if ((uf_info->tx_clock != QE_CLK_NONE) &&
		    ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
					COMM_DIR_TX)) {
			printk(KERN_ERR "%s: illegal value for TX clock\n",
			       __func__);
			ucc_fast_free(uccf);
			return -EINVAL;
		}
	} else {
		/* tdm Rx clock routing */
		if ((uf_info->rx_clock != QE_CLK_NONE) &&
		    ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->rx_clock,
					 COMM_DIR_RX)) {
			pr_err("%s: illegal value for RX clock", __func__);
			ucc_fast_free(uccf);
			return -EINVAL;
		}

		/* tdm Tx clock routing */
		if ((uf_info->tx_clock != QE_CLK_NONE) &&
		    ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->tx_clock,
					 COMM_DIR_TX)) {
			pr_err("%s: illegal value for TX clock", __func__);
			ucc_fast_free(uccf);
			return -EINVAL;
		}

		/* tdm Rx sync clock routing */
		if ((uf_info->rx_sync != QE_CLK_NONE) &&
		    ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->rx_sync,
					  COMM_DIR_RX)) {
			pr_err("%s: illegal value for RX clock", __func__);
			ucc_fast_free(uccf);
			return -EINVAL;
		}

		/* tdm Tx sync clock routing */
		if ((uf_info->tx_sync != QE_CLK_NONE) &&
		    ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->tx_sync,
					  COMM_DIR_TX)) {
			pr_err("%s: illegal value for TX clock", __func__);
			ucc_fast_free(uccf);
			return -EINVAL;
		}
	}

	/* Set interrupt mask register at UCC level. */
	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);

	/* First, clear anything pending at UCC level,
	 * otherwise, old garbage may come through
	 * as soon as the dam is opened. */

	/* Writing '1' clears */
	iowrite32be(0xffffffff, &uf_regs->ucce);

	*uccf_ret = uccf;
	return 0;
}
EXPORT_SYMBOL(ucc_fast_init);

void ucc_fast_free(struct ucc_fast_private * uccf)
{
	if (!uccf)
		return;

	qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
	qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);

	if (uccf->uf_regs)
		iounmap(uccf->uf_regs);

	kfree(uccf);
}
EXPORT_SYMBOL(ucc_fast_free);
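Below is a hypothetical usage sketch, not part of ucc_fast.c, showing how a QE driver would typically combine the exported routines above: fill in a struct ucc_fast_info, call ucc_fast_init(), enable the directions it needs, and release everything with ucc_fast_free(). The example_* names are invented for illustration; the sketch assumes only the declarations from <soc/fsl/qe/ucc_fast.h>, and a real driver (ucc_geth, the QE HDLC driver, etc.) fills struct ucc_fast_info with protocol-specific values from its platform/device-tree data.

/*
 * Hypothetical usage sketch only; not part of ucc_fast.c.
 * Assumes declarations from <soc/fsl/qe/ucc_fast.h>.
 */
#include <soc/fsl/qe/ucc_fast.h>

static int example_ucc_bring_up(struct ucc_fast_info *uf_info,
				struct ucc_fast_private **uccf_out)
{
	struct ucc_fast_private *uccf;
	int ret;

	/*
	 * ucc_fast_init() validates uf_info, maps the UCC registers,
	 * programs GUMR, allocates the Tx/Rx virtual FIFOs in MURAM
	 * and routes the clocks.
	 */
	ret = ucc_fast_init(uf_info, &uccf);
	if (ret)
		return ret;

	/* Protocol-specific setup (parameter RAM, BD rings, ...) goes here. */

	/* Start the UCC last: this only sets GUMR[ENT]/GUMR[ENR]. */
	ucc_fast_enable(uccf, COMM_DIR_RX | COMM_DIR_TX);

	*uccf_out = uccf;
	return 0;
}

static void example_ucc_tear_down(struct ucc_fast_private *uccf)
{
	/* Stop both directions, then free MURAM and unmap the registers. */
	ucc_fast_disable(uccf, COMM_DIR_RX | COMM_DIR_TX);
	ucc_fast_free(uccf);
}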