atmel_serial.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Atmel AT91 Serial ports
 * Copyright (C) 2003 Rick Bronson
 *
 * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * DMA support added by Chip Coldwell.
 */
#include <linux/circ_buf.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/atmel_pdc.h>
#include <linux/uaccess.h>
#include <linux/platform_data/atmel.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/suspend.h>
#include <linux/mm.h>
#include <linux/io.h>

#include <asm/div64.h>
#include <asm/ioctls.h>

#define PDC_BUFFER_SIZE		512
/* Revisit: We should calculate this based on the actual port settings */
#define PDC_RX_TIMEOUT		(3 * 10)	/* 3 bytes */

/* The minimum number of data the FIFOs should be able to contain */
#define ATMEL_MIN_FIFO_SIZE	8
/*
 * These two offsets are subtracted from the RX FIFO size to define the RTS
 * high and low thresholds
 */
#define ATMEL_RTS_HIGH_OFFSET	16
#define ATMEL_RTS_LOW_OFFSET	20

#include <linux/serial_core.h>

#include "serial_mctrl_gpio.h"
#include "atmel_serial.h"

static void atmel_start_rx(struct uart_port *port);
static void atmel_stop_rx(struct uart_port *port);

#ifdef CONFIG_SERIAL_ATMEL_TTYAT

/* Use device name ttyAT, major 204 and minor 154-169.  This is necessary if we
 * should coexist with the 8250 driver, such as if we have an external 16C550
 * UART. */
#define SERIAL_ATMEL_MAJOR	204
#define MINOR_START		154
#define ATMEL_DEVICENAME	"ttyAT"

#else

/* Use device name ttyS, major 4, minor 64-68.  This is the usual serial port
 * name, but it is legally reserved for the 8250 driver. */
#define SERIAL_ATMEL_MAJOR	TTY_MAJOR
#define MINOR_START		64
#define ATMEL_DEVICENAME	"ttyS"

#endif

#define ATMEL_ISR_PASS_LIMIT	256

struct atmel_dma_buffer {
	unsigned char	*buf;
	dma_addr_t	dma_addr;
	unsigned int	dma_size;
	unsigned int	ofs;
};

struct atmel_uart_char {
	u16	status;
	u16	ch;
};

/*
 * Be careful, the real size of the ring buffer is
 * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE.  It means that ring buffer
 * can contain up to 1024 characters in PIO mode and up to 4096 characters in
 * DMA mode.
 */
#define ATMEL_SERIAL_RINGSIZE 1024

/*
 * at91: 6 USARTs and one DBGU port (SAM9260)
 * samx7: 3 USARTs and 5 UARTs
 */
#define ATMEL_MAX_UART		8

/*
 * We wrap our port structure around the generic uart_port.
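 * Besides the uart_port itself it carries the state for the possible RX/TX
 * backends (PIO ring buffer, PDC, dmaengine) and the prepare/schedule/release
 * hooks that select between them.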
 */
struct atmel_uart_port {
	struct uart_port	uart;		/* uart */
	struct clk		*clk;		/* uart clock */
	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
	u32			backup_imr;	/* IMR saved during suspend */
	int			break_active;	/* break being received */

	bool			use_dma_rx;	/* enable DMA receiver */
	bool			use_pdc_rx;	/* enable PDC receiver */
	short			pdc_rx_idx;	/* current PDC RX buffer */
	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */

	bool			use_dma_tx;	/* enable DMA transmitter */
	bool			use_pdc_tx;	/* enable PDC transmitter */
	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */

	spinlock_t		lock_tx;	/* port lock */
	spinlock_t		lock_rx;	/* port lock */
	struct dma_chan		*chan_tx;
	struct dma_chan		*chan_rx;
	struct dma_async_tx_descriptor	*desc_tx;
	struct dma_async_tx_descriptor	*desc_rx;
	dma_cookie_t		cookie_tx;
	dma_cookie_t		cookie_rx;
	struct scatterlist	sg_tx;
	struct scatterlist	sg_rx;
	struct tasklet_struct	tasklet_rx;
	struct tasklet_struct	tasklet_tx;
	atomic_t		tasklet_shutdown;
	unsigned int		irq_status_prev;
	unsigned int		tx_len;

	struct circ_buf		rx_ring;

	struct mctrl_gpios	*gpios;
	u32			backup_mode;	/* MR saved during iso7816 operations */
	u32			backup_brgr;	/* BRGR saved during iso7816 operations */
	unsigned int		tx_done_mask;
	u32			fifo_size;
	u32			rts_high;
	u32			rts_low;
	bool			ms_irq_enabled;
	u32			rtor;	/* address of receiver timeout register if it exists */
	bool			has_frac_baudrate;
	bool			has_hw_timer;
	struct timer_list	uart_timer;

	bool			tx_stopped;
	bool			suspended;
	unsigned int		pending;
	unsigned int		pending_status;
	spinlock_t		lock_suspended;

	bool			hd_start_rx;	/* can start RX during half-duplex operation */

	/* ISO7816 */
	unsigned int		fidi_min;
	unsigned int		fidi_max;

#ifdef CONFIG_PM
	struct {
		u32		cr;
		u32		mr;
		u32		imr;
		u32		brgr;
		u32		rtor;
		u32		ttgr;
		u32		fmr;
		u32		fimr;
	} cache;
#endif

	int (*prepare_rx)(struct uart_port *port);
	int (*prepare_tx)(struct uart_port *port);
	void (*schedule_rx)(struct uart_port *port);
	void (*schedule_tx)(struct uart_port *port);
	void (*release_rx)(struct uart_port *port);
	void (*release_tx)(struct uart_port *port);
};

static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);

#if defined(CONFIG_OF)
static const struct of_device_id atmel_serial_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-usart-serial" },
	{ /* sentinel */ }
};
#endif

static inline struct atmel_uart_port *
to_atmel_uart_port(struct uart_port *uart)
{
	return container_of(uart, struct atmel_uart_port, uart);
}

static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
{
	return __raw_readl(port->membase + reg);
}

static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
{
	__raw_writel(value, port->membase + reg);
}

static inline u8 atmel_uart_read_char(struct uart_port *port)
{
	return __raw_readb(port->membase + ATMEL_US_RHR);
}

static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
{
	__raw_writeb(value, port->membase + ATMEL_US_THR);
}

static inline int atmel_uart_is_half_duplex(struct uart_port *port)
{
	return ((port->rs485.flags &
SER_RS485_ENABLED) && 229 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || 230 (port->iso7816.flags & SER_ISO7816_ENABLED); 231} 232 233#ifdef CONFIG_SERIAL_ATMEL_PDC 234static bool atmel_use_pdc_rx(struct uart_port *port) 235{ 236 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 237 238 return atmel_port->use_pdc_rx; 239} 240 241static bool atmel_use_pdc_tx(struct uart_port *port) 242{ 243 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 244 245 return atmel_port->use_pdc_tx; 246} 247#else 248static bool atmel_use_pdc_rx(struct uart_port *port) 249{ 250 return false; 251} 252 253static bool atmel_use_pdc_tx(struct uart_port *port) 254{ 255 return false; 256} 257#endif 258 259static bool atmel_use_dma_tx(struct uart_port *port) 260{ 261 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 262 263 return atmel_port->use_dma_tx; 264} 265 266static bool atmel_use_dma_rx(struct uart_port *port) 267{ 268 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 269 270 return atmel_port->use_dma_rx; 271} 272 273static bool atmel_use_fifo(struct uart_port *port) 274{ 275 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 276 277 return atmel_port->fifo_size; 278} 279 280static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port, 281 struct tasklet_struct *t) 282{ 283 if (!atomic_read(&atmel_port->tasklet_shutdown)) 284 tasklet_schedule(t); 285} 286 287/* Enable or disable the rs485 support */ 288static int atmel_config_rs485(struct uart_port *port, 289 struct serial_rs485 *rs485conf) 290{ 291 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 292 unsigned int mode; 293 294 /* Disable interrupts */ 295 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 296 297 mode = atmel_uart_readl(port, ATMEL_US_MR); 298 299 /* Resetting serial mode to RS232 (0x0) */ 300 mode &= ~ATMEL_US_USMODE; 301 302 if (rs485conf->flags & SER_RS485_ENABLED) { 303 dev_dbg(port->dev, "Setting UART to RS485\n"); 304 if (rs485conf->flags & SER_RS485_RX_DURING_TX) 305 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 306 else 307 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; 308 309 atmel_uart_writel(port, ATMEL_US_TTGR, 310 rs485conf->delay_rts_after_send); 311 mode |= ATMEL_US_USMODE_RS485; 312 } else { 313 dev_dbg(port->dev, "Setting UART to RS232\n"); 314 if (atmel_use_pdc_tx(port)) 315 atmel_port->tx_done_mask = ATMEL_US_ENDTX | 316 ATMEL_US_TXBUFE; 317 else 318 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 319 } 320 atmel_uart_writel(port, ATMEL_US_MR, mode); 321 322 /* Enable interrupts */ 323 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 324 325 return 0; 326} 327 328static unsigned int atmel_calc_cd(struct uart_port *port, 329 struct serial_iso7816 *iso7816conf) 330{ 331 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 332 unsigned int cd; 333 u64 mck_rate; 334 335 mck_rate = (u64)clk_get_rate(atmel_port->clk); 336 do_div(mck_rate, iso7816conf->clk); 337 cd = mck_rate; 338 return cd; 339} 340 341static unsigned int atmel_calc_fidi(struct uart_port *port, 342 struct serial_iso7816 *iso7816conf) 343{ 344 u64 fidi = 0; 345 346 if (iso7816conf->sc_fi && iso7816conf->sc_di) { 347 fidi = (u64)iso7816conf->sc_fi; 348 do_div(fidi, iso7816conf->sc_di); 349 } 350 return (u32)fidi; 351} 352 353/* Enable or disable the iso7816 support */ 354/* Called with interrupts disabled */ 355static int atmel_config_iso7816(struct uart_port *port, 356 struct serial_iso7816 *iso7816conf) 357{ 358 struct atmel_uart_port 
*atmel_port = to_atmel_uart_port(port); 359 unsigned int mode; 360 unsigned int cd, fidi; 361 int ret = 0; 362 363 /* Disable interrupts */ 364 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 365 366 mode = atmel_uart_readl(port, ATMEL_US_MR); 367 368 if (iso7816conf->flags & SER_ISO7816_ENABLED) { 369 mode &= ~ATMEL_US_USMODE; 370 371 if (iso7816conf->tg > 255) { 372 dev_err(port->dev, "ISO7816: Timeguard exceeding 255\n"); 373 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 374 ret = -EINVAL; 375 goto err_out; 376 } 377 378 if ((iso7816conf->flags & SER_ISO7816_T_PARAM) 379 == SER_ISO7816_T(0)) { 380 mode |= ATMEL_US_USMODE_ISO7816_T0 | ATMEL_US_DSNACK; 381 } else if ((iso7816conf->flags & SER_ISO7816_T_PARAM) 382 == SER_ISO7816_T(1)) { 383 mode |= ATMEL_US_USMODE_ISO7816_T1 | ATMEL_US_INACK; 384 } else { 385 dev_err(port->dev, "ISO7816: Type not supported\n"); 386 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 387 ret = -EINVAL; 388 goto err_out; 389 } 390 391 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_NBSTOP | ATMEL_US_PAR); 392 393 /* select mck clock, and output */ 394 mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO; 395 /* set parity for normal/inverse mode + max iterations */ 396 mode |= ATMEL_US_PAR_EVEN | ATMEL_US_NBSTOP_1 | ATMEL_US_MAX_ITER(3); 397 398 cd = atmel_calc_cd(port, iso7816conf); 399 fidi = atmel_calc_fidi(port, iso7816conf); 400 if (fidi == 0) { 401 dev_warn(port->dev, "ISO7816 fidi = 0, Generator generates no signal\n"); 402 } else if (fidi < atmel_port->fidi_min 403 || fidi > atmel_port->fidi_max) { 404 dev_err(port->dev, "ISO7816 fidi = %u, value not supported\n", fidi); 405 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 406 ret = -EINVAL; 407 goto err_out; 408 } 409 410 if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) { 411 /* port not yet in iso7816 mode: store configuration */ 412 atmel_port->backup_mode = atmel_uart_readl(port, ATMEL_US_MR); 413 atmel_port->backup_brgr = atmel_uart_readl(port, ATMEL_US_BRGR); 414 } 415 416 atmel_uart_writel(port, ATMEL_US_TTGR, iso7816conf->tg); 417 atmel_uart_writel(port, ATMEL_US_BRGR, cd); 418 atmel_uart_writel(port, ATMEL_US_FIDI, fidi); 419 420 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXEN); 421 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY | ATMEL_US_NACK | ATMEL_US_ITERATION; 422 } else { 423 dev_dbg(port->dev, "Setting UART back to RS232\n"); 424 /* back to last RS232 settings */ 425 mode = atmel_port->backup_mode; 426 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 427 atmel_uart_writel(port, ATMEL_US_TTGR, 0); 428 atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->backup_brgr); 429 atmel_uart_writel(port, ATMEL_US_FIDI, 0x174); 430 431 if (atmel_use_pdc_tx(port)) 432 atmel_port->tx_done_mask = ATMEL_US_ENDTX | 433 ATMEL_US_TXBUFE; 434 else 435 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 436 } 437 438 port->iso7816 = *iso7816conf; 439 440 atmel_uart_writel(port, ATMEL_US_MR, mode); 441 442err_out: 443 /* Enable interrupts */ 444 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 445 446 return ret; 447} 448 449/* 450 * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty. 451 */ 452static u_int atmel_tx_empty(struct uart_port *port) 453{ 454 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 455 456 if (atmel_port->tx_stopped) 457 return TIOCSER_TEMT; 458 return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ? 
459 TIOCSER_TEMT : 460 0; 461} 462 463/* 464 * Set state of the modem control output lines 465 */ 466static void atmel_set_mctrl(struct uart_port *port, u_int mctrl) 467{ 468 unsigned int control = 0; 469 unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR); 470 unsigned int rts_paused, rts_ready; 471 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 472 473 /* override mode to RS485 if needed, otherwise keep the current mode */ 474 if (port->rs485.flags & SER_RS485_ENABLED) { 475 atmel_uart_writel(port, ATMEL_US_TTGR, 476 port->rs485.delay_rts_after_send); 477 mode &= ~ATMEL_US_USMODE; 478 mode |= ATMEL_US_USMODE_RS485; 479 } 480 481 /* set the RTS line state according to the mode */ 482 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { 483 /* force RTS line to high level */ 484 rts_paused = ATMEL_US_RTSEN; 485 486 /* give the control of the RTS line back to the hardware */ 487 rts_ready = ATMEL_US_RTSDIS; 488 } else { 489 /* force RTS line to high level */ 490 rts_paused = ATMEL_US_RTSDIS; 491 492 /* force RTS line to low level */ 493 rts_ready = ATMEL_US_RTSEN; 494 } 495 496 if (mctrl & TIOCM_RTS) 497 control |= rts_ready; 498 else 499 control |= rts_paused; 500 501 if (mctrl & TIOCM_DTR) 502 control |= ATMEL_US_DTREN; 503 else 504 control |= ATMEL_US_DTRDIS; 505 506 atmel_uart_writel(port, ATMEL_US_CR, control); 507 508 mctrl_gpio_set(atmel_port->gpios, mctrl); 509 510 /* Local loopback mode? */ 511 mode &= ~ATMEL_US_CHMODE; 512 if (mctrl & TIOCM_LOOP) 513 mode |= ATMEL_US_CHMODE_LOC_LOOP; 514 else 515 mode |= ATMEL_US_CHMODE_NORMAL; 516 517 atmel_uart_writel(port, ATMEL_US_MR, mode); 518} 519 520/* 521 * Get state of the modem control input lines 522 */ 523static u_int atmel_get_mctrl(struct uart_port *port) 524{ 525 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 526 unsigned int ret = 0, status; 527 528 status = atmel_uart_readl(port, ATMEL_US_CSR); 529 530 /* 531 * The control signals are active low. 532 */ 533 if (!(status & ATMEL_US_DCD)) 534 ret |= TIOCM_CD; 535 if (!(status & ATMEL_US_CTS)) 536 ret |= TIOCM_CTS; 537 if (!(status & ATMEL_US_DSR)) 538 ret |= TIOCM_DSR; 539 if (!(status & ATMEL_US_RI)) 540 ret |= TIOCM_RI; 541 542 return mctrl_gpio_get(atmel_port->gpios, &ret); 543} 544 545/* 546 * Stop transmitting. 547 */ 548static void atmel_stop_tx(struct uart_port *port) 549{ 550 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 551 552 if (atmel_use_pdc_tx(port)) { 553 /* disable PDC transmit */ 554 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 555 } 556 557 /* 558 * Disable the transmitter. 559 * This is mandatory when DMA is used, otherwise the DMA buffer 560 * is fully transmitted. 561 */ 562 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS); 563 atmel_port->tx_stopped = true; 564 565 /* Disable interrupts */ 566 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 567 568 if (atmel_uart_is_half_duplex(port)) 569 if (!atomic_read(&atmel_port->tasklet_shutdown)) 570 atmel_start_rx(port); 571 572} 573 574/* 575 * Start transmitting. 576 */ 577static void atmel_start_tx(struct uart_port *port) 578{ 579 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 580 581 if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR) 582 & ATMEL_PDC_TXTEN)) 583 /* The transmitter is already running. 
Yes, we 584 really need this.*/ 585 return; 586 587 if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) 588 if (atmel_uart_is_half_duplex(port)) 589 atmel_stop_rx(port); 590 591 if (atmel_use_pdc_tx(port)) 592 /* re-enable PDC transmit */ 593 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 594 595 /* Enable interrupts */ 596 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 597 598 /* re-enable the transmitter */ 599 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); 600 atmel_port->tx_stopped = false; 601} 602 603/* 604 * start receiving - port is in process of being opened. 605 */ 606static void atmel_start_rx(struct uart_port *port) 607{ 608 /* reset status and receiver */ 609 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 610 611 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN); 612 613 if (atmel_use_pdc_rx(port)) { 614 /* enable PDC controller */ 615 atmel_uart_writel(port, ATMEL_US_IER, 616 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT | 617 port->read_status_mask); 618 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); 619 } else { 620 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY); 621 } 622} 623 624/* 625 * Stop receiving - port is in process of being closed. 626 */ 627static void atmel_stop_rx(struct uart_port *port) 628{ 629 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS); 630 631 if (atmel_use_pdc_rx(port)) { 632 /* disable PDC receive */ 633 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS); 634 atmel_uart_writel(port, ATMEL_US_IDR, 635 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT | 636 port->read_status_mask); 637 } else { 638 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY); 639 } 640} 641 642/* 643 * Enable modem status interrupts 644 */ 645static void atmel_enable_ms(struct uart_port *port) 646{ 647 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 648 uint32_t ier = 0; 649 650 /* 651 * Interrupt should not be enabled twice 652 */ 653 if (atmel_port->ms_irq_enabled) 654 return; 655 656 atmel_port->ms_irq_enabled = true; 657 658 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) 659 ier |= ATMEL_US_CTSIC; 660 661 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR)) 662 ier |= ATMEL_US_DSRIC; 663 664 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI)) 665 ier |= ATMEL_US_RIIC; 666 667 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD)) 668 ier |= ATMEL_US_DCDIC; 669 670 atmel_uart_writel(port, ATMEL_US_IER, ier); 671 672 mctrl_gpio_enable_ms(atmel_port->gpios); 673} 674 675/* 676 * Disable modem status interrupts 677 */ 678static void atmel_disable_ms(struct uart_port *port) 679{ 680 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 681 uint32_t idr = 0; 682 683 /* 684 * Interrupt should not be disabled twice 685 */ 686 if (!atmel_port->ms_irq_enabled) 687 return; 688 689 atmel_port->ms_irq_enabled = false; 690 691 mctrl_gpio_disable_ms(atmel_port->gpios); 692 693 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) 694 idr |= ATMEL_US_CTSIC; 695 696 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR)) 697 idr |= ATMEL_US_DSRIC; 698 699 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI)) 700 idr |= ATMEL_US_RIIC; 701 702 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD)) 703 idr |= ATMEL_US_DCDIC; 704 705 atmel_uart_writel(port, ATMEL_US_IDR, idr); 706} 707 708/* 709 * Control the transmission of a break signal 710 */ 711static void atmel_break_ctl(struct uart_port *port, int break_state) 712{ 713 if (break_state != 0) 714 /* start break */ 
715 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK); 716 else 717 /* stop break */ 718 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK); 719} 720 721/* 722 * Stores the incoming character in the ring buffer 723 */ 724static void 725atmel_buffer_rx_char(struct uart_port *port, unsigned int status, 726 unsigned int ch) 727{ 728 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 729 struct circ_buf *ring = &atmel_port->rx_ring; 730 struct atmel_uart_char *c; 731 732 if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE)) 733 /* Buffer overflow, ignore char */ 734 return; 735 736 c = &((struct atmel_uart_char *)ring->buf)[ring->head]; 737 c->status = status; 738 c->ch = ch; 739 740 /* Make sure the character is stored before we update head. */ 741 smp_wmb(); 742 743 ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1); 744} 745 746/* 747 * Deal with parity, framing and overrun errors. 748 */ 749static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status) 750{ 751 /* clear error */ 752 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 753 754 if (status & ATMEL_US_RXBRK) { 755 /* ignore side-effect */ 756 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); 757 port->icount.brk++; 758 } 759 if (status & ATMEL_US_PARE) 760 port->icount.parity++; 761 if (status & ATMEL_US_FRAME) 762 port->icount.frame++; 763 if (status & ATMEL_US_OVRE) 764 port->icount.overrun++; 765} 766 767/* 768 * Characters received (called from interrupt handler) 769 */ 770static void atmel_rx_chars(struct uart_port *port) 771{ 772 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 773 unsigned int status, ch; 774 775 status = atmel_uart_readl(port, ATMEL_US_CSR); 776 while (status & ATMEL_US_RXRDY) { 777 ch = atmel_uart_read_char(port); 778 779 /* 780 * note that the error handling code is 781 * out of the main execution path 782 */ 783 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME 784 | ATMEL_US_OVRE | ATMEL_US_RXBRK) 785 || atmel_port->break_active)) { 786 787 /* clear error */ 788 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 789 790 if (status & ATMEL_US_RXBRK 791 && !atmel_port->break_active) { 792 atmel_port->break_active = 1; 793 atmel_uart_writel(port, ATMEL_US_IER, 794 ATMEL_US_RXBRK); 795 } else { 796 /* 797 * This is either the end-of-break 798 * condition or we've received at 799 * least one character without RXBRK 800 * being set. In both cases, the next 801 * RXBRK will indicate start-of-break. 
802 */ 803 atmel_uart_writel(port, ATMEL_US_IDR, 804 ATMEL_US_RXBRK); 805 status &= ~ATMEL_US_RXBRK; 806 atmel_port->break_active = 0; 807 } 808 } 809 810 atmel_buffer_rx_char(port, status, ch); 811 status = atmel_uart_readl(port, ATMEL_US_CSR); 812 } 813 814 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx); 815} 816 817/* 818 * Transmit characters (called from tasklet with TXRDY interrupt 819 * disabled) 820 */ 821static void atmel_tx_chars(struct uart_port *port) 822{ 823 struct circ_buf *xmit = &port->state->xmit; 824 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 825 826 if (port->x_char && 827 (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) { 828 atmel_uart_write_char(port, port->x_char); 829 port->icount.tx++; 830 port->x_char = 0; 831 } 832 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) 833 return; 834 835 while (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY) { 836 atmel_uart_write_char(port, xmit->buf[xmit->tail]); 837 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 838 port->icount.tx++; 839 if (uart_circ_empty(xmit)) 840 break; 841 } 842 843 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 844 uart_write_wakeup(port); 845 846 if (!uart_circ_empty(xmit)) { 847 /* we still have characters to transmit, so we should continue 848 * transmitting them when TX is ready, regardless of 849 * mode or duplexity 850 */ 851 atmel_port->tx_done_mask |= ATMEL_US_TXRDY; 852 853 /* Enable interrupts */ 854 atmel_uart_writel(port, ATMEL_US_IER, 855 atmel_port->tx_done_mask); 856 } else { 857 if (atmel_uart_is_half_duplex(port)) 858 atmel_port->tx_done_mask &= ~ATMEL_US_TXRDY; 859 } 860} 861 862static void atmel_complete_tx_dma(void *arg) 863{ 864 struct atmel_uart_port *atmel_port = arg; 865 struct uart_port *port = &atmel_port->uart; 866 struct circ_buf *xmit = &port->state->xmit; 867 struct dma_chan *chan = atmel_port->chan_tx; 868 unsigned long flags; 869 870 spin_lock_irqsave(&port->lock, flags); 871 872 if (chan) 873 dmaengine_terminate_all(chan); 874 xmit->tail += atmel_port->tx_len; 875 xmit->tail &= UART_XMIT_SIZE - 1; 876 877 port->icount.tx += atmel_port->tx_len; 878 879 spin_lock_irq(&atmel_port->lock_tx); 880 async_tx_ack(atmel_port->desc_tx); 881 atmel_port->cookie_tx = -EINVAL; 882 atmel_port->desc_tx = NULL; 883 spin_unlock_irq(&atmel_port->lock_tx); 884 885 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 886 uart_write_wakeup(port); 887 888 /* 889 * xmit is a circular buffer so, if we have just send data from 890 * xmit->tail to the end of xmit->buf, now we have to transmit the 891 * remaining data from the beginning of xmit->buf to xmit->head. 
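 * Scheduling tasklet_tx below takes care of that wrapped part: the tasklet
 * calls atmel_tx_dma() again with the updated tail.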
892 */ 893 if (!uart_circ_empty(xmit)) 894 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); 895 else if (atmel_uart_is_half_duplex(port)) { 896 /* 897 * DMA done, re-enable TXEMPTY and signal that we can stop 898 * TX and start RX for RS485 899 */ 900 atmel_port->hd_start_rx = true; 901 atmel_uart_writel(port, ATMEL_US_IER, 902 atmel_port->tx_done_mask); 903 } 904 905 spin_unlock_irqrestore(&port->lock, flags); 906} 907 908static void atmel_release_tx_dma(struct uart_port *port) 909{ 910 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 911 struct dma_chan *chan = atmel_port->chan_tx; 912 913 if (chan) { 914 dmaengine_terminate_all(chan); 915 dma_release_channel(chan); 916 dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1, 917 DMA_TO_DEVICE); 918 } 919 920 atmel_port->desc_tx = NULL; 921 atmel_port->chan_tx = NULL; 922 atmel_port->cookie_tx = -EINVAL; 923} 924 925/* 926 * Called from tasklet with TXRDY interrupt is disabled. 927 */ 928static void atmel_tx_dma(struct uart_port *port) 929{ 930 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 931 struct circ_buf *xmit = &port->state->xmit; 932 struct dma_chan *chan = atmel_port->chan_tx; 933 struct dma_async_tx_descriptor *desc; 934 struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx; 935 unsigned int tx_len, part1_len, part2_len, sg_len; 936 dma_addr_t phys_addr; 937 938 /* Make sure we have an idle channel */ 939 if (atmel_port->desc_tx != NULL) 940 return; 941 942 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) { 943 /* 944 * DMA is idle now. 945 * Port xmit buffer is already mapped, 946 * and it is one page... Just adjust 947 * offsets and lengths. Since it is a circular buffer, 948 * we have to transmit till the end, and then the rest. 949 * Take the port lock to get a 950 * consistent xmit buffer state. 
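 * CIRC_CNT_TO_END() only counts the contiguous bytes up to the end of the
 * buffer; any wrapped remainder is picked up by the next call once this
 * transfer completes.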
951 */ 952 tx_len = CIRC_CNT_TO_END(xmit->head, 953 xmit->tail, 954 UART_XMIT_SIZE); 955 956 if (atmel_port->fifo_size) { 957 /* multi data mode */ 958 part1_len = (tx_len & ~0x3); /* DWORD access */ 959 part2_len = (tx_len & 0x3); /* BYTE access */ 960 } else { 961 /* single data (legacy) mode */ 962 part1_len = 0; 963 part2_len = tx_len; /* BYTE access only */ 964 } 965 966 sg_init_table(sgl, 2); 967 sg_len = 0; 968 phys_addr = sg_dma_address(sg_tx) + xmit->tail; 969 if (part1_len) { 970 sg = &sgl[sg_len++]; 971 sg_dma_address(sg) = phys_addr; 972 sg_dma_len(sg) = part1_len; 973 974 phys_addr += part1_len; 975 } 976 977 if (part2_len) { 978 sg = &sgl[sg_len++]; 979 sg_dma_address(sg) = phys_addr; 980 sg_dma_len(sg) = part2_len; 981 } 982 983 /* 984 * save tx_len so atmel_complete_tx_dma() will increase 985 * xmit->tail correctly 986 */ 987 atmel_port->tx_len = tx_len; 988 989 desc = dmaengine_prep_slave_sg(chan, 990 sgl, 991 sg_len, 992 DMA_MEM_TO_DEV, 993 DMA_PREP_INTERRUPT | 994 DMA_CTRL_ACK); 995 if (!desc) { 996 dev_err(port->dev, "Failed to send via dma!\n"); 997 return; 998 } 999 1000 dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE); 1001 1002 atmel_port->desc_tx = desc; 1003 desc->callback = atmel_complete_tx_dma; 1004 desc->callback_param = atmel_port; 1005 atmel_port->cookie_tx = dmaengine_submit(desc); 1006 if (dma_submit_error(atmel_port->cookie_tx)) { 1007 dev_err(port->dev, "dma_submit_error %d\n", 1008 atmel_port->cookie_tx); 1009 return; 1010 } 1011 1012 dma_async_issue_pending(chan); 1013 } 1014 1015 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1016 uart_write_wakeup(port); 1017} 1018 1019static int atmel_prepare_tx_dma(struct uart_port *port) 1020{ 1021 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1022 struct device *mfd_dev = port->dev->parent; 1023 dma_cap_mask_t mask; 1024 struct dma_slave_config config; 1025 int ret, nent; 1026 1027 dma_cap_zero(mask); 1028 dma_cap_set(DMA_SLAVE, mask); 1029 1030 atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx"); 1031 if (atmel_port->chan_tx == NULL) 1032 goto chan_err; 1033 dev_info(port->dev, "using %s for tx DMA transfers\n", 1034 dma_chan_name(atmel_port->chan_tx)); 1035 1036 spin_lock_init(&atmel_port->lock_tx); 1037 sg_init_table(&atmel_port->sg_tx, 1); 1038 /* UART circular tx buffer is an aligned page. */ 1039 BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf)); 1040 sg_set_page(&atmel_port->sg_tx, 1041 virt_to_page(port->state->xmit.buf), 1042 UART_XMIT_SIZE, 1043 offset_in_page(port->state->xmit.buf)); 1044 nent = dma_map_sg(port->dev, 1045 &atmel_port->sg_tx, 1046 1, 1047 DMA_TO_DEVICE); 1048 1049 if (!nent) { 1050 dev_dbg(port->dev, "need to release resource of dma\n"); 1051 goto chan_err; 1052 } else { 1053 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__, 1054 sg_dma_len(&atmel_port->sg_tx), 1055 port->state->xmit.buf, 1056 &sg_dma_address(&atmel_port->sg_tx)); 1057 } 1058 1059 /* Configure the slave DMA */ 1060 memset(&config, 0, sizeof(config)); 1061 config.direction = DMA_MEM_TO_DEV; 1062 config.dst_addr_width = (atmel_port->fifo_size) ? 
1063 DMA_SLAVE_BUSWIDTH_4_BYTES : 1064 DMA_SLAVE_BUSWIDTH_1_BYTE; 1065 config.dst_addr = port->mapbase + ATMEL_US_THR; 1066 config.dst_maxburst = 1; 1067 1068 ret = dmaengine_slave_config(atmel_port->chan_tx, 1069 &config); 1070 if (ret) { 1071 dev_err(port->dev, "DMA tx slave configuration failed\n"); 1072 goto chan_err; 1073 } 1074 1075 return 0; 1076 1077chan_err: 1078 dev_err(port->dev, "TX channel not available, switch to pio\n"); 1079 atmel_port->use_dma_tx = false; 1080 if (atmel_port->chan_tx) 1081 atmel_release_tx_dma(port); 1082 return -EINVAL; 1083} 1084 1085static void atmel_complete_rx_dma(void *arg) 1086{ 1087 struct uart_port *port = arg; 1088 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1089 1090 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx); 1091} 1092 1093static void atmel_release_rx_dma(struct uart_port *port) 1094{ 1095 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1096 struct dma_chan *chan = atmel_port->chan_rx; 1097 1098 if (chan) { 1099 dmaengine_terminate_all(chan); 1100 dma_release_channel(chan); 1101 dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1, 1102 DMA_FROM_DEVICE); 1103 } 1104 1105 atmel_port->desc_rx = NULL; 1106 atmel_port->chan_rx = NULL; 1107 atmel_port->cookie_rx = -EINVAL; 1108} 1109 1110static void atmel_rx_from_dma(struct uart_port *port) 1111{ 1112 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1113 struct tty_port *tport = &port->state->port; 1114 struct circ_buf *ring = &atmel_port->rx_ring; 1115 struct dma_chan *chan = atmel_port->chan_rx; 1116 struct dma_tx_state state; 1117 enum dma_status dmastat; 1118 size_t count; 1119 1120 1121 /* Reset the UART timeout early so that we don't miss one */ 1122 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1123 dmastat = dmaengine_tx_status(chan, 1124 atmel_port->cookie_rx, 1125 &state); 1126 /* Restart a new tasklet if DMA status is error */ 1127 if (dmastat == DMA_ERROR) { 1128 dev_dbg(port->dev, "Get residue error, restart tasklet\n"); 1129 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT); 1130 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx); 1131 return; 1132 } 1133 1134 /* CPU claims ownership of RX DMA buffer */ 1135 dma_sync_sg_for_cpu(port->dev, 1136 &atmel_port->sg_rx, 1137 1, 1138 DMA_FROM_DEVICE); 1139 1140 /* 1141 * ring->head points to the end of data already written by the DMA. 1142 * ring->tail points to the beginning of data to be read by the 1143 * framework. 1144 * The current transfer size should not be larger than the dma buffer 1145 * length. 1146 */ 1147 ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue; 1148 BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx)); 1149 /* 1150 * At this point ring->head may point to the first byte right after the 1151 * last byte of the dma buffer: 1152 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx) 1153 * 1154 * However ring->tail must always points inside the dma buffer: 1155 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1 1156 * 1157 * Since we use a ring buffer, we have to handle the case 1158 * where head is lower than tail. In such a case, we first read from 1159 * tail to the end of the buffer then reset tail. 
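 * For example, with the 4096 byte DMA buffer used here, tail = 4000 and
 * head = 100 means the first copy pushes the 96 bytes at offsets 4000..4095,
 * tail is reset to 0, and the second copy below pushes the remaining 100
 * bytes at offsets 0..99.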
1160 */ 1161 if (ring->head < ring->tail) { 1162 count = sg_dma_len(&atmel_port->sg_rx) - ring->tail; 1163 1164 tty_insert_flip_string(tport, ring->buf + ring->tail, count); 1165 ring->tail = 0; 1166 port->icount.rx += count; 1167 } 1168 1169 /* Finally we read data from tail to head */ 1170 if (ring->tail < ring->head) { 1171 count = ring->head - ring->tail; 1172 1173 tty_insert_flip_string(tport, ring->buf + ring->tail, count); 1174 /* Wrap ring->head if needed */ 1175 if (ring->head >= sg_dma_len(&atmel_port->sg_rx)) 1176 ring->head = 0; 1177 ring->tail = ring->head; 1178 port->icount.rx += count; 1179 } 1180 1181 /* USART retreives ownership of RX DMA buffer */ 1182 dma_sync_sg_for_device(port->dev, 1183 &atmel_port->sg_rx, 1184 1, 1185 DMA_FROM_DEVICE); 1186 1187 tty_flip_buffer_push(tport); 1188 1189 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT); 1190} 1191 1192static int atmel_prepare_rx_dma(struct uart_port *port) 1193{ 1194 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1195 struct device *mfd_dev = port->dev->parent; 1196 struct dma_async_tx_descriptor *desc; 1197 dma_cap_mask_t mask; 1198 struct dma_slave_config config; 1199 struct circ_buf *ring; 1200 int ret, nent; 1201 1202 ring = &atmel_port->rx_ring; 1203 1204 dma_cap_zero(mask); 1205 dma_cap_set(DMA_CYCLIC, mask); 1206 1207 atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx"); 1208 if (atmel_port->chan_rx == NULL) 1209 goto chan_err; 1210 dev_info(port->dev, "using %s for rx DMA transfers\n", 1211 dma_chan_name(atmel_port->chan_rx)); 1212 1213 spin_lock_init(&atmel_port->lock_rx); 1214 sg_init_table(&atmel_port->sg_rx, 1); 1215 /* UART circular rx buffer is an aligned page. */ 1216 BUG_ON(!PAGE_ALIGNED(ring->buf)); 1217 sg_set_page(&atmel_port->sg_rx, 1218 virt_to_page(ring->buf), 1219 sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE, 1220 offset_in_page(ring->buf)); 1221 nent = dma_map_sg(port->dev, 1222 &atmel_port->sg_rx, 1223 1, 1224 DMA_FROM_DEVICE); 1225 1226 if (!nent) { 1227 dev_dbg(port->dev, "need to release resource of dma\n"); 1228 goto chan_err; 1229 } else { 1230 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__, 1231 sg_dma_len(&atmel_port->sg_rx), 1232 ring->buf, 1233 &sg_dma_address(&atmel_port->sg_rx)); 1234 } 1235 1236 /* Configure the slave DMA */ 1237 memset(&config, 0, sizeof(config)); 1238 config.direction = DMA_DEV_TO_MEM; 1239 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1240 config.src_addr = port->mapbase + ATMEL_US_RHR; 1241 config.src_maxburst = 1; 1242 1243 ret = dmaengine_slave_config(atmel_port->chan_rx, 1244 &config); 1245 if (ret) { 1246 dev_err(port->dev, "DMA rx slave configuration failed\n"); 1247 goto chan_err; 1248 } 1249 /* 1250 * Prepare a cyclic dma transfer, assign 2 descriptors, 1251 * each one is half ring buffer size 1252 */ 1253 desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx, 1254 sg_dma_address(&atmel_port->sg_rx), 1255 sg_dma_len(&atmel_port->sg_rx), 1256 sg_dma_len(&atmel_port->sg_rx)/2, 1257 DMA_DEV_TO_MEM, 1258 DMA_PREP_INTERRUPT); 1259 if (!desc) { 1260 dev_err(port->dev, "Preparing DMA cyclic failed\n"); 1261 goto chan_err; 1262 } 1263 desc->callback = atmel_complete_rx_dma; 1264 desc->callback_param = port; 1265 atmel_port->desc_rx = desc; 1266 atmel_port->cookie_rx = dmaengine_submit(desc); 1267 if (dma_submit_error(atmel_port->cookie_rx)) { 1268 dev_err(port->dev, "dma_submit_error %d\n", 1269 atmel_port->cookie_rx); 1270 goto chan_err; 1271 } 1272 1273 dma_async_issue_pending(atmel_port->chan_rx); 1274 
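	/*
	 * From now on the dmaengine fills the ring buffer on its own:
	 * atmel_complete_rx_dma() fires at the end of each half-buffer
	 * period and the USART receive timeout covers partially filled
	 * periods, both scheduling tasklet_rx, which drains the data in
	 * atmel_rx_from_dma().
	 */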
1275 return 0; 1276 1277chan_err: 1278 dev_err(port->dev, "RX channel not available, switch to pio\n"); 1279 atmel_port->use_dma_rx = false; 1280 if (atmel_port->chan_rx) 1281 atmel_release_rx_dma(port); 1282 return -EINVAL; 1283} 1284 1285static void atmel_uart_timer_callback(struct timer_list *t) 1286{ 1287 struct atmel_uart_port *atmel_port = from_timer(atmel_port, t, 1288 uart_timer); 1289 struct uart_port *port = &atmel_port->uart; 1290 1291 if (!atomic_read(&atmel_port->tasklet_shutdown)) { 1292 tasklet_schedule(&atmel_port->tasklet_rx); 1293 mod_timer(&atmel_port->uart_timer, 1294 jiffies + uart_poll_timeout(port)); 1295 } 1296} 1297 1298/* 1299 * receive interrupt handler. 1300 */ 1301static void 1302atmel_handle_receive(struct uart_port *port, unsigned int pending) 1303{ 1304 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1305 1306 if (atmel_use_pdc_rx(port)) { 1307 /* 1308 * PDC receive. Just schedule the tasklet and let it 1309 * figure out the details. 1310 * 1311 * TODO: We're not handling error flags correctly at 1312 * the moment. 1313 */ 1314 if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) { 1315 atmel_uart_writel(port, ATMEL_US_IDR, 1316 (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)); 1317 atmel_tasklet_schedule(atmel_port, 1318 &atmel_port->tasklet_rx); 1319 } 1320 1321 if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE | 1322 ATMEL_US_FRAME | ATMEL_US_PARE)) 1323 atmel_pdc_rxerr(port, pending); 1324 } 1325 1326 if (atmel_use_dma_rx(port)) { 1327 if (pending & ATMEL_US_TIMEOUT) { 1328 atmel_uart_writel(port, ATMEL_US_IDR, 1329 ATMEL_US_TIMEOUT); 1330 atmel_tasklet_schedule(atmel_port, 1331 &atmel_port->tasklet_rx); 1332 } 1333 } 1334 1335 /* Interrupt receive */ 1336 if (pending & ATMEL_US_RXRDY) 1337 atmel_rx_chars(port); 1338 else if (pending & ATMEL_US_RXBRK) { 1339 /* 1340 * End of break detected. If it came along with a 1341 * character, atmel_rx_chars will handle it. 1342 */ 1343 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 1344 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK); 1345 atmel_port->break_active = 0; 1346 } 1347} 1348 1349/* 1350 * transmit interrupt handler. (Transmit is IRQF_NODELAY safe) 1351 */ 1352static void 1353atmel_handle_transmit(struct uart_port *port, unsigned int pending) 1354{ 1355 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1356 1357 if (pending & atmel_port->tx_done_mask) { 1358 atmel_uart_writel(port, ATMEL_US_IDR, 1359 atmel_port->tx_done_mask); 1360 1361 /* Start RX if flag was set and FIFO is empty */ 1362 if (atmel_port->hd_start_rx) { 1363 if (!(atmel_uart_readl(port, ATMEL_US_CSR) 1364 & ATMEL_US_TXEMPTY)) 1365 dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n"); 1366 1367 atmel_port->hd_start_rx = false; 1368 atmel_start_rx(port); 1369 } 1370 1371 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); 1372 } 1373} 1374 1375/* 1376 * status flags interrupt handler. 1377 */ 1378static void 1379atmel_handle_status(struct uart_port *port, unsigned int pending, 1380 unsigned int status) 1381{ 1382 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1383 unsigned int status_change; 1384 1385 if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC 1386 | ATMEL_US_CTSIC)) { 1387 status_change = status ^ atmel_port->irq_status_prev; 1388 atmel_port->irq_status_prev = status; 1389 1390 if (status_change & (ATMEL_US_RI | ATMEL_US_DSR 1391 | ATMEL_US_DCD | ATMEL_US_CTS)) { 1392 /* TODO: All reads to CSR will clear these interrupts! 
*/ 1393 if (status_change & ATMEL_US_RI) 1394 port->icount.rng++; 1395 if (status_change & ATMEL_US_DSR) 1396 port->icount.dsr++; 1397 if (status_change & ATMEL_US_DCD) 1398 uart_handle_dcd_change(port, !(status & ATMEL_US_DCD)); 1399 if (status_change & ATMEL_US_CTS) 1400 uart_handle_cts_change(port, !(status & ATMEL_US_CTS)); 1401 1402 wake_up_interruptible(&port->state->port.delta_msr_wait); 1403 } 1404 } 1405 1406 if (pending & (ATMEL_US_NACK | ATMEL_US_ITERATION)) 1407 dev_dbg(port->dev, "ISO7816 ERROR (0x%08x)\n", pending); 1408} 1409 1410/* 1411 * Interrupt handler 1412 */ 1413static irqreturn_t atmel_interrupt(int irq, void *dev_id) 1414{ 1415 struct uart_port *port = dev_id; 1416 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1417 unsigned int status, pending, mask, pass_counter = 0; 1418 1419 spin_lock(&atmel_port->lock_suspended); 1420 1421 do { 1422 status = atmel_uart_readl(port, ATMEL_US_CSR); 1423 mask = atmel_uart_readl(port, ATMEL_US_IMR); 1424 pending = status & mask; 1425 if (!pending) 1426 break; 1427 1428 if (atmel_port->suspended) { 1429 atmel_port->pending |= pending; 1430 atmel_port->pending_status = status; 1431 atmel_uart_writel(port, ATMEL_US_IDR, mask); 1432 pm_system_wakeup(); 1433 break; 1434 } 1435 1436 atmel_handle_receive(port, pending); 1437 atmel_handle_status(port, pending, status); 1438 atmel_handle_transmit(port, pending); 1439 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); 1440 1441 spin_unlock(&atmel_port->lock_suspended); 1442 1443 return pass_counter ? IRQ_HANDLED : IRQ_NONE; 1444} 1445 1446static void atmel_release_tx_pdc(struct uart_port *port) 1447{ 1448 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1449 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1450 1451 dma_unmap_single(port->dev, 1452 pdc->dma_addr, 1453 pdc->dma_size, 1454 DMA_TO_DEVICE); 1455} 1456 1457/* 1458 * Called from tasklet with ENDTX and TXBUFE interrupts disabled. 1459 */ 1460static void atmel_tx_pdc(struct uart_port *port) 1461{ 1462 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1463 struct circ_buf *xmit = &port->state->xmit; 1464 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1465 int count; 1466 1467 /* nothing left to transmit? 
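 * A non-zero transmit counter (TCR) means the PDC has not finished
 * pushing out the previous chunk yet.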
*/ 1468 if (atmel_uart_readl(port, ATMEL_PDC_TCR)) 1469 return; 1470 1471 xmit->tail += pdc->ofs; 1472 xmit->tail &= UART_XMIT_SIZE - 1; 1473 1474 port->icount.tx += pdc->ofs; 1475 pdc->ofs = 0; 1476 1477 /* more to transmit - setup next transfer */ 1478 1479 /* disable PDC transmit */ 1480 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 1481 1482 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) { 1483 dma_sync_single_for_device(port->dev, 1484 pdc->dma_addr, 1485 pdc->dma_size, 1486 DMA_TO_DEVICE); 1487 1488 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); 1489 pdc->ofs = count; 1490 1491 atmel_uart_writel(port, ATMEL_PDC_TPR, 1492 pdc->dma_addr + xmit->tail); 1493 atmel_uart_writel(port, ATMEL_PDC_TCR, count); 1494 /* re-enable PDC transmit */ 1495 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 1496 /* Enable interrupts */ 1497 atmel_uart_writel(port, ATMEL_US_IER, 1498 atmel_port->tx_done_mask); 1499 } else { 1500 if (atmel_uart_is_half_duplex(port)) { 1501 /* DMA done, stop TX, start RX for RS485 */ 1502 atmel_start_rx(port); 1503 } 1504 } 1505 1506 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1507 uart_write_wakeup(port); 1508} 1509 1510static int atmel_prepare_tx_pdc(struct uart_port *port) 1511{ 1512 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1513 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1514 struct circ_buf *xmit = &port->state->xmit; 1515 1516 pdc->buf = xmit->buf; 1517 pdc->dma_addr = dma_map_single(port->dev, 1518 pdc->buf, 1519 UART_XMIT_SIZE, 1520 DMA_TO_DEVICE); 1521 pdc->dma_size = UART_XMIT_SIZE; 1522 pdc->ofs = 0; 1523 1524 return 0; 1525} 1526 1527static void atmel_rx_from_ring(struct uart_port *port) 1528{ 1529 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1530 struct circ_buf *ring = &atmel_port->rx_ring; 1531 unsigned int flg; 1532 unsigned int status; 1533 1534 while (ring->head != ring->tail) { 1535 struct atmel_uart_char c; 1536 1537 /* Make sure c is loaded after head. 
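 * This read barrier pairs with the smp_wmb() in atmel_buffer_rx_char(),
 * which orders the character store before the head update.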
*/ 1538 smp_rmb(); 1539 1540 c = ((struct atmel_uart_char *)ring->buf)[ring->tail]; 1541 1542 ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1); 1543 1544 port->icount.rx++; 1545 status = c.status; 1546 flg = TTY_NORMAL; 1547 1548 /* 1549 * note that the error handling code is 1550 * out of the main execution path 1551 */ 1552 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME 1553 | ATMEL_US_OVRE | ATMEL_US_RXBRK))) { 1554 if (status & ATMEL_US_RXBRK) { 1555 /* ignore side-effect */ 1556 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); 1557 1558 port->icount.brk++; 1559 if (uart_handle_break(port)) 1560 continue; 1561 } 1562 if (status & ATMEL_US_PARE) 1563 port->icount.parity++; 1564 if (status & ATMEL_US_FRAME) 1565 port->icount.frame++; 1566 if (status & ATMEL_US_OVRE) 1567 port->icount.overrun++; 1568 1569 status &= port->read_status_mask; 1570 1571 if (status & ATMEL_US_RXBRK) 1572 flg = TTY_BREAK; 1573 else if (status & ATMEL_US_PARE) 1574 flg = TTY_PARITY; 1575 else if (status & ATMEL_US_FRAME) 1576 flg = TTY_FRAME; 1577 } 1578 1579 1580 if (uart_handle_sysrq_char(port, c.ch)) 1581 continue; 1582 1583 uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg); 1584 } 1585 1586 tty_flip_buffer_push(&port->state->port); 1587} 1588 1589static void atmel_release_rx_pdc(struct uart_port *port) 1590{ 1591 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1592 int i; 1593 1594 for (i = 0; i < 2; i++) { 1595 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; 1596 1597 dma_unmap_single(port->dev, 1598 pdc->dma_addr, 1599 pdc->dma_size, 1600 DMA_FROM_DEVICE); 1601 kfree(pdc->buf); 1602 } 1603} 1604 1605static void atmel_rx_from_pdc(struct uart_port *port) 1606{ 1607 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1608 struct tty_port *tport = &port->state->port; 1609 struct atmel_dma_buffer *pdc; 1610 int rx_idx = atmel_port->pdc_rx_idx; 1611 unsigned int head; 1612 unsigned int tail; 1613 unsigned int count; 1614 1615 do { 1616 /* Reset the UART timeout early so that we don't miss one */ 1617 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1618 1619 pdc = &atmel_port->pdc_rx[rx_idx]; 1620 head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr; 1621 tail = pdc->ofs; 1622 1623 /* If the PDC has switched buffers, RPR won't contain 1624 * any address within the current buffer. Since head 1625 * is unsigned, we just need a one-way comparison to 1626 * find out. 1627 * 1628 * In this case, we just need to consume the entire 1629 * buffer and resubmit it for DMA. This will clear the 1630 * ENDRX bit as well, so that we can safely re-enable 1631 * all interrupts below. 1632 */ 1633 head = min(head, pdc->dma_size); 1634 1635 if (likely(head != tail)) { 1636 dma_sync_single_for_cpu(port->dev, pdc->dma_addr, 1637 pdc->dma_size, DMA_FROM_DEVICE); 1638 1639 /* 1640 * head will only wrap around when we recycle 1641 * the DMA buffer, and when that happens, we 1642 * explicitly set tail to 0. So head will 1643 * always be greater than tail. 1644 */ 1645 count = head - tail; 1646 1647 tty_insert_flip_string(tport, pdc->buf + pdc->ofs, 1648 count); 1649 1650 dma_sync_single_for_device(port->dev, pdc->dma_addr, 1651 pdc->dma_size, DMA_FROM_DEVICE); 1652 1653 port->icount.rx += count; 1654 pdc->ofs = head; 1655 } 1656 1657 /* 1658 * If the current buffer is full, we need to check if 1659 * the next one contains any additional data. 
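 * Re-queueing the drained buffer through RNPR/RNCR and toggling pdc_rx_idx
 * keeps one buffer owned by the PDC at all times; the loop then checks the
 * other buffer.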
1660 */ 1661 if (head >= pdc->dma_size) { 1662 pdc->ofs = 0; 1663 atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr); 1664 atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size); 1665 1666 rx_idx = !rx_idx; 1667 atmel_port->pdc_rx_idx = rx_idx; 1668 } 1669 } while (head >= pdc->dma_size); 1670 1671 tty_flip_buffer_push(tport); 1672 1673 atmel_uart_writel(port, ATMEL_US_IER, 1674 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); 1675} 1676 1677static int atmel_prepare_rx_pdc(struct uart_port *port) 1678{ 1679 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1680 int i; 1681 1682 for (i = 0; i < 2; i++) { 1683 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; 1684 1685 pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL); 1686 if (pdc->buf == NULL) { 1687 if (i != 0) { 1688 dma_unmap_single(port->dev, 1689 atmel_port->pdc_rx[0].dma_addr, 1690 PDC_BUFFER_SIZE, 1691 DMA_FROM_DEVICE); 1692 kfree(atmel_port->pdc_rx[0].buf); 1693 } 1694 atmel_port->use_pdc_rx = false; 1695 return -ENOMEM; 1696 } 1697 pdc->dma_addr = dma_map_single(port->dev, 1698 pdc->buf, 1699 PDC_BUFFER_SIZE, 1700 DMA_FROM_DEVICE); 1701 pdc->dma_size = PDC_BUFFER_SIZE; 1702 pdc->ofs = 0; 1703 } 1704 1705 atmel_port->pdc_rx_idx = 0; 1706 1707 atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr); 1708 atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE); 1709 1710 atmel_uart_writel(port, ATMEL_PDC_RNPR, 1711 atmel_port->pdc_rx[1].dma_addr); 1712 atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE); 1713 1714 return 0; 1715} 1716 1717/* 1718 * tasklet handling tty stuff outside the interrupt handler. 1719 */ 1720static void atmel_tasklet_rx_func(struct tasklet_struct *t) 1721{ 1722 struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t, 1723 tasklet_rx); 1724 struct uart_port *port = &atmel_port->uart; 1725 1726 /* The interrupt handler does not take the lock */ 1727 spin_lock(&port->lock); 1728 atmel_port->schedule_rx(port); 1729 spin_unlock(&port->lock); 1730} 1731 1732static void atmel_tasklet_tx_func(struct tasklet_struct *t) 1733{ 1734 struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t, 1735 tasklet_tx); 1736 struct uart_port *port = &atmel_port->uart; 1737 1738 /* The interrupt handler does not take the lock */ 1739 spin_lock(&port->lock); 1740 atmel_port->schedule_tx(port); 1741 spin_unlock(&port->lock); 1742} 1743 1744static void atmel_init_property(struct atmel_uart_port *atmel_port, 1745 struct platform_device *pdev) 1746{ 1747 struct device_node *np = pdev->dev.of_node; 1748 1749 /* DMA/PDC usage specification */ 1750 if (of_property_read_bool(np, "atmel,use-dma-rx")) { 1751 if (of_property_read_bool(np, "dmas")) { 1752 atmel_port->use_dma_rx = true; 1753 atmel_port->use_pdc_rx = false; 1754 } else { 1755 atmel_port->use_dma_rx = false; 1756 atmel_port->use_pdc_rx = true; 1757 } 1758 } else { 1759 atmel_port->use_dma_rx = false; 1760 atmel_port->use_pdc_rx = false; 1761 } 1762 1763 if (of_property_read_bool(np, "atmel,use-dma-tx")) { 1764 if (of_property_read_bool(np, "dmas")) { 1765 atmel_port->use_dma_tx = true; 1766 atmel_port->use_pdc_tx = false; 1767 } else { 1768 atmel_port->use_dma_tx = false; 1769 atmel_port->use_pdc_tx = true; 1770 } 1771 } else { 1772 atmel_port->use_dma_tx = false; 1773 atmel_port->use_pdc_tx = false; 1774 } 1775} 1776 1777static void atmel_set_ops(struct uart_port *port) 1778{ 1779 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1780 1781 if (atmel_use_dma_rx(port)) { 1782 atmel_port->prepare_rx = &atmel_prepare_rx_dma; 1783 
atmel_port->schedule_rx = &atmel_rx_from_dma; 1784 atmel_port->release_rx = &atmel_release_rx_dma; 1785 } else if (atmel_use_pdc_rx(port)) { 1786 atmel_port->prepare_rx = &atmel_prepare_rx_pdc; 1787 atmel_port->schedule_rx = &atmel_rx_from_pdc; 1788 atmel_port->release_rx = &atmel_release_rx_pdc; 1789 } else { 1790 atmel_port->prepare_rx = NULL; 1791 atmel_port->schedule_rx = &atmel_rx_from_ring; 1792 atmel_port->release_rx = NULL; 1793 } 1794 1795 if (atmel_use_dma_tx(port)) { 1796 atmel_port->prepare_tx = &atmel_prepare_tx_dma; 1797 atmel_port->schedule_tx = &atmel_tx_dma; 1798 atmel_port->release_tx = &atmel_release_tx_dma; 1799 } else if (atmel_use_pdc_tx(port)) { 1800 atmel_port->prepare_tx = &atmel_prepare_tx_pdc; 1801 atmel_port->schedule_tx = &atmel_tx_pdc; 1802 atmel_port->release_tx = &atmel_release_tx_pdc; 1803 } else { 1804 atmel_port->prepare_tx = NULL; 1805 atmel_port->schedule_tx = &atmel_tx_chars; 1806 atmel_port->release_tx = NULL; 1807 } 1808} 1809 1810/* 1811 * Get ip name usart or uart 1812 */ 1813static void atmel_get_ip_name(struct uart_port *port) 1814{ 1815 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1816 int name = atmel_uart_readl(port, ATMEL_US_NAME); 1817 u32 version; 1818 u32 usart, dbgu_uart, new_uart; 1819 /* ASCII decoding for IP version */ 1820 usart = 0x55534152; /* USAR(T) */ 1821 dbgu_uart = 0x44424755; /* DBGU */ 1822 new_uart = 0x55415254; /* UART */ 1823 1824 /* 1825 * Only USART devices from at91sam9260 SOC implement fractional 1826 * baudrate. It is available for all asynchronous modes, with the 1827 * following restriction: the sampling clock's duty cycle is not 1828 * constant. 1829 */ 1830 atmel_port->has_frac_baudrate = false; 1831 atmel_port->has_hw_timer = false; 1832 1833 if (name == new_uart) { 1834 dev_dbg(port->dev, "Uart with hw timer"); 1835 atmel_port->has_hw_timer = true; 1836 atmel_port->rtor = ATMEL_UA_RTOR; 1837 } else if (name == usart) { 1838 dev_dbg(port->dev, "Usart\n"); 1839 atmel_port->has_frac_baudrate = true; 1840 atmel_port->has_hw_timer = true; 1841 atmel_port->rtor = ATMEL_US_RTOR; 1842 version = atmel_uart_readl(port, ATMEL_US_VERSION); 1843 switch (version) { 1844 case 0x814: /* sama5d2 */ 1845 fallthrough; 1846 case 0x701: /* sama5d4 */ 1847 atmel_port->fidi_min = 3; 1848 atmel_port->fidi_max = 65535; 1849 break; 1850 case 0x502: /* sam9x5, sama5d3 */ 1851 atmel_port->fidi_min = 3; 1852 atmel_port->fidi_max = 2047; 1853 break; 1854 default: 1855 atmel_port->fidi_min = 1; 1856 atmel_port->fidi_max = 2047; 1857 } 1858 } else if (name == dbgu_uart) { 1859 dev_dbg(port->dev, "Dbgu or uart without hw timer\n"); 1860 } else { 1861 /* fallback for older SoCs: use version field */ 1862 version = atmel_uart_readl(port, ATMEL_US_VERSION); 1863 switch (version) { 1864 case 0x302: 1865 case 0x10213: 1866 case 0x10302: 1867 dev_dbg(port->dev, "This version is usart\n"); 1868 atmel_port->has_frac_baudrate = true; 1869 atmel_port->has_hw_timer = true; 1870 atmel_port->rtor = ATMEL_US_RTOR; 1871 break; 1872 case 0x203: 1873 case 0x10202: 1874 dev_dbg(port->dev, "This version is uart\n"); 1875 break; 1876 default: 1877 dev_err(port->dev, "Not supported ip name nor version, set to uart\n"); 1878 } 1879 } 1880} 1881 1882/* 1883 * Perform initialization and enable port for reception 1884 */ 1885static int atmel_startup(struct uart_port *port) 1886{ 1887 struct platform_device *pdev = to_platform_device(port->dev); 1888 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1889 int retval; 1890 1891 /* 1892 * 
Ensure that no interrupts are enabled otherwise when 1893 * request_irq() is called we could get stuck trying to 1894 * handle an unexpected interrupt 1895 */ 1896 atmel_uart_writel(port, ATMEL_US_IDR, -1); 1897 atmel_port->ms_irq_enabled = false; 1898 1899 /* 1900 * Allocate the IRQ 1901 */ 1902 retval = request_irq(port->irq, atmel_interrupt, 1903 IRQF_SHARED | IRQF_COND_SUSPEND, 1904 dev_name(&pdev->dev), port); 1905 if (retval) { 1906 dev_err(port->dev, "atmel_startup - Can't get irq\n"); 1907 return retval; 1908 } 1909 1910 atomic_set(&atmel_port->tasklet_shutdown, 0); 1911 tasklet_setup(&atmel_port->tasklet_rx, atmel_tasklet_rx_func); 1912 tasklet_setup(&atmel_port->tasklet_tx, atmel_tasklet_tx_func); 1913 1914 /* 1915 * Initialize DMA (if necessary) 1916 */ 1917 atmel_init_property(atmel_port, pdev); 1918 atmel_set_ops(port); 1919 1920 if (atmel_port->prepare_rx) { 1921 retval = atmel_port->prepare_rx(port); 1922 if (retval < 0) 1923 atmel_set_ops(port); 1924 } 1925 1926 if (atmel_port->prepare_tx) { 1927 retval = atmel_port->prepare_tx(port); 1928 if (retval < 0) 1929 atmel_set_ops(port); 1930 } 1931 1932 /* 1933 * Enable FIFO when available 1934 */ 1935 if (atmel_port->fifo_size) { 1936 unsigned int txrdym = ATMEL_US_ONE_DATA; 1937 unsigned int rxrdym = ATMEL_US_ONE_DATA; 1938 unsigned int fmr; 1939 1940 atmel_uart_writel(port, ATMEL_US_CR, 1941 ATMEL_US_FIFOEN | 1942 ATMEL_US_RXFCLR | 1943 ATMEL_US_TXFLCLR); 1944 1945 if (atmel_use_dma_tx(port)) 1946 txrdym = ATMEL_US_FOUR_DATA; 1947 1948 fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym); 1949 if (atmel_port->rts_high && 1950 atmel_port->rts_low) 1951 fmr |= ATMEL_US_FRTSC | 1952 ATMEL_US_RXFTHRES(atmel_port->rts_high) | 1953 ATMEL_US_RXFTHRES2(atmel_port->rts_low); 1954 1955 atmel_uart_writel(port, ATMEL_US_FMR, fmr); 1956 } 1957 1958 /* Save current CSR for comparison in atmel_tasklet_func() */ 1959 atmel_port->irq_status_prev = atmel_uart_readl(port, ATMEL_US_CSR); 1960 1961 /* 1962 * Finally, enable the serial port 1963 */ 1964 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 1965 /* enable xmit & rcvr */ 1966 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 1967 atmel_port->tx_stopped = false; 1968 1969 timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0); 1970 1971 if (atmel_use_pdc_rx(port)) { 1972 /* set UART timeout */ 1973 if (!atmel_port->has_hw_timer) { 1974 mod_timer(&atmel_port->uart_timer, 1975 jiffies + uart_poll_timeout(port)); 1976 /* set USART timeout */ 1977 } else { 1978 atmel_uart_writel(port, atmel_port->rtor, 1979 PDC_RX_TIMEOUT); 1980 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1981 1982 atmel_uart_writel(port, ATMEL_US_IER, 1983 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); 1984 } 1985 /* enable PDC controller */ 1986 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); 1987 } else if (atmel_use_dma_rx(port)) { 1988 /* set UART timeout */ 1989 if (!atmel_port->has_hw_timer) { 1990 mod_timer(&atmel_port->uart_timer, 1991 jiffies + uart_poll_timeout(port)); 1992 /* set USART timeout */ 1993 } else { 1994 atmel_uart_writel(port, atmel_port->rtor, 1995 PDC_RX_TIMEOUT); 1996 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1997 1998 atmel_uart_writel(port, ATMEL_US_IER, 1999 ATMEL_US_TIMEOUT); 2000 } 2001 } else { 2002 /* enable receive only */ 2003 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY); 2004 } 2005 2006 return 0; 2007} 2008 2009/* 2010 * Flush any TX data submitted for DMA. 

/*
 * Flush any TX data submitted for DMA. Called when the TX circular
 * buffer is reset.
 */
static void atmel_flush_buffer(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_pdc_tx(port)) {
		atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
		atmel_port->pdc_tx.ofs = 0;
	}
	/*
	 * in uart_flush_buffer(), the xmit circular buffer has just
	 * been cleared, so we have to reset tx_len accordingly.
	 */
	atmel_port->tx_len = 0;
}

/*
 * Disable the port
 */
static void atmel_shutdown(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	/* Disable modem control lines interrupts */
	atmel_disable_ms(port);

	/* Disable interrupts at device level */
	atmel_uart_writel(port, ATMEL_US_IDR, -1);

	/* Prevent spurious interrupts from scheduling the tasklet */
	atomic_inc(&atmel_port->tasklet_shutdown);

	/*
	 * Prevent any tasklets being scheduled during
	 * cleanup
	 */
	del_timer_sync(&atmel_port->uart_timer);

	/* Make sure that no interrupt is on the fly */
	synchronize_irq(port->irq);

	/*
	 * Clear out any scheduled tasklets before
	 * we destroy the buffers
	 */
	tasklet_kill(&atmel_port->tasklet_rx);
	tasklet_kill(&atmel_port->tasklet_tx);

	/*
	 * Ensure everything is stopped and
	 * disable port and break condition.
	 */
	atmel_stop_rx(port);
	atmel_stop_tx(port);

	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);

	/*
	 * Shut-down the DMA.
	 */
	if (atmel_port->release_rx)
		atmel_port->release_rx(port);
	if (atmel_port->release_tx)
		atmel_port->release_tx(port);

	/*
	 * Reset ring buffer pointers
	 */
	atmel_port->rx_ring.head = 0;
	atmel_port->rx_ring.tail = 0;

	/*
	 * Free the interrupts
	 */
	free_irq(port->irq, port);

	atmel_flush_buffer(port);
}

/*
 * Power / Clock management.
 */
static void atmel_serial_pm(struct uart_port *port, unsigned int state,
			    unsigned int oldstate)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	switch (state) {
	case UART_PM_STATE_ON:
		/*
		 * Enable the peripheral clock for this serial port.
		 * This is called on uart_open() or a resume event.
		 */
		clk_prepare_enable(atmel_port->clk);

		/* re-enable interrupts if we disabled some on suspend */
		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
		break;
	case UART_PM_STATE_OFF:
		/* Back up the interrupt mask and disable all interrupts */
		atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
		atmel_uart_writel(port, ATMEL_US_IDR, -1);

		/*
		 * Disable the peripheral clock for this serial port.
		 * This is called on uart_close() or a suspend event.
		 */
		clk_disable_unprepare(atmel_port->clk);
		break;
	default:
		dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
	}
}

/*
 * Change the port parameters
 */
static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
			      struct ktermios *old)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned long flags;
	unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0;

	/* save the current mode register */
	mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);

	/* reset the mode, clock divisor, parity, stop bits and data size */
	mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
		  ATMEL_US_PAR | ATMEL_US_USMODE);

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);

	/* byte size */
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		mode |= ATMEL_US_CHRL_5;
		break;
	case CS6:
		mode |= ATMEL_US_CHRL_6;
		break;
	case CS7:
		mode |= ATMEL_US_CHRL_7;
		break;
	default:
		mode |= ATMEL_US_CHRL_8;
		break;
	}

	/* stop bits */
	if (termios->c_cflag & CSTOPB)
		mode |= ATMEL_US_NBSTOP_2;

	/* parity */
	if (termios->c_cflag & PARENB) {
		/* Mark or Space parity */
		if (termios->c_cflag & CMSPAR) {
			if (termios->c_cflag & PARODD)
				mode |= ATMEL_US_PAR_MARK;
			else
				mode |= ATMEL_US_PAR_SPACE;
		} else if (termios->c_cflag & PARODD)
			mode |= ATMEL_US_PAR_ODD;
		else
			mode |= ATMEL_US_PAR_EVEN;
	} else
		mode |= ATMEL_US_PAR_NONE;

	spin_lock_irqsave(&port->lock, flags);

	port->read_status_mask = ATMEL_US_OVRE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= ATMEL_US_RXBRK;

	if (atmel_use_pdc_rx(port))
		/* need to enable error interrupts */
		atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= ATMEL_US_RXBRK;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= ATMEL_US_OVRE;
	}
	/* TODO: Ignore all characters if CREAD is set.*/

	/* update the per-port timeout */
	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * save/disable interrupts. The tty layer will ensure that the
	 * transmitter is empty if requested by the caller, so there's
	 * no need to wait for it here.
	 */
	imr = atmel_uart_readl(port, ATMEL_US_IMR);
	atmel_uart_writel(port, ATMEL_US_IDR, -1);

	/* disable receiver and transmitter */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
	atmel_port->tx_stopped = true;

	/* mode */
	if (port->rs485.flags & SER_RS485_ENABLED) {
		atmel_uart_writel(port, ATMEL_US_TTGR,
				  port->rs485.delay_rts_after_send);
		mode |= ATMEL_US_USMODE_RS485;
	} else if (port->iso7816.flags & SER_ISO7816_ENABLED) {
		atmel_uart_writel(port, ATMEL_US_TTGR, port->iso7816.tg);
		/* select mck clock, and output */
		mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
		/* set max iterations */
		mode |= ATMEL_US_MAX_ITER(3);
		if ((port->iso7816.flags & SER_ISO7816_T_PARAM)
		    == SER_ISO7816_T(0))
			mode |= ATMEL_US_USMODE_ISO7816_T0;
		else
			mode |= ATMEL_US_USMODE_ISO7816_T1;
	} else if (termios->c_cflag & CRTSCTS) {
		/* RS232 with hardware handshake (RTS/CTS) */
		if (atmel_use_fifo(port) &&
		    !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
			/*
			 * with ATMEL_US_USMODE_HWHS set, the controller will
			 * be able to drive the RTS pin high/low when the RX
			 * FIFO is above RXFTHRES/below RXFTHRES2.
			 * It will also disable the transmitter when the CTS
			 * pin is high.
			 * This mode is not activated if CTS pin is a GPIO
			 * because in this case, the transmitter is always
			 * disabled (there must be an internal pull-up
			 * responsible for this behaviour).
			 * If the RTS pin is a GPIO, the controller won't be
			 * able to drive it according to the FIFO thresholds,
			 * but it will be handled by the driver.
			 */
			mode |= ATMEL_US_USMODE_HWHS;
		} else {
			/*
			 * For platforms without FIFO, the flow control is
			 * handled by the driver.
			 */
			mode |= ATMEL_US_USMODE_NORMAL;
		}
	} else {
		/* RS232 without hardware handshake */
		mode |= ATMEL_US_USMODE_NORMAL;
	}
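
	/*
	 * Worked example of the fractional divisor computed below, for a
	 * hypothetical 132 MHz peripheral clock at 115200 baud:
	 * div = DIV_ROUND_CLOSEST(132000000, 2 * 115200) = 573, so
	 * cd = 573 >> 3 = 71 and fp = 573 & 7 = 5, giving an actual rate of
	 * 132000000 / (16 * (71 + 5/8)) ~= 115183 (about 0.015% error),
	 * whereas the integer-only divisor (cd = 72) would yield ~114583.
	 */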

	/*
	 * Set the baud rate:
	 * Fractional baudrate allows the output frequency to be set up more
	 * accurately. This feature is enabled only when using normal mode.
	 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8))
	 * Currently, OVER is always set to 0 so we get
	 * baudrate = selected clock / (16 * (CD + FP / 8))
	 * then
	 * 8 CD + FP = selected clock / (2 * baudrate)
	 */
	if (atmel_port->has_frac_baudrate) {
		div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2);
		cd = div >> 3;
		fp = div & ATMEL_US_FP_MASK;
	} else {
		cd = uart_get_divisor(port, baud);
	}

	if (cd > 65535) {	/* BRGR is 16-bit, so switch to slower clock */
		cd /= 8;
		mode |= ATMEL_US_USCLKS_MCK_DIV8;
	}
	quot = cd | fp << ATMEL_US_FP_OFFSET;

	if (!(port->iso7816.flags & SER_ISO7816_ENABLED))
		atmel_uart_writel(port, ATMEL_US_BRGR, quot);

	/* set the mode, clock divisor, parity, stop bits and data size */
	atmel_uart_writel(port, ATMEL_US_MR, mode);

	/*
	 * when switching the mode, set the RTS line state according to the
	 * new mode, otherwise keep the former state
	 */
	if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
		unsigned int rts_state;

		if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
			/* let the hardware control the RTS line */
			rts_state = ATMEL_US_RTSDIS;
		} else {
			/* force RTS line to low level */
			rts_state = ATMEL_US_RTSEN;
		}

		atmel_uart_writel(port, ATMEL_US_CR, rts_state);
	}

	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
	atmel_port->tx_stopped = false;

	/* restore interrupts */
	atmel_uart_writel(port, ATMEL_US_IER, imr);

	/* CTS flow-control and modem-status interrupts */
	if (UART_ENABLE_MS(port, termios->c_cflag))
		atmel_enable_ms(port);
	else
		atmel_disable_ms(port);

	spin_unlock_irqrestore(&port->lock, flags);
}

static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
	if (termios->c_line == N_PPS) {
		port->flags |= UPF_HARDPPS_CD;
		spin_lock_irq(&port->lock);
		atmel_enable_ms(port);
		spin_unlock_irq(&port->lock);
	} else {
		port->flags &= ~UPF_HARDPPS_CD;
		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
			spin_lock_irq(&port->lock);
			atmel_disable_ms(port);
			spin_unlock_irq(&port->lock);
		}
	}
}

/*
 * Return string describing the specified port
 */
static const char *atmel_type(struct uart_port *port)
{
	return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
}

/*
 * Release the memory region(s) being used by 'port'.
 */
static void atmel_release_port(struct uart_port *port)
{
	struct platform_device *mpdev = to_platform_device(port->dev->parent);
	int size = resource_size(mpdev->resource);

	release_mem_region(port->mapbase, size);

	if (port->flags & UPF_IOREMAP) {
		iounmap(port->membase);
		port->membase = NULL;
	}
}
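
/*
 * atmel_request_port() below is the counterpart of atmel_release_port()
 * above: it claims the MMIO region described by the parent platform
 * device's resource and, when UPF_IOREMAP is set, maps it; the release path
 * must undo both steps so the two stay symmetric.
 */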

/*
 * Request the memory region(s) being used by 'port'.
 */
static int atmel_request_port(struct uart_port *port)
{
	struct platform_device *mpdev = to_platform_device(port->dev->parent);
	int size = resource_size(mpdev->resource);

	if (!request_mem_region(port->mapbase, size, "atmel_serial"))
		return -EBUSY;

	if (port->flags & UPF_IOREMAP) {
		port->membase = ioremap(port->mapbase, size);
		if (port->membase == NULL) {
			release_mem_region(port->mapbase, size);
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Configure/autoconfigure the port.
 */
static void atmel_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = PORT_ATMEL;
		atmel_request_port(port);
	}
}

/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 */
static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;
	if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
		ret = -EINVAL;
	if (port->irq != ser->irq)
		ret = -EINVAL;
	if (ser->io_type != SERIAL_IO_MEM)
		ret = -EINVAL;
	if (port->uartclk / 16 != ser->baud_base)
		ret = -EINVAL;
	if (port->mapbase != (unsigned long)ser->iomem_base)
		ret = -EINVAL;
	if (port->iobase != ser->port)
		ret = -EINVAL;
	if (ser->hub6 != 0)
		ret = -EINVAL;
	return ret;
}

#ifdef CONFIG_CONSOLE_POLL
static int atmel_poll_get_char(struct uart_port *port)
{
	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
		cpu_relax();

	return atmel_uart_read_char(port);
}

static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
{
	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
		cpu_relax();

	atmel_uart_write_char(port, ch);
}
#endif

static const struct uart_ops atmel_pops = {
	.tx_empty = atmel_tx_empty,
	.set_mctrl = atmel_set_mctrl,
	.get_mctrl = atmel_get_mctrl,
	.stop_tx = atmel_stop_tx,
	.start_tx = atmel_start_tx,
	.stop_rx = atmel_stop_rx,
	.enable_ms = atmel_enable_ms,
	.break_ctl = atmel_break_ctl,
	.startup = atmel_startup,
	.shutdown = atmel_shutdown,
	.flush_buffer = atmel_flush_buffer,
	.set_termios = atmel_set_termios,
	.set_ldisc = atmel_set_ldisc,
	.type = atmel_type,
	.release_port = atmel_release_port,
	.request_port = atmel_request_port,
	.config_port = atmel_config_port,
	.verify_port = atmel_verify_port,
	.pm = atmel_serial_pm,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char = atmel_poll_get_char,
	.poll_put_char = atmel_poll_put_char,
#endif
};
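
/*
 * atmel_pops above is the table the serial core calls into for this driver:
 * .startup/.shutdown run on open and close, .set_termios applies new line
 * settings, .pm toggles the peripheral clock, and the .poll_* hooks are only
 * used by polled debug clients (e.g. kgdboc) when CONFIG_CONSOLE_POLL is
 * enabled.
 */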

/*
 * Configure the port from the platform device resource info.
 */
static int atmel_init_port(struct atmel_uart_port *atmel_port,
			   struct platform_device *pdev)
{
	int ret;
	struct uart_port *port = &atmel_port->uart;
	struct platform_device *mpdev = to_platform_device(pdev->dev.parent);

	atmel_init_property(atmel_port, pdev);
	atmel_set_ops(port);

	port->iotype = UPIO_MEM;
	port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
	port->ops = &atmel_pops;
	port->fifosize = 1;
	port->dev = &pdev->dev;
	port->mapbase = mpdev->resource[0].start;
	port->irq = platform_get_irq(mpdev, 0);
	port->rs485_config = atmel_config_rs485;
	port->iso7816_config = atmel_config_iso7816;
	port->membase = NULL;

	memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));

	ret = uart_get_rs485_mode(port);
	if (ret)
		return ret;

	/* for console, the clock could already be configured */
	if (!atmel_port->clk) {
		atmel_port->clk = clk_get(&mpdev->dev, "usart");
		if (IS_ERR(atmel_port->clk)) {
			ret = PTR_ERR(atmel_port->clk);
			atmel_port->clk = NULL;
			return ret;
		}
		ret = clk_prepare_enable(atmel_port->clk);
		if (ret) {
			clk_put(atmel_port->clk);
			atmel_port->clk = NULL;
			return ret;
		}
		port->uartclk = clk_get_rate(atmel_port->clk);
		clk_disable_unprepare(atmel_port->clk);
		/* only enable clock when USART is in use */
	}

	/*
	 * Use TXEMPTY for interrupt when rs485 or ISO7816, else TXRDY or
	 * ENDTX|TXBUFE
	 */
	if (atmel_uart_is_half_duplex(port))
		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
	else if (atmel_use_pdc_tx(port)) {
		port->fifosize = PDC_BUFFER_SIZE;
		atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
	} else {
		atmel_port->tx_done_mask = ATMEL_US_TXRDY;
	}

	return 0;
}

#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
static void atmel_console_putchar(struct uart_port *port, unsigned char ch)
{
	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
		cpu_relax();
	atmel_uart_write_char(port, ch);
}

/*
 * Interrupts are disabled on entering
 */
static void atmel_console_write(struct console *co, const char *s, u_int count)
{
	struct uart_port *port = &atmel_ports[co->index].uart;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status, imr;
	unsigned int pdc_tx;

	/*
	 * First, save IMR and then disable interrupts
	 */
	imr = atmel_uart_readl(port, ATMEL_US_IMR);
	atmel_uart_writel(port, ATMEL_US_IDR,
			  ATMEL_US_RXRDY | atmel_port->tx_done_mask);

	/* Store PDC transmit status and disable it */
	pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);

	/* Make sure that tx path is actually able to send characters */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
	atmel_port->tx_stopped = false;

	uart_console_write(port, s, count, atmel_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore IMR
	 */
	do {
		status = atmel_uart_readl(port, ATMEL_US_CSR);
	} while (!(status & ATMEL_US_TXRDY));

	/* Restore PDC transmit status */
	if (pdc_tx)
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);

	/* set interrupts back the way they were */
	atmel_uart_writel(port, ATMEL_US_IER, imr);
}
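
/*
 * The console write path above is fully polled: it saves IMR, masks the
 * RXRDY and TX-done interrupts and pauses any PDC transmission so that the
 * character-by-character output cannot race the interrupt-driven TX path,
 * then busy-waits on TXRDY before handing the transmitter back and restoring
 * the saved interrupt mask.
 */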

/*
 * If the port was already initialised (eg, by a boot loader),
 * try to determine the current setup.
 */
static void __init atmel_console_get_options(struct uart_port *port, int *baud,
					     int *parity, int *bits)
{
	unsigned int mr, quot;

	/*
	 * If the baud rate generator isn't running, the port wasn't
	 * initialized by the boot loader.
	 */
	quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
	if (!quot)
		return;

	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
	if (mr == ATMEL_US_CHRL_8)
		*bits = 8;
	else
		*bits = 7;

	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
	if (mr == ATMEL_US_PAR_EVEN)
		*parity = 'e';
	else if (mr == ATMEL_US_PAR_ODD)
		*parity = 'o';

	/*
	 * The serial core only rounds down when matching this to a
	 * supported baud rate. Make sure we don't end up slightly
	 * lower than one of those, as it would make us fall through
	 * to a much lower baud rate than we really want.
	 */
	*baud = port->uartclk / (16 * (quot - 1));
}

static int __init atmel_console_setup(struct console *co, char *options)
{
	int ret;
	struct uart_port *port = &atmel_ports[co->index].uart;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	int baud = 115200;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (port->membase == NULL) {
		/* Port not initialized yet - delay setup */
		return -ENODEV;
	}

	ret = clk_prepare_enable(atmel_ports[co->index].clk);
	if (ret)
		return ret;

	atmel_uart_writel(port, ATMEL_US_IDR, -1);
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
	atmel_port->tx_stopped = false;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		atmel_console_get_options(port, &baud, &parity, &bits);

	return uart_set_options(port, co, baud, parity, bits, flow);
}

static struct uart_driver atmel_uart;

static struct console atmel_console = {
	.name = ATMEL_DEVICENAME,
	.write = atmel_console_write,
	.device = uart_console_device,
	.setup = atmel_console_setup,
	.flags = CON_PRINTBUFFER,
	.index = -1,
	.data = &atmel_uart,
};

static void atmel_serial_early_write(struct console *con, const char *s,
				     unsigned int n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, atmel_console_putchar);
}

static int __init atmel_early_console_setup(struct earlycon_device *device,
					    const char *options)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = atmel_serial_early_write;

	return 0;
}

OF_EARLYCON_DECLARE(atmel_serial, "atmel,at91rm9200-usart",
		    atmel_early_console_setup);
OF_EARLYCON_DECLARE(atmel_serial, "atmel,at91sam9260-usart",
		    atmel_early_console_setup);

#define ATMEL_CONSOLE_DEVICE (&atmel_console)

#else
#define ATMEL_CONSOLE_DEVICE NULL
#endif

static struct uart_driver atmel_uart = {
	.owner = THIS_MODULE,
	.driver_name = "atmel_serial",
	.dev_name = ATMEL_DEVICENAME,
	.major = SERIAL_ATMEL_MAJOR,
	.minor = MINOR_START,
	.nr = ATMEL_MAX_UART,
	.cons = ATMEL_CONSOLE_DEVICE,
};
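
/*
 * The suspend/resume handlers below implement three policies: when the port
 * is the console and console suspend is enabled, the TX shifter is drained
 * before sleeping; when console suspend is disabled, the relevant registers
 * are cached here and written back on resume since no shutdown()/startup()
 * cycle will happen; and when the slow-clock (backup) mode is about to be
 * entered, wakeup is disabled because the port cannot wake the system then.
 */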

#ifdef CONFIG_PM
static bool atmel_serial_clk_will_stop(void)
{
#ifdef CONFIG_ARCH_AT91
	return at91_suspend_entering_slow_clock();
#else
	return false;
#endif
}

static int atmel_serial_suspend(struct platform_device *pdev,
				pm_message_t state)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (uart_console(port) && console_suspend_enabled) {
		/* Drain the TX shifter */
		while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
			 ATMEL_US_TXEMPTY))
			cpu_relax();
	}

	if (uart_console(port) && !console_suspend_enabled) {
		/* Cache register values as we won't get a full shutdown/startup
		 * cycle
		 */
		atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR);
		atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR);
		atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
		atmel_port->cache.rtor = atmel_uart_readl(port,
							  atmel_port->rtor);
		atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR);
		atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR);
		atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR);
	}

	/* we can not wake up if we're running on slow clock */
	atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
	if (atmel_serial_clk_will_stop()) {
		unsigned long flags;

		spin_lock_irqsave(&atmel_port->lock_suspended, flags);
		atmel_port->suspended = true;
		spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
		device_set_wakeup_enable(&pdev->dev, 0);
	}

	uart_suspend_port(&atmel_uart, port);

	return 0;
}

static int atmel_serial_resume(struct platform_device *pdev)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned long flags;

	if (uart_console(port) && !console_suspend_enabled) {
		atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
		atmel_uart_writel(port, atmel_port->rtor,
				  atmel_port->cache.rtor);
		atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr);

		if (atmel_port->fifo_size) {
			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN |
					  ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR);
			atmel_uart_writel(port, ATMEL_US_FMR,
					  atmel_port->cache.fmr);
			atmel_uart_writel(port, ATMEL_US_FIER,
					  atmel_port->cache.fimr);
		}
		atmel_start_rx(port);
	}

	spin_lock_irqsave(&atmel_port->lock_suspended, flags);
	if (atmel_port->pending) {
		atmel_handle_receive(port, atmel_port->pending);
		atmel_handle_status(port, atmel_port->pending,
				    atmel_port->pending_status);
		atmel_handle_transmit(port, atmel_port->pending);
		atmel_port->pending = 0;
	}
	atmel_port->suspended = false;
	spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);

	uart_resume_port(&atmel_uart, port);
	device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);

	return 0;
}
#else
#define atmel_serial_suspend NULL
#define atmel_serial_resume NULL
#endif
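
/*
 * Example of the thresholds computed below, for a hypothetical 32-data RX
 * FIFO: rts_high = max(32 / 2, 32 - 16) = 16 and
 * rts_low = max(32 / 4, 32 - 20) = 12, so the controller drives RTS high
 * once the FIFO level rises above 16 data and drives it low again when it
 * drains below 12.
 */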

static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
				     struct platform_device *pdev)
{
	atmel_port->fifo_size = 0;
	atmel_port->rts_low = 0;
	atmel_port->rts_high = 0;

	if (of_property_read_u32(pdev->dev.of_node,
				 "atmel,fifo-size",
				 &atmel_port->fifo_size))
		return;

	if (!atmel_port->fifo_size)
		return;

	if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
		atmel_port->fifo_size = 0;
		dev_err(&pdev->dev, "Invalid FIFO size\n");
		return;
	}

	/*
	 * 0 <= rts_low <= rts_high <= fifo_size
	 * Once their CTS line is asserted by the remote peer, some x86 UARTs
	 * tend to flush their internal TX FIFO, commonly up to 16 data, before
	 * actually stopping to send new data. So we try to set the RTS High
	 * Threshold to a reasonably high value respecting this 16 data
	 * empirical rule when possible.
	 */
	atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1,
				     atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
	atmel_port->rts_low = max_t(int, atmel_port->fifo_size >> 2,
				    atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET);

	dev_info(&pdev->dev, "Using FIFO (%u data)\n",
		 atmel_port->fifo_size);
	dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
		atmel_port->rts_high);
	dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n",
		atmel_port->rts_low);
}

static int atmel_serial_probe(struct platform_device *pdev)
{
	struct atmel_uart_port *atmel_port;
	struct device_node *np = pdev->dev.parent->of_node;
	void *data;
	int ret;
	bool rs485_enabled;

	BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));

	/*
	 * In device tree there is no node with "atmel,at91rm9200-usart-serial"
	 * as compatible string. This driver is probed by at91-usart mfd driver
	 * which is just a wrapper over the atmel_serial driver and
	 * spi-at91-usart driver. All attributes needed by this driver are
	 * found in of_node of parent.
	 */
	pdev->dev.of_node = np;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0)
		/* port id not found in platform data nor device-tree aliases:
		 * auto-enumerate it */
		ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);

	if (ret >= ATMEL_MAX_UART) {
		ret = -ENODEV;
		goto err;
	}

	if (test_and_set_bit(ret, atmel_ports_in_use)) {
		/* port already in use */
		ret = -EBUSY;
		goto err;
	}

	atmel_port = &atmel_ports[ret];
	atmel_port->backup_imr = 0;
	atmel_port->uart.line = ret;
	atmel_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_ATMEL_CONSOLE);
	atmel_serial_probe_fifos(atmel_port, pdev);

	atomic_set(&atmel_port->tasklet_shutdown, 0);
	spin_lock_init(&atmel_port->lock_suspended);

	ret = atmel_init_port(atmel_port, pdev);
	if (ret)
		goto err_clear_bit;

	atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
	if (IS_ERR(atmel_port->gpios)) {
		ret = PTR_ERR(atmel_port->gpios);
		goto err_clear_bit;
	}

	if (!atmel_use_pdc_rx(&atmel_port->uart)) {
		ret = -ENOMEM;
		data = kmalloc_array(ATMEL_SERIAL_RINGSIZE,
				     sizeof(struct atmel_uart_char),
				     GFP_KERNEL);
		if (!data)
			goto err_alloc_ring;
		atmel_port->rx_ring.buf = data;
	}

	rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED;

	ret = uart_add_one_port(&atmel_uart, &atmel_port->uart);
	if (ret)
		goto err_add_port;

#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
	if (uart_console(&atmel_port->uart)
			&& ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
		/*
		 * The serial core enabled the clock for us, so undo
		 * the clk_prepare_enable() in atmel_console_setup()
		 */
		clk_disable_unprepare(atmel_port->clk);
	}
#endif

	device_init_wakeup(&pdev->dev, 1);
	platform_set_drvdata(pdev, atmel_port);

	/*
	 * The peripheral clock has been disabled by atmel_init_port():
	 * enable it before accessing I/O registers
	 */
	clk_prepare_enable(atmel_port->clk);

	if (rs485_enabled) {
		atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
				  ATMEL_US_USMODE_NORMAL);
		atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR,
				  ATMEL_US_RTSEN);
	}

	/*
	 * Get port name of usart or uart
	 */
	atmel_get_ip_name(&atmel_port->uart);

	/*
	 * The peripheral clock can now safely be disabled till the port
	 * is used
	 */
	clk_disable_unprepare(atmel_port->clk);

	return 0;

err_add_port:
	kfree(atmel_port->rx_ring.buf);
	atmel_port->rx_ring.buf = NULL;
err_alloc_ring:
	if (!uart_console(&atmel_port->uart)) {
		clk_put(atmel_port->clk);
		atmel_port->clk = NULL;
	}
err_clear_bit:
	clear_bit(atmel_port->uart.line, atmel_ports_in_use);
err:
	return ret;
}
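
/*
 * Port numbering note for the probe above: the line number is taken from the
 * "serial" alias of the parent node when one exists, so a (purely
 * illustrative) "aliases { serial2 = &usart1; };" entry registers that port
 * as line 2; without an alias the first free slot tracked in
 * atmel_ports_in_use is auto-enumerated instead.
 */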

/*
 * Even if the driver is not modular, it makes sense to be able to
 * unbind a device: there can be many bound devices, and there are
 * situations where dynamic binding and unbinding can be useful.
 *
 * For example, a connected device can require a specific firmware update
 * protocol that needs bitbanging on IO lines, but use the regular serial
 * port in the normal case.
 */
static int atmel_serial_remove(struct platform_device *pdev)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	int ret = 0;

	tasklet_kill(&atmel_port->tasklet_rx);
	tasklet_kill(&atmel_port->tasklet_tx);

	device_init_wakeup(&pdev->dev, 0);

	ret = uart_remove_one_port(&atmel_uart, port);

	kfree(atmel_port->rx_ring.buf);

	/* "port" is allocated statically, so we shouldn't free it */

	clear_bit(port->line, atmel_ports_in_use);

	clk_put(atmel_port->clk);
	atmel_port->clk = NULL;
	pdev->dev.of_node = NULL;

	return ret;
}

static struct platform_driver atmel_serial_driver = {
	.probe = atmel_serial_probe,
	.remove = atmel_serial_remove,
	.suspend = atmel_serial_suspend,
	.resume = atmel_serial_resume,
	.driver = {
		.name = "atmel_usart_serial",
		.of_match_table = of_match_ptr(atmel_serial_dt_ids),
	},
};

static int __init atmel_serial_init(void)
{
	int ret;

	ret = uart_register_driver(&atmel_uart);
	if (ret)
		return ret;

	ret = platform_driver_register(&atmel_serial_driver);
	if (ret)
		uart_unregister_driver(&atmel_uart);

	return ret;
}
device_initcall(atmel_serial_init);
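
/*
 * As the comment above atmel_serial_remove() notes, the driver is not built
 * as a module: it is registered once at boot via device_initcall() and there
 * is no module_exit() path here, while per-device unbinding is still handled
 * by atmel_serial_remove().
 */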