fsl_rmu.c (29534B)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale MPC85xx/MPC86xx RapidIO RMU support
 *
 * Copyright 2009 Sysgo AG
 * Thomas Moll <thomas.moll@sysgo.com>
 * - fixed maintenance access routines, check for aligned access
 *
 * Copyright 2009 Integrated Device Technology, Inc.
 * Alex Bounine <alexandre.bounine@idt.com>
 * - Added Port-Write message handling
 * - Added Machine Check exception handling
 *
 * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
 * Zhang Wei <wei.zhang@freescale.com>
 * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
 * Liu Gang <Gang.Liu@freescale.com>
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>

#include "fsl_rio.h"

#define GET_RMM_HANDLE(mport) \
		(((struct rio_priv *)(mport->priv))->rmm_handle)

/* RapidIO IRQs, read from the device tree */
#define IRQ_RIO_PW(m) (((struct fsl_rio_pw *)(m))->pwirq)
#define IRQ_RIO_BELL(m) (((struct fsl_rio_dbell *)(m))->bellirq)
#define IRQ_RIO_TX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
#define IRQ_RIO_RX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)

#define RIO_MIN_TX_RING_SIZE 2
#define RIO_MAX_TX_RING_SIZE 2048
#define RIO_MIN_RX_RING_SIZE 2
#define RIO_MAX_RX_RING_SIZE 2048

#define RIO_IPWMR_SEN 0x00100000
#define RIO_IPWMR_QFIE 0x00000100
#define RIO_IPWMR_EIE 0x00000020
#define RIO_IPWMR_CQ 0x00000002
#define RIO_IPWMR_PWE 0x00000001

#define RIO_IPWSR_QF 0x00100000
#define RIO_IPWSR_TE 0x00000080
#define RIO_IPWSR_QFI 0x00000010
#define RIO_IPWSR_PWD 0x00000008
#define RIO_IPWSR_PWB 0x00000004

#define RIO_EPWISR 0x10010
/* EPWISR Error match value */
#define RIO_EPWISR_PINT1 0x80000000
#define RIO_EPWISR_PINT2 0x40000000
#define RIO_EPWISR_MU 0x00000002
#define RIO_EPWISR_PW 0x00000001

#define IPWSR_CLEAR 0x98
#define OMSR_CLEAR 0x1cb3
#define IMSR_CLEAR 0x491
#define IDSR_CLEAR 0x91
#define ODSR_CLEAR 0x1c00
#define LTLEECSR_ENABLE_ALL 0xFFC000FC
#define RIO_LTLEECSR 0x060c

#define RIO_IM0SR 0x64
#define RIO_IM1SR 0x164
#define RIO_OM0SR 0x4
#define RIO_OM1SR 0x104

#define RIO_DBELL_WIN_SIZE 0x1000

#define RIO_MSG_OMR_MUI 0x00000002
#define RIO_MSG_OSR_TE 0x00000080
#define RIO_MSG_OSR_QOI 0x00000020
#define RIO_MSG_OSR_QFI 0x00000010
#define RIO_MSG_OSR_MUB 0x00000004
#define RIO_MSG_OSR_EOMI 0x00000002
#define RIO_MSG_OSR_QEI 0x00000001

#define RIO_MSG_IMR_MI 0x00000002
#define RIO_MSG_ISR_TE 0x00000080
#define RIO_MSG_ISR_QFI 0x00000010
#define RIO_MSG_ISR_DIQI 0x00000001

#define RIO_MSG_DESC_SIZE 32
#define RIO_MSG_BUFFER_SIZE 4096

#define DOORBELL_DMR_DI 0x00000002
#define DOORBELL_DSR_TE 0x00000080
#define DOORBELL_DSR_QFI 0x00000010
#define DOORBELL_DSR_DIQI 0x00000001

#define DOORBELL_MESSAGE_SIZE 0x08

static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);

struct rio_msg_regs {
	u32 omr;
	u32 osr;
	u32 pad1;
	u32 odqdpar;
	u32 pad2;
	u32 osar;
	u32 odpr;
	u32 odatr;
	u32 odcr;
	u32 pad3;
	u32 odqepar;
	u32 pad4[13];
	u32 imr;
	u32 isr;
	u32 pad5;
	u32 ifqdpar;
	u32 pad6;
	u32 ifqepar;
};

struct rio_dbell_regs {
	u32 odmr;
	u32 odsr;
	u32 pad1[4];
	u32 oddpr;
	u32 oddatr;
	u32 pad2[3];
	u32 odretcr;
	u32 pad3[12];
	u32 dmr;
	u32 dsr;
	u32 pad4;
	u32 dqdpar;
	u32 pad5;
	u32 dqepar;
};

struct rio_pw_regs {
	u32 pwmr;
	u32 pwsr;
	u32 epwqbar;
	u32 pwqbar;
};


struct rio_tx_desc {
	u32 pad1;
	u32 saddr;
	u32 dport;
	u32 dattr;
	u32 pad2;
	u32 pad3;
	u32 dwcnt;
	u32 pad4;
};

struct rio_msg_tx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_TX_RING_SIZE];
	dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
	int tx_slot;
	int size;
	void *dev_id;
};

struct rio_msg_rx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_RX_RING_SIZE];
	int rx_slot;
	int size;
	void *dev_id;
};

struct fsl_rmu {
	struct rio_msg_regs __iomem *msg_regs;
	struct rio_msg_tx_ring msg_tx_ring;
	struct rio_msg_rx_ring msg_rx_ring;
	int txirq;
	int rxirq;
};

struct rio_dbell_msg {
	u16 pad1;
	u16 tid;
	u16 sid;
	u16 info;
};

/**
 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles outbound message interrupts. Executes a registered outbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_tx_handler(int irq, void *dev_instance)
{
	int osr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(port);

	osr = in_be32(&rmu->msg_regs->osr);

	if (osr & RIO_MSG_OSR_TE) {
		pr_info("RIO: outbound message transmission error\n");
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
		goto out;
	}

	if (osr & RIO_MSG_OSR_QOI) {
		pr_info("RIO: outbound message queue overflow\n");
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
		goto out;
	}

	if (osr & RIO_MSG_OSR_EOMI) {
		u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
		int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;
		if (port->outb_msg[0].mcback != NULL) {
			port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
					-1,
					slot);
		}
		/* Ack the end-of-message interrupt */
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
	}

out:
	return IRQ_HANDLED;
}

/**
 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles inbound message interrupts. Executes a registered inbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_rx_handler(int irq, void *dev_instance)
{
	int isr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(port);

	isr = in_be32(&rmu->msg_regs->isr);

	if (isr & RIO_MSG_ISR_TE) {
		pr_info("RIO: inbound message reception error\n");
		out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
		goto out;
	}

	/* XXX Need to check/dispatch until queue empty */
	if (isr & RIO_MSG_ISR_DIQI) {
		/*
		 * Can receive messages for any mailbox/letter to that
		 * mailbox destination. So, make the callback with an
		 * unknown/invalid mailbox number argument.
		 */
		if (port->inb_msg[0].mcback != NULL)
			port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
				-1,
				-1);

		/* Ack the queueing interrupt */
		out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
	}

out:
	return IRQ_HANDLED;
}

/**
 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles doorbell interrupts. Parses a list of registered
 * doorbell event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_dbell_handler(int irq, void *dev_instance)
{
	int dsr;
	struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
	int i;

	dsr = in_be32(&fsl_dbell->dbell_regs->dsr);

	if (dsr & DOORBELL_DSR_TE) {
		pr_info("RIO: doorbell reception error\n");
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
		goto out;
	}

	if (dsr & DOORBELL_DSR_QFI) {
		pr_info("RIO: doorbell queue full\n");
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
	}

	/* XXX Need to check/dispatch until queue empty */
	if (dsr & DOORBELL_DSR_DIQI) {
		struct rio_dbell_msg *dmsg =
			fsl_dbell->dbell_ring.virt +
			(in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
		struct rio_dbell *dbell;
		int found = 0;

		pr_debug("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n",
			 dmsg->sid, dmsg->tid, dmsg->info);

		for (i = 0; i < MAX_PORT_NUM; i++) {
			if (fsl_dbell->mport[i]) {
				list_for_each_entry(dbell,
					&fsl_dbell->mport[i]->dbells, node) {
					if ((dbell->res->start
						<= dmsg->info)
						&& (dbell->res->end
						>= dmsg->info)) {
						found = 1;
						break;
					}
				}
				if (found && dbell->dinb) {
					dbell->dinb(fsl_dbell->mport[i],
						dbell->dev_id, dmsg->sid,
						dmsg->tid,
						dmsg->info);
					break;
				}
			}
		}

		if (!found) {
			pr_debug("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
				 dmsg->sid, dmsg->tid,
				 dmsg->info);
		}
		setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
	}

out:
	return IRQ_HANDLED;
}

void msg_unit_error_handler(void)
{

	/* XXX: Error recovery is not implemented, we just clear errors */
	out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);

	out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);

	out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
	out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);

	out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
}

/**
 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles port write interrupts. Parses a list of registered
 * port write event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_port_write_handler(int irq, void *dev_instance)
{
	u32 ipwmr, ipwsr;
	struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
	u32 epwisr, tmp;

	epwisr = in_be32(rio_regs_win + RIO_EPWISR);
	if (!(epwisr & RIO_EPWISR_PW))
		goto pw_done;

	ipwmr = in_be32(&pw->pw_regs->pwmr);
	ipwsr = in_be32(&pw->pw_regs->pwsr);

#ifdef DEBUG_PW
	pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
	if (ipwsr & RIO_IPWSR_QF)
		pr_debug(" QF");
	if (ipwsr & RIO_IPWSR_TE)
		pr_debug(" TE");
	if (ipwsr & RIO_IPWSR_QFI)
		pr_debug(" QFI");
	if (ipwsr & RIO_IPWSR_PWD)
		pr_debug(" PWD");
	if (ipwsr & RIO_IPWSR_PWB)
		pr_debug(" PWB");
	pr_debug(" )\n");
#endif
	/* Schedule deferred processing if PW was received */
	if (ipwsr & RIO_IPWSR_QFI) {
		/* Save PW message (if there is room in FIFO),
		 * otherwise discard it.
		 */
		if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
			pw->port_write_msg.msg_count++;
			kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
				 RIO_PW_MSG_SIZE);
		} else {
			pw->port_write_msg.discard_count++;
			pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
				 pw->port_write_msg.discard_count);
		}
		/* Clear interrupt and issue Clear Queue command. This allows
		 * another port-write to be received.
		 */
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI);
		out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);

		schedule_work(&pw->pw_work);
	}

	if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
		pw->port_write_msg.err_count++;
		pr_debug("RIO: Port-Write Transaction Err (%d)\n",
			 pw->port_write_msg.err_count);
		/* Clear Transaction Error: port-write controller should be
		 * disabled when clearing this error
		 */
		out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE);
		out_be32(&pw->pw_regs->pwmr, ipwmr);
	}

	if (ipwsr & RIO_IPWSR_PWD) {
		pw->port_write_msg.discard_count++;
		pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
			 pw->port_write_msg.discard_count);
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
	}

pw_done:
	if (epwisr & RIO_EPWISR_PINT1) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		fsl_rio_port_error_handler(0);
	}

	if (epwisr & RIO_EPWISR_PINT2) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		fsl_rio_port_error_handler(1);
	}

	if (epwisr & RIO_EPWISR_MU) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		msg_unit_error_handler();
	}

	return IRQ_HANDLED;
}

static void fsl_pw_dpc(struct work_struct *work)
{
	struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
	union rio_pw_msg msg_buffer;
	int i;

	/*
	 * Process port-write messages
	 */
	while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)&msg_buffer,
				    RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
#ifdef DEBUG_PW
		{
			u32 i;
			pr_debug("%s : Port-Write Message:", __func__);
			for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
				if ((i%4) == 0)
					pr_debug("\n0x%02x: 0x%08x", i*4,
						 msg_buffer.raw[i]);
				else
					pr_debug(" 0x%08x", msg_buffer.raw[i]);
			}
			pr_debug("\n");
		}
#endif
		/* Pass the port-write message to RIO core for processing */
		for (i = 0; i < MAX_PORT_NUM; i++) {
			if (pw->mport[i])
				rio_inb_pwrite_handler(pw->mport[i],
						       &msg_buffer);
		}
	}
}

/**
 * fsl_rio_pw_enable - enable/disable port-write interface
 * @mport: Master port implementing the port write unit
 * @enable: 1=enable; 0=disable port-write message handling
 */
int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
{
	u32 rval;

	rval = in_be32(&pw->pw_regs->pwmr);

	if (enable)
		rval |= RIO_IPWMR_PWE;
	else
		rval &= ~RIO_IPWMR_PWE;

	out_be32(&pw->pw_regs->pwmr, rval);

	return 0;
}

/**
 * fsl_rio_port_write_init - MPC85xx port write interface init
 * @pw: fsl_rio_pw structure describing the port write unit
 *
 * Initializes port write unit hardware and DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */

int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
{
	int rc = 0;

	/* Following configurations require a disabled port write controller */
	out_be32(&pw->pw_regs->pwmr,
		 in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);

	/* Initialize port write */
	pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
			RIO_PW_MSG_SIZE,
			&pw->port_write_msg.phys, GFP_KERNEL);
	if (!pw->port_write_msg.virt) {
		pr_err("RIO: unable to allocate port write queue\n");
		return -ENOMEM;
	}

	pw->port_write_msg.err_count = 0;
	pw->port_write_msg.discard_count = 0;

	/* Point dequeue/enqueue pointers at first entry */
	out_be32(&pw->pw_regs->epwqbar, 0);
	out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);

	pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
		 in_be32(&pw->pw_regs->epwqbar),
		 in_be32(&pw->pw_regs->pwqbar));

	/* Clear interrupt status IPWSR */
	out_be32(&pw->pw_regs->pwsr,
		 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));

	/* Configure port write controller for snooping, enable all reporting,
	 * clear queue full
	 */
	out_be32(&pw->pw_regs->pwmr,
		 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);


	/* Hook up port-write handler */
	rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
			 IRQF_SHARED, "port-write", (void *)pw);
	if (rc < 0) {
		pr_err("MPC85xx RIO: unable to request port-write irq");
		goto err_out;
	}
	/* Enable Error Interrupt */
	out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);

	INIT_WORK(&pw->pw_work, fsl_pw_dpc);
	spin_lock_init(&pw->pw_fifo_lock);
	if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
		pr_err("FIFO allocation failed\n");
		rc = -ENOMEM;
		goto err_out_irq;
	}

	pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
		 in_be32(&pw->pw_regs->pwmr),
		 in_be32(&pw->pw_regs->pwsr));

	return rc;

err_out_irq:
	free_irq(IRQ_RIO_PW(pw), (void *)pw);
err_out:
	dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
			  pw->port_write_msg.virt,
			  pw->port_write_msg.phys);
	return rc;
}
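
/*
 * Illustrative sketch (not part of this driver and kept out of the build):
 * once the port-write unit above is initialized, queued messages are handed
 * to the RIO core through rio_inb_pwrite_handler() in fsl_pw_dpc(). A device
 * driver can register to receive those messages with the generic
 * <linux/rio_drv.h> API roughly as below; the callback body and the
 * "my_rdev" device pointer are assumptions made only for this example.
 */
#if 0
static int example_pwrite_cb(struct rio_dev *rdev, union rio_pw_msg *msg,
			     int step)
{
	/* Log the component tag and error-detect word from the payload */
	dev_info(&rdev->dev, "port-write: comptag 0x%08x errdetect 0x%08x\n",
		 msg->em.comptag, msg->em.errdetect);
	return 0;
}

static int example_register_pwrite(struct rio_dev *my_rdev)
{
	/* Route inbound port-writes that match my_rdev to the callback */
	return rio_request_inb_pwrite(my_rdev, example_pwrite_cb);
}
#endif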

/**
 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell message
 *
 * Sends an MPC85xx doorbell message. Returns %0 on success or
 * %-EINVAL on failure.
 */
int fsl_rio_doorbell_send(struct rio_mport *mport,
			  int index, u16 destid, u16 data)
{
	unsigned long flags;

	pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
		 index, destid, data);

	spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);

	/* On serial RapidIO devices such as the MPC8548 and MPC8641,
	 * the operations below are required.
	 */
	out_be32(&dbell->dbell_regs->odmr, 0x00000000);
	out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
	out_be32(&dbell->dbell_regs->oddpr, destid << 16);
	out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
	out_be32(&dbell->dbell_regs->odmr, 0x00000001);

	spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags);

	return 0;
}

/**
 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 *
 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int
fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
		     void *buffer, size_t len)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 omr;
	struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
					+ rmu->msg_tx_ring.tx_slot;
	int ret = 0;

	pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
		 "%p len %8.8zx\n", rdev->destid, mbox, buffer, len);
	if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy and clear rest of buffer */
	memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
	       len);
	if (len < (RIO_MAX_MSG_SIZE - 4))
		memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
		       + len, 0, RIO_MAX_MSG_SIZE - len);

	/* Set mbox field for message, and set destid */
	desc->dport = (rdev->destid << 16) | (mbox & 0x3);

	/* Enable EOMI interrupt and priority */
	desc->dattr = 0x28000000 | ((mport->index) << 20);

	/* Set transfer size aligned to next power of 2 (in double words) */
	desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);

	/* Set snooping and source buffer address */
	desc->saddr = 0x00000004
		| rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];

	/* Increment enqueue pointer */
	omr = in_be32(&rmu->msg_regs->omr);
	out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);

	/* Go to next descriptor */
	if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
		rmu->msg_tx_ring.tx_slot = 0;

out:
	return ret;
}
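
/*
 * Illustrative sketch (not part of this driver and kept out of the build):
 * fsl_add_outb_message() above and fsl_open_outb_mbox() below are reached
 * through the generic mport operations, so a client driver queues a message
 * roughly as shown here. The mailbox number, the 128-entry ring size and the
 * my_rdev/example_tx_done names are assumptions for the sketch; note the
 * 8-byte minimum and RIO_MAX_MSG_SIZE maximum enforced above.
 */
#if 0
static void example_tx_done(struct rio_mport *mport, void *dev_id,
			    int mbox, int slot)
{
	/* Invoked from fsl_rio_tx_handler() on end-of-message interrupts */
	pr_debug("outbound mbox %d: descriptor slot %d completed\n",
		 mbox, slot);
}

static int example_open_and_send(struct rio_mport *mport,
				 struct rio_dev *my_rdev,
				 void *payload, size_t len)
{
	int rc;

	/* Open outbound mailbox 0 with a 128-entry descriptor ring */
	rc = rio_request_outb_mbox(mport, NULL, 0, 128, example_tx_done);
	if (rc)
		return rc;

	/* Copies the payload into a snooped DMA buffer and kicks the RMU */
	return rio_add_outb_message(mport, my_rdev, 0, payload, len);
}
#endif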

/**
 * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the outbound mailbox ring
 *
 * Initializes the buffer ring, requests the outbound message interrupt,
 * and enables the outbound message unit. Returns %0 on success and
 * %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, j, rc = 0;
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	if ((entries < RIO_MIN_TX_RING_SIZE) ||
	    (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize shadow copy ring */
	rmu->msg_tx_ring.dev_id = dev_id;
	rmu->msg_tx_ring.size = entries;

	for (i = 0; i < rmu->msg_tx_ring.size; i++) {
		rmu->msg_tx_ring.virt_buffer[i] =
			dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
				&rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
		if (!rmu->msg_tx_ring.virt_buffer[i]) {
			rc = -ENOMEM;
			for (j = 0; j < rmu->msg_tx_ring.size; j++)
				if (rmu->msg_tx_ring.virt_buffer[j])
					dma_free_coherent(priv->dev,
						RIO_MSG_BUFFER_SIZE,
						rmu->msg_tx_ring.virt_buffer[j],
						rmu->msg_tx_ring.phys_buffer[j]);
			goto out;
		}
	}

	/* Initialize outbound message descriptor ring */
	rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
				rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
				&rmu->msg_tx_ring.phys,
				GFP_KERNEL);
	if (!rmu->msg_tx_ring.virt) {
		rc = -ENOMEM;
		goto out_dma;
	}
	rmu->msg_tx_ring.tx_slot = 0;

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
	out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);

	/* Configure for snooping */
	out_be32(&rmu->msg_regs->osar, 0x00000004);

	/* Clear interrupt status */
	out_be32(&rmu->msg_regs->osr, 0x000000b3);

	/* Hook up outbound message handler */
	rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
			 "msg_tx", (void *)mport);
	if (rc < 0)
		goto out_irq;

	/*
	 * Configure outbound message unit
	 *      Snooping
	 *      Interrupts (all enabled, except QEIE)
	 *      Chaining mode
	 *      Disable
	 */
	out_be32(&rmu->msg_regs->omr, 0x00100220);

	/* Set number of entries */
	out_be32(&rmu->msg_regs->omr,
		 in_be32(&rmu->msg_regs->omr) |
		 ((get_bitmask_order(entries) - 2) << 12));

	/* Now enable the unit */
	out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);

out:
	return rc;

out_irq:
	dma_free_coherent(priv->dev,
			  rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
			  rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);

out_dma:
	for (i = 0; i < rmu->msg_tx_ring.size; i++)
		dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
				  rmu->msg_tx_ring.virt_buffer[i],
				  rmu->msg_tx_ring.phys_buffer[i]);

	return rc;
}

/**
 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the outbound message unit, frees all buffers, and
 * frees the outbound message interrupt.
 */
void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Disable outbound message unit */
	out_be32(&rmu->msg_regs->omr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev,
			  rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
			  rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_TX(mport), (void *)mport);
}

/**
 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Initializes the buffer ring, requests the inbound message interrupt,
 * and enables the inbound message unit. Returns %0 on success
 * and %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, rc = 0;
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	if ((entries < RIO_MIN_RX_RING_SIZE) ||
	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize client buffer ring */
	rmu->msg_rx_ring.dev_id = dev_id;
	rmu->msg_rx_ring.size = entries;
	rmu->msg_rx_ring.rx_slot = 0;
	for (i = 0; i < rmu->msg_rx_ring.size; i++)
		rmu->msg_rx_ring.virt_buffer[i] = NULL;

	/* Initialize inbound message ring */
	rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
				rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
				&rmu->msg_rx_ring.phys, GFP_KERNEL);
	if (!rmu->msg_rx_ring.virt) {
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
	out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);

	/* Clear interrupt status */
	out_be32(&rmu->msg_regs->isr, 0x00000091);

	/* Hook up inbound message handler */
	rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
			 "msg_rx", (void *)mport);
	if (rc < 0) {
		dma_free_coherent(priv->dev,
				  rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
				  rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
		goto out;
	}

	/*
	 * Configure inbound message unit:
	 *      Snooping
	 *      4KB max message size
	 *      Unmask all interrupt sources
	 *      Disable
	 */
	out_be32(&rmu->msg_regs->imr, 0x001b0060);

	/* Set number of queue entries */
	setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);

	/* Now enable the unit */
	setbits32(&rmu->msg_regs->imr, 0x1);

out:
	return rc;
}

/**
 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the inbound message unit, frees all buffers, and
 * frees the inbound message interrupt.
 */
void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Disable inbound message unit */
	out_be32(&rmu->msg_regs->imr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			  rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_RX(mport), (void *)mport);
}

/**
 * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	int rc = 0;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
		 rmu->msg_rx_ring.rx_slot);

	if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
		printk(KERN_ERR
			"RIO: error adding inbound buffer %d, buffer exists\n",
			rmu->msg_rx_ring.rx_slot);
		rc = -EINVAL;
		goto out;
	}

	rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
	if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
		rmu->msg_rx_ring.rx_slot = 0;

out:
	return rc;
}

/**
 * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 *
 * Gets the next available inbound message from the inbound message queue.
 * A pointer to the message is returned on success or NULL on failure.
 */
void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 phys_buf;
	void *virt_buf;
	void *buf = NULL;
	int buf_idx;

	phys_buf = in_be32(&rmu->msg_regs->ifqdpar);

	/* If no more messages, then bail out */
	if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
		goto out2;

	virt_buf = rmu->msg_rx_ring.virt + (phys_buf
					- rmu->msg_rx_ring.phys);
	buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
	buf = rmu->msg_rx_ring.virt_buffer[buf_idx];

	if (!buf) {
		printk(KERN_ERR
			"RIO: inbound message copy failed, no buffers\n");
		goto out1;
	}

	/* Copy the maximum message size; the caller is expected to have
	 * allocated a buffer that large.
	 */
	memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);

	/* Clear the available buffer */
	rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;

out1:
	setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);

out2:
	return buf;
}
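
/*
 * Illustrative sketch (not part of this driver and kept out of the build):
 * the inbound path above (fsl_open_inb_mbox(), fsl_add_inb_buffer(),
 * fsl_get_inb_message()) is normally driven through the generic
 * <linux/rio_drv.h> wrappers, roughly as below. The ring size, buffer count
 * and the example_* names are assumptions for the sketch; each posted buffer
 * must be at least RIO_MAX_MSG_SIZE bytes because fsl_get_inb_message()
 * copies the full maximum-sized frame.
 */
#if 0
static void example_rx_cb(struct rio_mport *mport, void *dev_id,
			  int mbox, int slot)
{
	void *msg;

	/* Drain every message currently queued in the inbound ring */
	while ((msg = rio_get_inb_message(mport, mbox)) != NULL) {
		pr_debug("inbound mbox %d: got message %p\n", mbox, msg);
		/* Re-post the buffer so the ring slot can be reused */
		rio_add_inb_buffer(mport, mbox, msg);
	}
}

static int example_open_rx(struct rio_mport *mport, void *bufs[], int n)
{
	int i, rc;

	/* Open inbound mailbox 0 with an n-entry frame queue */
	rc = rio_request_inb_mbox(mport, NULL, 0, n, example_rx_cb);
	if (rc)
		return rc;

	/* Post n receive buffers, each RIO_MAX_MSG_SIZE bytes long */
	for (i = 0; i < n; i++)
		rio_add_inb_buffer(mport, 0, bufs[i]);

	return 0;
}
#endif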

/**
 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @dbell: fsl_rio_dbell structure describing the inbound doorbell unit
 *
 * Initializes doorbell unit hardware and inbound DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */
int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
{
	int rc = 0;

	/* Initialize inbound doorbells */
	dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
		DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
	if (!dbell->dbell_ring.virt) {
		printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
	out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);

	/* Clear interrupt status */
	out_be32(&dbell->dbell_regs->dsr, 0x00000091);

	/* Hook up doorbell handler */
	rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
			 "dbell_rx", (void *)dbell);
	if (rc < 0) {
		dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
				  dbell->dbell_ring.virt,
				  dbell->dbell_ring.phys);
		printk(KERN_ERR
			"MPC85xx RIO: unable to request inbound doorbell irq");
		goto out;
	}

	/* Configure doorbells for snooping, 512 entries, and enable */
	out_be32(&dbell->dbell_regs->dmr, 0x00108161);

out:
	return rc;
}

int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
{
	struct rio_priv *priv;
	struct fsl_rmu *rmu;
	u64 msg_start;
	const u32 *msg_addr;
	int mlen;
	int aw;

	if (!mport || !mport->priv)
		return -EINVAL;

	priv = mport->priv;

	if (!node) {
		dev_warn(priv->dev, "Can't get %pOF property 'fsl,rmu'\n",
			 priv->dev->of_node);
		return -EINVAL;
	}

	rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
	if (!rmu)
		return -ENOMEM;

	aw = of_n_addr_cells(node);
	msg_addr = of_get_property(node, "reg", &mlen);
	if (!msg_addr) {
		pr_err("%pOF: unable to find 'reg' property of message-unit\n",
		       node);
		kfree(rmu);
		return -ENOMEM;
	}
	msg_start = of_read_number(msg_addr, aw);

	rmu->msg_regs = (struct rio_msg_regs *)
			(rmu_regs_win + (u32)msg_start);

	rmu->txirq = irq_of_parse_and_map(node, 0);
	rmu->rxirq = irq_of_parse_and_map(node, 1);
	printk(KERN_INFO "%pOF: txirq: %d, rxirq %d\n",
	       node, rmu->txirq, rmu->rxirq);

	priv->rmm_handle = rmu;

	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);

	return 0;
}
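
/*
 * Illustrative sketch (not part of this driver and kept out of the build):
 * fsl_rio_doorbell_send() and fsl_rio_dbell_handler() above sit behind the
 * generic doorbell API from <linux/rio_drv.h>. A client driver would
 * typically reserve an info range and ring a remote doorbell roughly as
 * below; the 0x1000-0x10ff range, the info value and the my_rdev name are
 * assumptions made only for this example.
 */
#if 0
static void example_dbell_inb(struct rio_mport *mport, void *dev_id,
			      u16 src, u16 dst, u16 info)
{
	/* Dispatched from fsl_rio_dbell_handler() when info is in range */
	pr_debug("doorbell from destid 0x%04x: info 0x%04x\n", src, info);
}

static int example_doorbells(struct rio_mport *mport, struct rio_dev *my_rdev)
{
	int rc;

	/* Claim inbound doorbell info values 0x1000..0x10ff */
	rc = rio_request_inb_dbell(mport, NULL, 0x1000, 0x10ff,
				   example_dbell_inb);
	if (rc)
		return rc;

	/* Ring the remote device; this ends up in fsl_rio_doorbell_send() */
	return rio_send_doorbell(my_rdev, 0x1000);
}
#endif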