// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for the Tundra TSI148 VME-PCI Bridge Chip
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/byteorder/generic.h>
#include <linux/vme.h>

#include "../vme_bridge.h"
#include "vme_tsi148.h"

static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
static void tsi148_remove(struct pci_dev *);

/* Module parameters */
static bool err_chk;
static int geoid;

static const char driver_name[] = "vme_tsi148";

static const struct pci_device_id tsi148_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
	{ },
};

MODULE_DEVICE_TABLE(pci, tsi148_ids);

static struct pci_driver tsi148_driver = {
	.name = driver_name,
	.id_table = tsi148_ids,
	.probe = tsi148_probe,
	.remove = tsi148_remove,
};

static void reg_join(unsigned int high, unsigned int low,
		     unsigned long long *variable)
{
	*variable = (unsigned long long)high << 32;
	*variable |= (unsigned long long)low;
}

static void reg_split(unsigned long long variable, unsigned int *high,
		      unsigned int *low)
{
	*low = (unsigned int)variable & 0xFFFFFFFF;
	*high = (unsigned int)(variable >> 32);
}
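
/*
 * Illustrative example: reg_split(0x123456789A, &hi, &lo) yields hi = 0x12
 * and lo = 0x3456789A; reg_join(0x12, 0x3456789A, &var) recombines them
 * into 0x123456789A. These helpers are used to program the chip's paired
 * upper/lower 32-bit address registers.
 */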

/*
 * Wakes up DMA queue.
 */
static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
	int channel_mask)
{
	u32 serviced = 0;

	if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
		wake_up(&bridge->dma_queue[0]);
		serviced |= TSI148_LCSR_INTC_DMA0C;
	}
	if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
		wake_up(&bridge->dma_queue[1]);
		serviced |= TSI148_LCSR_INTC_DMA1C;
	}

	return serviced;
}

/*
 * Wake up location monitor queue
 */
static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
{
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (stat & TSI148_LCSR_INTS_LMS[i]) {
			/* We only enable interrupts if the callback is set */
			bridge->lm_callback[i](bridge->lm_data[i]);
			serviced |= TSI148_LCSR_INTC_LMC[i];
		}
	}

	return serviced;
}

/*
 * Wake up mail box queue.
 *
 * XXX This functionality is not exposed up through the API.
 */
static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
{
	int i;
	u32 val;
	u32 serviced = 0;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	for (i = 0; i < 4; i++) {
		if (stat & TSI148_LCSR_INTS_MBS[i]) {
			val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
			dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
				": 0x%x\n", i, val);
			serviced |= TSI148_LCSR_INTC_MBC[i];
		}
	}

	return serviced;
}

/*
 * Display error & status message when PERR (PCI) exception interrupt occurs.
 */
static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
		"attributes: %08x\n",
		ioread32be(bridge->base + TSI148_LCSR_EDPAU),
		ioread32be(bridge->base + TSI148_LCSR_EDPAL),
		ioread32be(bridge->base + TSI148_LCSR_EDPAT));

	dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
		"completion reg: %08x\n",
		ioread32be(bridge->base + TSI148_LCSR_EDPXA),
		ioread32be(bridge->base + TSI148_LCSR_EDPXS));

	iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);

	return TSI148_LCSR_INTC_PERRC;
}

/*
 * Save address and status when VME error interrupt occurs.
 */
static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
	unsigned int error_addr_high, error_addr_low;
	unsigned long long error_addr;
	u32 error_attrib;
	int error_am;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
	error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
	error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
	error_am = (error_attrib & TSI148_LCSR_VEAT_AM_M) >> 8;

	reg_join(error_addr_high, error_addr_low, &error_addr);

	/* Check for exception register overflow (we have lost error data) */
	if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
		dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
			"Occurred\n");
	}

	if (err_chk)
		vme_bus_error_handler(tsi148_bridge, error_addr, error_am);
	else
		dev_err(tsi148_bridge->parent,
			"VME Bus Error at address: 0x%llx, attributes: %08x\n",
			error_addr, error_attrib);

	/* Clear Status */
	iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);

	return TSI148_LCSR_INTC_VERRC;
}

/*
 * Wake up IACK queue.
 */
static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
{
	wake_up(&bridge->iack_queue);

	return TSI148_LCSR_INTC_IACKC;
}

/*
 * Call VME bus interrupt callback if provided.
 */
static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
	u32 stat)
{
	int vec, i, serviced = 0;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

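	/* Scan from level 7 down to 1 so that the highest-priority
	 * pending VME interrupt level is acknowledged first.
	 */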
	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			/*
			 * Note: Even though the registers are defined as
			 * 32-bits in the spec, we only want to issue 8-bit
			 * IACK cycles on the bus, read from offset 3.
			 */
			vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);

			vme_irq_handler(tsi148_bridge, i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}

/*
 * Top level interrupt handler. Clears appropriate interrupt status bits and
 * then calls appropriate sub handler(s).
 */
static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = ptr;

	bridge = tsi148_bridge->driver_priv;

	/* Determine which interrupts are unmasked and set */
	enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	stat = ioread32be(bridge->base + TSI148_LCSR_INTS);

	/* Only look at unmasked interrupts */
	stat &= enable;

	if (unlikely(!stat))
		return IRQ_NONE;

	/* Call subhandlers as appropriate */
	/* DMA irqs */
	if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
		serviced |= tsi148_DMA_irqhandler(bridge, stat);

	/* Location monitor irqs */
	if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
			TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
		serviced |= tsi148_LM_irqhandler(bridge, stat);

	/* Mail box irqs */
	if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
			TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
		serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);

	/* PCI bus error */
	if (stat & TSI148_LCSR_INTS_PERRS)
		serviced |= tsi148_PERR_irqhandler(tsi148_bridge);

	/* VME bus error */
	if (stat & TSI148_LCSR_INTS_VERRS)
		serviced |= tsi148_VERR_irqhandler(tsi148_bridge);

	/* IACK irq */
	if (stat & TSI148_LCSR_INTS_IACKS)
		serviced |= tsi148_IACK_irqhandler(bridge);

	/* VME bus irqs */
	if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
			TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
			TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
			TSI148_LCSR_INTS_IRQ1S))
		serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);

	/* Clear serviced interrupts */
	iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);

	return IRQ_HANDLED;
}

static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
{
	int result;
	unsigned int tmp;
	struct pci_dev *pdev;
	struct tsi148_driver *bridge;

	pdev = to_pci_dev(tsi148_bridge->parent);

	bridge = tsi148_bridge->driver_priv;

	result = request_irq(pdev->irq,
			     tsi148_irqhandler,
			     IRQF_SHARED,
			     driver_name, tsi148_bridge);
	if (result) {
		dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
			"vector %02X\n", pdev->irq);
		return result;
	}

	/* Enable and unmask interrupts */
	tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
		TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
		TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
		TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
		TSI148_LCSR_INTEO_IACKEO;

	/* This leaves the following interrupts masked.
	 * TSI148_LCSR_INTEO_VIEEO
	 * TSI148_LCSR_INTEO_SYSFLEO
	 * TSI148_LCSR_INTEO_ACFLEO
	 */

	/* Don't enable Location Monitor interrupts here - they will be
	 * enabled when the location monitors are properly configured and
	 * a callback has been attached.
	 * TSI148_LCSR_INTEO_LM0EO
	 * TSI148_LCSR_INTEO_LM1EO
	 * TSI148_LCSR_INTEO_LM2EO
	 * TSI148_LCSR_INTEO_LM3EO
	 */

	/* Don't enable VME interrupts until we add a handler, else the board
	 * will respond to it and we don't want that unless it knows how to
	 * properly deal with it.
	 * TSI148_LCSR_INTEO_IRQ7EO
	 * TSI148_LCSR_INTEO_IRQ6EO
	 * TSI148_LCSR_INTEO_IRQ5EO
	 * TSI148_LCSR_INTEO_IRQ4EO
	 * TSI148_LCSR_INTEO_IRQ3EO
	 * TSI148_LCSR_INTEO_IRQ2EO
	 * TSI148_LCSR_INTEO_IRQ1EO
	 */

	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

	return 0;
}

static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
			    struct pci_dev *pdev)
{
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;

	/* Turn off interrupts */
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);

	/* Clear all interrupts */
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);

	/* Detach interrupt handler */
	free_irq(pdev->irq, tsi148_bridge);
}

/*
 * Check to see if an IACK has been received, return true (1) or false (0).
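 *
 * The VICR IRQS bit remains set while an interrupt generated by this bridge
 * is still pending on the VME bus, so the IACK is only reported as received
 * once that bit has cleared.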
 */
static int tsi148_iack_received(struct tsi148_driver *bridge)
{
	u32 tmp;

	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);

	if (tmp & TSI148_LCSR_VICR_IRQS)
		return 0;
	else
		return 1;
}

/*
 * Configure VME interrupt
 */
static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
	int state, int sync)
{
	struct pci_dev *pdev;
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* We need to do the ordering differently for enabling and disabling */
	if (state == 0) {
		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
		tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
		tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

		if (sync != 0) {
			pdev = to_pci_dev(tsi148_bridge->parent);
			synchronize_irq(pdev->irq);
		}
	} else {
		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
		tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
		tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
	}
}

/*
 * Generate a VME bus interrupt at the requested level & vector. Wait for
 * interrupt to be acked.
 */
static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
	int statid)
{
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&bridge->vme_int);

	/* Read VICR register */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);

	/* Set Status/ID */
	tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
		(statid & TSI148_LCSR_VICR_STID_M);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);

	/* Assert VMEbus IRQ */
	tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);

	/* XXX Consider implementing a timeout? */
	wait_event_interruptible(bridge->iack_queue,
				 tsi148_iack_received(bridge));

	mutex_unlock(&bridge->vme_int);

	return 0;
}

/*
 * Initialize a slave window with the requested attributes.
 */
static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, u32 aspace, u32 cycle)
{
	unsigned int i, addr = 0, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned int vme_base_low, vme_base_high;
	unsigned int vme_bound_low, vme_bound_high;
	unsigned int pci_offset_low, pci_offset_high;
	unsigned long long vme_bound, pci_offset;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = image->parent;
	bridge = tsi148_bridge->driver_priv;

	i = image->number;

	switch (aspace) {
	case VME_A16:
		granularity = 0x10;
		addr |= TSI148_LCSR_ITAT_AS_A16;
		break;
	case VME_A24:
		granularity = 0x1000;
		addr |= TSI148_LCSR_ITAT_AS_A24;
		break;
	case VME_A32:
		granularity = 0x10000;
		addr |= TSI148_LCSR_ITAT_AS_A32;
		break;
	case VME_A64:
		granularity = 0x10000;
		addr |= TSI148_LCSR_ITAT_AS_A64;
		break;
	default:
		dev_err(tsi148_bridge->parent, "Invalid address space\n");
		return -EINVAL;
	}

	/* Convert 64-bit variables to 2x 32-bit variables */
	reg_split(vme_base, &vme_base_high, &vme_base_low);

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size - granularity;
	reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
	pci_offset = (unsigned long long)pci_base - vme_base;
	reg_split(pci_offset, &pci_offset_high, &pci_offset_low);

	if (vme_base_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
			"alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);
	temp_ctl &= ~TSI148_LCSR_ITAT_EN;
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	/* Setup mapping */
	iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITSAU);
	iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITSAL);
	iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITEAU);
	iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITEAL);
	iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITOFU);
	iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITOFL);

	/* Setup 2eSST speeds */
	temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
		break;
	}

	/* Setup cycle types */
	temp_ctl &= ~(0x1F << 7);
	if (cycle & VME_BLT)
		temp_ctl |= TSI148_LCSR_ITAT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= TSI148_LCSR_ITAT_MBLT;
	if (cycle & VME_2eVME)
		temp_ctl |= TSI148_LCSR_ITAT_2eVME;
	if (cycle & VME_2eSST)
		temp_ctl |= TSI148_LCSR_ITAT_2eSST;
	if (cycle & VME_2eSSTB)
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;

	/* Setup address space */
	temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
	temp_ctl |= addr;

	temp_ctl &= ~0xF;
	if (cycle & VME_SUPER)
		temp_ctl |= TSI148_LCSR_ITAT_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= TSI148_LCSR_ITAT_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= TSI148_LCSR_ITAT_DATA;

	/* Write ctl reg without enable */
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	if (enabled)
		temp_ctl |= TSI148_LCSR_ITAT_EN;

	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	return 0;
}
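
/*
 * Illustrative example: an A24 slave window with vme_base 0x200000, size
 * 0x100000 and pci_base 0x80000000 is programmed with vme_bound = 0x2FF000
 * (base + size - granularity) and pci_offset = 0x7FE00000 (pci_base -
 * vme_base); all three values are multiples of the 0x1000 A24 granularity,
 * so the alignment checks above pass.
 */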

/*
 * Get slave window configuration.
 */
static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned int vme_base_low, vme_base_high;
	unsigned int vme_bound_low, vme_bound_high;
	unsigned int pci_offset_low, pci_offset_high;
	unsigned long long vme_bound, pci_offset;
	struct tsi148_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	/* Read registers */
	ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITSAU);
	vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITSAL);
	vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITEAU);
	vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITEAL);
	pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITOFU);
	pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITOFL);

	/* Convert 2x 32-bit variables to 64-bit variables */
	reg_join(vme_base_high, vme_base_low, vme_base);
	reg_join(vme_bound_high, vme_bound_low, &vme_bound);
	reg_join(pci_offset_high, pci_offset_low, &pci_offset);

	*pci_base = (dma_addr_t)(*vme_base + pci_offset);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & TSI148_LCSR_ITAT_EN)
		*enabled = 1;

	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
		granularity = 0x10;
		*aspace |= VME_A16;
	}
	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
		granularity = 0x1000;
		*aspace |= VME_A24;
	}
	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
		granularity = 0x10000;
		*aspace |= VME_A32;
	}
	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
		granularity = 0x10000;
		*aspace |= VME_A64;
	}

	/* Need granularity before we set the size */
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
		*cycle |= VME_2eSST160;
	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
		*cycle |= VME_2eSST267;
	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
		*cycle |= VME_2eSST320;

	if (ctl & TSI148_LCSR_ITAT_BLT)
		*cycle |= VME_BLT;
	if (ctl & TSI148_LCSR_ITAT_MBLT)
		*cycle |= VME_MBLT;
	if (ctl & TSI148_LCSR_ITAT_2eVME)
		*cycle |= VME_2eVME;
	if (ctl & TSI148_LCSR_ITAT_2eSST)
		*cycle |= VME_2eSST;
	if (ctl & TSI148_LCSR_ITAT_2eSSTB)
		*cycle |= VME_2eSSTB;

	if (ctl & TSI148_LCSR_ITAT_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & TSI148_LCSR_ITAT_NPRIV)
		*cycle |= VME_USER;
	if (ctl & TSI148_LCSR_ITAT_PGM)
		*cycle |= VME_PROG;
	if (ctl & TSI148_LCSR_ITAT_DATA)
		*cycle |= VME_DATA;

	return 0;
}
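
/*
 * With the example window programmed above, reading it back here would yield
 * *size = (vme_bound - vme_base) + granularity
 *       = (0x2FF000 - 0x200000) + 0x1000 = 0x100000.
 */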

/*
 * Allocate and map PCI Resource
 */
static int tsi148_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;
	struct vme_bridge *tsi148_bridge;

	tsi148_bridge = image->parent;

	pdev = to_pci_dev(tsi148_bridge->parent);

	existing_size = (unsigned long long)(image->bus_resource.end -
		image->bus_resource.start);

	/* If the existing size is OK, return */
	if ((size != 0) && (existing_size == (size - 1)))
		return 0;

	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		kfree(image->bus_resource.name);
		release_resource(&image->bus_resource);
		memset(&image->bus_resource, 0, sizeof(image->bus_resource));
	}

	/* Exit here if size is zero */
	if (size == 0)
		return 0;

	if (!image->bus_resource.name) {
		image->bus_resource.name = kmalloc(VMENAMSIZ + 3, GFP_ATOMIC);
		if (!image->bus_resource.name) {
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
		image->number);

	image->bus_resource.start = 0;
	image->bus_resource.end = (unsigned long)size;
	image->bus_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		dev_err(tsi148_bridge->parent, "Failed to allocate mem "
			"resource for window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->bus_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap(image->bus_resource.start, size);
	if (!image->kern_base) {
		dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

err_remap:
	release_resource(&image->bus_resource);
err_resource:
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(image->bus_resource));
err_name:
	return retval;
}

/*
 * Free and unmap PCI Resource
 */
static void tsi148_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&image->bus_resource);
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}

/*
 * Set the attributes of an outbound window.
 */
static int tsi148_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size, u32 aspace,
	u32 cycle, u32 dwidth)
{
	int retval = 0;
	unsigned int i;
	unsigned int temp_ctl = 0;
	unsigned int pci_base_low, pci_base_high;
	unsigned int pci_bound_low, pci_bound_high;
	unsigned int vme_offset_low, vme_offset_high;
	unsigned long long pci_bound, vme_offset, pci_base;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;
	struct pci_bus_region region;
	struct pci_dev *pdev;

	tsi148_bridge = image->parent;

	bridge = tsi148_bridge->driver_priv;

	pdev = to_pci_dev(tsi148_bridge->parent);

	/* Verify input data */
	if (vme_base & 0xFFFF) {
		dev_err(tsi148_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	if ((size == 0) && (enabled != 0)) {
		dev_err(tsi148_bridge->parent, "Size must be non-zero for "
			"enabled windows\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&image->lock);

	/* Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack. If size
	 * is zero, any existing resource will be freed.
	 */
	retval = tsi148_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
			"resource\n");
		goto err_res;
	}

	if (size == 0) {
		pci_base = 0;
		pci_bound = 0;
		vme_offset = 0;
	} else {
		pcibios_resource_to_bus(pdev->bus, &region,
					&image->bus_resource);
		pci_base = region.start;

		/*
		 * Bound address is a valid address for the window, adjust
		 * according to window granularity.
		 */
		pci_bound = pci_base + (size - 0x10000);
		vme_offset = vme_base - pci_base;
	}
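
	/*
	 * Illustrative example: a 1 MiB window whose PCI resource starts at
	 * 0x80000000 and which maps vme_base 0xC0000000 is programmed with
	 * pci_bound = 0x800F0000 (base + size - 0x10000, the 64 kB window
	 * granularity) and vme_offset = 0x40000000.
	 */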

	/* Convert 64-bit variables to 2x 32-bit variables */
	reg_split(pci_base, &pci_base_high, &pci_base_low);
	reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
	reg_split(vme_offset, &vme_offset_high, &vme_offset_low);

	if (pci_base_low & 0xFFFF) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
		retval = -EINVAL;
		goto err_gran;
	}
	if (pci_bound_low & 0xFFFF) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
		retval = -EINVAL;
		goto err_gran;
	}
	if (vme_offset_low & 0xFFFF) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid VME Offset "
			"alignment\n");
		retval = -EINVAL;
		goto err_gran;
	}

	i = image->number;

	/* Disable while we are mucking around */
	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);
	temp_ctl &= ~TSI148_LCSR_OTAT_EN;
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	/* Setup 2eSST speeds */
	temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
		break;
	}

	/* Setup cycle types */
	if (cycle & VME_BLT) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
	}
	if (cycle & VME_MBLT) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
	}
	if (cycle & VME_2eVME) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
	}
	if (cycle & VME_2eSST) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
	}
	if (cycle & VME_2eSSTB) {
		dev_warn(tsi148_bridge->parent, "Currently not setting "
			"Broadcast Select Registers\n");
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
	}

	/* Setup data width */
	temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
	switch (dwidth) {
	case VME_D16:
		temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
		break;
	case VME_D32:
		temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
		break;
	case VME_A24:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
		break;
	case VME_A32:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
		break;
	case VME_A64:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
		break;
	case VME_CRCSR:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
		break;
	case VME_USER2:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
		break;
	case VME_USER3:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
		break;
	case VME_USER4:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	temp_ctl &= ~(3 << 4);
	if (cycle & VME_SUPER)
		temp_ctl |= TSI148_LCSR_OTAT_SUP;
	if (cycle & VME_PROG)
		temp_ctl |= TSI148_LCSR_OTAT_PGM;

	/* Setup mapping */
	iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAU);
	iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAL);
	iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTEAU);
	iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTEAL);
	iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTOFU);
	iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTOFL);

	/* Write ctl reg without enable */
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	if (enabled)
		temp_ctl |= TSI148_LCSR_OTAT_EN;

	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	spin_unlock(&image->lock);
	return 0;

err_aspace:
err_dwidth:
err_gran:
	tsi148_free_resource(image);
err_res:
err_window:
	return retval;
}

/*
 * Get the attributes of an outbound window.
 *
 * XXX Not parsing prefetch information.
 */
static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
	u32 *cycle, u32 *dwidth)
{
	unsigned int i, ctl;
	unsigned int pci_base_low, pci_base_high;
	unsigned int pci_bound_low, pci_bound_high;
	unsigned int vme_offset_low, vme_offset_high;

	unsigned long long pci_base, pci_bound, vme_offset;
	struct tsi148_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAU);
	pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAL);
	pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTEAU);
	pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTEAL);
	vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTOFU);
	vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTOFL);

	/* Convert 2x 32-bit variables to 64-bit variables */
	reg_join(pci_base_high, pci_base_low, &pci_base);
	reg_join(pci_bound_high, pci_bound_low, &pci_bound);
	reg_join(vme_offset_high, vme_offset_low, &vme_offset);

	*vme_base = pci_base + vme_offset;
	*size = (unsigned long long)(pci_bound - pci_base) + 0x10000;

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	if (ctl & TSI148_LCSR_OTAT_EN)
		*enabled = 1;

	/* Setup address space */
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
		*aspace |= VME_A16;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
		*aspace |= VME_A24;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
		*aspace |= VME_A32;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
		*aspace |= VME_A64;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
		*aspace |= VME_CRCSR;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
		*aspace |= VME_USER1;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
		*aspace |= VME_USER2;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
		*aspace |= VME_USER3;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
		*aspace |= VME_USER4;

	/* Setup 2eSST speeds */
	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
		*cycle |= VME_2eSST160;
	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
		*cycle |= VME_2eSST267;
	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
		*cycle |= VME_2eSST320;

	/* Setup cycle types */
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
		*cycle |= VME_SCT;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
		*cycle |= VME_BLT;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
		*cycle |= VME_MBLT;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
		*cycle |= VME_2eVME;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
		*cycle |= VME_2eSST;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
		*cycle |= VME_2eSSTB;

	if (ctl & TSI148_LCSR_OTAT_SUP)
		*cycle |= VME_SUPER;
	else
		*cycle |= VME_USER;

	if (ctl & TSI148_LCSR_OTAT_PGM)
		*cycle |= VME_PROG;
	else
		*cycle |= VME_DATA;

	/* Setup data width */
	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
		*dwidth = VME_D16;
	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
		*dwidth = VME_D32;

	return 0;
}
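
/*
 * For the outbound window from the earlier example this reads back
 * *vme_base = 0x80000000 + 0x40000000 = 0xC0000000 and
 * *size = (0x800F0000 - 0x80000000) + 0x10000 = 0x100000.
 */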

static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
	u32 *cycle, u32 *dwidth)
{
	int retval;

	spin_lock(&image->lock);

	retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);

	spin_unlock(&image->lock);

	return retval;
}

static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval, enabled;
	unsigned long long vme_base, size;
	u32 aspace, cycle, dwidth;
	struct vme_error_handler *handler = NULL;
	struct vme_bridge *tsi148_bridge;
	void __iomem *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	tsi148_bridge = image->parent;

	spin_lock(&image->lock);

	if (err_chk) {
		__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
			&cycle, &dwidth);
		handler = vme_register_error_handler(tsi148_bridge, aspace,
						     vme_base + offset, count);
		if (!handler) {
			spin_unlock(&image->lock);
			return -ENOMEM;
		}
	}

	/* The following code handles VME address alignment. We cannot use
	 * memcpy_xxx here because it may cut data transfers into 8-bit
	 * cycles when D16 or D32 cycles are required on the VME bus.
	 * On the other hand, the bridge itself assures that the maximum data
	 * cycle configured for the transfer is used and splits it
	 * automatically for non-aligned addresses, so we don't want the
	 * overhead of needlessly forcing small transfers for the entire cycle.
	 */
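	/*
	 * Illustrative example: a 10-byte read starting at an address ending
	 * in ...1 is issued below as 1 + 2 + 4 + 2 + 1 byte cycles, i.e. the
	 * leading 8/16-bit accesses align the address for the 32-bit run and
	 * the trailing 16/8-bit accesses mop up the remainder.
	 */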
	if ((uintptr_t)addr & 0x1) {
		*(u8 *)buf = ioread8(addr);
		done += 1;
		if (done == count)
			goto out;
	}
	if ((uintptr_t)(addr + done) & 0x2) {
		if ((count - done) < 2) {
			*(u8 *)(buf + done) = ioread8(addr + done);
			done += 1;
			goto out;
		} else {
			*(u16 *)(buf + done) = ioread16(addr + done);
			done += 2;
		}
	}

	count32 = (count - done) & ~0x3;
	while (done < count32) {
		*(u32 *)(buf + done) = ioread32(addr + done);
		done += 4;
	}

	if ((count - done) & 0x2) {
		*(u16 *)(buf + done) = ioread16(addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		*(u8 *)(buf + done) = ioread8(addr + done);
		done += 1;
	}

out:
	retval = count;

	if (err_chk) {
		if (handler->num_errors) {
			dev_err(image->parent->parent,
				"First VME read error detected at address 0x%llx\n",
				handler->first_error);
			retval = handler->first_error - (vme_base + offset);
		}
		vme_unregister_error_handler(handler);
	}

	spin_unlock(&image->lock);

	return retval;
}
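
/*
 * Note: with err_chk enabled, a detected bus error makes the read/write
 * helpers return the number of bytes transferred before the first faulting
 * address rather than the full count.
 */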

static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval = 0, enabled;
	unsigned long long vme_base, size;
	u32 aspace, cycle, dwidth;
	void __iomem *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	struct vme_error_handler *handler = NULL;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = image->parent;

	bridge = tsi148_bridge->driver_priv;

	spin_lock(&image->lock);

	if (err_chk) {
		__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
			&cycle, &dwidth);
		handler = vme_register_error_handler(tsi148_bridge, aspace,
						     vme_base + offset, count);
		if (!handler) {
			spin_unlock(&image->lock);
			return -ENOMEM;
		}
	}

	/* Here we apply the same strategy as in the master_read function in
	 * order to ensure the correct cycles.
	 */
	if ((uintptr_t)addr & 0x1) {
		iowrite8(*(u8 *)buf, addr);
		done += 1;
		if (done == count)
			goto out;
	}
	if ((uintptr_t)(addr + done) & 0x2) {
		if ((count - done) < 2) {
			iowrite8(*(u8 *)(buf + done), addr + done);
			done += 1;
			goto out;
		} else {
			iowrite16(*(u16 *)(buf + done), addr + done);
			done += 2;
		}
	}

	count32 = (count - done) & ~0x3;
	while (done < count32) {
		iowrite32(*(u32 *)(buf + done), addr + done);
		done += 4;
	}

	if ((count - done) & 0x2) {
		iowrite16(*(u16 *)(buf + done), addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		iowrite8(*(u8 *)(buf + done), addr + done);
		done += 1;
	}

out:
	retval = count;

	/*
	 * Writes are posted. We need to do a read on the VME bus to flush out
	 * all of the writes before we check for errors. We can't guarantee
	 * that reading the data we have just written is safe. It is believed
	 * that there isn't any read/write re-ordering, so we can read any
	 * location in VME space, so lets read the Device ID from the tsi148's
	 * own registers as mapped into CR/CSR space.
	 *
	 * We check for saved errors in the written address range/space.
	 */

	if (err_chk) {
		ioread16(bridge->flush_image->kern_base + 0x7F000);

		if (handler->num_errors) {
			dev_warn(tsi148_bridge->parent,
				 "First VME write error detected at address 0x%llx\n",
				 handler->first_error);
			retval = handler->first_error - (vme_base + offset);
		}
		vme_unregister_error_handler(handler);
	}

	spin_unlock(&image->lock);

	return retval;
}

/*
 * Perform an RMW cycle on the VME bus.
 *
 * Requires a previously configured master window, returns final value.
 */
static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
	unsigned int mask, unsigned int compare, unsigned int swap,
	loff_t offset)
{
	unsigned long long pci_addr;
	unsigned int pci_addr_high, pci_addr_low;
	u32 tmp, result;
	int i;
	struct tsi148_driver *bridge;

	bridge = image->parent->driver_priv;

	/* Find the PCI address that maps to the desired VME address */
	i = image->number;

	/* Locking as we can only do one of these at a time */
	mutex_lock(&bridge->vme_rmw);

	/* Lock image */
	spin_lock(&image->lock);

	pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAU);
	pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAL);

	reg_join(pci_addr_high, pci_addr_low, &pci_addr);
	reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);

	/* Configure registers */
	iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
	iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
	iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
	iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
	iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);

	/* Enable RMW */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
	tmp |= TSI148_LCSR_VMCTRL_RMWEN;
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);

	/* Kick process off with a read to the required address. */
	result = ioread32be(image->kern_base + offset);

	/* Disable RMW */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
	tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);

	spin_unlock(&image->lock);

	mutex_unlock(&bridge->vme_rmw);

	return result;
}

static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
	u32 aspace, u32 cycle, u32 dwidth)
{
	u32 val;

	val = be32_to_cpu(*attr);

	/* Setup 2eSST speeds */
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		val |= TSI148_LCSR_DSAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		val |= TSI148_LCSR_DSAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		val |= TSI148_LCSR_DSAT_2eSSTM_320;
		break;
	}

	/* Setup cycle types */
	if (cycle & VME_SCT)
		val |= TSI148_LCSR_DSAT_TM_SCT;

	if (cycle & VME_BLT)
		val |= TSI148_LCSR_DSAT_TM_BLT;

	if (cycle & VME_MBLT)
		val |= TSI148_LCSR_DSAT_TM_MBLT;

	if (cycle & VME_2eVME)
		val |= TSI148_LCSR_DSAT_TM_2eVME;

	if (cycle & VME_2eSST)
		val |= TSI148_LCSR_DSAT_TM_2eSST;

	if (cycle & VME_2eSSTB) {
		dev_err(dev, "Currently not setting Broadcast Select "
			"Registers\n");
		val |= TSI148_LCSR_DSAT_TM_2eSSTB;
	}

	/* Setup data width */
	switch (dwidth) {
	case VME_D16:
		val |= TSI148_LCSR_DSAT_DBW_16;
		break;
	case VME_D32:
		val |= TSI148_LCSR_DSAT_DBW_32;
		break;
	default:
		dev_err(dev, "Invalid data width\n");
		return -EINVAL;
	}

	/* Setup address space */
	switch (aspace) {
	case VME_A16:
		val |= TSI148_LCSR_DSAT_AMODE_A16;
		break;
	case VME_A24:
		val |= TSI148_LCSR_DSAT_AMODE_A24;
		break;
	case VME_A32:
		val |= TSI148_LCSR_DSAT_AMODE_A32;
		break;
	case VME_A64:
		val |= TSI148_LCSR_DSAT_AMODE_A64;
		break;
	case VME_CRCSR:
		val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
		break;
	case VME_USER1:
		val |= TSI148_LCSR_DSAT_AMODE_USER1;
		break;
	case VME_USER2:
		val |= TSI148_LCSR_DSAT_AMODE_USER2;
		break;
	case VME_USER3:
		val |= TSI148_LCSR_DSAT_AMODE_USER3;
		break;
	case VME_USER4:
		val |= TSI148_LCSR_DSAT_AMODE_USER4;
		break;
	default:
		dev_err(dev, "Invalid address space\n");
		return -EINVAL;
	}

	if (cycle & VME_SUPER)
		val |= TSI148_LCSR_DSAT_SUP;
	if (cycle & VME_PROG)
		val |= TSI148_LCSR_DSAT_PGM;

	*attr = cpu_to_be32(val);

	return 0;
}

static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
	u32 aspace, u32 cycle, u32 dwidth)
{
	u32 val;

	val = be32_to_cpu(*attr);

	/* Setup 2eSST speeds */
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		val |= TSI148_LCSR_DDAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		val |= TSI148_LCSR_DDAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		val |= TSI148_LCSR_DDAT_2eSSTM_320;
		break;
	}

	/* Setup cycle types */
	if (cycle & VME_SCT)
		val |= TSI148_LCSR_DDAT_TM_SCT;

	if (cycle & VME_BLT)
		val |= TSI148_LCSR_DDAT_TM_BLT;

	if (cycle & VME_MBLT)
		val |= TSI148_LCSR_DDAT_TM_MBLT;

	if (cycle & VME_2eVME)
		val |= TSI148_LCSR_DDAT_TM_2eVME;

	if (cycle & VME_2eSST)
		val |= TSI148_LCSR_DDAT_TM_2eSST;

	if (cycle & VME_2eSSTB) {
		dev_err(dev, "Currently not setting Broadcast Select "
			"Registers\n");
		val |= TSI148_LCSR_DDAT_TM_2eSSTB;
	}

	/* Setup data width */
	switch (dwidth) {
	case VME_D16:
		val |= TSI148_LCSR_DDAT_DBW_16;
		break;
	case VME_D32:
		val |= TSI148_LCSR_DDAT_DBW_32;
		break;
	default:
		dev_err(dev, "Invalid data width\n");
		return -EINVAL;
	}

	/* Setup address space */
	switch (aspace) {
	case VME_A16:
		val |= TSI148_LCSR_DDAT_AMODE_A16;
		break;
	case VME_A24:
		val |= TSI148_LCSR_DDAT_AMODE_A24;
		break;
	case VME_A32:
		val |= TSI148_LCSR_DDAT_AMODE_A32;
		break;
	case VME_A64:
		val |= TSI148_LCSR_DDAT_AMODE_A64;
		break;
	case VME_CRCSR:
		val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
		break;
	case VME_USER1:
		val |= TSI148_LCSR_DDAT_AMODE_USER1;
		break;
	case VME_USER2:
		val |= TSI148_LCSR_DDAT_AMODE_USER2;
		break;
	case VME_USER3:
		val |= TSI148_LCSR_DDAT_AMODE_USER3;
		break;
	case VME_USER4:
		val |= TSI148_LCSR_DDAT_AMODE_USER4;
		break;
	default:
		dev_err(dev, "Invalid address space\n");
		return -EINVAL;
	}

	if (cycle & VME_SUPER)
		val |= TSI148_LCSR_DDAT_SUP;
	if (cycle & VME_PROG)
		val |= TSI148_LCSR_DDAT_PGM;

	*attr = cpu_to_be32(val);

	return 0;
}

/*
 * Add a link list descriptor to the list
 *
 * Note: DMA engine expects the DMA descriptor to be big endian.
 */
static int tsi148_dma_list_add(struct vme_dma_list *list,
	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
{
	struct tsi148_dma_entry *entry, *prev;
	u32 address_high, address_low, val;
	struct vme_dma_pattern *pattern_attr;
	struct vme_dma_pci *pci_attr;
	struct vme_dma_vme *vme_attr;
	int retval = 0;
	struct vme_bridge *tsi148_bridge;

	tsi148_bridge = list->parent->parent;

	/* Descriptor must be aligned on 64-bit boundaries */
	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		retval = -ENOMEM;
		goto err_mem;
	}

	/* Test descriptor alignment */
	if ((unsigned long)&entry->descriptor & 0x7) {
		dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
			"byte boundary as required: %p\n",
			&entry->descriptor);
		retval = -EINVAL;
		goto err_align;
	}

	/* Given we are going to fill out the structure, we probably don't
	 * need to zero it, but better safe than sorry for now.
	 */
	memset(&entry->descriptor, 0, sizeof(entry->descriptor));

	/* Fill out source part */
	switch (src->type) {
	case VME_DMA_PATTERN:
		pattern_attr = src->private;

		entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);

		val = TSI148_LCSR_DSAT_TYP_PAT;

		/* Default behaviour is 32 bit pattern */
		if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
			val |= TSI148_LCSR_DSAT_PSZ;

		/* It seems that the default behaviour is to increment */
		if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
			val |= TSI148_LCSR_DSAT_NIN;
		entry->descriptor.dsat = cpu_to_be32(val);
		break;
	case VME_DMA_PCI:
		pci_attr = src->private;

		reg_split((unsigned long long)pci_attr->address, &address_high,
			  &address_low);
		entry->descriptor.dsau = cpu_to_be32(address_high);
		entry->descriptor.dsal = cpu_to_be32(address_low);
		entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
		break;
	case VME_DMA_VME:
		vme_attr = src->private;

		reg_split((unsigned long long)vme_attr->address, &address_high,
			  &address_low);
		entry->descriptor.dsau = cpu_to_be32(address_high);
		entry->descriptor.dsal = cpu_to_be32(address_low);
		entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);

		retval = tsi148_dma_set_vme_src_attributes(
			tsi148_bridge->parent, &entry->descriptor.dsat,
			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
		if (retval < 0)
			goto err_source;
		break;
	default:
		dev_err(tsi148_bridge->parent, "Invalid source type\n");
		retval = -EINVAL;
		goto err_source;
	}

	/* Assume last link - this will be over-written by adding another */
	entry->descriptor.dnlau = cpu_to_be32(0);
	entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);

	/* Fill out destination part */
	switch (dest->type) {
	case VME_DMA_PCI:
		pci_attr = dest->private;

		reg_split((unsigned long long)pci_attr->address, &address_high,
			  &address_low);
		entry->descriptor.ddau = cpu_to_be32(address_high);
		entry->descriptor.ddal = cpu_to_be32(address_low);
		entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
		break;
	case VME_DMA_VME:
		vme_attr = dest->private;

		reg_split((unsigned long long)vme_attr->address, &address_high,
			  &address_low);
		entry->descriptor.ddau = cpu_to_be32(address_high);
		entry->descriptor.ddal = cpu_to_be32(address_low);
		entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);

		retval = tsi148_dma_set_vme_dest_attributes(
			tsi148_bridge->parent, &entry->descriptor.ddat,
			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
		if (retval < 0)
			goto err_dest;
		break;
	default:
		dev_err(tsi148_bridge->parent, "Invalid destination type\n");
		retval = -EINVAL;
		goto err_dest;
	}

	/* Fill out count */
	entry->descriptor.dcnt = cpu_to_be32((u32)count);

	/* Add to list */
	list_add_tail(&entry->list, &list->entries);

	entry->dma_handle = dma_map_single(tsi148_bridge->parent,
					   &entry->descriptor,
					   sizeof(entry->descriptor),
					   DMA_TO_DEVICE);
	if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
		dev_err(tsi148_bridge->parent, "DMA mapping error\n");
		retval = -EINVAL;
		goto err_dma;
	}

	/* Fill out previous descriptors "Next Address" */
	if (entry->list.prev != &list->entries) {
		reg_split((unsigned long long)entry->dma_handle, &address_high,
			  &address_low);
		prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
				  list);
		prev->descriptor.dnlau = cpu_to_be32(address_high);
		prev->descriptor.dnlal = cpu_to_be32(address_low);
	}

	return 0;

err_dma:
err_dest:
err_source:
err_align:
	kfree(entry);
err_mem:
	return retval;
}
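
/*
 * Each descriptor's dnlau/dnlal fields hold the bus address of the next
 * descriptor in the chain (its dma_handle); the final descriptor keeps
 * TSI148_LCSR_DNLAL_LLA set to mark the end of the link list.
 */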

/*
 * Check to see if the provided DMA channel is busy; returns 0 while the
 * channel is busy and 1 once it is idle.
 */
static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
{
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);

	if (tmp & TSI148_LCSR_DSTA_BSY)
		return 0;
	else
		return 1;
}

/*
 * Execute a previously generated link list
 *
 * XXX Need to provide control register configuration.
 */
static int tsi148_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr;
	int channel, retval;
	struct tsi148_dma_entry *entry;
	u32 bus_addr_high, bus_addr_low;
	u32 val, dctlreg = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	ctrlr = list->parent;

	tsi148_bridge = ctrlr->parent;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&ctrlr->mtx);

	channel = ctrlr->number;

	if (!list_empty(&ctrlr->running)) {
		/*
		 * XXX We have an active DMA transfer and currently haven't
		 * sorted out the mechanism for "pending" DMA transfers.
		 * Return busy.
		 */
		/* Need to add to pending here */
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	} else {
		list_add(&list->list, &ctrlr->running);
	}

	/* Get first bus address and write into registers */
	entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
				 list);

	mutex_unlock(&ctrlr->mtx);

	reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);

	iowrite32be(bus_addr_high, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
	iowrite32be(bus_addr_low, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);

	dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DCTL);

	/* Start the operation */
	iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);

	retval = wait_event_interruptible(bridge->dma_queue[channel],
					  tsi148_dma_busy(ctrlr->parent, channel));

	if (retval) {
		iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base +
			TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
		/* Wait for the operation to abort */
		wait_event(bridge->dma_queue[channel],
			   tsi148_dma_busy(ctrlr->parent, channel));
		retval = -EINTR;
		goto exit;
	}

	/*
	 * Read status register, this register is valid until we kick off a
	 * new transfer.
	 */
	val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);

	if (val & TSI148_LCSR_DSTA_VBE) {
		dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
		retval = -EIO;
	}

exit:
	/* Remove list from running list */
	mutex_lock(&ctrlr->mtx);
	list_del(&list->list);
	mutex_unlock(&ctrlr->mtx);

	return retval;
}

/*
 * Clean up a previously generated link list
 *
 * We have a separate function, don't assume that the chain can't be reused.
 */
static int tsi148_dma_list_empty(struct vme_dma_list *list)
{
	struct list_head *pos, *temp;
	struct tsi148_dma_entry *entry;

	struct vme_bridge *tsi148_bridge = list->parent->parent;

	/* detach and free each entry */
	list_for_each_safe(pos, temp, &list->entries) {
		list_del(pos);
		entry = list_entry(pos, struct tsi148_dma_entry, list);

		dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
			sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
		kfree(entry);
	}

	return 0;
}

/*
 * All 4 location monitors reside at the same base - this is therefore a
 * system wide configuration.
 *
 * This does not enable the LM monitor - that should be done when the first
 * callback is attached and disabled when the last callback is removed.
 */
static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
	u32 aspace, u32 cycle)
{
	u32 lm_base_high, lm_base_low, lm_ctl = 0;
	int i;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = lm->parent;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&lm->mtx);

	/* If we already have a callback attached, we can't move it! */
	for (i = 0; i < lm->monitors; i++) {
		if (bridge->lm_callback[i]) {
			mutex_unlock(&lm->mtx);
			dev_err(tsi148_bridge->parent, "Location monitor "
				"callback attached, can't reset\n");
			return -EBUSY;
		}
	}

	switch (aspace) {
	case VME_A16:
		lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
		break;
	case VME_A24:
		lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
		break;
	case VME_A32:
		lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
		break;
	case VME_A64:
		lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
		break;
	default:
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Invalid address space\n");
		return -EINVAL;
	}

	if (cycle & VME_SUPER)
		lm_ctl |= TSI148_LCSR_LMAT_SUPR;
	if (cycle & VME_USER)
		lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
	if (cycle & VME_PROG)
		lm_ctl |= TSI148_LCSR_LMAT_PGM;
	if (cycle & VME_DATA)
		lm_ctl |= TSI148_LCSR_LMAT_DATA;

	reg_split(lm_base, &lm_base_high, &lm_base_low);

	iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
	iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
	iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);

	mutex_unlock(&lm->mtx);

	return 0;
}
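
/*
 * The four location monitors share this single base/attribute configuration;
 * which monitor fires is selected by the offset of the access within the
 * monitored range (believed to be lm_base + 0x0/0x8/0x10/0x18 on the TSI148).
 */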
1985 */ 1986static int tsi148_lm_get(struct vme_lm_resource *lm, 1987 unsigned long long *lm_base, u32 *aspace, u32 *cycle) 1988{ 1989 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0; 1990 struct tsi148_driver *bridge; 1991 1992 bridge = lm->parent->driver_priv; 1993 1994 mutex_lock(&lm->mtx); 1995 1996 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU); 1997 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL); 1998 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT); 1999 2000 reg_join(lm_base_high, lm_base_low, lm_base); 2001 2002 if (lm_ctl & TSI148_LCSR_LMAT_EN) 2003 enabled = 1; 2004 2005 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16) 2006 *aspace |= VME_A16; 2007 2008 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24) 2009 *aspace |= VME_A24; 2010 2011 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32) 2012 *aspace |= VME_A32; 2013 2014 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64) 2015 *aspace |= VME_A64; 2016 2017 2018 if (lm_ctl & TSI148_LCSR_LMAT_SUPR) 2019 *cycle |= VME_SUPER; 2020 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV) 2021 *cycle |= VME_USER; 2022 if (lm_ctl & TSI148_LCSR_LMAT_PGM) 2023 *cycle |= VME_PROG; 2024 if (lm_ctl & TSI148_LCSR_LMAT_DATA) 2025 *cycle |= VME_DATA; 2026 2027 mutex_unlock(&lm->mtx); 2028 2029 return enabled; 2030} 2031 2032/* 2033 * Attach a callback to a specific location monitor. 2034 * 2035 * The callback will be passed the data pointer supplied when it was attached. 2036 */ 2037static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor, 2038 void (*callback)(void *), void *data) 2039{ 2040 u32 lm_ctl, tmp; 2041 struct vme_bridge *tsi148_bridge; 2042 struct tsi148_driver *bridge; 2043 2044 tsi148_bridge = lm->parent; 2045 2046 bridge = tsi148_bridge->driver_priv; 2047 2048 mutex_lock(&lm->mtx); 2049 2050 /* Ensure that the location monitor is configured - need PGM or DATA */ 2051 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT); 2052 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) { 2053 mutex_unlock(&lm->mtx); 2054 dev_err(tsi148_bridge->parent, "Location monitor not properly " 2055 "configured\n"); 2056 return -EINVAL; 2057 } 2058 2059 /* Check that a callback isn't already attached */ 2060 if (bridge->lm_callback[monitor]) { 2061 mutex_unlock(&lm->mtx); 2062 dev_err(tsi148_bridge->parent, "Existing callback attached\n"); 2063 return -EBUSY; 2064 } 2065 2066 /* Attach callback */ 2067 bridge->lm_callback[monitor] = callback; 2068 bridge->lm_data[monitor] = data; 2069 2070 /* Enable Location Monitor interrupt */ 2071 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN); 2072 tmp |= TSI148_LCSR_INTEN_LMEN[monitor]; 2073 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN); 2074 2075 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO); 2076 tmp |= TSI148_LCSR_INTEO_LMEO[monitor]; 2077 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); 2078 2079 /* Ensure that the global Location Monitor Enable is set */ 2080 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) { 2081 lm_ctl |= TSI148_LCSR_LMAT_EN; 2082 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT); 2083 } 2084 2085 mutex_unlock(&lm->mtx); 2086 2087 return 0; 2088} 2089 2090/* 2091 * Detach a callback function from a specific location monitor. 
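 *
 * Typical usage pairs this with tsi148_lm_attach() through the VME core
 * wrappers, roughly as follows (sketch only, assuming the usual
 * vme_lm_attach()/vme_lm_detach() API from drivers/vme; "lm_res",
 * "my_lm_handler" and "priv" are hypothetical caller-side names):
 *
 *	vme_lm_attach(lm_res, 0, my_lm_handler, priv);
 *	...
 *	vme_lm_detach(lm_res, 0);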
2092 */ 2093static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor) 2094{ 2095 u32 lm_en, tmp; 2096 struct tsi148_driver *bridge; 2097 2098 bridge = lm->parent->driver_priv; 2099 2100 mutex_lock(&lm->mtx); 2101 2102 /* Disable Location Monitor and ensure previous interrupts are clear */ 2103 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN); 2104 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor]; 2105 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN); 2106 2107 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO); 2108 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor]; 2109 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); 2110 2111 iowrite32be(TSI148_LCSR_INTC_LMC[monitor], 2112 bridge->base + TSI148_LCSR_INTC); 2113 2114 /* Detach callback */ 2115 bridge->lm_callback[monitor] = NULL; 2116 bridge->lm_data[monitor] = NULL; 2117 2118 /* If all location monitors are disabled, disable the global Location Monitor */ 2119 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S | 2120 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) { 2121 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT); 2122 tmp &= ~TSI148_LCSR_LMAT_EN; 2123 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT); 2124 } 2125 2126 mutex_unlock(&lm->mtx); 2127 2128 return 0; 2129} 2130 2131/* 2132 * Determine Geographical Addressing 2133 */ 2134static int tsi148_slot_get(struct vme_bridge *tsi148_bridge) 2135{ 2136 u32 slot = 0; 2137 struct tsi148_driver *bridge; 2138 2139 bridge = tsi148_bridge->driver_priv; 2140 2141 if (!geoid) { 2142 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT); 2143 slot = slot & TSI148_LCSR_VSTAT_GA_M; 2144 } else 2145 slot = geoid; 2146 2147 return (int)slot; 2148} 2149 2150static void *tsi148_alloc_consistent(struct device *parent, size_t size, 2151 dma_addr_t *dma) 2152{ 2153 struct pci_dev *pdev; 2154 2155 /* Find pci_dev container of dev */ 2156 pdev = to_pci_dev(parent); 2157 2158 return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL); 2159} 2160 2161static void tsi148_free_consistent(struct device *parent, size_t size, 2162 void *vaddr, dma_addr_t dma) 2163{ 2164 struct pci_dev *pdev; 2165 2166 /* Find pci_dev container of dev */ 2167 pdev = to_pci_dev(parent); 2168 2169 dma_free_coherent(&pdev->dev, size, vaddr, dma); 2170} 2171 2172/* 2173 * Configure CR/CSR space 2174 * 2175 * Access to the CR/CSR can be configured at power-up. The location of the 2176 * CR/CSR registers in the CR/CSR address space is determined by the board's 2177 * Auto-ID or Geographic address. This function ensures that the window is 2178 * enabled at an offset consistent with the board's geographic address. 2179 * 2180 * Each board has a 512kB window, with the highest 4kB being used for the 2181 * board's registers; this means there is a fixed-length 508kB window which must 2182 * be mapped onto PCI memory. 
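 *
 * For example, a board whose geographic address is 3 responds at CR/CSR
 * offset 3 * 512kB = 0x180000 (matching the vstat * 0x80000 calculation
 * below), with its own register set occupying the top 4kB of that window.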
2183 */ 2184static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge, 2185 struct pci_dev *pdev) 2186{ 2187 u32 cbar, crat, vstat; 2188 u32 crcsr_bus_high, crcsr_bus_low; 2189 int retval; 2190 struct tsi148_driver *bridge; 2191 2192 bridge = tsi148_bridge->driver_priv; 2193 2194 /* Allocate mem for CR/CSR image */ 2195 bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev, 2196 VME_CRCSR_BUF_SIZE, 2197 &bridge->crcsr_bus, GFP_KERNEL); 2198 if (!bridge->crcsr_kernel) { 2199 dev_err(tsi148_bridge->parent, "Failed to allocate memory for " 2200 "CR/CSR image\n"); 2201 return -ENOMEM; 2202 } 2203 2204 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low); 2205 2206 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU); 2207 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL); 2208 2209 /* Ensure that the CR/CSR is configured at the correct offset */ 2210 cbar = ioread32be(bridge->base + TSI148_CBAR); 2211 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3; 2212 2213 vstat = tsi148_slot_get(tsi148_bridge); 2214 2215 if (cbar != vstat) { 2216 cbar = vstat; 2217 dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n"); 2218 iowrite32be(cbar<<3, bridge->base + TSI148_CBAR); 2219 } 2220 dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar); 2221 2222 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT); 2223 if (crat & TSI148_LCSR_CRAT_EN) 2224 dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n"); 2225 else { 2226 dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n"); 2227 iowrite32be(crat | TSI148_LCSR_CRAT_EN, 2228 bridge->base + TSI148_LCSR_CRAT); 2229 } 2230 2231 /* If we want flushed, error-checked writes, set up a window 2232 * over the CR/CSR registers. We read from here to safely flush 2233 * through VME writes. 2234 */ 2235 if (err_chk) { 2236 retval = tsi148_master_set(bridge->flush_image, 1, 2237 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT, 2238 VME_D16); 2239 if (retval) 2240 dev_err(tsi148_bridge->parent, "Configuring flush image" 2241 " failed\n"); 2242 } 2243 2244 return 0; 2245 2246} 2247 2248static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge, 2249 struct pci_dev *pdev) 2250{ 2251 u32 crat; 2252 struct tsi148_driver *bridge; 2253 2254 bridge = tsi148_bridge->driver_priv; 2255 2256 /* Turn off CR/CSR space */ 2257 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT); 2258 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN, 2259 bridge->base + TSI148_LCSR_CRAT); 2260 2261 /* Free image */ 2262 iowrite32be(0, bridge->base + TSI148_LCSR_CROU); 2263 iowrite32be(0, bridge->base + TSI148_LCSR_CROL); 2264 2265 dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE, 2266 bridge->crcsr_kernel, bridge->crcsr_bus); 2267} 2268 2269static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2270{ 2271 int retval, i, master_num; 2272 u32 data; 2273 struct list_head *pos = NULL, *n; 2274 struct vme_bridge *tsi148_bridge; 2275 struct tsi148_driver *tsi148_device; 2276 struct vme_master_resource *master_image; 2277 struct vme_slave_resource *slave_image; 2278 struct vme_dma_resource *dma_ctrlr; 2279 struct vme_lm_resource *lm; 2280 2281 /* If we want to support more than one of each bridge, we need to 2282 * dynamically generate this so we get one per device 2283 */ 2284 tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL); 2285 if (!tsi148_bridge) { 2286 retval = -ENOMEM; 2287 goto err_struct; 2288 } 2289 vme_init_bridge(tsi148_bridge); 2290 2291 tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL); 2292 if (!tsi148_device) { 2293 retval = 
-ENOMEM; 2294 goto err_driver; 2295 } 2296 2297 tsi148_bridge->driver_priv = tsi148_device; 2298 2299 /* Enable the device */ 2300 retval = pci_enable_device(pdev); 2301 if (retval) { 2302 dev_err(&pdev->dev, "Unable to enable device\n"); 2303 goto err_enable; 2304 } 2305 2306 /* Map Registers */ 2307 retval = pci_request_regions(pdev, driver_name); 2308 if (retval) { 2309 dev_err(&pdev->dev, "Unable to reserve resources\n"); 2310 goto err_resource; 2311 } 2312 2313 /* map registers in BAR 0 */ 2314 tsi148_device->base = ioremap(pci_resource_start(pdev, 0), 2315 4096); 2316 if (!tsi148_device->base) { 2317 dev_err(&pdev->dev, "Unable to remap CRG region\n"); 2318 retval = -EIO; 2319 goto err_remap; 2320 } 2321 2322 /* Check to see if the mapping worked out */ 2323 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF; 2324 if (data != PCI_VENDOR_ID_TUNDRA) { 2325 dev_err(&pdev->dev, "CRG region check failed\n"); 2326 retval = -EIO; 2327 goto err_test; 2328 } 2329 2330 /* Initialize wait queues & mutual exclusion flags */ 2331 init_waitqueue_head(&tsi148_device->dma_queue[0]); 2332 init_waitqueue_head(&tsi148_device->dma_queue[1]); 2333 init_waitqueue_head(&tsi148_device->iack_queue); 2334 mutex_init(&tsi148_device->vme_int); 2335 mutex_init(&tsi148_device->vme_rmw); 2336 2337 tsi148_bridge->parent = &pdev->dev; 2338 strcpy(tsi148_bridge->name, driver_name); 2339 2340 /* Setup IRQ */ 2341 retval = tsi148_irq_init(tsi148_bridge); 2342 if (retval != 0) { 2343 dev_err(&pdev->dev, "Chip Initialization failed.\n"); 2344 goto err_irq; 2345 } 2346 2347 /* If we are going to flush writes, we need to read from the VME bus. 2348 * We need to do this safely, thus we read the device's own CR/CSR 2349 * register. To do this we must set up a window in CR/CSR space and 2350 * hence have one less master window resource available. 
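 *
 * (Reading back through that CR/CSR window forces any posted VME write
 * ahead of it to complete, which is what lets the error-checked write
 * path report a bus error for the preceding access.)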
2351 */ 2352 master_num = TSI148_MAX_MASTER; 2353 if (err_chk) { 2354 master_num--; 2355 2356 tsi148_device->flush_image = 2357 kmalloc(sizeof(*tsi148_device->flush_image), 2358 GFP_KERNEL); 2359 if (!tsi148_device->flush_image) { 2360 retval = -ENOMEM; 2361 goto err_master; 2362 } 2363 tsi148_device->flush_image->parent = tsi148_bridge; 2364 spin_lock_init(&tsi148_device->flush_image->lock); 2365 tsi148_device->flush_image->locked = 1; 2366 tsi148_device->flush_image->number = master_num; 2367 memset(&tsi148_device->flush_image->bus_resource, 0, 2368 sizeof(tsi148_device->flush_image->bus_resource)); 2369 tsi148_device->flush_image->kern_base = NULL; 2370 } 2371 2372 /* Add master windows to list */ 2373 for (i = 0; i < master_num; i++) { 2374 master_image = kmalloc(sizeof(*master_image), GFP_KERNEL); 2375 if (!master_image) { 2376 retval = -ENOMEM; 2377 goto err_master; 2378 } 2379 master_image->parent = tsi148_bridge; 2380 spin_lock_init(&master_image->lock); 2381 master_image->locked = 0; 2382 master_image->number = i; 2383 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 | 2384 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 | 2385 VME_USER3 | VME_USER4; 2386 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | 2387 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 | 2388 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | 2389 VME_PROG | VME_DATA; 2390 master_image->width_attr = VME_D16 | VME_D32; 2391 memset(&master_image->bus_resource, 0, 2392 sizeof(master_image->bus_resource)); 2393 master_image->kern_base = NULL; 2394 list_add_tail(&master_image->list, 2395 &tsi148_bridge->master_resources); 2396 } 2397 2398 /* Add slave windows to list */ 2399 for (i = 0; i < TSI148_MAX_SLAVE; i++) { 2400 slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL); 2401 if (!slave_image) { 2402 retval = -ENOMEM; 2403 goto err_slave; 2404 } 2405 slave_image->parent = tsi148_bridge; 2406 mutex_init(&slave_image->mtx); 2407 slave_image->locked = 0; 2408 slave_image->number = i; 2409 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 | 2410 VME_A64; 2411 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | 2412 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 | 2413 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | 2414 VME_PROG | VME_DATA; 2415 list_add_tail(&slave_image->list, 2416 &tsi148_bridge->slave_resources); 2417 } 2418 2419 /* Add dma engines to list */ 2420 for (i = 0; i < TSI148_MAX_DMA; i++) { 2421 dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL); 2422 if (!dma_ctrlr) { 2423 retval = -ENOMEM; 2424 goto err_dma; 2425 } 2426 dma_ctrlr->parent = tsi148_bridge; 2427 mutex_init(&dma_ctrlr->mtx); 2428 dma_ctrlr->locked = 0; 2429 dma_ctrlr->number = i; 2430 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM | 2431 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME | 2432 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME | 2433 VME_DMA_PATTERN_TO_MEM; 2434 INIT_LIST_HEAD(&dma_ctrlr->pending); 2435 INIT_LIST_HEAD(&dma_ctrlr->running); 2436 list_add_tail(&dma_ctrlr->list, 2437 &tsi148_bridge->dma_resources); 2438 } 2439 2440 /* Add location monitor to list */ 2441 lm = kmalloc(sizeof(*lm), GFP_KERNEL); 2442 if (!lm) { 2443 retval = -ENOMEM; 2444 goto err_lm; 2445 } 2446 lm->parent = tsi148_bridge; 2447 mutex_init(&lm->mtx); 2448 lm->locked = 0; 2449 lm->number = 1; 2450 lm->monitors = 4; 2451 list_add_tail(&lm->list, &tsi148_bridge->lm_resources); 2452 2453 tsi148_bridge->slave_get = tsi148_slave_get; 2454 tsi148_bridge->slave_set = tsi148_slave_set; 2455 tsi148_bridge->master_get = 
tsi148_master_get; 2456 tsi148_bridge->master_set = tsi148_master_set; 2457 tsi148_bridge->master_read = tsi148_master_read; 2458 tsi148_bridge->master_write = tsi148_master_write; 2459 tsi148_bridge->master_rmw = tsi148_master_rmw; 2460 tsi148_bridge->dma_list_add = tsi148_dma_list_add; 2461 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec; 2462 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty; 2463 tsi148_bridge->irq_set = tsi148_irq_set; 2464 tsi148_bridge->irq_generate = tsi148_irq_generate; 2465 tsi148_bridge->lm_set = tsi148_lm_set; 2466 tsi148_bridge->lm_get = tsi148_lm_get; 2467 tsi148_bridge->lm_attach = tsi148_lm_attach; 2468 tsi148_bridge->lm_detach = tsi148_lm_detach; 2469 tsi148_bridge->slot_get = tsi148_slot_get; 2470 tsi148_bridge->alloc_consistent = tsi148_alloc_consistent; 2471 tsi148_bridge->free_consistent = tsi148_free_consistent; 2472 2473 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT); 2474 dev_info(&pdev->dev, "Board is%s the VME system controller\n", 2475 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not"); 2476 if (!geoid) 2477 dev_info(&pdev->dev, "VME geographical address is %d\n", 2478 data & TSI148_LCSR_VSTAT_GA_M); 2479 else 2480 dev_info(&pdev->dev, "VME geographical address is set to %d\n", 2481 geoid); 2482 2483 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n", 2484 err_chk ? "enabled" : "disabled"); 2485 2486 retval = tsi148_crcsr_init(tsi148_bridge, pdev); 2487 if (retval) { 2488 dev_err(&pdev->dev, "CR/CSR configuration failed.\n"); 2489 goto err_crcsr; 2490 } 2491 2492 retval = vme_register_bridge(tsi148_bridge); 2493 if (retval != 0) { 2494 dev_err(&pdev->dev, "Chip Registration failed.\n"); 2495 goto err_reg; 2496 } 2497 2498 pci_set_drvdata(pdev, tsi148_bridge); 2499 2500 /* Clear VME bus "board fail", and "power-up reset" lines */ 2501 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT); 2502 data &= ~TSI148_LCSR_VSTAT_BRDFL; 2503 data |= TSI148_LCSR_VSTAT_CPURST; 2504 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT); 2505 2506 return 0; 2507 2508err_reg: 2509 tsi148_crcsr_exit(tsi148_bridge, pdev); 2510err_crcsr: 2511err_lm: 2512 /* resources are stored in link list */ 2513 list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) { 2514 lm = list_entry(pos, struct vme_lm_resource, list); 2515 list_del(pos); 2516 kfree(lm); 2517 } 2518err_dma: 2519 /* resources are stored in link list */ 2520 list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) { 2521 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list); 2522 list_del(pos); 2523 kfree(dma_ctrlr); 2524 } 2525err_slave: 2526 /* resources are stored in link list */ 2527 list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) { 2528 slave_image = list_entry(pos, struct vme_slave_resource, list); 2529 list_del(pos); 2530 kfree(slave_image); 2531 } 2532err_master: 2533 /* resources are stored in link list */ 2534 list_for_each_safe(pos, n, &tsi148_bridge->master_resources) { 2535 master_image = list_entry(pos, struct vme_master_resource, 2536 list); 2537 list_del(pos); 2538 kfree(master_image); 2539 } 2540 2541 tsi148_irq_exit(tsi148_bridge, pdev); 2542err_irq: 2543err_test: 2544 iounmap(tsi148_device->base); 2545err_remap: 2546 pci_release_regions(pdev); 2547err_resource: 2548 pci_disable_device(pdev); 2549err_enable: 2550 kfree(tsi148_device); 2551err_driver: 2552 kfree(tsi148_bridge); 2553err_struct: 2554 return retval; 2555 2556} 2557 2558static void tsi148_remove(struct pci_dev *pdev) 2559{ 2560 struct list_head *pos = NULL; 2561 
struct list_head *tmplist; 2562 struct vme_master_resource *master_image; 2563 struct vme_slave_resource *slave_image; 2564 struct vme_dma_resource *dma_ctrlr; 2565 int i; 2566 struct tsi148_driver *bridge; 2567 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev); 2568 2569 bridge = tsi148_bridge->driver_priv; 2570 2571 2572 dev_dbg(&pdev->dev, "Driver is being unloaded.\n"); 2573 2574 /* 2575 * Shutdown all inbound and outbound windows. 2576 */ 2577 for (i = 0; i < 8; i++) { 2578 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] + 2579 TSI148_LCSR_OFFSET_ITAT); 2580 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] + 2581 TSI148_LCSR_OFFSET_OTAT); 2582 } 2583 2584 /* 2585 * Shutdown Location monitor. 2586 */ 2587 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT); 2588 2589 /* 2590 * Shutdown CRG map. 2591 */ 2592 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT); 2593 2594 /* 2595 * Clear error status. 2596 */ 2597 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT); 2598 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT); 2599 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT); 2600 2601 /* 2602 * Remove VIRQ interrupt (if any) 2603 */ 2604 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800) 2605 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR); 2606 2607 /* 2608 * Map all Interrupts to PCI INTA 2609 */ 2610 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1); 2611 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2); 2612 2613 tsi148_irq_exit(tsi148_bridge, pdev); 2614 2615 vme_unregister_bridge(tsi148_bridge); 2616 2617 tsi148_crcsr_exit(tsi148_bridge, pdev); 2618 2619 /* resources are stored in link list */ 2620 list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) { 2621 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list); 2622 list_del(pos); 2623 kfree(dma_ctrlr); 2624 } 2625 2626 /* resources are stored in link list */ 2627 list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) { 2628 slave_image = list_entry(pos, struct vme_slave_resource, list); 2629 list_del(pos); 2630 kfree(slave_image); 2631 } 2632 2633 /* resources are stored in link list */ 2634 list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) { 2635 master_image = list_entry(pos, struct vme_master_resource, 2636 list); 2637 list_del(pos); 2638 kfree(master_image); 2639 } 2640 2641 iounmap(bridge->base); 2642 2643 pci_release_regions(pdev); 2644 2645 pci_disable_device(pdev); 2646 2647 kfree(tsi148_bridge->driver_priv); 2648 2649 kfree(tsi148_bridge); 2650} 2651 2652module_pci_driver(tsi148_driver); 2653 2654MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes"); 2655module_param(err_chk, bool, 0); 2656 2657MODULE_PARM_DESC(geoid, "Override geographical addressing"); 2658module_param(geoid, int, 0); 2659 2660MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge"); 2661MODULE_LICENSE("GPL");
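
/*
 * Usage sketch: the DMA list interface implemented above (tsi148_dma_list_add,
 * tsi148_dma_list_exec and tsi148_dma_list_empty) is normally driven through
 * the VME core wrappers. The fragment below is illustrative only and assumes
 * the usual drivers/vme API; "vdev" is a hypothetical struct vme_dev and
 * "buf_dma" a hypothetical DMA-mapped buffer address, with error handling
 * omitted for brevity:
 *
 *	struct vme_resource *res = vme_dma_request(vdev, VME_DMA_MEM_TO_VME);
 *	struct vme_dma_list *list = vme_new_dma_list(res);
 *	struct vme_dma_attr *src = vme_dma_pci_attribute(buf_dma);
 *	struct vme_dma_attr *dst = vme_dma_vme_attribute(0x20000000, VME_A32,
 *							 VME_SCT, VME_D32);
 *
 *	vme_dma_list_add(list, src, dst, 4096);
 *	vme_dma_list_exec(list);
 *	vme_dma_list_free(list);
 *	vme_dma_free_attribute(src);
 *	vme_dma_free_attribute(dst);
 *	vme_dma_free(res);
 */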