sx8.c
/*
 * sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware
 *
 * Copyright 2004-2005 Red Hat, Inc.
 *
 * Author/maintainer: Jeff Garzik <jgarzik@pobox.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
#include <linux/uaccess.h>

#if 0
#define CARM_DEBUG
#define CARM_VERBOSE_DEBUG
#else
#undef CARM_DEBUG
#undef CARM_VERBOSE_DEBUG
#endif
#undef CARM_NDEBUG

#define DRV_NAME "sx8"
#define DRV_VERSION "1.0"
#define PFX DRV_NAME ": "

MODULE_AUTHOR("Jeff Garzik");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Promise SATA SX8 block driver");
MODULE_VERSION(DRV_VERSION);

/*
 * SX8 hardware has a single message queue for all ATA ports.
 * When this driver was written, the hardware (firmware?) would
 * corrupt data eventually, if more than one request was outstanding.
 * As one can imagine, having 8 ports bottlenecking on a single
 * command hurts performance.
 *
 * Based on user reports, later versions of the hardware (firmware?)
 * seem to be able to survive with more than one command queued.
 *
 * Therefore, we default to the safe option -- 1 command -- but
 * allow the user to increase this.
 *
 * SX8 should be able to support up to ~60 queued commands (CARM_MAX_REQ),
 * but problems seem to occur when you exceed ~30, even on newer hardware.
 */
static int max_queue = 1;
module_param(max_queue, int, 0444);
MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30, safe==1)");

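/*
 * Usage note (editorial, not in the original source): the parameter is
 * set at load time, e.g. "modprobe sx8 max_queue=8", and is read-only
 * afterwards (perms 0444).  carm_init_one() passes the value straight
 * to the blk-mq tag set as its queue depth; nothing in this driver
 * clamps it to the advertised 1..30 range, so stay within it.
 */
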
#define NEXT_RESP(idx)	((idx + 1) % RMSG_Q_LEN)

/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */
#define TAG_ENCODE(tag)	(((tag) << 16) | 0xf)
#define TAG_DECODE(tag)	(((tag) >> 16) & 0x1f)
#define TAG_VALID(tag)	((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32))

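/*
 * Worked example of the tag encoding (editorial, not in the original
 * source): TAG_ENCODE(5) == 0x0005000f; TAG_DECODE(0x0005000f) == 5;
 * TAG_VALID() accepts the value because its low nibble is 0xf and the
 * decoded tag is below 32.
 */
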
/* note: prints function name for you */
#ifdef CARM_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
#ifdef CARM_VERBOSE_DEBUG
#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
#else
#define VPRINTK(fmt, args...)
#endif	/* CARM_VERBOSE_DEBUG */
#else
#define DPRINTK(fmt, args...)
#define VPRINTK(fmt, args...)
#endif	/* CARM_DEBUG */

#ifdef CARM_NDEBUG
#define assert(expr)
#else
#define assert(expr) \
	if(unlikely(!(expr))) { \
		printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
		       #expr, __FILE__, __func__, __LINE__); \
	}
#endif

/* defines only for the constants which don't work well as enums */
struct carm_host;

enum {
	/* adapter-wide limits */
	CARM_MAX_PORTS = 8,
	CARM_SHM_SIZE = (4096 << 7),
	CARM_MINORS_PER_MAJOR = 256 / CARM_MAX_PORTS,
	CARM_MAX_WAIT_Q = CARM_MAX_PORTS + 1,

	/* command message queue limits */
	CARM_MAX_REQ = 64,			/* max command msgs per host */
	CARM_MSG_LOW_WATER = (CARM_MAX_REQ / 4), /* refill mark */

	/* S/G limits, host-wide and per-request */
	CARM_MAX_REQ_SG = 32,			/* max s/g entries per request */
	CARM_MAX_HOST_SG = 600,			/* max s/g entries per host */
	CARM_SG_LOW_WATER = (CARM_MAX_HOST_SG / 4), /* re-fill mark */

	/* hardware registers */
	CARM_IHQP = 0x1c,
	CARM_INT_STAT = 0x10,			/* interrupt status */
	CARM_INT_MASK = 0x14,			/* interrupt mask */
	CARM_HMUC = 0x18,			/* host message unit control */
	RBUF_ADDR_LO = 0x20,			/* response msg DMA buf low 32 bits */
	RBUF_ADDR_HI = 0x24,			/* response msg DMA buf high 32 bits */
	RBUF_BYTE_SZ = 0x28,
	CARM_RESP_IDX = 0x2c,
	CARM_CMS0 = 0x30,			/* command message size reg 0 */
	CARM_LMUC = 0x48,
	CARM_HMPHA = 0x6c,
	CARM_INITC = 0xb5,

	/* bits in CARM_INT_{STAT,MASK} */
	INT_RESERVED = 0xfffffff0,
	INT_WATCHDOG = (1 << 3),		/* watchdog timer */
	INT_Q_OVERFLOW = (1 << 2),		/* cmd msg q overflow */
	INT_Q_AVAILABLE = (1 << 1),		/* cmd msg q has free space */
	INT_RESPONSE = (1 << 0),		/* response msg available */
	INT_ACK_MASK = INT_WATCHDOG | INT_Q_OVERFLOW,
	INT_DEF_MASK = INT_RESERVED | INT_Q_OVERFLOW |
		       INT_RESPONSE,

	/* command messages, and related register bits */
	CARM_HAVE_RESP = 0x01,
	CARM_MSG_READ = 1,
	CARM_MSG_WRITE = 2,
	CARM_MSG_VERIFY = 3,
	CARM_MSG_GET_CAPACITY = 4,
	CARM_MSG_FLUSH = 5,
	CARM_MSG_IOCTL = 6,
	CARM_MSG_ARRAY = 8,
	CARM_MSG_MISC = 9,
	CARM_CME = (1 << 2),
	CARM_RME = (1 << 1),
	CARM_WZBC = (1 << 0),
	CARM_RMI = (1 << 0),
	CARM_Q_FULL = (1 << 3),
	CARM_MSG_SIZE = 288,
	CARM_Q_LEN = 48,

	/* CARM_MSG_IOCTL messages */
	CARM_IOC_SCAN_CHAN = 5,			/* scan channels for devices */
	CARM_IOC_GET_TCQ = 13,			/* get tcq/ncq depth */
	CARM_IOC_SET_TCQ = 14,			/* set tcq/ncq depth */

	IOC_SCAN_CHAN_NODEV = 0x1f,
	IOC_SCAN_CHAN_OFFSET = 0x40,

	/* CARM_MSG_ARRAY messages */
	CARM_ARRAY_INFO = 0,

	ARRAY_NO_EXIST = (1 << 31),

	/* response messages */
	RMSG_SZ = 8,				/* sizeof(struct carm_response) */
	RMSG_Q_LEN = 48,			/* resp. msg list length */
	RMSG_OK = 1,				/* bit indicating msg was successful */
						/* length of entire resp. msg buffer */
	RBUF_LEN = RMSG_SZ * RMSG_Q_LEN,

	PDC_SHM_SIZE = (4096 << 7),		/* length of entire h/w buffer */

	/* CARM_MSG_MISC messages */
	MISC_GET_FW_VER = 2,
	MISC_ALLOC_MEM = 3,
	MISC_SET_TIME = 5,

	/* MISC_GET_FW_VER feature bits */
	FW_VER_4PORT = (1 << 2),		/* 1=4 ports, 0=8 ports */
	FW_VER_NON_RAID = (1 << 1),		/* 1=non-RAID firmware, 0=RAID */
	FW_VER_ZCR = (1 << 0),			/* zero channel RAID (whatever that is) */

	/* carm_host flags */
	FL_NON_RAID = FW_VER_NON_RAID,
	FL_4PORT = FW_VER_4PORT,
	FL_FW_VER_MASK = (FW_VER_NON_RAID | FW_VER_4PORT),
	FL_DYN_MAJOR = (1 << 17),
};

enum {
	CARM_SG_BOUNDARY = 0xffffUL,		/* s/g segment boundary */
};

enum scatter_gather_types {
	SGT_32BIT = 0,
	SGT_64BIT = 1,
};

enum host_states {
	HST_INVALID,		/* invalid state; never used */
	HST_ALLOC_BUF,		/* setting up master SHM area */
	HST_ERROR,		/* we never leave here */
	HST_PORT_SCAN,		/* start dev scan */
	HST_DEV_SCAN_START,	/* start per-device probe */
	HST_DEV_SCAN,		/* continue per-device probe */
	HST_DEV_ACTIVATE,	/* activate devices we found */
	HST_PROBE_FINISHED,	/* probe is complete */
	HST_PROBE_START,	/* initiate probe */
	HST_SYNC_TIME,		/* tell firmware what time it is */
	HST_GET_FW_VER,		/* get firmware version, adapter port cnt */
};

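/*
 * Editorial summary, derived from carm_fsm_task() and carm_handle_resp()
 * below: during probe the state machine advances HST_PROBE_START ->
 * HST_ALLOC_BUF -> HST_SYNC_TIME -> HST_GET_FW_VER -> HST_PORT_SCAN ->
 * HST_DEV_SCAN_START -> HST_DEV_SCAN (once per detected device) ->
 * HST_DEV_ACTIVATE -> HST_PROBE_FINISHED; any failure drops it into
 * HST_ERROR, which is never left.
 */
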
#ifdef CARM_DEBUG
static const char *state_name[] = {
	"HST_INVALID",
	"HST_ALLOC_BUF",
	"HST_ERROR",
	"HST_PORT_SCAN",
	"HST_DEV_SCAN_START",
	"HST_DEV_SCAN",
	"HST_DEV_ACTIVATE",
	"HST_PROBE_FINISHED",
	"HST_PROBE_START",
	"HST_SYNC_TIME",
	"HST_GET_FW_VER",
};
#endif

struct carm_port {
	unsigned int port_no;
	struct gendisk *disk;
	struct carm_host *host;

	/* attached device characteristics */
	u64 capacity;
	char name[41];
	u16 dev_geom_head;
	u16 dev_geom_sect;
	u16 dev_geom_cyl;
};

struct carm_request {
	int n_elem;
	unsigned int msg_type;
	unsigned int msg_subtype;
	unsigned int msg_bucket;
	struct scatterlist sg[CARM_MAX_REQ_SG];
};

struct carm_host {
	unsigned long flags;
	void __iomem *mmio;
	void *shm;
	dma_addr_t shm_dma;

	int major;
	int id;
	char name[32];

	spinlock_t lock;
	struct pci_dev *pdev;
	unsigned int state;
	u32 fw_ver;

	struct blk_mq_tag_set tag_set;
	struct request_queue *oob_q;
	unsigned int n_oob;

	unsigned int hw_sg_used;

	unsigned int resp_idx;

	unsigned int wait_q_prod;
	unsigned int wait_q_cons;
	struct request_queue *wait_q[CARM_MAX_WAIT_Q];

	void *msg_base;
	dma_addr_t msg_dma;

	int cur_scan_dev;
	unsigned long dev_active;
	unsigned long dev_present;
	struct carm_port port[CARM_MAX_PORTS];

	struct work_struct fsm_task;

	int probe_err;
	struct completion probe_comp;
};

struct carm_response {
	__le32 ret_handle;
	__le32 status;
} __attribute__((packed));

struct carm_msg_sg {
	__le32 start;
	__le32 len;
} __attribute__((packed));

struct carm_msg_rw {
	u8 type;
	u8 id;
	u8 sg_count;
	u8 sg_type;
	__le32 handle;
	__le32 lba;
	__le16 lba_count;
	__le16 lba_high;
	struct carm_msg_sg sg[32];
} __attribute__((packed));

struct carm_msg_allocbuf {
	u8 type;
	u8 subtype;
	u8 n_sg;
	u8 sg_type;
	__le32 handle;
	__le32 addr;
	__le32 len;
	__le32 evt_pool;
	__le32 n_evt;
	__le32 rbuf_pool;
	__le32 n_rbuf;
	__le32 msg_pool;
	__le32 n_msg;
	struct carm_msg_sg sg[8];
} __attribute__((packed));

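/*
 * Editorial note: the control messages below (ioctl, sync_time,
 * get_fw_ver, and the allocbuf message above) share a common leading
 * layout -- a type byte, a subtype byte and a TAG_ENCODE()'d handle.
 * carm_send_special() relies on this when it reads ioc->type and
 * ioc->subtype from whatever message a fill callback built, and the
 * hardware echoes the handle back in carm_response.ret_handle so that
 * carm_handle_resp() can match responses to requests.
 */
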
struct carm_msg_ioctl {
	u8 type;
	u8 subtype;
	u8 array_id;
	u8 reserved1;
	__le32 handle;
	__le32 data_addr;
	u32 reserved2;
} __attribute__((packed));

struct carm_msg_sync_time {
	u8 type;
	u8 subtype;
	u16 reserved1;
	__le32 handle;
	u32 reserved2;
	__le32 timestamp;
} __attribute__((packed));

struct carm_msg_get_fw_ver {
	u8 type;
	u8 subtype;
	u16 reserved1;
	__le32 handle;
	__le32 data_addr;
	u32 reserved2;
} __attribute__((packed));

struct carm_fw_ver {
	__le32 version;
	u8 features;
	u8 reserved1;
	u16 reserved2;
} __attribute__((packed));

struct carm_array_info {
	__le32 size;

	__le16 size_hi;
	__le16 stripe_size;

	__le32 mode;

	__le16 stripe_blk_sz;
	__le16 reserved1;

	__le16 cyl;
	__le16 head;

	__le16 sect;
	u8 array_id;
	u8 reserved2;

	char name[40];

	__le32 array_status;

	/* device list continues beyond this point? */
} __attribute__((packed));

static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void carm_remove_one (struct pci_dev *pdev);
static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static const struct pci_device_id carm_pci_tbl[] = {
	{ PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, carm_pci_tbl);

static struct pci_driver carm_driver = {
	.name		= DRV_NAME,
	.id_table	= carm_pci_tbl,
	.probe		= carm_init_one,
	.remove		= carm_remove_one,
};

static const struct block_device_operations carm_bd_ops = {
	.owner		= THIS_MODULE,
	.getgeo		= carm_bdev_getgeo,
};

static unsigned int carm_host_id;
static unsigned long carm_major_alloc;


static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct carm_port *port = bdev->bd_disk->private_data;

	geo->heads = (u8) port->dev_geom_head;
	geo->sectors = (u8) port->dev_geom_sect;
	geo->cylinders = port->dev_geom_cyl;
	return 0;
}

static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE };

static inline int carm_lookup_bucket(u32 msg_size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
		if (msg_size <= msg_sizes[i])
			return i;

	return -ENOENT;
}

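/*
 * Worked example (editorial, not in the original source): the
 * CARM_ARRAY_INFO request built in carm_array_info() below sizes its
 * message as sizeof(struct carm_msg_ioctl) + sizeof(struct
 * carm_array_info) == 16 + 68 == 84 bytes by these packed layouts, and
 * carm_lookup_bucket(84) returns 2 -- the 128-byte message size that
 * carm_init_buckets() programs into CARM_CMS0 + 8.  Anything above
 * CARM_MSG_SIZE (288) yields -ENOENT.
 */
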
static void carm_init_buckets(void __iomem *mmio)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
		writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i));
}

static inline void *carm_ref_msg(struct carm_host *host,
				 unsigned int msg_idx)
{
	return host->msg_base + (msg_idx * CARM_MSG_SIZE);
}

static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host,
					  unsigned int msg_idx)
{
	return host->msg_dma + (msg_idx * CARM_MSG_SIZE);
}

static int carm_send_msg(struct carm_host *host,
			 struct carm_request *crq, unsigned tag)
{
	void __iomem *mmio = host->mmio;
	u32 msg = (u32) carm_ref_msg_dma(host, tag);
	u32 cm_bucket = crq->msg_bucket;
	u32 tmp;
	int rc = 0;

	VPRINTK("ENTER\n");

	tmp = readl(mmio + CARM_HMUC);
	if (tmp & CARM_Q_FULL) {
#if 0
		tmp = readl(mmio + CARM_INT_MASK);
		tmp |= INT_Q_AVAILABLE;
		writel(tmp, mmio + CARM_INT_MASK);
		readl(mmio + CARM_INT_MASK);	/* flush */
#endif
		DPRINTK("host msg queue full\n");
		rc = -EBUSY;
	} else {
		writel(msg | (cm_bucket << 1), mmio + CARM_IHQP);
		readl(mmio + CARM_IHQP);	/* flush */
	}

	return rc;
}

static int carm_array_info (struct carm_host *host, unsigned int array_idx)
{
	struct carm_msg_ioctl *ioc;
	u32 msg_data;
	dma_addr_t msg_dma;
	struct carm_request *crq;
	struct request *rq;
	int rc;

	rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq)) {
		rc = -ENOMEM;
		goto err_out;
	}
	crq = blk_mq_rq_to_pdu(rq);

	ioc = carm_ref_msg(host, rq->tag);
	msg_dma = carm_ref_msg_dma(host, rq->tag);
	msg_data = (u32) (msg_dma + sizeof(struct carm_array_info));

	crq->msg_type = CARM_MSG_ARRAY;
	crq->msg_subtype = CARM_ARRAY_INFO;
	rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) +
				sizeof(struct carm_array_info));
	BUG_ON(rc < 0);
	crq->msg_bucket = (u32) rc;

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_ARRAY;
	ioc->subtype	= CARM_ARRAY_INFO;
	ioc->array_id	= (u8) array_idx;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(rq->tag));
	ioc->data_addr	= cpu_to_le32(msg_data);

	spin_lock_irq(&host->lock);
	assert(host->state == HST_DEV_SCAN_START ||
	       host->state == HST_DEV_SCAN);
	spin_unlock_irq(&host->lock);

	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
	blk_execute_rq_nowait(rq, true);

	return 0;

err_out:
	spin_lock_irq(&host->lock);
	host->state = HST_ERROR;
	spin_unlock_irq(&host->lock);
	return rc;
}

typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *);

static int carm_send_special (struct carm_host *host, carm_sspc_t func)
{
	struct request *rq;
	struct carm_request *crq;
	struct carm_msg_ioctl *ioc;
	void *mem;
	unsigned int msg_size;
	int rc;

	rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return -ENOMEM;
	crq = blk_mq_rq_to_pdu(rq);

	mem = carm_ref_msg(host, rq->tag);

	msg_size = func(host, rq->tag, mem);

	ioc = mem;
	crq->msg_type = ioc->type;
	crq->msg_subtype = ioc->subtype;
	rc = carm_lookup_bucket(msg_size);
	BUG_ON(rc < 0);
	crq->msg_bucket = (u32) rc;

	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
	blk_execute_rq_nowait(rq, true);

	return 0;
}

static unsigned int carm_fill_sync_time(struct carm_host *host,
					unsigned int idx, void *mem)
{
	struct carm_msg_sync_time *st = mem;

	time64_t tv = ktime_get_real_seconds();

	memset(st, 0, sizeof(*st));
	st->type	= CARM_MSG_MISC;
	st->subtype	= MISC_SET_TIME;
	st->handle	= cpu_to_le32(TAG_ENCODE(idx));
	st->timestamp	= cpu_to_le32(tv);

	return sizeof(struct carm_msg_sync_time);
}

static unsigned int carm_fill_alloc_buf(struct carm_host *host,
					unsigned int idx, void *mem)
{
	struct carm_msg_allocbuf *ab = mem;

	memset(ab, 0, sizeof(*ab));
	ab->type	= CARM_MSG_MISC;
	ab->subtype	= MISC_ALLOC_MEM;
	ab->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ab->n_sg	= 1;
	ab->sg_type	= SGT_32BIT;
	ab->addr	= cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
	ab->len		= cpu_to_le32(PDC_SHM_SIZE >> 1);
	ab->evt_pool	= cpu_to_le32(host->shm_dma + (16 * 1024));
	ab->n_evt	= cpu_to_le32(1024);
	ab->rbuf_pool	= cpu_to_le32(host->shm_dma);
	ab->n_rbuf	= cpu_to_le32(RMSG_Q_LEN);
	ab->msg_pool	= cpu_to_le32(host->shm_dma + RBUF_LEN);
	ab->n_msg	= cpu_to_le32(CARM_Q_LEN);
	ab->sg[0].start	= cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
	ab->sg[0].len	= cpu_to_le32(65536);

	return sizeof(struct carm_msg_allocbuf);
}

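/*
 * Editorial sketch of the shared-memory layout implied by
 * carm_fill_alloc_buf() above and carm_init_shm() below (offsets are
 * relative to host->shm_dma; the split is inferred from the code, not
 * from hardware documentation):
 *
 *   0          response ring, RMSG_Q_LEN * RMSG_SZ == 384 bytes
 *   RBUF_LEN   command message slots, CARM_MSG_SIZE (288) bytes each
 *   16 KiB     event pool handed to the firmware (1024 entries)
 *   256 KiB    second half of PDC_SHM_SIZE, donated to the firmware
 */
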
static unsigned int carm_fill_scan_channels(struct carm_host *host,
					    unsigned int idx, void *mem)
{
	struct carm_msg_ioctl *ioc = mem;
	u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) +
			      IOC_SCAN_CHAN_OFFSET);

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_IOCTL;
	ioc->subtype	= CARM_IOC_SCAN_CHAN;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ioc->data_addr	= cpu_to_le32(msg_data);

	/* fill output data area with "no device" default values */
	mem += IOC_SCAN_CHAN_OFFSET;
	memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS);

	return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS;
}

static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
					 unsigned int idx, void *mem)
{
	struct carm_msg_get_fw_ver *ioc = mem;
	u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc));

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_MISC;
	ioc->subtype	= MISC_GET_FW_VER;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ioc->data_addr	= cpu_to_le32(msg_data);

	return sizeof(struct carm_msg_get_fw_ver) +
	       sizeof(struct carm_fw_ver);
}

static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
{
	unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;

	blk_mq_stop_hw_queues(q);
	VPRINTK("STOPPED QUEUE %p\n", q);

	host->wait_q[idx] = q;
	host->wait_q_prod++;
	BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
}

static inline struct request_queue *carm_pop_q(struct carm_host *host)
{
	unsigned int idx;

	if (host->wait_q_prod == host->wait_q_cons)
		return NULL;

	idx = host->wait_q_cons % CARM_MAX_WAIT_Q;
	host->wait_q_cons++;

	return host->wait_q[idx];
}

static inline void carm_round_robin(struct carm_host *host)
{
	struct request_queue *q = carm_pop_q(host);
	if (q) {
		blk_mq_start_hw_queues(q);
		VPRINTK("STARTED QUEUE %p\n", q);
	}
}

static inline enum dma_data_direction carm_rq_dir(struct request *rq)
{
	return op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

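/*
 * Editorial note: the wait_q ring above parks queues that were stopped
 * on resource exhaustion.  Its capacity, CARM_MAX_WAIT_Q, is
 * CARM_MAX_PORTS + 1 -- one slot per port queue plus one for the
 * out-of-band queue -- so as long as each stopped queue is pushed at
 * most once, the overrun BUG_ON() in carm_push_q() cannot fire.
 */
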
static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct request_queue *q = hctx->queue;
	struct request *rq = bd->rq;
	struct carm_port *port = q->queuedata;
	struct carm_host *host = port->host;
	struct carm_request *crq = blk_mq_rq_to_pdu(rq);
	struct carm_msg_rw *msg;
	struct scatterlist *sg;
	int i, n_elem = 0, rc;
	unsigned int msg_size;
	u32 tmp;

	crq->n_elem = 0;
	sg_init_table(crq->sg, CARM_MAX_REQ_SG);

	blk_mq_start_request(rq);

	spin_lock_irq(&host->lock);
	if (req_op(rq) == REQ_OP_DRV_OUT)
		goto send_msg;

	/* get scatterlist from block layer */
	sg = &crq->sg[0];
	n_elem = blk_rq_map_sg(q, rq, sg);
	if (n_elem <= 0)
		goto out_ioerr;

	/* map scatterlist to PCI bus addresses */
	n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, carm_rq_dir(rq));
	if (n_elem <= 0)
		goto out_ioerr;

	/* obey global hardware limit on S/G entries */
	if (host->hw_sg_used >= CARM_MAX_HOST_SG - n_elem)
		goto out_resource;

	crq->n_elem = n_elem;
	host->hw_sg_used += n_elem;

	/*
	 * build read/write message
	 */

	VPRINTK("build msg\n");
	msg = (struct carm_msg_rw *) carm_ref_msg(host, rq->tag);

	if (rq_data_dir(rq) == WRITE) {
		msg->type = CARM_MSG_WRITE;
		crq->msg_type = CARM_MSG_WRITE;
	} else {
		msg->type = CARM_MSG_READ;
		crq->msg_type = CARM_MSG_READ;
	}

	msg->id		= port->port_no;
	msg->sg_count	= n_elem;
	msg->sg_type	= SGT_32BIT;
	msg->handle	= cpu_to_le32(TAG_ENCODE(rq->tag));
	msg->lba	= cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
	tmp		= (blk_rq_pos(rq) >> 16) >> 16;
	msg->lba_high	= cpu_to_le16( (u16) tmp );
	msg->lba_count	= cpu_to_le16(blk_rq_sectors(rq));

	msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
	for (i = 0; i < n_elem; i++) {
		struct carm_msg_sg *carm_sg = &msg->sg[i];
		carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i]));
		carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i]));
		msg_size += sizeof(struct carm_msg_sg);
	}

	rc = carm_lookup_bucket(msg_size);
	BUG_ON(rc < 0);
	crq->msg_bucket = (u32) rc;
send_msg:
	/*
	 * queue read/write message to hardware
	 */
	VPRINTK("send msg, tag == %u\n", rq->tag);
	rc = carm_send_msg(host, crq, rq->tag);
	if (rc) {
		host->hw_sg_used -= n_elem;
		goto out_resource;
	}

	spin_unlock_irq(&host->lock);
	return BLK_STS_OK;
out_resource:
	dma_unmap_sg(&host->pdev->dev, &crq->sg[0], n_elem, carm_rq_dir(rq));
	carm_push_q(host, q);
	spin_unlock_irq(&host->lock);
	return BLK_STS_DEV_RESOURCE;
out_ioerr:
	carm_round_robin(host);
	spin_unlock_irq(&host->lock);
	return BLK_STS_IOERR;
}

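/*
 * Worked example of the LBA split above (editorial, not in the original
 * source): for a request starting at sector 0x123456789, msg->lba holds
 * 0x23456789 and msg->lba_high holds 0x0001.  The "(blk_rq_pos(rq) >>
 * 16) >> 16" double shift is the usual idiom for avoiding a shift-by-32
 * when sector_t is only 32 bits wide.
 */
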
static void carm_handle_array_info(struct carm_host *host,
				   struct carm_request *crq, u8 *mem,
				   blk_status_t error)
{
	struct carm_port *port;
	u8 *msg_data = mem + sizeof(struct carm_array_info);
	struct carm_array_info *desc = (struct carm_array_info *) msg_data;
	u64 lo, hi;
	int cur_port;
	size_t slen;

	DPRINTK("ENTER\n");

	if (error)
		goto out;
	if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
		goto out;

	cur_port = host->cur_scan_dev;

	/* should never occur */
	if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) {
		printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n",
		       cur_port, (int) desc->array_id);
		goto out;
	}

	port = &host->port[cur_port];

	lo = (u64) le32_to_cpu(desc->size);
	hi = (u64) le16_to_cpu(desc->size_hi);

	port->capacity = lo | (hi << 32);
	port->dev_geom_head = le16_to_cpu(desc->head);
	port->dev_geom_sect = le16_to_cpu(desc->sect);
	port->dev_geom_cyl = le16_to_cpu(desc->cyl);

	host->dev_active |= (1 << cur_port);

	strncpy(port->name, desc->name, sizeof(port->name));
	port->name[sizeof(port->name) - 1] = 0;
	slen = strlen(port->name);
	while (slen && (port->name[slen - 1] == ' ')) {
		port->name[slen - 1] = 0;
		slen--;
	}

	printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n",
	       pci_name(host->pdev), port->port_no,
	       (unsigned long long) port->capacity);
	printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n",
	       pci_name(host->pdev), port->port_no, port->name);

out:
	assert(host->state == HST_DEV_SCAN);
	schedule_work(&host->fsm_task);
}

static void carm_handle_scan_chan(struct carm_host *host,
				  struct carm_request *crq, u8 *mem,
				  blk_status_t error)
{
	u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
	unsigned int i, dev_count = 0;
	int new_state = HST_DEV_SCAN_START;

	DPRINTK("ENTER\n");

	if (error) {
		new_state = HST_ERROR;
		goto out;
	}

	/* TODO: scan and support non-disk devices */
	for (i = 0; i < 8; i++)
		if (msg_data[i] == 0) {		/* direct-access device (disk) */
			host->dev_present |= (1 << i);
			dev_count++;
		}

	printk(KERN_INFO DRV_NAME "(%s): found %u interesting devices\n",
	       pci_name(host->pdev), dev_count);

out:
	assert(host->state == HST_PORT_SCAN);
	host->state = new_state;
	schedule_work(&host->fsm_task);
}

static void carm_handle_generic(struct carm_host *host,
				struct carm_request *crq, blk_status_t error,
				int cur_state, int next_state)
{
	DPRINTK("ENTER\n");

	assert(host->state == cur_state);
	if (error)
		host->state = HST_ERROR;
	else
		host->state = next_state;
	schedule_work(&host->fsm_task);
}

static inline void carm_handle_resp(struct carm_host *host,
				    __le32 ret_handle_le, u32 status)
{
	u32 handle = le32_to_cpu(ret_handle_le);
	unsigned int msg_idx;
	struct request *rq;
	struct carm_request *crq;
	blk_status_t error = (status == RMSG_OK) ? 0 : BLK_STS_IOERR;
	u8 *mem;

	VPRINTK("ENTER, handle == 0x%x\n", handle);

	if (unlikely(!TAG_VALID(handle))) {
		printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n",
		       pci_name(host->pdev), handle);
		return;
	}

	msg_idx = TAG_DECODE(handle);
	VPRINTK("tag == %u\n", msg_idx);

	rq = blk_mq_tag_to_rq(host->tag_set.tags[0], msg_idx);
	crq = blk_mq_rq_to_pdu(rq);

	/* fast path */
	if (likely(crq->msg_type == CARM_MSG_READ ||
		   crq->msg_type == CARM_MSG_WRITE)) {
		dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem,
			     carm_rq_dir(rq));
		goto done;
	}

	mem = carm_ref_msg(host, msg_idx);

	switch (crq->msg_type) {
	case CARM_MSG_IOCTL: {
		switch (crq->msg_subtype) {
		case CARM_IOC_SCAN_CHAN:
			carm_handle_scan_chan(host, crq, mem, error);
			goto done;
		default:
			/* unknown / invalid response */
			goto err_out;
		}
		break;
	}

	case CARM_MSG_MISC: {
		switch (crq->msg_subtype) {
		case MISC_ALLOC_MEM:
			carm_handle_generic(host, crq, error,
					    HST_ALLOC_BUF, HST_SYNC_TIME);
			goto done;
		case MISC_SET_TIME:
			carm_handle_generic(host, crq, error,
					    HST_SYNC_TIME, HST_GET_FW_VER);
			goto done;
		case MISC_GET_FW_VER: {
			struct carm_fw_ver *ver = (struct carm_fw_ver *)
				(mem + sizeof(struct carm_msg_get_fw_ver));
			if (!error) {
				host->fw_ver = le32_to_cpu(ver->version);
				host->flags |= (ver->features & FL_FW_VER_MASK);
			}
			carm_handle_generic(host, crq, error,
					    HST_GET_FW_VER, HST_PORT_SCAN);
			goto done;
		}
		default:
			/* unknown / invalid response */
			goto err_out;
		}
		break;
	}

	case CARM_MSG_ARRAY: {
		switch (crq->msg_subtype) {
		case CARM_ARRAY_INFO:
			carm_handle_array_info(host, crq, mem, error);
			break;
		default:
			/* unknown / invalid response */
			goto err_out;
		}
		break;
	}

	default:
		/* unknown / invalid response */
		goto err_out;
	}

	return;

err_out:
	printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
	       pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
	error = BLK_STS_IOERR;
done:
	host->hw_sg_used -= crq->n_elem;
	blk_mq_end_request(blk_mq_rq_from_pdu(crq), error);

	if (host->hw_sg_used <= CARM_SG_LOW_WATER)
		carm_round_robin(host);
}

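/*
 * Editorial note: the completion handlers above are called from
 * carm_interrupt() with host->lock held and interrupts off, which is
 * why they merely update host->state and schedule_work() the FSM
 * rather than doing any follow-up work inline.
 */
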
static inline void carm_handle_responses(struct carm_host *host)
{
	void __iomem *mmio = host->mmio;
	struct carm_response *resp = (struct carm_response *) host->shm;
	unsigned int work = 0;
	unsigned int idx = host->resp_idx % RMSG_Q_LEN;

	while (1) {
		u32 status = le32_to_cpu(resp[idx].status);

		if (status == 0xffffffff) {
			VPRINTK("ending response on index %u\n", idx);
			writel(idx << 3, mmio + CARM_RESP_IDX);
			break;
		}

		/* response to a message we sent */
		else if ((status & (1 << 31)) == 0) {
			VPRINTK("handling msg response on index %u\n", idx);
			carm_handle_resp(host, resp[idx].ret_handle, status);
			resp[idx].status = cpu_to_le32(0xffffffff);
		}

		/* asynchronous events the hardware throws our way */
		else if ((status & 0xff000000) == (1 << 31)) {
			u8 *evt_type_ptr = (u8 *) &resp[idx];
			u8 evt_type = *evt_type_ptr;
			printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n",
			       pci_name(host->pdev), (int) evt_type);
			resp[idx].status = cpu_to_le32(0xffffffff);
		}

		idx = NEXT_RESP(idx);
		work++;
	}

	VPRINTK("EXIT, work==%u\n", work);
	host->resp_idx += work;
}

static irqreturn_t carm_interrupt(int irq, void *__host)
{
	struct carm_host *host = __host;
	void __iomem *mmio;
	u32 mask;
	int handled = 0;
	unsigned long flags;

	if (!host) {
		VPRINTK("no host\n");
		return IRQ_NONE;
	}

	spin_lock_irqsave(&host->lock, flags);

	mmio = host->mmio;

	/* reading should also clear interrupts */
	mask = readl(mmio + CARM_INT_STAT);

	if (mask == 0 || mask == 0xffffffff) {
		VPRINTK("no work, mask == 0x%x\n", mask);
		goto out;
	}

	if (mask & INT_ACK_MASK)
		writel(mask, mmio + CARM_INT_STAT);

	if (unlikely(host->state == HST_INVALID)) {
		VPRINTK("not initialized yet, mask = 0x%x\n", mask);
		goto out;
	}

	if (mask & CARM_HAVE_RESP) {
		handled = 1;
		carm_handle_responses(host);
	}

out:
	spin_unlock_irqrestore(&host->lock, flags);
	VPRINTK("EXIT\n");
	return IRQ_RETVAL(handled);
}

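/*
 * Editorial summary of the response-ring handshake above, inferred from
 * the code rather than hardware documentation: a slot whose status word
 * is 0xffffffff is empty; status with bit 31 clear is the reply to a
 * message we sent; a 0x80 top byte marks an asynchronous event.  The
 * consumer position written to CARM_RESP_IDX is shifted left by 3,
 * i.e. expressed as a byte offset, each slot being RMSG_SZ == 8 bytes.
 */
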
static void carm_fsm_task (struct work_struct *work)
{
	struct carm_host *host =
		container_of(work, struct carm_host, fsm_task);
	unsigned long flags;
	unsigned int state;
	int rc, i, next_dev;
	int reschedule = 0;
	int new_state = HST_INVALID;

	spin_lock_irqsave(&host->lock, flags);
	state = host->state;
	spin_unlock_irqrestore(&host->lock, flags);

	DPRINTK("ENTER, state == %s\n", state_name[state]);

	switch (state) {
	case HST_PROBE_START:
		new_state = HST_ALLOC_BUF;
		reschedule = 1;
		break;

	case HST_ALLOC_BUF:
		rc = carm_send_special(host, carm_fill_alloc_buf);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_SYNC_TIME:
		rc = carm_send_special(host, carm_fill_sync_time);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_GET_FW_VER:
		rc = carm_send_special(host, carm_fill_get_fw_ver);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_PORT_SCAN:
		rc = carm_send_special(host, carm_fill_scan_channels);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_DEV_SCAN_START:
		host->cur_scan_dev = -1;
		new_state = HST_DEV_SCAN;
		reschedule = 1;
		break;

	case HST_DEV_SCAN:
		next_dev = -1;
		for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++)
			if (host->dev_present & (1 << i)) {
				next_dev = i;
				break;
			}

		if (next_dev >= 0) {
			host->cur_scan_dev = next_dev;
			rc = carm_array_info(host, next_dev);
			if (rc) {
				new_state = HST_ERROR;
				reschedule = 1;
			}
		} else {
			new_state = HST_DEV_ACTIVATE;
			reschedule = 1;
		}
		break;

	case HST_DEV_ACTIVATE: {
		int activated = 0;
		for (i = 0; i < CARM_MAX_PORTS; i++)
			if (host->dev_active & (1 << i)) {
				struct carm_port *port = &host->port[i];
				struct gendisk *disk = port->disk;

				set_capacity(disk, port->capacity);
				host->probe_err = add_disk(disk);
				if (!host->probe_err)
					activated++;
				else
					break;
			}

		printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
		       pci_name(host->pdev), activated);

		new_state = HST_PROBE_FINISHED;
		reschedule = 1;
		break;
	}
	case HST_PROBE_FINISHED:
		complete(&host->probe_comp);
		break;
	case HST_ERROR:
		/* FIXME: TODO */
		break;

	default:
		/* should never occur */
		printk(KERN_ERR PFX "BUG: unknown state %d\n", state);
		assert(0);
		break;
	}

	if (new_state != HST_INVALID) {
		spin_lock_irqsave(&host->lock, flags);
		host->state = new_state;
		spin_unlock_irqrestore(&host->lock, flags);
	}
	if (reschedule)
		schedule_work(&host->fsm_task);
}

static int carm_init_wait(void __iomem *mmio, u32 bits, unsigned int test_bit)
{
	unsigned int i;

	for (i = 0; i < 50000; i++) {
		u32 tmp = readl(mmio + CARM_LMUC);
		udelay(100);

		if (test_bit) {
			if ((tmp & bits) == bits)
				return 0;
		} else {
			if ((tmp & bits) == 0)
				return 0;
		}

		cond_resched();
	}

	printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n",
	       bits, test_bit ? "yes" : "no");
	return -EBUSY;
}

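/*
 * For reference: the loop above polls up to 50000 times with a 100 us
 * delay per iteration, so carm_init_wait() gives the hardware roughly
 * five seconds before giving up with -EBUSY.
 */
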
static void carm_init_responses(struct carm_host *host)
{
	void __iomem *mmio = host->mmio;
	unsigned int i;
	struct carm_response *resp = (struct carm_response *) host->shm;

	for (i = 0; i < RMSG_Q_LEN; i++)
		resp[i].status = cpu_to_le32(0xffffffff);

	writel(0, mmio + CARM_RESP_IDX);
}

static int carm_init_host(struct carm_host *host)
{
	void __iomem *mmio = host->mmio;
	u32 tmp;
	u8 tmp8;
	int rc;

	DPRINTK("ENTER\n");

	writel(0, mmio + CARM_INT_MASK);

	tmp8 = readb(mmio + CARM_INITC);
	if (tmp8 & 0x01) {
		tmp8 &= ~0x01;
		writeb(tmp8, mmio + CARM_INITC);
		readb(mmio + CARM_INITC);	/* flush */

		DPRINTK("snooze...\n");
		msleep(5000);
	}

	tmp = readl(mmio + CARM_HMUC);
	if (tmp & CARM_CME) {
		DPRINTK("CME bit present, waiting\n");
		rc = carm_init_wait(mmio, CARM_CME, 1);
		if (rc) {
			DPRINTK("EXIT, carm_init_wait 1 failed\n");
			return rc;
		}
	}
	if (tmp & CARM_RME) {
		DPRINTK("RME bit present, waiting\n");
		rc = carm_init_wait(mmio, CARM_RME, 1);
		if (rc) {
			DPRINTK("EXIT, carm_init_wait 2 failed\n");
			return rc;
		}
	}

	tmp &= ~(CARM_RME | CARM_CME);
	writel(tmp, mmio + CARM_HMUC);
	readl(mmio + CARM_HMUC);	/* flush */

	rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0);
	if (rc) {
		DPRINTK("EXIT, carm_init_wait 3 failed\n");
		return rc;
	}

	carm_init_buckets(mmio);

	writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO);
	writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI);
	writel(RBUF_LEN, mmio + RBUF_BYTE_SZ);

	tmp = readl(mmio + CARM_HMUC);
	tmp |= (CARM_RME | CARM_CME | CARM_WZBC);
	writel(tmp, mmio + CARM_HMUC);
	readl(mmio + CARM_HMUC);	/* flush */

	rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1);
	if (rc) {
		DPRINTK("EXIT, carm_init_wait 4 failed\n");
		return rc;
	}

	writel(0, mmio + CARM_HMPHA);
	writel(INT_DEF_MASK, mmio + CARM_INT_MASK);

	carm_init_responses(host);

	/* start initialization, probing state machine */
	spin_lock_irq(&host->lock);
	assert(host->state == HST_INVALID);
	host->state = HST_PROBE_START;
	spin_unlock_irq(&host->lock);
	schedule_work(&host->fsm_task);

	DPRINTK("EXIT\n");
	return 0;
}

static const struct blk_mq_ops carm_mq_ops = {
	.queue_rq	= carm_queue_rq,
};

static int carm_init_disk(struct carm_host *host, unsigned int port_no)
{
	struct carm_port *port = &host->port[port_no];
	struct gendisk *disk;

	port->host = host;
	port->port_no = port_no;

	disk = blk_mq_alloc_disk(&host->tag_set, port);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	port->disk = disk;
	sprintf(disk->disk_name, DRV_NAME "/%u",
		(unsigned int)host->id * CARM_MAX_PORTS + port_no);
	disk->major = host->major;
	disk->first_minor = port_no * CARM_MINORS_PER_MAJOR;
	disk->minors = CARM_MINORS_PER_MAJOR;
	disk->fops = &carm_bd_ops;
	disk->private_data = port;

	blk_queue_max_segments(disk->queue, CARM_MAX_REQ_SG);
	blk_queue_segment_boundary(disk->queue, CARM_SG_BOUNDARY);
	return 0;
}

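/*
 * Worked example (editorial, not in the original source): on the first
 * adapter (host->id == 0), port 3 becomes disk "sx8/3" with major 160
 * (or 161/dynamic), first_minor 96 and CARM_MINORS_PER_MAJOR == 32
 * minors -- the whole disk plus up to 31 partitions.  A second adapter
 * would continue the naming at sx8/8.
 */
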
static void carm_free_disk(struct carm_host *host, unsigned int port_no)
{
	struct carm_port *port = &host->port[port_no];
	struct gendisk *disk = port->disk;

	if (!disk)
		return;

	if (host->state > HST_DEV_ACTIVATE)
		del_gendisk(disk);
	blk_cleanup_disk(disk);
}

static int carm_init_shm(struct carm_host *host)
{
	host->shm = dma_alloc_coherent(&host->pdev->dev, CARM_SHM_SIZE,
				       &host->shm_dma, GFP_KERNEL);
	if (!host->shm)
		return -ENOMEM;

	host->msg_base = host->shm + RBUF_LEN;
	host->msg_dma = host->shm_dma + RBUF_LEN;

	memset(host->shm, 0xff, RBUF_LEN);
	memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN);

	return 0;
}

static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct carm_host *host;
	int rc;
	struct request_queue *q;
	unsigned int i;

	printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;

	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
		       pci_name(pdev));
		goto err_out_regions;
	}

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	host->pdev = pdev;
	spin_lock_init(&host->lock);
	INIT_WORK(&host->fsm_task, carm_fsm_task);
	init_completion(&host->probe_comp);

	host->mmio = ioremap(pci_resource_start(pdev, 0),
			     pci_resource_len(pdev, 0));
	if (!host->mmio) {
		printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n",
		       pci_name(pdev));
		rc = -ENOMEM;
		goto err_out_kfree;
	}

	rc = carm_init_shm(host);
	if (rc) {
		printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n",
		       pci_name(pdev));
		goto err_out_iounmap;
	}

	memset(&host->tag_set, 0, sizeof(host->tag_set));
	host->tag_set.ops = &carm_mq_ops;
	host->tag_set.cmd_size = sizeof(struct carm_request);
	host->tag_set.nr_hw_queues = 1;
	host->tag_set.nr_maps = 1;
	host->tag_set.queue_depth = max_queue;
	host->tag_set.numa_node = NUMA_NO_NODE;
	host->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

	rc = blk_mq_alloc_tag_set(&host->tag_set);
	if (rc)
		goto err_out_dma_free;

	q = blk_mq_init_queue(&host->tag_set);
	if (IS_ERR(q)) {
		rc = PTR_ERR(q);
		blk_mq_free_tag_set(&host->tag_set);
		goto err_out_dma_free;
	}

	host->oob_q = q;
	q->queuedata = host;

	/*
	 * Figure out which major to use: 160, 161, or dynamic
	 */
	if (!test_and_set_bit(0, &carm_major_alloc))
		host->major = 160;
	else if (!test_and_set_bit(1, &carm_major_alloc))
		host->major = 161;
	else
		host->flags |= FL_DYN_MAJOR;

	host->id = carm_host_id;
	sprintf(host->name, DRV_NAME "%d", carm_host_id);

	rc = register_blkdev(host->major, host->name);
	if (rc < 0)
		goto err_out_free_majors;
	if (host->flags & FL_DYN_MAJOR)
		host->major = rc;

	for (i = 0; i < CARM_MAX_PORTS; i++) {
		rc = carm_init_disk(host, i);
		if (rc)
			goto err_out_blkdev_disks;
	}

	pci_set_master(pdev);

	rc = request_irq(pdev->irq, carm_interrupt, IRQF_SHARED, DRV_NAME, host);
	if (rc) {
		printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n",
		       pci_name(pdev));
		goto err_out_blkdev_disks;
	}

	rc = carm_init_host(host);
	if (rc)
		goto err_out_free_irq;

	DPRINTK("waiting for probe_comp\n");
	host->probe_err = -ENODEV;
	wait_for_completion(&host->probe_comp);
	if (host->probe_err) {
		rc = host->probe_err;
		goto err_out_free_irq;
	}

	printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n",
	       host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
	       (unsigned long long)pci_resource_start(pdev, 0),
	       pdev->irq, host->major);

	carm_host_id++;
	pci_set_drvdata(pdev, host);
	return 0;

err_out_free_irq:
	free_irq(pdev->irq, host);
err_out_blkdev_disks:
	for (i = 0; i < CARM_MAX_PORTS; i++)
		carm_free_disk(host, i);
	unregister_blkdev(host->major, host->name);
err_out_free_majors:
	if (host->major == 160)
		clear_bit(0, &carm_major_alloc);
	else if (host->major == 161)
		clear_bit(1, &carm_major_alloc);
	blk_cleanup_queue(host->oob_q);
	blk_mq_free_tag_set(&host->tag_set);
err_out_dma_free:
	dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
err_out_iounmap:
	iounmap(host->mmio);
err_out_kfree:
	kfree(host);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	pci_disable_device(pdev);
	return rc;
}

static void carm_remove_one (struct pci_dev *pdev)
{
	struct carm_host *host = pci_get_drvdata(pdev);
	unsigned int i;

	if (!host) {
		printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n",
		       pci_name(pdev));
		return;
	}

	free_irq(pdev->irq, host);
	for (i = 0; i < CARM_MAX_PORTS; i++)
		carm_free_disk(host, i);
	unregister_blkdev(host->major, host->name);
	if (host->major == 160)
		clear_bit(0, &carm_major_alloc);
	else if (host->major == 161)
		clear_bit(1, &carm_major_alloc);
	blk_cleanup_queue(host->oob_q);
	blk_mq_free_tag_set(&host->tag_set);
	dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
	iounmap(host->mmio);
	kfree(host);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

module_pci_driver(carm_driver);