megaraid.c (107191B)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *			Linux MegaRAID device driver
 *
 * Copyright (c) 2002  LSI Logic Corporation.
 *
 * Copyright (c) 2002  Red Hat, Inc. All rights reserved.
 *	  - fixes
 *	  - speed-ups (list handling fixes, issued_list, optimizations.)
 *	  - lots of cleanups.
 *
 * Copyright (c) 2003  Christoph Hellwig  <hch@lst.de>
 *	  - new-style, hotplug-aware pci probing and scsi registration
 *
 * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju
 *						<Seokmann.Ju@lsil.com>
 *
 * Description: Linux device driver for LSI Logic MegaRAID controller
 *
 * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493
 *					518, 520, 531, 532
 *
 * This driver is supported by LSI Logic, with assistance from Red Hat, Dell,
 * and others. Please send updates to the mailing list
 * linux-scsi@vger.kernel.org .
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/reboot.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>

#include "megaraid.h"

#define MEGARAID_MODULE_VERSION "2.00.4"

MODULE_AUTHOR ("sju@lsil.com");
MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver");
MODULE_LICENSE ("GPL");
MODULE_VERSION(MEGARAID_MODULE_VERSION);

static DEFINE_MUTEX(megadev_mutex);
static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN;
module_param(max_cmd_per_lun, uint, 0);
MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)");

static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO;
module_param(max_sectors_per_io, ushort, 0);
MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)");


static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
module_param(max_mbox_busy_wait, ushort, 0);
MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");

#define RDINDOOR(adapter)	readl((adapter)->mmio_base + 0x20)
#define RDOUTDOOR(adapter)	readl((adapter)->mmio_base + 0x2C)
#define WRINDOOR(adapter,value)	writel(value, (adapter)->mmio_base + 0x20)
#define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)

/*
 * Global variables
 */

static int hba_count;
static adapter_t *hba_soft_state[MAX_CONTROLLERS];
static struct proc_dir_entry *mega_proc_dir_entry;

/* For controller re-ordering */
static struct mega_hbas mega_hbas[MAX_CONTROLLERS];

static long
megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);

/*
 * The File Operations structure for the serial/ioctl interface of the driver
 */
static const struct file_operations megadev_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= megadev_unlocked_ioctl,
	.open		= megadev_open,
	.llseek		= noop_llseek,
};

/*
 * Array to structures for storing the information about the controllers. This
 * information is sent to the user level applications, when they do an ioctl
 * for this information.
 */
static struct mcontroller mcontroller[MAX_CONTROLLERS];

/* The current driver version */
static u32 driver_ver = 0x02000000;

/* major number used by the device for character interface */
static int major;

#define IS_RAID_CH(hba, ch)	(((hba)->mega_ch_class >> (ch)) & 0x01)


/*
 * Debug variable to print some diagnostic messages
 */
static int trace_level;

/**
 * mega_setup_mailbox()
 * @adapter: pointer to our soft state
 *
 * Allocates a 8 byte aligned memory for the handshake mailbox.
 */
static int
mega_setup_mailbox(adapter_t *adapter)
{
	unsigned long	align;

	adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev,
						 sizeof(mbox64_t),
						 &adapter->una_mbox64_dma,
						 GFP_KERNEL);

	if( !adapter->una_mbox64 ) return -1;

	adapter->mbox = &adapter->una_mbox64->mbox;

	adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) &
			(~0UL ^ 0xFUL));

	adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8);

	align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox);

	adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align;

	/*
	 * Register the mailbox if the controller is an io-mapped controller
	 */
	if( adapter->flag & BOARD_IOMAP ) {

		outb(adapter->mbox_dma & 0xFF,
				adapter->host->io_port + MBOX_PORT0);

		outb((adapter->mbox_dma >> 8) & 0xFF,
				adapter->host->io_port + MBOX_PORT1);

		outb((adapter->mbox_dma >> 16) & 0xFF,
				adapter->host->io_port + MBOX_PORT2);

		outb((adapter->mbox_dma >> 24) & 0xFF,
				adapter->host->io_port + MBOX_PORT3);

		outb(ENABLE_MBOX_BYTE,
				adapter->host->io_port + ENABLE_MBOX_REGION);

		irq_ack(adapter);

		irq_enable(adapter);
	}

	return 0;
}


/*
 * mega_query_adapter()
 * @adapter - pointer to our soft state
 *
 * Issue the adapter inquiry commands to the controller and find out
 * information and parameter about the devices attached
 */
static int
mega_query_adapter(adapter_t *adapter)
{
	dma_addr_t	prod_info_dma_handle;
	mega_inquiry3	*inquiry3;
	struct mbox_out	mbox;
	u8	*raw_mbox = (u8 *)&mbox;
	int	retval;

	/* Initialize adapter inquiry mailbox */

	memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
	memset(&mbox, 0, sizeof(mbox));

	/*
	 * Try to issue Inquiry3 command
	 * if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and
	 * update enquiry3 structure
	 */
	mbox.xferaddr = (u32)adapter->buf_dma_handle;

	inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;

	raw_mbox[0] = FC_NEW_CONFIG;		/* i.e. mbox->cmd=0xA1 */
	raw_mbox[2] = NC_SUBOP_ENQUIRY3;	/* i.e. 0x0F */
	raw_mbox[3] = ENQ3_GET_SOLICITED_FULL;	/* i.e.
0x02 */ 220 221 /* Issue a blocking command to the card */ 222 if ((retval = issue_scb_block(adapter, raw_mbox))) { 223 /* the adapter does not support 40ld */ 224 225 mraid_ext_inquiry *ext_inq; 226 mraid_inquiry *inq; 227 dma_addr_t dma_handle; 228 229 ext_inq = dma_alloc_coherent(&adapter->dev->dev, 230 sizeof(mraid_ext_inquiry), 231 &dma_handle, GFP_KERNEL); 232 233 if( ext_inq == NULL ) return -1; 234 235 inq = &ext_inq->raid_inq; 236 237 mbox.xferaddr = (u32)dma_handle; 238 239 /*issue old 0x04 command to adapter */ 240 mbox.cmd = MEGA_MBOXCMD_ADPEXTINQ; 241 242 issue_scb_block(adapter, raw_mbox); 243 244 /* 245 * update Enquiry3 and ProductInfo structures with 246 * mraid_inquiry structure 247 */ 248 mega_8_to_40ld(inq, inquiry3, 249 (mega_product_info *)&adapter->product_info); 250 251 dma_free_coherent(&adapter->dev->dev, 252 sizeof(mraid_ext_inquiry), ext_inq, 253 dma_handle); 254 255 } else { /*adapter supports 40ld */ 256 adapter->flag |= BOARD_40LD; 257 258 /* 259 * get product_info, which is static information and will be 260 * unchanged 261 */ 262 prod_info_dma_handle = dma_map_single(&adapter->dev->dev, 263 (void *)&adapter->product_info, 264 sizeof(mega_product_info), 265 DMA_FROM_DEVICE); 266 267 mbox.xferaddr = prod_info_dma_handle; 268 269 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ 270 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */ 271 272 if ((retval = issue_scb_block(adapter, raw_mbox))) 273 dev_warn(&adapter->dev->dev, 274 "Product_info cmd failed with error: %d\n", 275 retval); 276 277 dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle, 278 sizeof(mega_product_info), DMA_FROM_DEVICE); 279 } 280 281 282 /* 283 * kernel scans the channels from 0 to <= max_channel 284 */ 285 adapter->host->max_channel = 286 adapter->product_info.nchannels + NVIRT_CHAN -1; 287 288 adapter->host->max_id = 16; /* max targets per channel */ 289 290 adapter->host->max_lun = 7; /* Up to 7 luns for non disk devices */ 291 292 adapter->host->cmd_per_lun = max_cmd_per_lun; 293 294 adapter->numldrv = inquiry3->num_ldrv; 295 296 adapter->max_cmds = adapter->product_info.max_commands; 297 298 if(adapter->max_cmds > MAX_COMMANDS) 299 adapter->max_cmds = MAX_COMMANDS; 300 301 adapter->host->can_queue = adapter->max_cmds - 1; 302 303 /* 304 * Get the maximum number of scatter-gather elements supported by this 305 * firmware 306 */ 307 mega_get_max_sgl(adapter); 308 309 adapter->host->sg_tablesize = adapter->sglen; 310 311 /* use HP firmware and bios version encoding 312 Note: fw_version[0|1] and bios_version[0|1] were originally shifted 313 right 8 bits making them zero. This 0 value was hardcoded to fix 314 sparse warnings. 
*/ 315 if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) { 316 snprintf(adapter->fw_version, sizeof(adapter->fw_version), 317 "%c%d%d.%d%d", 318 adapter->product_info.fw_version[2], 319 0, 320 adapter->product_info.fw_version[1] & 0x0f, 321 0, 322 adapter->product_info.fw_version[0] & 0x0f); 323 snprintf(adapter->bios_version, sizeof(adapter->fw_version), 324 "%c%d%d.%d%d", 325 adapter->product_info.bios_version[2], 326 0, 327 adapter->product_info.bios_version[1] & 0x0f, 328 0, 329 adapter->product_info.bios_version[0] & 0x0f); 330 } else { 331 memcpy(adapter->fw_version, 332 (char *)adapter->product_info.fw_version, 4); 333 adapter->fw_version[4] = 0; 334 335 memcpy(adapter->bios_version, 336 (char *)adapter->product_info.bios_version, 4); 337 338 adapter->bios_version[4] = 0; 339 } 340 341 dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n", 342 adapter->fw_version, adapter->bios_version, adapter->numldrv); 343 344 /* 345 * Do we support extended (>10 bytes) cdbs 346 */ 347 adapter->support_ext_cdb = mega_support_ext_cdb(adapter); 348 if (adapter->support_ext_cdb) 349 dev_notice(&adapter->dev->dev, "supports extended CDBs\n"); 350 351 352 return 0; 353} 354 355/** 356 * mega_runpendq() 357 * @adapter: pointer to our soft state 358 * 359 * Runs through the list of pending requests. 360 */ 361static inline void 362mega_runpendq(adapter_t *adapter) 363{ 364 if(!list_empty(&adapter->pending_list)) 365 __mega_runpendq(adapter); 366} 367 368/* 369 * megaraid_queue() 370 * @scmd - Issue this scsi command 371 * @done - the callback hook into the scsi mid-layer 372 * 373 * The command queuing entry point for the mid-layer. 374 */ 375static int megaraid_queue_lck(struct scsi_cmnd *scmd) 376{ 377 adapter_t *adapter; 378 scb_t *scb; 379 int busy=0; 380 unsigned long flags; 381 382 adapter = (adapter_t *)scmd->device->host->hostdata; 383 384 /* 385 * Allocate and build a SCB request 386 * busy flag will be set if mega_build_cmd() command could not 387 * allocate scb. We will return non-zero status in that case. 388 * NOTE: scb can be null even though certain commands completed 389 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, we would 390 * return 0 in that case. 391 */ 392 393 spin_lock_irqsave(&adapter->lock, flags); 394 scb = mega_build_cmd(adapter, scmd, &busy); 395 if (!scb) 396 goto out; 397 398 scb->state |= SCB_PENDQ; 399 list_add_tail(&scb->list, &adapter->pending_list); 400 401 /* 402 * Check if the HBA is in quiescent state, e.g., during a 403 * delete logical drive opertion. If it is, don't run 404 * the pending_list. 405 */ 406 if (atomic_read(&adapter->quiescent) == 0) 407 mega_runpendq(adapter); 408 409 busy = 0; 410 out: 411 spin_unlock_irqrestore(&adapter->lock, flags); 412 return busy; 413} 414 415static DEF_SCSI_QCMD(megaraid_queue) 416 417/** 418 * mega_allocate_scb() 419 * @adapter: pointer to our soft state 420 * @cmd: scsi command from the mid-layer 421 * 422 * Allocate a SCB structure. This is the central structure for controller 423 * commands. 
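 *
 * SCBs are kept on adapter->free_list while idle; allocation below simply
 * unlinks the head of that list, marks it SCB_ACTIVE and attaches the scsi
 * command, and mega_free_scb() later returns it to the free list.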
424 */ 425static inline scb_t * 426mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd) 427{ 428 struct list_head *head = &adapter->free_list; 429 scb_t *scb; 430 431 /* Unlink command from Free List */ 432 if( !list_empty(head) ) { 433 434 scb = list_entry(head->next, scb_t, list); 435 436 list_del_init(head->next); 437 438 scb->state = SCB_ACTIVE; 439 scb->cmd = cmd; 440 scb->dma_type = MEGA_DMA_TYPE_NONE; 441 442 return scb; 443 } 444 445 return NULL; 446} 447 448/** 449 * mega_get_ldrv_num() 450 * @adapter: pointer to our soft state 451 * @cmd: scsi mid layer command 452 * @channel: channel on the controller 453 * 454 * Calculate the logical drive number based on the information in scsi command 455 * and the channel number. 456 */ 457static inline int 458mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel) 459{ 460 int tgt; 461 int ldrv_num; 462 463 tgt = cmd->device->id; 464 465 if ( tgt > adapter->this_id ) 466 tgt--; /* we do not get inquires for initiator id */ 467 468 ldrv_num = (channel * 15) + tgt; 469 470 471 /* 472 * If we have a logical drive with boot enabled, project it first 473 */ 474 if( adapter->boot_ldrv_enabled ) { 475 if( ldrv_num == 0 ) { 476 ldrv_num = adapter->boot_ldrv; 477 } 478 else { 479 if( ldrv_num <= adapter->boot_ldrv ) { 480 ldrv_num--; 481 } 482 } 483 } 484 485 /* 486 * If "delete logical drive" feature is enabled on this controller. 487 * Do only if at least one delete logical drive operation was done. 488 * 489 * Also, after logical drive deletion, instead of logical drive number, 490 * the value returned should be 0x80+logical drive id. 491 * 492 * These is valid only for IO commands. 493 */ 494 495 if (adapter->support_random_del && adapter->read_ldidmap ) 496 switch (cmd->cmnd[0]) { 497 case READ_6: 498 case WRITE_6: 499 case READ_10: 500 case WRITE_10: 501 ldrv_num += 0x80; 502 } 503 504 return ldrv_num; 505} 506 507/** 508 * mega_build_cmd() 509 * @adapter: pointer to our soft state 510 * @cmd: Prepare using this scsi command 511 * @busy: busy flag if no resources 512 * 513 * Prepares a command and scatter gather list for the controller. This routine 514 * also finds out if the commands is intended for a logical drive or a 515 * physical device and prepares the controller command accordingly. 516 * 517 * We also re-order the logical drives and physical devices based on their 518 * boot settings. 519 */ 520static scb_t * 521mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy) 522{ 523 mega_passthru *pthru; 524 scb_t *scb; 525 mbox_t *mbox; 526 u32 seg; 527 char islogical; 528 int max_ldrv_num; 529 int channel = 0; 530 int target = 0; 531 int ldrv_num = 0; /* logical drive number */ 532 533 /* 534 * We know what channels our logical drives are on - mega_find_card() 535 */ 536 islogical = adapter->logdrv_chan[cmd->device->channel]; 537 538 /* 539 * The theory: If physical drive is chosen for boot, all the physical 540 * devices are exported before the logical drives, otherwise physical 541 * devices are pushed after logical drives, in which case - Kernel sees 542 * the physical devices on virtual channel which is obviously converted 543 * to actual channel on the HBA. 
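 *
 * Concretely, in the code below: with boot_pdrv_enabled set, physical
 * channels are exported first, so a command on a virtual (logical) channel
 * is remapped with channel - product_info.nchannels and target 0 is
 * swapped with boot_pdrv_tgt on the physical side; otherwise logical
 * channels come first and a physical-channel command is remapped with
 * channel - NVIRT_CHAN.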
544 */ 545 if( adapter->boot_pdrv_enabled ) { 546 if( islogical ) { 547 /* logical channel */ 548 channel = cmd->device->channel - 549 adapter->product_info.nchannels; 550 } 551 else { 552 /* this is physical channel */ 553 channel = cmd->device->channel; 554 target = cmd->device->id; 555 556 /* 557 * boot from a physical disk, that disk needs to be 558 * exposed first IF both the channels are SCSI, then 559 * booting from the second channel is not allowed. 560 */ 561 if( target == 0 ) { 562 target = adapter->boot_pdrv_tgt; 563 } 564 else if( target == adapter->boot_pdrv_tgt ) { 565 target = 0; 566 } 567 } 568 } 569 else { 570 if( islogical ) { 571 /* this is the logical channel */ 572 channel = cmd->device->channel; 573 } 574 else { 575 /* physical channel */ 576 channel = cmd->device->channel - NVIRT_CHAN; 577 target = cmd->device->id; 578 } 579 } 580 581 582 if(islogical) { 583 584 /* have just LUN 0 for each target on virtual channels */ 585 if (cmd->device->lun) { 586 cmd->result = (DID_BAD_TARGET << 16); 587 scsi_done(cmd); 588 return NULL; 589 } 590 591 ldrv_num = mega_get_ldrv_num(adapter, cmd, channel); 592 593 594 max_ldrv_num = (adapter->flag & BOARD_40LD) ? 595 MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD; 596 597 /* 598 * max_ldrv_num increases by 0x80 if some logical drive was 599 * deleted. 600 */ 601 if(adapter->read_ldidmap) 602 max_ldrv_num += 0x80; 603 604 if(ldrv_num > max_ldrv_num ) { 605 cmd->result = (DID_BAD_TARGET << 16); 606 scsi_done(cmd); 607 return NULL; 608 } 609 610 } 611 else { 612 if( cmd->device->lun > 7) { 613 /* 614 * Do not support lun >7 for physically accessed 615 * devices 616 */ 617 cmd->result = (DID_BAD_TARGET << 16); 618 scsi_done(cmd); 619 return NULL; 620 } 621 } 622 623 /* 624 * 625 * Logical drive commands 626 * 627 */ 628 if(islogical) { 629 switch (cmd->cmnd[0]) { 630 case TEST_UNIT_READY: 631#if MEGA_HAVE_CLUSTERING 632 /* 633 * Do we support clustering and is the support enabled 634 * If no, return success always 635 */ 636 if( !adapter->has_cluster ) { 637 cmd->result = (DID_OK << 16); 638 scsi_done(cmd); 639 return NULL; 640 } 641 642 if(!(scb = mega_allocate_scb(adapter, cmd))) { 643 *busy = 1; 644 return NULL; 645 } 646 647 scb->raw_mbox[0] = MEGA_CLUSTER_CMD; 648 scb->raw_mbox[2] = MEGA_RESERVATION_STATUS; 649 scb->raw_mbox[3] = ldrv_num; 650 651 scb->dma_direction = DMA_NONE; 652 653 return scb; 654#else 655 cmd->result = (DID_OK << 16); 656 scsi_done(cmd); 657 return NULL; 658#endif 659 660 case MODE_SENSE: { 661 char *buf; 662 struct scatterlist *sg; 663 664 sg = scsi_sglist(cmd); 665 buf = kmap_atomic(sg_page(sg)) + sg->offset; 666 667 memset(buf, 0, cmd->cmnd[4]); 668 kunmap_atomic(buf - sg->offset); 669 670 cmd->result = (DID_OK << 16); 671 scsi_done(cmd); 672 return NULL; 673 } 674 675 case READ_CAPACITY: 676 case INQUIRY: 677 678 if(!(adapter->flag & (1L << cmd->device->channel))) { 679 680 dev_notice(&adapter->dev->dev, 681 "scsi%d: scanning scsi channel %d " 682 "for logical drives\n", 683 adapter->host->host_no, 684 cmd->device->channel); 685 686 adapter->flag |= (1L << cmd->device->channel); 687 } 688 689 /* Allocate a SCB and initialize passthru */ 690 if(!(scb = mega_allocate_scb(adapter, cmd))) { 691 *busy = 1; 692 return NULL; 693 } 694 pthru = scb->pthru; 695 696 mbox = (mbox_t *)scb->raw_mbox; 697 memset(mbox, 0, sizeof(scb->raw_mbox)); 698 memset(pthru, 0, sizeof(mega_passthru)); 699 700 pthru->timeout = 0; 701 pthru->ars = 1; 702 pthru->reqsenselen = 14; 703 pthru->islogical = 1; 704 pthru->logdrv = ldrv_num; 
705 pthru->cdblen = cmd->cmd_len; 706 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len); 707 708 if( adapter->has_64bit_addr ) { 709 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64; 710 } 711 else { 712 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU; 713 } 714 715 scb->dma_direction = DMA_FROM_DEVICE; 716 717 pthru->numsgelements = mega_build_sglist(adapter, scb, 718 &pthru->dataxferaddr, &pthru->dataxferlen); 719 720 mbox->m_out.xferaddr = scb->pthru_dma_addr; 721 722 return scb; 723 724 case READ_6: 725 case WRITE_6: 726 case READ_10: 727 case WRITE_10: 728 case READ_12: 729 case WRITE_12: 730 731 /* Allocate a SCB and initialize mailbox */ 732 if(!(scb = mega_allocate_scb(adapter, cmd))) { 733 *busy = 1; 734 return NULL; 735 } 736 mbox = (mbox_t *)scb->raw_mbox; 737 738 memset(mbox, 0, sizeof(scb->raw_mbox)); 739 mbox->m_out.logdrv = ldrv_num; 740 741 /* 742 * A little hack: 2nd bit is zero for all scsi read 743 * commands and is set for all scsi write commands 744 */ 745 if( adapter->has_64bit_addr ) { 746 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ? 747 MEGA_MBOXCMD_LWRITE64: 748 MEGA_MBOXCMD_LREAD64 ; 749 } 750 else { 751 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ? 752 MEGA_MBOXCMD_LWRITE: 753 MEGA_MBOXCMD_LREAD ; 754 } 755 756 /* 757 * 6-byte READ(0x08) or WRITE(0x0A) cdb 758 */ 759 if( cmd->cmd_len == 6 ) { 760 mbox->m_out.numsectors = (u32) cmd->cmnd[4]; 761 mbox->m_out.lba = 762 ((u32)cmd->cmnd[1] << 16) | 763 ((u32)cmd->cmnd[2] << 8) | 764 (u32)cmd->cmnd[3]; 765 766 mbox->m_out.lba &= 0x1FFFFF; 767 768#if MEGA_HAVE_STATS 769 /* 770 * Take modulo 0x80, since the logical drive 771 * number increases by 0x80 when a logical 772 * drive was deleted 773 */ 774 if (*cmd->cmnd == READ_6) { 775 adapter->nreads[ldrv_num%0x80]++; 776 adapter->nreadblocks[ldrv_num%0x80] += 777 mbox->m_out.numsectors; 778 } else { 779 adapter->nwrites[ldrv_num%0x80]++; 780 adapter->nwriteblocks[ldrv_num%0x80] += 781 mbox->m_out.numsectors; 782 } 783#endif 784 } 785 786 /* 787 * 10-byte READ(0x28) or WRITE(0x2A) cdb 788 */ 789 if( cmd->cmd_len == 10 ) { 790 mbox->m_out.numsectors = 791 (u32)cmd->cmnd[8] | 792 ((u32)cmd->cmnd[7] << 8); 793 mbox->m_out.lba = 794 ((u32)cmd->cmnd[2] << 24) | 795 ((u32)cmd->cmnd[3] << 16) | 796 ((u32)cmd->cmnd[4] << 8) | 797 (u32)cmd->cmnd[5]; 798 799#if MEGA_HAVE_STATS 800 if (*cmd->cmnd == READ_10) { 801 adapter->nreads[ldrv_num%0x80]++; 802 adapter->nreadblocks[ldrv_num%0x80] += 803 mbox->m_out.numsectors; 804 } else { 805 adapter->nwrites[ldrv_num%0x80]++; 806 adapter->nwriteblocks[ldrv_num%0x80] += 807 mbox->m_out.numsectors; 808 } 809#endif 810 } 811 812 /* 813 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 814 */ 815 if( cmd->cmd_len == 12 ) { 816 mbox->m_out.lba = 817 ((u32)cmd->cmnd[2] << 24) | 818 ((u32)cmd->cmnd[3] << 16) | 819 ((u32)cmd->cmnd[4] << 8) | 820 (u32)cmd->cmnd[5]; 821 822 mbox->m_out.numsectors = 823 ((u32)cmd->cmnd[6] << 24) | 824 ((u32)cmd->cmnd[7] << 16) | 825 ((u32)cmd->cmnd[8] << 8) | 826 (u32)cmd->cmnd[9]; 827 828#if MEGA_HAVE_STATS 829 if (*cmd->cmnd == READ_12) { 830 adapter->nreads[ldrv_num%0x80]++; 831 adapter->nreadblocks[ldrv_num%0x80] += 832 mbox->m_out.numsectors; 833 } else { 834 adapter->nwrites[ldrv_num%0x80]++; 835 adapter->nwriteblocks[ldrv_num%0x80] += 836 mbox->m_out.numsectors; 837 } 838#endif 839 } 840 841 /* 842 * If it is a read command 843 */ 844 if( (*cmd->cmnd & 0x0F) == 0x08 ) { 845 scb->dma_direction = DMA_FROM_DEVICE; 846 } 847 else { 848 scb->dma_direction = DMA_TO_DEVICE; 849 } 850 851 /* Calculate Scatter-Gather info */ 852 mbox->m_out.numsgelements = 
mega_build_sglist(adapter, scb, 853 (u32 *)&mbox->m_out.xferaddr, &seg); 854 855 return scb; 856 857#if MEGA_HAVE_CLUSTERING 858 case RESERVE: 859 case RELEASE: 860 861 /* 862 * Do we support clustering and is the support enabled 863 */ 864 if( ! adapter->has_cluster ) { 865 866 cmd->result = (DID_BAD_TARGET << 16); 867 scsi_done(cmd); 868 return NULL; 869 } 870 871 /* Allocate a SCB and initialize mailbox */ 872 if(!(scb = mega_allocate_scb(adapter, cmd))) { 873 *busy = 1; 874 return NULL; 875 } 876 877 scb->raw_mbox[0] = MEGA_CLUSTER_CMD; 878 scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ? 879 MEGA_RESERVE_LD : MEGA_RELEASE_LD; 880 881 scb->raw_mbox[3] = ldrv_num; 882 883 scb->dma_direction = DMA_NONE; 884 885 return scb; 886#endif 887 888 default: 889 cmd->result = (DID_BAD_TARGET << 16); 890 scsi_done(cmd); 891 return NULL; 892 } 893 } 894 895 /* 896 * Passthru drive commands 897 */ 898 else { 899 /* Allocate a SCB and initialize passthru */ 900 if(!(scb = mega_allocate_scb(adapter, cmd))) { 901 *busy = 1; 902 return NULL; 903 } 904 905 mbox = (mbox_t *)scb->raw_mbox; 906 memset(mbox, 0, sizeof(scb->raw_mbox)); 907 908 if( adapter->support_ext_cdb ) { 909 910 mega_prepare_extpassthru(adapter, scb, cmd, 911 channel, target); 912 913 mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU; 914 915 mbox->m_out.xferaddr = scb->epthru_dma_addr; 916 917 } 918 else { 919 920 pthru = mega_prepare_passthru(adapter, scb, cmd, 921 channel, target); 922 923 /* Initialize mailbox */ 924 if( adapter->has_64bit_addr ) { 925 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64; 926 } 927 else { 928 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU; 929 } 930 931 mbox->m_out.xferaddr = scb->pthru_dma_addr; 932 933 } 934 return scb; 935 } 936 return NULL; 937} 938 939 940/** 941 * mega_prepare_passthru() 942 * @adapter: pointer to our soft state 943 * @scb: our scsi control block 944 * @cmd: scsi command from the mid-layer 945 * @channel: actual channel on the controller 946 * @target: actual id on the controller. 947 * 948 * prepare a command for the scsi physical devices. 949 */ 950static mega_passthru * 951mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd, 952 int channel, int target) 953{ 954 mega_passthru *pthru; 955 956 pthru = scb->pthru; 957 memset(pthru, 0, sizeof (mega_passthru)); 958 959 /* 0=6sec/1=60sec/2=10min/3=3hrs */ 960 pthru->timeout = 2; 961 962 pthru->ars = 1; 963 pthru->reqsenselen = 14; 964 pthru->islogical = 0; 965 966 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel; 967 968 pthru->target = (adapter->flag & BOARD_40LD) ? 
969 (channel << 4) | target : target; 970 971 pthru->cdblen = cmd->cmd_len; 972 pthru->logdrv = cmd->device->lun; 973 974 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len); 975 976 /* Not sure about the direction */ 977 scb->dma_direction = DMA_BIDIRECTIONAL; 978 979 /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */ 980 switch (cmd->cmnd[0]) { 981 case INQUIRY: 982 case READ_CAPACITY: 983 if(!(adapter->flag & (1L << cmd->device->channel))) { 984 985 dev_notice(&adapter->dev->dev, 986 "scsi%d: scanning scsi channel %d [P%d] " 987 "for physical devices\n", 988 adapter->host->host_no, 989 cmd->device->channel, channel); 990 991 adapter->flag |= (1L << cmd->device->channel); 992 } 993 fallthrough; 994 default: 995 pthru->numsgelements = mega_build_sglist(adapter, scb, 996 &pthru->dataxferaddr, &pthru->dataxferlen); 997 break; 998 } 999 return pthru; 1000} 1001 1002 1003/** 1004 * mega_prepare_extpassthru() 1005 * @adapter: pointer to our soft state 1006 * @scb: our scsi control block 1007 * @cmd: scsi command from the mid-layer 1008 * @channel: actual channel on the controller 1009 * @target: actual id on the controller. 1010 * 1011 * prepare a command for the scsi physical devices. This rountine prepares 1012 * commands for devices which can take extended CDBs (>10 bytes) 1013 */ 1014static mega_ext_passthru * 1015mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, 1016 struct scsi_cmnd *cmd, 1017 int channel, int target) 1018{ 1019 mega_ext_passthru *epthru; 1020 1021 epthru = scb->epthru; 1022 memset(epthru, 0, sizeof(mega_ext_passthru)); 1023 1024 /* 0=6sec/1=60sec/2=10min/3=3hrs */ 1025 epthru->timeout = 2; 1026 1027 epthru->ars = 1; 1028 epthru->reqsenselen = 14; 1029 epthru->islogical = 0; 1030 1031 epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel; 1032 epthru->target = (adapter->flag & BOARD_40LD) ? 1033 (channel << 4) | target : target; 1034 1035 epthru->cdblen = cmd->cmd_len; 1036 epthru->logdrv = cmd->device->lun; 1037 1038 memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len); 1039 1040 /* Not sure about the direction */ 1041 scb->dma_direction = DMA_BIDIRECTIONAL; 1042 1043 switch(cmd->cmnd[0]) { 1044 case INQUIRY: 1045 case READ_CAPACITY: 1046 if(!(adapter->flag & (1L << cmd->device->channel))) { 1047 1048 dev_notice(&adapter->dev->dev, 1049 "scsi%d: scanning scsi channel %d [P%d] " 1050 "for physical devices\n", 1051 adapter->host->host_no, 1052 cmd->device->channel, channel); 1053 1054 adapter->flag |= (1L << cmd->device->channel); 1055 } 1056 fallthrough; 1057 default: 1058 epthru->numsgelements = mega_build_sglist(adapter, scb, 1059 &epthru->dataxferaddr, &epthru->dataxferlen); 1060 break; 1061 } 1062 1063 return epthru; 1064} 1065 1066static void 1067__mega_runpendq(adapter_t *adapter) 1068{ 1069 scb_t *scb; 1070 struct list_head *pos, *next; 1071 1072 /* Issue any pending commands to the card */ 1073 list_for_each_safe(pos, next, &adapter->pending_list) { 1074 1075 scb = list_entry(pos, scb_t, list); 1076 1077 if( !(scb->state & SCB_ISSUED) ) { 1078 1079 if( issue_scb(adapter, scb) != 0 ) 1080 return; 1081 } 1082 } 1083 1084 return; 1085} 1086 1087 1088/** 1089 * issue_scb() 1090 * @adapter: pointer to our soft state 1091 * @scb: scsi control block 1092 * 1093 * Post a command to the card if the mailbox is available, otherwise return 1094 * busy. We also take the scb from the pending list if the mailbox is 1095 * available. 
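 *
 * For the 64-bit capable commands (LREAD64/LWRITE64/PASSTHRU64/EXTPTHRU)
 * the transfer address is moved into mbox64->xfer_segment_lo/hi and the
 * 32-bit xferaddr field is set to the 0xFFFFFFFF marker; all other
 * commands leave the 64-bit segment fields zero.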
 */
static int
issue_scb(adapter_t *adapter, scb_t *scb)
{
	volatile mbox64_t	*mbox64 = adapter->mbox64;
	volatile mbox_t		*mbox = adapter->mbox;
	unsigned int	i = 0;

	if(unlikely(mbox->m_in.busy)) {
		do {
			udelay(1);
			i++;
		} while( mbox->m_in.busy && (i < max_mbox_busy_wait) );

		if(mbox->m_in.busy) return -1;
	}

	/* Copy mailbox data into host structure */
	memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox,
			sizeof(struct mbox_out));

	mbox->m_out.cmdid = scb->idx;	/* Set cmdid */
	mbox->m_in.busy = 1;		/* Set busy */


	/*
	 * Increment the pending queue counter
	 */
	atomic_inc(&adapter->pend_cmds);

	switch (mbox->m_out.cmd) {
	case MEGA_MBOXCMD_LREAD64:
	case MEGA_MBOXCMD_LWRITE64:
	case MEGA_MBOXCMD_PASSTHRU64:
	case MEGA_MBOXCMD_EXTPTHRU:
		mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
		mbox64->xfer_segment_hi = 0;
		mbox->m_out.xferaddr = 0xFFFFFFFF;
		break;
	default:
		mbox64->xfer_segment_lo = 0;
		mbox64->xfer_segment_hi = 0;
	}

	/*
	 * post the command
	 */
	scb->state |= SCB_ISSUED;

	if( likely(adapter->flag & BOARD_MEMMAP) ) {
		mbox->m_in.poll = 0;
		mbox->m_in.ack = 0;
		WRINDOOR(adapter, adapter->mbox_dma | 0x1);
	}
	else {
		irq_enable(adapter);
		issue_command(adapter);
	}

	return 0;
}

/*
 * Wait until the controller's mailbox is available
 */
static inline int
mega_busywait_mbox (adapter_t *adapter)
{
	if (adapter->mbox->m_in.busy)
		return __mega_busywait_mbox(adapter);
	return 0;
}

/**
 * issue_scb_block()
 * @adapter: pointer to our soft state
 * @raw_mbox: the mailbox
 *
 * Issue a scb in synchronous and non-interrupt mode
 */
static int
issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
{
	volatile mbox64_t *mbox64 = adapter->mbox64;
	volatile mbox_t *mbox = adapter->mbox;
	u8	byte;

	/* Wait until mailbox is free */
	if(mega_busywait_mbox (adapter))
		goto bug_blocked_mailbox;

	/* Copy mailbox data into host structure */
	memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out));
	mbox->m_out.cmdid = 0xFE;
	mbox->m_in.busy = 1;

	switch (raw_mbox[0]) {
	case MEGA_MBOXCMD_LREAD64:
	case MEGA_MBOXCMD_LWRITE64:
	case MEGA_MBOXCMD_PASSTHRU64:
	case MEGA_MBOXCMD_EXTPTHRU:
		mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
		mbox64->xfer_segment_hi = 0;
		mbox->m_out.xferaddr = 0xFFFFFFFF;
		break;
	default:
		mbox64->xfer_segment_lo = 0;
		mbox64->xfer_segment_hi = 0;
	}

	if( likely(adapter->flag & BOARD_MEMMAP) ) {
		mbox->m_in.poll = 0;
		mbox->m_in.ack = 0;
		mbox->m_in.numstatus = 0xFF;
		mbox->m_in.status = 0xFF;
		WRINDOOR(adapter, adapter->mbox_dma | 0x1);

		while((volatile u8)mbox->m_in.numstatus == 0xFF)
			cpu_relax();

		mbox->m_in.numstatus = 0xFF;

		while( (volatile u8)mbox->m_in.poll != 0x77 )
			cpu_relax();

		mbox->m_in.poll = 0;
		mbox->m_in.ack = 0x77;

		WRINDOOR(adapter, adapter->mbox_dma | 0x2);

		while(RDINDOOR(adapter) & 0x2)
			cpu_relax();
	}
	else {
		irq_disable(adapter);
		issue_command(adapter);

		while (!((byte = irq_state(adapter)) & INTR_VALID))
			cpu_relax();

		set_irq_state(adapter, byte);
irq_enable(adapter); 1238 irq_ack(adapter); 1239 } 1240 1241 return mbox->m_in.status; 1242 1243bug_blocked_mailbox: 1244 dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n"); 1245 udelay (1000); 1246 return -1; 1247} 1248 1249 1250/** 1251 * megaraid_isr_iomapped() 1252 * @irq: irq 1253 * @devp: pointer to our soft state 1254 * 1255 * Interrupt service routine for io-mapped controllers. 1256 * Find out if our device is interrupting. If yes, acknowledge the interrupt 1257 * and service the completed commands. 1258 */ 1259static irqreturn_t 1260megaraid_isr_iomapped(int irq, void *devp) 1261{ 1262 adapter_t *adapter = devp; 1263 unsigned long flags; 1264 u8 status; 1265 u8 nstatus; 1266 u8 completed[MAX_FIRMWARE_STATUS]; 1267 u8 byte; 1268 int handled = 0; 1269 1270 1271 /* 1272 * loop till F/W has more commands for us to complete. 1273 */ 1274 spin_lock_irqsave(&adapter->lock, flags); 1275 1276 do { 1277 /* Check if a valid interrupt is pending */ 1278 byte = irq_state(adapter); 1279 if( (byte & VALID_INTR_BYTE) == 0 ) { 1280 /* 1281 * No more pending commands 1282 */ 1283 goto out_unlock; 1284 } 1285 set_irq_state(adapter, byte); 1286 1287 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus) 1288 == 0xFF) 1289 cpu_relax(); 1290 adapter->mbox->m_in.numstatus = 0xFF; 1291 1292 status = adapter->mbox->m_in.status; 1293 1294 /* 1295 * decrement the pending queue counter 1296 */ 1297 atomic_sub(nstatus, &adapter->pend_cmds); 1298 1299 memcpy(completed, (void *)adapter->mbox->m_in.completed, 1300 nstatus); 1301 1302 /* Acknowledge interrupt */ 1303 irq_ack(adapter); 1304 1305 mega_cmd_done(adapter, completed, nstatus, status); 1306 1307 mega_rundoneq(adapter); 1308 1309 handled = 1; 1310 1311 /* Loop through any pending requests */ 1312 if(atomic_read(&adapter->quiescent) == 0) { 1313 mega_runpendq(adapter); 1314 } 1315 1316 } while(1); 1317 1318 out_unlock: 1319 1320 spin_unlock_irqrestore(&adapter->lock, flags); 1321 1322 return IRQ_RETVAL(handled); 1323} 1324 1325 1326/** 1327 * megaraid_isr_memmapped() 1328 * @irq: irq 1329 * @devp: pointer to our soft state 1330 * 1331 * Interrupt service routine for memory-mapped controllers. 1332 * Find out if our device is interrupting. If yes, acknowledge the interrupt 1333 * and service the completed commands. 1334 */ 1335static irqreturn_t 1336megaraid_isr_memmapped(int irq, void *devp) 1337{ 1338 adapter_t *adapter = devp; 1339 unsigned long flags; 1340 u8 status; 1341 u32 dword = 0; 1342 u8 nstatus; 1343 u8 completed[MAX_FIRMWARE_STATUS]; 1344 int handled = 0; 1345 1346 1347 /* 1348 * loop till F/W has more commands for us to complete. 
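 *
 * The memory-mapped completion handshake implemented below is roughly:
 *
 *	while (RDOUTDOOR(adapter) == 0x10001234) {	// interrupt pending
 *		WROUTDOOR(adapter, 0x10001234);		// clear outbound doorbell
 *		wait for mbox->m_in.numstatus != 0xFF;	// f/w posted status
 *		copy completed[] ids and status;
 *		WRINDOOR(adapter, 0x2);			// ack, wait for bit to clear
 *		mega_cmd_done(); mega_rundoneq();	// complete to mid-layer
 *	}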
1349 */ 1350 spin_lock_irqsave(&adapter->lock, flags); 1351 1352 do { 1353 /* Check if a valid interrupt is pending */ 1354 dword = RDOUTDOOR(adapter); 1355 if(dword != 0x10001234) { 1356 /* 1357 * No more pending commands 1358 */ 1359 goto out_unlock; 1360 } 1361 WROUTDOOR(adapter, 0x10001234); 1362 1363 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus) 1364 == 0xFF) { 1365 cpu_relax(); 1366 } 1367 adapter->mbox->m_in.numstatus = 0xFF; 1368 1369 status = adapter->mbox->m_in.status; 1370 1371 /* 1372 * decrement the pending queue counter 1373 */ 1374 atomic_sub(nstatus, &adapter->pend_cmds); 1375 1376 memcpy(completed, (void *)adapter->mbox->m_in.completed, 1377 nstatus); 1378 1379 /* Acknowledge interrupt */ 1380 WRINDOOR(adapter, 0x2); 1381 1382 handled = 1; 1383 1384 while( RDINDOOR(adapter) & 0x02 ) 1385 cpu_relax(); 1386 1387 mega_cmd_done(adapter, completed, nstatus, status); 1388 1389 mega_rundoneq(adapter); 1390 1391 /* Loop through any pending requests */ 1392 if(atomic_read(&adapter->quiescent) == 0) { 1393 mega_runpendq(adapter); 1394 } 1395 1396 } while(1); 1397 1398 out_unlock: 1399 1400 spin_unlock_irqrestore(&adapter->lock, flags); 1401 1402 return IRQ_RETVAL(handled); 1403} 1404/** 1405 * mega_cmd_done() 1406 * @adapter: pointer to our soft state 1407 * @completed: array of ids of completed commands 1408 * @nstatus: number of completed commands 1409 * @status: status of the last command completed 1410 * 1411 * Complete the commands and call the scsi mid-layer callback hooks. 1412 */ 1413static void 1414mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status) 1415{ 1416 mega_ext_passthru *epthru = NULL; 1417 struct scatterlist *sgl; 1418 struct scsi_cmnd *cmd = NULL; 1419 mega_passthru *pthru = NULL; 1420 mbox_t *mbox = NULL; 1421 u8 c; 1422 scb_t *scb; 1423 int islogical; 1424 int cmdid; 1425 int i; 1426 1427 /* 1428 * for all the commands completed, call the mid-layer callback routine 1429 * and free the scb. 1430 */ 1431 for( i = 0; i < nstatus; i++ ) { 1432 1433 cmdid = completed[i]; 1434 1435 /* 1436 * Only free SCBs for the commands coming down from the 1437 * mid-layer, not for which were issued internally 1438 * 1439 * For internal command, restore the status returned by the 1440 * firmware so that user can interpret it. 
1441 */ 1442 if (cmdid == CMDID_INT_CMDS) { 1443 scb = &adapter->int_scb; 1444 1445 list_del_init(&scb->list); 1446 scb->state = SCB_FREE; 1447 1448 adapter->int_status = status; 1449 complete(&adapter->int_waitq); 1450 } else { 1451 scb = &adapter->scb_list[cmdid]; 1452 1453 /* 1454 * Make sure f/w has completed a valid command 1455 */ 1456 if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) { 1457 dev_crit(&adapter->dev->dev, "invalid command " 1458 "Id %d, scb->state:%x, scsi cmd:%p\n", 1459 cmdid, scb->state, scb->cmd); 1460 1461 continue; 1462 } 1463 1464 /* 1465 * Was a abort issued for this command 1466 */ 1467 if( scb->state & SCB_ABORT ) { 1468 1469 dev_warn(&adapter->dev->dev, 1470 "aborted cmd [%x] complete\n", 1471 scb->idx); 1472 1473 scb->cmd->result = (DID_ABORT << 16); 1474 1475 list_add_tail(SCSI_LIST(scb->cmd), 1476 &adapter->completed_list); 1477 1478 mega_free_scb(adapter, scb); 1479 1480 continue; 1481 } 1482 1483 /* 1484 * Was a reset issued for this command 1485 */ 1486 if( scb->state & SCB_RESET ) { 1487 1488 dev_warn(&adapter->dev->dev, 1489 "reset cmd [%x] complete\n", 1490 scb->idx); 1491 1492 scb->cmd->result = (DID_RESET << 16); 1493 1494 list_add_tail(SCSI_LIST(scb->cmd), 1495 &adapter->completed_list); 1496 1497 mega_free_scb (adapter, scb); 1498 1499 continue; 1500 } 1501 1502 cmd = scb->cmd; 1503 pthru = scb->pthru; 1504 epthru = scb->epthru; 1505 mbox = (mbox_t *)scb->raw_mbox; 1506 1507#if MEGA_HAVE_STATS 1508 { 1509 1510 int logdrv = mbox->m_out.logdrv; 1511 1512 islogical = adapter->logdrv_chan[cmd->channel]; 1513 /* 1514 * Maintain an error counter for the logical drive. 1515 * Some application like SNMP agent need such 1516 * statistics 1517 */ 1518 if( status && islogical && (cmd->cmnd[0] == READ_6 || 1519 cmd->cmnd[0] == READ_10 || 1520 cmd->cmnd[0] == READ_12)) { 1521 /* 1522 * Logical drive number increases by 0x80 when 1523 * a logical drive is deleted 1524 */ 1525 adapter->rd_errors[logdrv%0x80]++; 1526 } 1527 1528 if( status && islogical && (cmd->cmnd[0] == WRITE_6 || 1529 cmd->cmnd[0] == WRITE_10 || 1530 cmd->cmnd[0] == WRITE_12)) { 1531 /* 1532 * Logical drive number increases by 0x80 when 1533 * a logical drive is deleted 1534 */ 1535 adapter->wr_errors[logdrv%0x80]++; 1536 } 1537 1538 } 1539#endif 1540 } 1541 1542 /* 1543 * Do not return the presence of hard disk on the channel so, 1544 * inquiry sent, and returned data==hard disk or removable 1545 * hard disk and not logical, request should return failure! - 1546 * PJ 1547 */ 1548 islogical = adapter->logdrv_chan[cmd->device->channel]; 1549 if( cmd->cmnd[0] == INQUIRY && !islogical ) { 1550 1551 sgl = scsi_sglist(cmd); 1552 if( sg_page(sgl) ) { 1553 c = *(unsigned char *) sg_virt(&sgl[0]); 1554 } else { 1555 dev_warn(&adapter->dev->dev, "invalid sg\n"); 1556 c = 0; 1557 } 1558 1559 if(IS_RAID_CH(adapter, cmd->device->channel) && 1560 ((c & 0x1F ) == TYPE_DISK)) { 1561 status = 0xF0; 1562 } 1563 } 1564 1565 /* clear result; otherwise, success returns corrupt value */ 1566 cmd->result = 0; 1567 1568 /* Convert MegaRAID status to Linux error code */ 1569 switch (status) { 1570 case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */ 1571 cmd->result |= (DID_OK << 16); 1572 break; 1573 1574 case 0x02: /* ERROR_ABORTED, i.e. 
1575 SCSI_STATUS_CHECK_CONDITION */ 1576 1577 /* set sense_buffer and result fields */ 1578 if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU || 1579 mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) { 1580 1581 memcpy(cmd->sense_buffer, pthru->reqsensearea, 1582 14); 1583 1584 cmd->result = SAM_STAT_CHECK_CONDITION; 1585 } 1586 else { 1587 if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) { 1588 1589 memcpy(cmd->sense_buffer, 1590 epthru->reqsensearea, 14); 1591 1592 cmd->result = SAM_STAT_CHECK_CONDITION; 1593 } else 1594 scsi_build_sense(cmd, 0, 1595 ABORTED_COMMAND, 0, 0); 1596 } 1597 break; 1598 1599 case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e. 1600 SCSI_STATUS_BUSY */ 1601 cmd->result |= (DID_BUS_BUSY << 16) | status; 1602 break; 1603 1604 default: 1605#if MEGA_HAVE_CLUSTERING 1606 /* 1607 * If TEST_UNIT_READY fails, we know 1608 * MEGA_RESERVATION_STATUS failed 1609 */ 1610 if( cmd->cmnd[0] == TEST_UNIT_READY ) { 1611 cmd->result |= (DID_ERROR << 16) | 1612 SAM_STAT_RESERVATION_CONFLICT; 1613 } 1614 else 1615 /* 1616 * Error code returned is 1 if Reserve or Release 1617 * failed or the input parameter is invalid 1618 */ 1619 if( status == 1 && 1620 (cmd->cmnd[0] == RESERVE || 1621 cmd->cmnd[0] == RELEASE) ) { 1622 1623 cmd->result |= (DID_ERROR << 16) | 1624 SAM_STAT_RESERVATION_CONFLICT; 1625 } 1626 else 1627#endif 1628 cmd->result |= (DID_BAD_TARGET << 16)|status; 1629 } 1630 1631 mega_free_scb(adapter, scb); 1632 1633 /* Add Scsi_Command to end of completed queue */ 1634 list_add_tail(SCSI_LIST(cmd), &adapter->completed_list); 1635 } 1636} 1637 1638 1639/* 1640 * mega_runpendq() 1641 * 1642 * Run through the list of completed requests and finish it 1643 */ 1644static void 1645mega_rundoneq (adapter_t *adapter) 1646{ 1647 struct megaraid_cmd_priv *cmd_priv; 1648 1649 list_for_each_entry(cmd_priv, &adapter->completed_list, entry) 1650 scsi_done(megaraid_to_scsi_cmd(cmd_priv)); 1651 1652 INIT_LIST_HEAD(&adapter->completed_list); 1653} 1654 1655 1656/* 1657 * Free a SCB structure 1658 * Note: We assume the scsi commands associated with this scb is not free yet. 1659 */ 1660static void 1661mega_free_scb(adapter_t *adapter, scb_t *scb) 1662{ 1663 switch( scb->dma_type ) { 1664 1665 case MEGA_DMA_TYPE_NONE: 1666 break; 1667 1668 case MEGA_SGLIST: 1669 scsi_dma_unmap(scb->cmd); 1670 break; 1671 default: 1672 break; 1673 } 1674 1675 /* 1676 * Remove from the pending list 1677 */ 1678 list_del_init(&scb->list); 1679 1680 /* Link the scb back into free list */ 1681 scb->state = SCB_FREE; 1682 scb->cmd = NULL; 1683 1684 list_add(&scb->list, &adapter->free_list); 1685} 1686 1687 1688static int 1689__mega_busywait_mbox (adapter_t *adapter) 1690{ 1691 volatile mbox_t *mbox = adapter->mbox; 1692 long counter; 1693 1694 for (counter = 0; counter < 10000; counter++) { 1695 if (!mbox->m_in.busy) 1696 return 0; 1697 udelay(100); 1698 cond_resched(); 1699 } 1700 return -1; /* give up after 1 second */ 1701} 1702 1703/* 1704 * Copies data to SGLIST 1705 * Note: For 64 bit cards, we need a minimum of one SG element for read/write 1706 */ 1707static int 1708mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) 1709{ 1710 struct scatterlist *sg; 1711 struct scsi_cmnd *cmd; 1712 int sgcnt; 1713 int idx; 1714 1715 cmd = scb->cmd; 1716 1717 /* 1718 * Copy Scatter-Gather list info into controller structure. 
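 * As a fast path, a request with a single scatter-gather segment on a
 * card without 64-bit addressing is passed inline: the segment's DMA
 * address and length go straight into *buf and *len and no sg table is
 * used (see the early return below).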
1719 * 1720 * The number of sg elements returned must not exceed our limit 1721 */ 1722 sgcnt = scsi_dma_map(cmd); 1723 1724 scb->dma_type = MEGA_SGLIST; 1725 1726 BUG_ON(sgcnt > adapter->sglen || sgcnt < 0); 1727 1728 *len = 0; 1729 1730 if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) { 1731 sg = scsi_sglist(cmd); 1732 scb->dma_h_bulkdata = sg_dma_address(sg); 1733 *buf = (u32)scb->dma_h_bulkdata; 1734 *len = sg_dma_len(sg); 1735 return 0; 1736 } 1737 1738 scsi_for_each_sg(cmd, sg, sgcnt, idx) { 1739 if (adapter->has_64bit_addr) { 1740 scb->sgl64[idx].address = sg_dma_address(sg); 1741 *len += scb->sgl64[idx].length = sg_dma_len(sg); 1742 } else { 1743 scb->sgl[idx].address = sg_dma_address(sg); 1744 *len += scb->sgl[idx].length = sg_dma_len(sg); 1745 } 1746 } 1747 1748 /* Reset pointer and length fields */ 1749 *buf = scb->sgl_dma_addr; 1750 1751 /* Return count of SG requests */ 1752 return sgcnt; 1753} 1754 1755 1756/* 1757 * mega_8_to_40ld() 1758 * 1759 * takes all info in AdapterInquiry structure and puts it into ProductInfo and 1760 * Enquiry3 structures for later use 1761 */ 1762static void 1763mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3, 1764 mega_product_info *product_info) 1765{ 1766 int i; 1767 1768 product_info->max_commands = inquiry->adapter_info.max_commands; 1769 enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate; 1770 product_info->nchannels = inquiry->adapter_info.nchannels; 1771 1772 for (i = 0; i < 4; i++) { 1773 product_info->fw_version[i] = 1774 inquiry->adapter_info.fw_version[i]; 1775 1776 product_info->bios_version[i] = 1777 inquiry->adapter_info.bios_version[i]; 1778 } 1779 enquiry3->cache_flush_interval = 1780 inquiry->adapter_info.cache_flush_interval; 1781 1782 product_info->dram_size = inquiry->adapter_info.dram_size; 1783 1784 enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv; 1785 1786 for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) { 1787 enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i]; 1788 enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i]; 1789 enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i]; 1790 } 1791 1792 for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++) 1793 enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i]; 1794} 1795 1796static inline void 1797mega_free_sgl(adapter_t *adapter) 1798{ 1799 scb_t *scb; 1800 int i; 1801 1802 for(i = 0; i < adapter->max_cmds; i++) { 1803 1804 scb = &adapter->scb_list[i]; 1805 1806 if( scb->sgl64 ) { 1807 dma_free_coherent(&adapter->dev->dev, 1808 sizeof(mega_sgl64) * adapter->sglen, 1809 scb->sgl64, scb->sgl_dma_addr); 1810 1811 scb->sgl64 = NULL; 1812 } 1813 1814 if( scb->pthru ) { 1815 dma_free_coherent(&adapter->dev->dev, 1816 sizeof(mega_passthru), scb->pthru, 1817 scb->pthru_dma_addr); 1818 1819 scb->pthru = NULL; 1820 } 1821 1822 if( scb->epthru ) { 1823 dma_free_coherent(&adapter->dev->dev, 1824 sizeof(mega_ext_passthru), 1825 scb->epthru, scb->epthru_dma_addr); 1826 1827 scb->epthru = NULL; 1828 } 1829 1830 } 1831} 1832 1833 1834/* 1835 * Get information about the card/driver 1836 */ 1837const char * 1838megaraid_info(struct Scsi_Host *host) 1839{ 1840 static char buffer[512]; 1841 adapter_t *adapter; 1842 1843 adapter = (adapter_t *)host->hostdata; 1844 1845 sprintf (buffer, 1846 "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns", 1847 adapter->fw_version, adapter->product_info.max_commands, 1848 adapter->host->max_id, adapter->host->max_channel, 1849 (u32)adapter->host->max_lun); 1850 return buffer; 1851} 1852 1853/* 1854 * 
Abort a previous SCSI request. Only commands on the pending list can be 1855 * aborted. All the commands issued to the F/W must complete. 1856 */ 1857static int 1858megaraid_abort(struct scsi_cmnd *cmd) 1859{ 1860 adapter_t *adapter; 1861 int rval; 1862 1863 adapter = (adapter_t *)cmd->device->host->hostdata; 1864 1865 rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT); 1866 1867 /* 1868 * This is required here to complete any completed requests 1869 * to be communicated over to the mid layer. 1870 */ 1871 mega_rundoneq(adapter); 1872 1873 return rval; 1874} 1875 1876 1877static int 1878megaraid_reset(struct scsi_cmnd *cmd) 1879{ 1880 adapter_t *adapter; 1881 megacmd_t mc; 1882 int rval; 1883 1884 adapter = (adapter_t *)cmd->device->host->hostdata; 1885 1886#if MEGA_HAVE_CLUSTERING 1887 mc.cmd = MEGA_CLUSTER_CMD; 1888 mc.opcode = MEGA_RESET_RESERVATIONS; 1889 1890 if( mega_internal_command(adapter, &mc, NULL) != 0 ) { 1891 dev_warn(&adapter->dev->dev, "reservation reset failed\n"); 1892 } 1893 else { 1894 dev_info(&adapter->dev->dev, "reservation reset\n"); 1895 } 1896#endif 1897 1898 spin_lock_irq(&adapter->lock); 1899 1900 rval = megaraid_abort_and_reset(adapter, cmd, SCB_RESET); 1901 1902 /* 1903 * This is required here to complete any completed requests 1904 * to be communicated over to the mid layer. 1905 */ 1906 mega_rundoneq(adapter); 1907 spin_unlock_irq(&adapter->lock); 1908 1909 return rval; 1910} 1911 1912/** 1913 * megaraid_abort_and_reset() 1914 * @adapter: megaraid soft state 1915 * @cmd: scsi command to be aborted or reset 1916 * @aor: abort or reset flag 1917 * 1918 * Try to locate the scsi command in the pending queue. If found and is not 1919 * issued to the controller, abort/reset it. Otherwise return failure 1920 */ 1921static int 1922megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor) 1923{ 1924 struct list_head *pos, *next; 1925 scb_t *scb; 1926 1927 dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n", 1928 (aor == SCB_ABORT)? "ABORTING":"RESET", 1929 cmd->cmnd[0], cmd->device->channel, 1930 cmd->device->id, (u32)cmd->device->lun); 1931 1932 if(list_empty(&adapter->pending_list)) 1933 return FAILED; 1934 1935 list_for_each_safe(pos, next, &adapter->pending_list) { 1936 1937 scb = list_entry(pos, scb_t, list); 1938 1939 if (scb->cmd == cmd) { /* Found command */ 1940 1941 scb->state |= aor; 1942 1943 /* 1944 * Check if this command has firmware ownership. If 1945 * yes, we cannot reset this command. Whenever f/w 1946 * completes this command, we will return appropriate 1947 * status from ISR. 1948 */ 1949 if( scb->state & SCB_ISSUED ) { 1950 1951 dev_warn(&adapter->dev->dev, 1952 "%s[%x], fw owner\n", 1953 (aor==SCB_ABORT) ? "ABORTING":"RESET", 1954 scb->idx); 1955 1956 return FAILED; 1957 } 1958 else { 1959 1960 /* 1961 * Not yet issued! Remove from the pending 1962 * list 1963 */ 1964 dev_warn(&adapter->dev->dev, 1965 "%s-[%x], driver owner\n", 1966 (aor==SCB_ABORT) ? 
"ABORTING":"RESET", 1967 scb->idx); 1968 1969 mega_free_scb(adapter, scb); 1970 1971 if( aor == SCB_ABORT ) { 1972 cmd->result = (DID_ABORT << 16); 1973 } 1974 else { 1975 cmd->result = (DID_RESET << 16); 1976 } 1977 1978 list_add_tail(SCSI_LIST(cmd), 1979 &adapter->completed_list); 1980 1981 return SUCCESS; 1982 } 1983 } 1984 } 1985 1986 return FAILED; 1987} 1988 1989static inline int 1990make_local_pdev(adapter_t *adapter, struct pci_dev **pdev) 1991{ 1992 *pdev = pci_alloc_dev(NULL); 1993 1994 if( *pdev == NULL ) return -1; 1995 1996 memcpy(*pdev, adapter->dev, sizeof(struct pci_dev)); 1997 1998 if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) { 1999 kfree(*pdev); 2000 return -1; 2001 } 2002 2003 return 0; 2004} 2005 2006static inline void 2007free_local_pdev(struct pci_dev *pdev) 2008{ 2009 kfree(pdev); 2010} 2011 2012/** 2013 * mega_allocate_inquiry() 2014 * @dma_handle: handle returned for dma address 2015 * @pdev: handle to pci device 2016 * 2017 * allocates memory for inquiry structure 2018 */ 2019static inline void * 2020mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev) 2021{ 2022 return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3), 2023 dma_handle, GFP_KERNEL); 2024} 2025 2026 2027static inline void 2028mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev) 2029{ 2030 dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry, 2031 dma_handle); 2032} 2033 2034 2035#ifdef CONFIG_PROC_FS 2036/* Following code handles /proc fs */ 2037 2038/** 2039 * proc_show_config() 2040 * @m: Synthetic file construction data 2041 * @v: File iterator 2042 * 2043 * Display configuration information about the controller. 2044 */ 2045static int 2046proc_show_config(struct seq_file *m, void *v) 2047{ 2048 2049 adapter_t *adapter = m->private; 2050 2051 seq_puts(m, MEGARAID_VERSION); 2052 if(adapter->product_info.product_name[0]) 2053 seq_printf(m, "%s\n", adapter->product_info.product_name); 2054 2055 seq_puts(m, "Controller Type: "); 2056 2057 if( adapter->flag & BOARD_MEMMAP ) 2058 seq_puts(m, "438/466/467/471/493/518/520/531/532\n"); 2059 else 2060 seq_puts(m, "418/428/434\n"); 2061 2062 if(adapter->flag & BOARD_40LD) 2063 seq_puts(m, "Controller Supports 40 Logical Drives\n"); 2064 2065 if(adapter->flag & BOARD_64BIT) 2066 seq_puts(m, "Controller capable of 64-bit memory addressing\n"); 2067 if( adapter->has_64bit_addr ) 2068 seq_puts(m, "Controller using 64-bit memory addressing\n"); 2069 else 2070 seq_puts(m, "Controller is not using 64-bit memory addressing\n"); 2071 2072 seq_printf(m, "Base = %08lx, Irq = %d, ", 2073 adapter->base, adapter->host->irq); 2074 2075 seq_printf(m, "Logical Drives = %d, Channels = %d\n", 2076 adapter->numldrv, adapter->product_info.nchannels); 2077 2078 seq_printf(m, "Version =%s:%s, DRAM = %dMb\n", 2079 adapter->fw_version, adapter->bios_version, 2080 adapter->product_info.dram_size); 2081 2082 seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n", 2083 adapter->product_info.max_commands, adapter->max_cmds); 2084 2085 seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb); 2086 seq_printf(m, "support_random_del = %d\n", adapter->support_random_del); 2087 seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled); 2088 seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv); 2089 seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled); 2090 seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch); 2091 seq_printf(m, "boot_pdrv_tgt = %d\n", 
adapter->boot_pdrv_tgt); 2092 seq_printf(m, "quiescent = %d\n", 2093 atomic_read(&adapter->quiescent)); 2094 seq_printf(m, "has_cluster = %d\n", adapter->has_cluster); 2095 2096 seq_puts(m, "\nModule Parameters:\n"); 2097 seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun); 2098 seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io); 2099 return 0; 2100} 2101 2102/** 2103 * proc_show_stat() 2104 * @m: Synthetic file construction data 2105 * @v: File iterator 2106 * 2107 * Display statistical information about the I/O activity. 2108 */ 2109static int 2110proc_show_stat(struct seq_file *m, void *v) 2111{ 2112 adapter_t *adapter = m->private; 2113#if MEGA_HAVE_STATS 2114 int i; 2115#endif 2116 2117 seq_puts(m, "Statistical Information for this controller\n"); 2118 seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds)); 2119#if MEGA_HAVE_STATS 2120 for(i = 0; i < adapter->numldrv; i++) { 2121 seq_printf(m, "Logical Drive %d:\n", i); 2122 seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n", 2123 adapter->nreads[i], adapter->nwrites[i]); 2124 seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n", 2125 adapter->nreadblocks[i], adapter->nwriteblocks[i]); 2126 seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n", 2127 adapter->rd_errors[i], adapter->wr_errors[i]); 2128 } 2129#else 2130 seq_puts(m, "IO and error counters not compiled in driver.\n"); 2131#endif 2132 return 0; 2133} 2134 2135 2136/** 2137 * proc_show_mbox() 2138 * @m: Synthetic file construction data 2139 * @v: File iterator 2140 * 2141 * Display mailbox information for the last command issued. This information 2142 * is good for debugging. 2143 */ 2144static int 2145proc_show_mbox(struct seq_file *m, void *v) 2146{ 2147 adapter_t *adapter = m->private; 2148 volatile mbox_t *mbox = adapter->mbox; 2149 2150 seq_puts(m, "Contents of Mail Box Structure\n"); 2151 seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd); 2152 seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid); 2153 seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors); 2154 seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba); 2155 seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr); 2156 seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv); 2157 seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements); 2158 seq_printf(m, " Busy = %01x\n", mbox->m_in.busy); 2159 seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status); 2160 return 0; 2161} 2162 2163 2164/** 2165 * proc_show_rebuild_rate() 2166 * @m: Synthetic file construction data 2167 * @v: File iterator 2168 * 2169 * Display current rebuild rate 2170 */ 2171static int 2172proc_show_rebuild_rate(struct seq_file *m, void *v) 2173{ 2174 adapter_t *adapter = m->private; 2175 dma_addr_t dma_handle; 2176 caddr_t inquiry; 2177 struct pci_dev *pdev; 2178 2179 if( make_local_pdev(adapter, &pdev) != 0 ) 2180 return 0; 2181 2182 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2183 goto free_pdev; 2184 2185 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2186 seq_puts(m, "Adapter inquiry failed.\n"); 2187 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2188 goto free_inquiry; 2189 } 2190 2191 if( adapter->flag & BOARD_40LD ) 2192 seq_printf(m, "Rebuild Rate: [%d%%]\n", 2193 ((mega_inquiry3 *)inquiry)->rebuild_rate); 2194 else 2195 seq_printf(m, "Rebuild Rate: [%d%%]\n", 2196 ((mraid_ext_inquiry *) 2197 inquiry)->raid_inq.adapter_info.rebuild_rate); 2198 2199free_inquiry: 2200 mega_free_inquiry(inquiry, dma_handle, pdev); 
2201free_pdev: 2202 free_local_pdev(pdev); 2203 return 0; 2204} 2205 2206 2207/** 2208 * proc_show_battery() 2209 * @m: Synthetic file construction data 2210 * @v: File iterator 2211 * 2212 * Display information about the battery module on the controller. 2213 */ 2214static int 2215proc_show_battery(struct seq_file *m, void *v) 2216{ 2217 adapter_t *adapter = m->private; 2218 dma_addr_t dma_handle; 2219 caddr_t inquiry; 2220 struct pci_dev *pdev; 2221 u8 battery_status; 2222 2223 if( make_local_pdev(adapter, &pdev) != 0 ) 2224 return 0; 2225 2226 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2227 goto free_pdev; 2228 2229 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2230 seq_puts(m, "Adapter inquiry failed.\n"); 2231 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2232 goto free_inquiry; 2233 } 2234 2235 if( adapter->flag & BOARD_40LD ) { 2236 battery_status = ((mega_inquiry3 *)inquiry)->battery_status; 2237 } 2238 else { 2239 battery_status = ((mraid_ext_inquiry *)inquiry)-> 2240 raid_inq.adapter_info.battery_status; 2241 } 2242 2243 /* 2244 * Decode the battery status 2245 */ 2246 seq_printf(m, "Battery Status:[%d]", battery_status); 2247 2248 if(battery_status == MEGA_BATT_CHARGE_DONE) 2249 seq_puts(m, " Charge Done"); 2250 2251 if(battery_status & MEGA_BATT_MODULE_MISSING) 2252 seq_puts(m, " Module Missing"); 2253 2254 if(battery_status & MEGA_BATT_LOW_VOLTAGE) 2255 seq_puts(m, " Low Voltage"); 2256 2257 if(battery_status & MEGA_BATT_TEMP_HIGH) 2258 seq_puts(m, " Temperature High"); 2259 2260 if(battery_status & MEGA_BATT_PACK_MISSING) 2261 seq_puts(m, " Pack Missing"); 2262 2263 if(battery_status & MEGA_BATT_CHARGE_INPROG) 2264 seq_puts(m, " Charge In-progress"); 2265 2266 if(battery_status & MEGA_BATT_CHARGE_FAIL) 2267 seq_puts(m, " Charge Fail"); 2268 2269 if(battery_status & MEGA_BATT_CYCLES_EXCEEDED) 2270 seq_puts(m, " Cycles Exceeded"); 2271 2272 seq_putc(m, '\n'); 2273 2274free_inquiry: 2275 mega_free_inquiry(inquiry, dma_handle, pdev); 2276free_pdev: 2277 free_local_pdev(pdev); 2278 return 0; 2279} 2280 2281 2282/* 2283 * Display scsi inquiry 2284 */ 2285static void 2286mega_print_inquiry(struct seq_file *m, char *scsi_inq) 2287{ 2288 int i; 2289 2290 seq_puts(m, " Vendor: "); 2291 seq_write(m, scsi_inq + 8, 8); 2292 seq_puts(m, " Model: "); 2293 seq_write(m, scsi_inq + 16, 16); 2294 seq_puts(m, " Rev: "); 2295 seq_write(m, scsi_inq + 32, 4); 2296 seq_putc(m, '\n'); 2297 2298 i = scsi_inq[0] & 0x1f; 2299 seq_printf(m, " Type: %s ", scsi_device_type(i)); 2300 2301 seq_printf(m, " ANSI SCSI revision: %02x", 2302 scsi_inq[2] & 0x07); 2303 2304 if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 ) 2305 seq_puts(m, " CCS\n"); 2306 else 2307 seq_putc(m, '\n'); 2308} 2309 2310/** 2311 * proc_show_pdrv() 2312 * @m: Synthetic file construction data 2313 * @adapter: pointer to our soft state 2314 * @channel: channel 2315 * 2316 * Display information about the physical drives. 
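 *
 * The per-device state bytes come from the adapter inquiry issued below:
 * one byte per device, indexed by (channel * 16 + target), with the low
 * nibble holding one of the PDRV_* states decoded in the loop.  A minimal
 * sketch of the lookup, mirroring the code below (illustrative only):
 *
 *	state = pdrv_state[channel * 16 + tgt] & 0x0F;	/* PDRV_* state */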
2317 */ 2318static int 2319proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel) 2320{ 2321 dma_addr_t dma_handle; 2322 char *scsi_inq; 2323 dma_addr_t scsi_inq_dma_handle; 2324 caddr_t inquiry; 2325 struct pci_dev *pdev; 2326 u8 *pdrv_state; 2327 u8 state; 2328 int tgt; 2329 int max_channels; 2330 int i; 2331 2332 if( make_local_pdev(adapter, &pdev) != 0 ) 2333 return 0; 2334 2335 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2336 goto free_pdev; 2337 2338 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2339 seq_puts(m, "Adapter inquiry failed.\n"); 2340 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2341 goto free_inquiry; 2342 } 2343 2344 2345 scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle, 2346 GFP_KERNEL); 2347 if( scsi_inq == NULL ) { 2348 seq_puts(m, "memory not available for scsi inq.\n"); 2349 goto free_inquiry; 2350 } 2351 2352 if( adapter->flag & BOARD_40LD ) { 2353 pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state; 2354 } 2355 else { 2356 pdrv_state = ((mraid_ext_inquiry *)inquiry)-> 2357 raid_inq.pdrv_info.pdrv_state; 2358 } 2359 2360 max_channels = adapter->product_info.nchannels; 2361 2362 if( channel >= max_channels ) { 2363 goto free_pci; 2364 } 2365 2366 for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) { 2367 2368 i = channel*16 + tgt; 2369 2370 state = *(pdrv_state + i); 2371 switch( state & 0x0F ) { 2372 case PDRV_ONLINE: 2373 seq_printf(m, "Channel:%2d Id:%2d State: Online", 2374 channel, tgt); 2375 break; 2376 2377 case PDRV_FAILED: 2378 seq_printf(m, "Channel:%2d Id:%2d State: Failed", 2379 channel, tgt); 2380 break; 2381 2382 case PDRV_RBLD: 2383 seq_printf(m, "Channel:%2d Id:%2d State: Rebuild", 2384 channel, tgt); 2385 break; 2386 2387 case PDRV_HOTSPARE: 2388 seq_printf(m, "Channel:%2d Id:%2d State: Hot spare", 2389 channel, tgt); 2390 break; 2391 2392 default: 2393 seq_printf(m, "Channel:%2d Id:%2d State: Un-configured", 2394 channel, tgt); 2395 break; 2396 } 2397 2398 /* 2399 * This interface displays inquiries for disk drives 2400 * only. Inquries for logical drives and non-disk 2401 * devices are available through /proc/scsi/scsi 2402 */ 2403 memset(scsi_inq, 0, 256); 2404 if( mega_internal_dev_inquiry(adapter, channel, tgt, 2405 scsi_inq_dma_handle) || 2406 (scsi_inq[0] & 0x1F) != TYPE_DISK ) { 2407 continue; 2408 } 2409 2410 /* 2411 * Check for overflow. We print less than 240 2412 * characters for inquiry 2413 */ 2414 seq_puts(m, ".\n"); 2415 mega_print_inquiry(m, scsi_inq); 2416 } 2417 2418free_pci: 2419 dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle); 2420free_inquiry: 2421 mega_free_inquiry(inquiry, dma_handle, pdev); 2422free_pdev: 2423 free_local_pdev(pdev); 2424 return 0; 2425} 2426 2427/** 2428 * proc_show_pdrv_ch0() 2429 * @m: Synthetic file construction data 2430 * @v: File iterator 2431 * 2432 * Display information about the physical drives on physical channel 0. 2433 */ 2434static int 2435proc_show_pdrv_ch0(struct seq_file *m, void *v) 2436{ 2437 return proc_show_pdrv(m, m->private, 0); 2438} 2439 2440 2441/** 2442 * proc_show_pdrv_ch1() 2443 * @m: Synthetic file construction data 2444 * @v: File iterator 2445 * 2446 * Display information about the physical drives on physical channel 1. 
2447 */ 2448static int 2449proc_show_pdrv_ch1(struct seq_file *m, void *v) 2450{ 2451 return proc_show_pdrv(m, m->private, 1); 2452} 2453 2454 2455/** 2456 * proc_show_pdrv_ch2() 2457 * @m: Synthetic file construction data 2458 * @v: File iterator 2459 * 2460 * Display information about the physical drives on physical channel 2. 2461 */ 2462static int 2463proc_show_pdrv_ch2(struct seq_file *m, void *v) 2464{ 2465 return proc_show_pdrv(m, m->private, 2); 2466} 2467 2468 2469/** 2470 * proc_show_pdrv_ch3() 2471 * @m: Synthetic file construction data 2472 * @v: File iterator 2473 * 2474 * Display information about the physical drives on physical channel 3. 2475 */ 2476static int 2477proc_show_pdrv_ch3(struct seq_file *m, void *v) 2478{ 2479 return proc_show_pdrv(m, m->private, 3); 2480} 2481 2482 2483/** 2484 * proc_show_rdrv() 2485 * @m: Synthetic file construction data 2486 * @adapter: pointer to our soft state 2487 * @start: starting logical drive to display 2488 * @end: ending logical drive to display 2489 * 2490 * We do not print the inquiry information since its already available through 2491 * /proc/scsi/scsi interface 2492 */ 2493static int 2494proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end ) 2495{ 2496 dma_addr_t dma_handle; 2497 logdrv_param *lparam; 2498 megacmd_t mc; 2499 char *disk_array; 2500 dma_addr_t disk_array_dma_handle; 2501 caddr_t inquiry; 2502 struct pci_dev *pdev; 2503 u8 *rdrv_state; 2504 int num_ldrv; 2505 u32 array_sz; 2506 int i; 2507 2508 if( make_local_pdev(adapter, &pdev) != 0 ) 2509 return 0; 2510 2511 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2512 goto free_pdev; 2513 2514 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2515 seq_puts(m, "Adapter inquiry failed.\n"); 2516 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2517 goto free_inquiry; 2518 } 2519 2520 memset(&mc, 0, sizeof(megacmd_t)); 2521 2522 if( adapter->flag & BOARD_40LD ) { 2523 array_sz = sizeof(disk_array_40ld); 2524 2525 rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state; 2526 2527 num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv; 2528 } 2529 else { 2530 array_sz = sizeof(disk_array_8ld); 2531 2532 rdrv_state = ((mraid_ext_inquiry *)inquiry)-> 2533 raid_inq.logdrv_info.ldrv_state; 2534 2535 num_ldrv = ((mraid_ext_inquiry *)inquiry)-> 2536 raid_inq.logdrv_info.num_ldrv; 2537 } 2538 2539 disk_array = dma_alloc_coherent(&pdev->dev, array_sz, 2540 &disk_array_dma_handle, GFP_KERNEL); 2541 2542 if( disk_array == NULL ) { 2543 seq_puts(m, "memory not available.\n"); 2544 goto free_inquiry; 2545 } 2546 2547 mc.xferaddr = (u32)disk_array_dma_handle; 2548 2549 if( adapter->flag & BOARD_40LD ) { 2550 mc.cmd = FC_NEW_CONFIG; 2551 mc.opcode = OP_DCMD_READ_CONFIG; 2552 2553 if( mega_internal_command(adapter, &mc, NULL) ) { 2554 seq_puts(m, "40LD read config failed.\n"); 2555 goto free_pci; 2556 } 2557 2558 } 2559 else { 2560 mc.cmd = NEW_READ_CONFIG_8LD; 2561 2562 if( mega_internal_command(adapter, &mc, NULL) ) { 2563 mc.cmd = READ_CONFIG_8LD; 2564 if( mega_internal_command(adapter, &mc, NULL) ) { 2565 seq_puts(m, "8LD read config failed.\n"); 2566 goto free_pci; 2567 } 2568 } 2569 } 2570 2571 for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) { 2572 2573 if( adapter->flag & BOARD_40LD ) { 2574 lparam = 2575 &((disk_array_40ld *)disk_array)->ldrv[i].lparam; 2576 } 2577 else { 2578 lparam = 2579 &((disk_array_8ld *)disk_array)->ldrv[i].lparam; 2580 } 2581 2582 /* 2583 * Check for overflow. 
We print less than 240 characters for 2584 * information about each logical drive. 2585 */ 2586 seq_printf(m, "Logical drive:%2d:, ", i); 2587 2588 switch( rdrv_state[i] & 0x0F ) { 2589 case RDRV_OFFLINE: 2590 seq_puts(m, "state: offline"); 2591 break; 2592 case RDRV_DEGRADED: 2593 seq_puts(m, "state: degraded"); 2594 break; 2595 case RDRV_OPTIMAL: 2596 seq_puts(m, "state: optimal"); 2597 break; 2598 case RDRV_DELETED: 2599 seq_puts(m, "state: deleted"); 2600 break; 2601 default: 2602 seq_puts(m, "state: unknown"); 2603 break; 2604 } 2605 2606 /* 2607 * Check if check consistency or initialization is going on 2608 * for this logical drive. 2609 */ 2610 if( (rdrv_state[i] & 0xF0) == 0x20 ) 2611 seq_puts(m, ", check-consistency in progress"); 2612 else if( (rdrv_state[i] & 0xF0) == 0x10 ) 2613 seq_puts(m, ", initialization in progress"); 2614 2615 seq_putc(m, '\n'); 2616 2617 seq_printf(m, "Span depth:%3d, ", lparam->span_depth); 2618 seq_printf(m, "RAID level:%3d, ", lparam->level); 2619 seq_printf(m, "Stripe size:%3d, ", 2620 lparam->stripe_sz ? lparam->stripe_sz/2: 128); 2621 seq_printf(m, "Row size:%3d\n", lparam->row_size); 2622 2623 seq_puts(m, "Read Policy: "); 2624 switch(lparam->read_ahead) { 2625 case NO_READ_AHEAD: 2626 seq_puts(m, "No read ahead, "); 2627 break; 2628 case READ_AHEAD: 2629 seq_puts(m, "Read ahead, "); 2630 break; 2631 case ADAP_READ_AHEAD: 2632 seq_puts(m, "Adaptive, "); 2633 break; 2634 2635 } 2636 2637 seq_puts(m, "Write Policy: "); 2638 switch(lparam->write_mode) { 2639 case WRMODE_WRITE_THRU: 2640 seq_puts(m, "Write thru, "); 2641 break; 2642 case WRMODE_WRITE_BACK: 2643 seq_puts(m, "Write back, "); 2644 break; 2645 } 2646 2647 seq_puts(m, "Cache Policy: "); 2648 switch(lparam->direct_io) { 2649 case CACHED_IO: 2650 seq_puts(m, "Cached IO\n\n"); 2651 break; 2652 case DIRECT_IO: 2653 seq_puts(m, "Direct IO\n\n"); 2654 break; 2655 } 2656 } 2657 2658free_pci: 2659 dma_free_coherent(&pdev->dev, array_sz, disk_array, 2660 disk_array_dma_handle); 2661free_inquiry: 2662 mega_free_inquiry(inquiry, dma_handle, pdev); 2663free_pdev: 2664 free_local_pdev(pdev); 2665 return 0; 2666} 2667 2668/** 2669 * proc_show_rdrv_10() 2670 * @m: Synthetic file construction data 2671 * @v: File iterator 2672 * 2673 * Display real time information about the logical drives 0 through 9. 2674 */ 2675static int 2676proc_show_rdrv_10(struct seq_file *m, void *v) 2677{ 2678 return proc_show_rdrv(m, m->private, 0, 9); 2679} 2680 2681 2682/** 2683 * proc_show_rdrv_20() 2684 * @m: Synthetic file construction data 2685 * @v: File iterator 2686 * 2687 * Display real time information about the logical drives 10 through 19. 2688 */ 2689static int 2690proc_show_rdrv_20(struct seq_file *m, void *v) 2691{ 2692 return proc_show_rdrv(m, m->private, 10, 19); 2693} 2694 2695 2696/** 2697 * proc_show_rdrv_30() 2698 * @m: Synthetic file construction data 2699 * @v: File iterator 2700 * 2701 * Display real time information about the logical drives 20 through 29. 2702 */ 2703static int 2704proc_show_rdrv_30(struct seq_file *m, void *v) 2705{ 2706 return proc_show_rdrv(m, m->private, 20, 29); 2707} 2708 2709 2710/** 2711 * proc_show_rdrv_40() 2712 * @m: Synthetic file construction data 2713 * @v: File iterator 2714 * 2715 * Display real time information about the logical drives 30 through 39. 
2716 */ 2717static int 2718proc_show_rdrv_40(struct seq_file *m, void *v) 2719{ 2720 return proc_show_rdrv(m, m->private, 30, 39); 2721} 2722 2723/** 2724 * mega_create_proc_entry() 2725 * @index: index in soft state array 2726 * @parent: parent node for this /proc entry 2727 * 2728 * Creates /proc entries for our controllers. 2729 */ 2730static void 2731mega_create_proc_entry(int index, struct proc_dir_entry *parent) 2732{ 2733 adapter_t *adapter = hba_soft_state[index]; 2734 struct proc_dir_entry *dir; 2735 u8 string[16]; 2736 2737 sprintf(string, "hba%d", adapter->host->host_no); 2738 dir = proc_mkdir_data(string, 0, parent, adapter); 2739 if (!dir) { 2740 dev_warn(&adapter->dev->dev, "proc_mkdir failed\n"); 2741 return; 2742 } 2743 2744 proc_create_single_data("config", S_IRUSR, dir, 2745 proc_show_config, adapter); 2746 proc_create_single_data("stat", S_IRUSR, dir, 2747 proc_show_stat, adapter); 2748 proc_create_single_data("mailbox", S_IRUSR, dir, 2749 proc_show_mbox, adapter); 2750#if MEGA_HAVE_ENH_PROC 2751 proc_create_single_data("rebuild-rate", S_IRUSR, dir, 2752 proc_show_rebuild_rate, adapter); 2753 proc_create_single_data("battery-status", S_IRUSR, dir, 2754 proc_show_battery, adapter); 2755 proc_create_single_data("diskdrives-ch0", S_IRUSR, dir, 2756 proc_show_pdrv_ch0, adapter); 2757 proc_create_single_data("diskdrives-ch1", S_IRUSR, dir, 2758 proc_show_pdrv_ch1, adapter); 2759 proc_create_single_data("diskdrives-ch2", S_IRUSR, dir, 2760 proc_show_pdrv_ch2, adapter); 2761 proc_create_single_data("diskdrives-ch3", S_IRUSR, dir, 2762 proc_show_pdrv_ch3, adapter); 2763 proc_create_single_data("raiddrives-0-9", S_IRUSR, dir, 2764 proc_show_rdrv_10, adapter); 2765 proc_create_single_data("raiddrives-10-19", S_IRUSR, dir, 2766 proc_show_rdrv_20, adapter); 2767 proc_create_single_data("raiddrives-20-29", S_IRUSR, dir, 2768 proc_show_rdrv_30, adapter); 2769 proc_create_single_data("raiddrives-30-39", S_IRUSR, dir, 2770 proc_show_rdrv_40, adapter); 2771#endif 2772} 2773 2774#else 2775static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent) 2776{ 2777} 2778#endif 2779 2780 2781/* 2782 * megaraid_biosparam() 2783 * 2784 * Return the disk geometry for a particular disk 2785 */ 2786static int 2787megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev, 2788 sector_t capacity, int geom[]) 2789{ 2790 adapter_t *adapter; 2791 int heads; 2792 int sectors; 2793 int cylinders; 2794 2795 /* Get pointer to host config structure */ 2796 adapter = (adapter_t *)sdev->host->hostdata; 2797 2798 if (IS_RAID_CH(adapter, sdev->channel)) { 2799 /* Default heads (64) & sectors (32) */ 2800 heads = 64; 2801 sectors = 32; 2802 cylinders = (ulong)capacity / (heads * sectors); 2803 2804 /* 2805 * Handle extended translation size for logical drives 2806 * > 1Gb 2807 */ 2808 if ((ulong)capacity >= 0x200000) { 2809 heads = 255; 2810 sectors = 63; 2811 cylinders = (ulong)capacity / (heads * sectors); 2812 } 2813 2814 /* return result */ 2815 geom[0] = heads; 2816 geom[1] = sectors; 2817 geom[2] = cylinders; 2818 } 2819 else { 2820 if (scsi_partsize(bdev, capacity, geom)) 2821 return 0; 2822 2823 dev_info(&adapter->dev->dev, 2824 "invalid partition on this disk on channel %d\n", 2825 sdev->channel); 2826 2827 /* Default heads (64) & sectors (32) */ 2828 heads = 64; 2829 sectors = 32; 2830 cylinders = (ulong)capacity / (heads * sectors); 2831 2832 /* Handle extended translation size for logical drives > 1Gb */ 2833 if ((ulong)capacity >= 0x200000) { 2834 heads = 255; 2835 
sectors = 63; 2836 cylinders = (ulong)capacity / (heads * sectors); 2837 } 2838 2839 /* return result */ 2840 geom[0] = heads; 2841 geom[1] = sectors; 2842 geom[2] = cylinders; 2843 } 2844 2845 return 0; 2846} 2847 2848/** 2849 * mega_init_scb() 2850 * @adapter: pointer to our soft state 2851 * 2852 * Allocate memory for the various pointers in the scb structures: 2853 * scatter-gather list pointer, passthru and extended passthru structure 2854 * pointers. 2855 */ 2856static int 2857mega_init_scb(adapter_t *adapter) 2858{ 2859 scb_t *scb; 2860 int i; 2861 2862 for( i = 0; i < adapter->max_cmds; i++ ) { 2863 2864 scb = &adapter->scb_list[i]; 2865 2866 scb->sgl64 = NULL; 2867 scb->sgl = NULL; 2868 scb->pthru = NULL; 2869 scb->epthru = NULL; 2870 } 2871 2872 for( i = 0; i < adapter->max_cmds; i++ ) { 2873 2874 scb = &adapter->scb_list[i]; 2875 2876 scb->idx = i; 2877 2878 scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev, 2879 sizeof(mega_sgl64) * adapter->sglen, 2880 &scb->sgl_dma_addr, GFP_KERNEL); 2881 2882 scb->sgl = (mega_sglist *)scb->sgl64; 2883 2884 if( !scb->sgl ) { 2885 dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n"); 2886 mega_free_sgl(adapter); 2887 return -1; 2888 } 2889 2890 scb->pthru = dma_alloc_coherent(&adapter->dev->dev, 2891 sizeof(mega_passthru), 2892 &scb->pthru_dma_addr, GFP_KERNEL); 2893 2894 if( !scb->pthru ) { 2895 dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n"); 2896 mega_free_sgl(adapter); 2897 return -1; 2898 } 2899 2900 scb->epthru = dma_alloc_coherent(&adapter->dev->dev, 2901 sizeof(mega_ext_passthru), 2902 &scb->epthru_dma_addr, GFP_KERNEL); 2903 2904 if( !scb->epthru ) { 2905 dev_warn(&adapter->dev->dev, 2906 "Can't allocate extended passthru\n"); 2907 mega_free_sgl(adapter); 2908 return -1; 2909 } 2910 2911 2912 scb->dma_type = MEGA_DMA_TYPE_NONE; 2913 2914 /* 2915 * Link to free list 2916 * lock not required since we are loading the driver, so no 2917 * commands possible right now. 2918 */ 2919 scb->state = SCB_FREE; 2920 scb->cmd = NULL; 2921 list_add(&scb->list, &adapter->free_list); 2922 } 2923 2924 return 0; 2925} 2926 2927 2928/** 2929 * megadev_open() 2930 * @inode: unused 2931 * @filep: unused 2932 * 2933 * Routines for the character/ioctl interface to the driver. Find out if this 2934 * is a valid open. 2935 */ 2936static int 2937megadev_open (struct inode *inode, struct file *filep) 2938{ 2939 /* 2940 * Only allow superuser to access private ioctl interface 2941 */ 2942 if( !capable(CAP_SYS_ADMIN) ) return -EACCES; 2943 2944 return 0; 2945} 2946 2947 2948/** 2949 * megadev_ioctl() 2950 * @filep: Our device file 2951 * @cmd: ioctl command 2952 * @arg: user buffer 2953 * 2954 * ioctl entry point for our private ioctl interface. We move the data in from 2955 * the user space, prepare the command (if necessary, convert the old MIMD 2956 * ioctl to new ioctl command), and issue a synchronous command to the 2957 * controller. 
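 *
 * After mega_m_to_n() has normalised the request, uioc.opcode selects the
 * action: GET_DRIVER_VER and GET_N_ADAP copy a single value back to user
 * space, GET_ADAP_INFO copies the cached mcontroller structure, GET_STATS
 * (compiled in only when MEGA_HAVE_STATS is set) copies the per-logical-drive
 * I/O counters, and MBOX_CMD issues a mailbox, passthru or DCMD command
 * through mega_internal_command(), bouncing any data buffers through
 * DMA-coherent memory below 4GB.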
2958 */ 2959static int 2960megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) 2961{ 2962 adapter_t *adapter; 2963 nitioctl_t uioc; 2964 int adapno; 2965 int rval; 2966 mega_passthru __user *upthru; /* user address for passthru */ 2967 mega_passthru *pthru; /* copy user passthru here */ 2968 dma_addr_t pthru_dma_hndl; 2969 void *data = NULL; /* data to be transferred */ 2970 dma_addr_t data_dma_hndl; /* dma handle for data xfer area */ 2971 megacmd_t mc; 2972#if MEGA_HAVE_STATS 2973 megastat_t __user *ustats = NULL; 2974 int num_ldrv = 0; 2975#endif 2976 u32 uxferaddr = 0; 2977 struct pci_dev *pdev; 2978 2979 /* 2980 * Make sure only USCSICMD commands are issued through this interface. 2981 * MIMD applications would still fire different commands. 2982 */ 2983 if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) { 2984 return -EINVAL; 2985 } 2986 2987 /* 2988 * Check and convert a possible MIMD command to NIT command. 2989 * mega_m_to_n() copies the data from the user space, so we do not 2990 * have to do it here. 2991 * NOTE: We will need some user address to copyout the data, therefore 2992 * the interface layer will also provide us with the required user 2993 * addresses. 2994 */ 2995 memset(&uioc, 0, sizeof(nitioctl_t)); 2996 if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 ) 2997 return rval; 2998 2999 3000 switch( uioc.opcode ) { 3001 3002 case GET_DRIVER_VER: 3003 if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) ) 3004 return (-EFAULT); 3005 3006 break; 3007 3008 case GET_N_ADAP: 3009 if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) ) 3010 return (-EFAULT); 3011 3012 /* 3013 * Shucks. MIMD interface returns a positive value for number 3014 * of adapters. TODO: Change it to return 0 when there is no 3015 * application using the mimd interface. 
3016 */ 3017 return hba_count; 3018 3019 case GET_ADAP_INFO: 3020 3021 /* 3022 * Which adapter 3023 */ 3024 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3025 return (-ENODEV); 3026 3027 if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno, 3028 sizeof(struct mcontroller)) ) 3029 return (-EFAULT); 3030 break; 3031 3032#if MEGA_HAVE_STATS 3033 3034 case GET_STATS: 3035 /* 3036 * Which adapter 3037 */ 3038 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3039 return (-ENODEV); 3040 3041 adapter = hba_soft_state[adapno]; 3042 3043 ustats = uioc.uioc_uaddr; 3044 3045 if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) ) 3046 return (-EFAULT); 3047 3048 /* 3049 * Check for the validity of the logical drive number 3050 */ 3051 if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL; 3052 3053 if( copy_to_user(ustats->nreads, adapter->nreads, 3054 num_ldrv*sizeof(u32)) ) 3055 return -EFAULT; 3056 3057 if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks, 3058 num_ldrv*sizeof(u32)) ) 3059 return -EFAULT; 3060 3061 if( copy_to_user(ustats->nwrites, adapter->nwrites, 3062 num_ldrv*sizeof(u32)) ) 3063 return -EFAULT; 3064 3065 if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks, 3066 num_ldrv*sizeof(u32)) ) 3067 return -EFAULT; 3068 3069 if( copy_to_user(ustats->rd_errors, adapter->rd_errors, 3070 num_ldrv*sizeof(u32)) ) 3071 return -EFAULT; 3072 3073 if( copy_to_user(ustats->wr_errors, adapter->wr_errors, 3074 num_ldrv*sizeof(u32)) ) 3075 return -EFAULT; 3076 3077 return 0; 3078 3079#endif 3080 case MBOX_CMD: 3081 3082 /* 3083 * Which adapter 3084 */ 3085 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3086 return (-ENODEV); 3087 3088 adapter = hba_soft_state[adapno]; 3089 3090 /* 3091 * Deletion of logical drive is a special case. The adapter 3092 * should be quiescent before this command is issued. 3093 */ 3094 if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV && 3095 uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) { 3096 3097 /* 3098 * Do we support this feature 3099 */ 3100 if( !adapter->support_random_del ) { 3101 dev_warn(&adapter->dev->dev, "logdrv " 3102 "delete on non-supporting F/W\n"); 3103 3104 return (-EINVAL); 3105 } 3106 3107 rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] ); 3108 3109 if( rval == 0 ) { 3110 memset(&mc, 0, sizeof(megacmd_t)); 3111 3112 mc.status = rval; 3113 3114 rval = mega_n_to_m((void __user *)arg, &mc); 3115 } 3116 3117 return rval; 3118 } 3119 /* 3120 * This interface only support the regular passthru commands. 3121 * Reject extended passthru and 64-bit passthru 3122 */ 3123 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 || 3124 uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) { 3125 3126 dev_warn(&adapter->dev->dev, "rejected passthru\n"); 3127 3128 return (-EINVAL); 3129 } 3130 3131 /* 3132 * For all internal commands, the buffer must be allocated in 3133 * <4GB address range 3134 */ 3135 if( make_local_pdev(adapter, &pdev) != 0 ) 3136 return -EIO; 3137 3138 /* Is it a passthru command or a DCMD */ 3139 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) { 3140 /* Passthru commands */ 3141 3142 pthru = dma_alloc_coherent(&pdev->dev, 3143 sizeof(mega_passthru), 3144 &pthru_dma_hndl, GFP_KERNEL); 3145 3146 if( pthru == NULL ) { 3147 free_local_pdev(pdev); 3148 return (-ENOMEM); 3149 } 3150 3151 /* 3152 * The user passthru structure 3153 */ 3154 upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr; 3155 3156 /* 3157 * Copy in the user passthru here. 
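 * The copy lands in the DMA-coherent bounce buffer allocated above; the
 * user's dataxferaddr is saved in uxferaddr and replaced with the kernel
 * DMA handle before the command is issued.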
3158 */ 3159 if( copy_from_user(pthru, upthru, 3160 sizeof(mega_passthru)) ) { 3161 3162 dma_free_coherent(&pdev->dev, 3163 sizeof(mega_passthru), 3164 pthru, pthru_dma_hndl); 3165 3166 free_local_pdev(pdev); 3167 3168 return (-EFAULT); 3169 } 3170 3171 /* 3172 * Is there a data transfer 3173 */ 3174 if( pthru->dataxferlen ) { 3175 data = dma_alloc_coherent(&pdev->dev, 3176 pthru->dataxferlen, 3177 &data_dma_hndl, 3178 GFP_KERNEL); 3179 3180 if( data == NULL ) { 3181 dma_free_coherent(&pdev->dev, 3182 sizeof(mega_passthru), 3183 pthru, 3184 pthru_dma_hndl); 3185 3186 free_local_pdev(pdev); 3187 3188 return (-ENOMEM); 3189 } 3190 3191 /* 3192 * Save the user address and point the kernel 3193 * address at just allocated memory 3194 */ 3195 uxferaddr = pthru->dataxferaddr; 3196 pthru->dataxferaddr = data_dma_hndl; 3197 } 3198 3199 3200 /* 3201 * Is data coming down-stream 3202 */ 3203 if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) { 3204 /* 3205 * Get the user data 3206 */ 3207 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, 3208 pthru->dataxferlen) ) { 3209 rval = (-EFAULT); 3210 goto freemem_and_return; 3211 } 3212 } 3213 3214 memset(&mc, 0, sizeof(megacmd_t)); 3215 3216 mc.cmd = MEGA_MBOXCMD_PASSTHRU; 3217 mc.xferaddr = (u32)pthru_dma_hndl; 3218 3219 /* 3220 * Issue the command 3221 */ 3222 mega_internal_command(adapter, &mc, pthru); 3223 3224 rval = mega_n_to_m((void __user *)arg, &mc); 3225 3226 if( rval ) goto freemem_and_return; 3227 3228 3229 /* 3230 * Is data going up-stream 3231 */ 3232 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) { 3233 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, 3234 pthru->dataxferlen) ) { 3235 rval = (-EFAULT); 3236 } 3237 } 3238 3239 /* 3240 * Send the request sense data also, irrespective of 3241 * whether the user has asked for it or not. 
3242 */ 3243 if (copy_to_user(upthru->reqsensearea, 3244 pthru->reqsensearea, 14)) 3245 rval = -EFAULT; 3246 3247freemem_and_return: 3248 if( pthru->dataxferlen ) { 3249 dma_free_coherent(&pdev->dev, 3250 pthru->dataxferlen, data, 3251 data_dma_hndl); 3252 } 3253 3254 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), 3255 pthru, pthru_dma_hndl); 3256 3257 free_local_pdev(pdev); 3258 3259 return rval; 3260 } 3261 else { 3262 /* DCMD commands */ 3263 3264 /* 3265 * Is there a data transfer 3266 */ 3267 if( uioc.xferlen ) { 3268 data = dma_alloc_coherent(&pdev->dev, 3269 uioc.xferlen, 3270 &data_dma_hndl, 3271 GFP_KERNEL); 3272 3273 if( data == NULL ) { 3274 free_local_pdev(pdev); 3275 return (-ENOMEM); 3276 } 3277 3278 uxferaddr = MBOX(uioc)->xferaddr; 3279 } 3280 3281 /* 3282 * Is data coming down-stream 3283 */ 3284 if( uioc.xferlen && (uioc.flags & UIOC_WR) ) { 3285 /* 3286 * Get the user data 3287 */ 3288 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, 3289 uioc.xferlen) ) { 3290 3291 dma_free_coherent(&pdev->dev, 3292 uioc.xferlen, data, 3293 data_dma_hndl); 3294 3295 free_local_pdev(pdev); 3296 3297 return (-EFAULT); 3298 } 3299 } 3300 3301 memcpy(&mc, MBOX(uioc), sizeof(megacmd_t)); 3302 3303 mc.xferaddr = (u32)data_dma_hndl; 3304 3305 /* 3306 * Issue the command 3307 */ 3308 mega_internal_command(adapter, &mc, NULL); 3309 3310 rval = mega_n_to_m((void __user *)arg, &mc); 3311 3312 if( rval ) { 3313 if( uioc.xferlen ) { 3314 dma_free_coherent(&pdev->dev, 3315 uioc.xferlen, data, 3316 data_dma_hndl); 3317 } 3318 3319 free_local_pdev(pdev); 3320 3321 return rval; 3322 } 3323 3324 /* 3325 * Is data going up-stream 3326 */ 3327 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) { 3328 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, 3329 uioc.xferlen) ) { 3330 3331 rval = (-EFAULT); 3332 } 3333 } 3334 3335 if( uioc.xferlen ) { 3336 dma_free_coherent(&pdev->dev, uioc.xferlen, 3337 data, data_dma_hndl); 3338 } 3339 3340 free_local_pdev(pdev); 3341 3342 return rval; 3343 } 3344 3345 default: 3346 return (-EINVAL); 3347 } 3348 3349 return 0; 3350} 3351 3352static long 3353megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) 3354{ 3355 int ret; 3356 3357 mutex_lock(&megadev_mutex); 3358 ret = megadev_ioctl(filep, cmd, arg); 3359 mutex_unlock(&megadev_mutex); 3360 3361 return ret; 3362} 3363 3364/** 3365 * mega_m_to_n() 3366 * @arg: user address 3367 * @uioc: new ioctl structure 3368 * 3369 * A thin layer to convert older mimd interface ioctl structure to NIT ioctl 3370 * structure 3371 * 3372 * Converts the older mimd ioctl structure to newer NIT structure 3373 */ 3374static int 3375mega_m_to_n(void __user *arg, nitioctl_t *uioc) 3376{ 3377 struct uioctl_t uioc_mimd; 3378 char signature[8] = {0}; 3379 u8 opcode; 3380 u8 subopcode; 3381 3382 3383 /* 3384 * check is the application conforms to NIT. We do not have to do much 3385 * in that case. 3386 * We exploit the fact that the signature is stored in the very 3387 * beginning of the structure. 3388 */ 3389 3390 if( copy_from_user(signature, arg, 7) ) 3391 return (-EFAULT); 3392 3393 if( memcmp(signature, "MEGANIT", 7) == 0 ) { 3394 3395 /* 3396 * NOTE NOTE: The nit ioctl is still under flux because of 3397 * change of mailbox definition, in HPE. No applications yet 3398 * use this interface and let's not have applications use this 3399 * interface till the new specifitions are in place. 
3400 */ 3401 return -EINVAL; 3402#if 0 3403 if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) ) 3404 return (-EFAULT); 3405 return 0; 3406#endif 3407 } 3408 3409 /* 3410 * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t 3411 * 3412 * Get the user ioctl structure 3413 */ 3414 if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) ) 3415 return (-EFAULT); 3416 3417 3418 /* 3419 * Get the opcode and subopcode for the commands 3420 */ 3421 opcode = uioc_mimd.ui.fcs.opcode; 3422 subopcode = uioc_mimd.ui.fcs.subopcode; 3423 3424 switch (opcode) { 3425 case 0x82: 3426 3427 switch (subopcode) { 3428 3429 case MEGAIOC_QDRVRVER: /* Query driver version */ 3430 uioc->opcode = GET_DRIVER_VER; 3431 uioc->uioc_uaddr = uioc_mimd.data; 3432 break; 3433 3434 case MEGAIOC_QNADAP: /* Get # of adapters */ 3435 uioc->opcode = GET_N_ADAP; 3436 uioc->uioc_uaddr = uioc_mimd.data; 3437 break; 3438 3439 case MEGAIOC_QADAPINFO: /* Get adapter information */ 3440 uioc->opcode = GET_ADAP_INFO; 3441 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3442 uioc->uioc_uaddr = uioc_mimd.data; 3443 break; 3444 3445 default: 3446 return(-EINVAL); 3447 } 3448 3449 break; 3450 3451 3452 case 0x81: 3453 3454 uioc->opcode = MBOX_CMD; 3455 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3456 3457 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); 3458 3459 uioc->xferlen = uioc_mimd.ui.fcs.length; 3460 3461 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; 3462 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; 3463 3464 break; 3465 3466 case 0x80: 3467 3468 uioc->opcode = MBOX_CMD; 3469 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3470 3471 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); 3472 3473 /* 3474 * Choose the xferlen bigger of input and output data 3475 */ 3476 uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ? 3477 uioc_mimd.outlen : uioc_mimd.inlen; 3478 3479 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; 3480 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; 3481 3482 break; 3483 3484 default: 3485 return (-EINVAL); 3486 3487 } 3488 3489 return 0; 3490} 3491 3492/* 3493 * mega_n_to_m() 3494 * @arg: user address 3495 * @mc: mailbox command 3496 * 3497 * Updates the status information to the application, depending on application 3498 * conforms to older mimd ioctl interface or newer NIT ioctl interface 3499 */ 3500static int 3501mega_n_to_m(void __user *arg, megacmd_t *mc) 3502{ 3503 nitioctl_t __user *uiocp; 3504 megacmd_t __user *umc; 3505 mega_passthru __user *upthru; 3506 struct uioctl_t __user *uioc_mimd; 3507 char signature[8] = {0}; 3508 3509 /* 3510 * check is the application conforms to NIT. 
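 * As in mega_m_to_n(), the first seven bytes of the user buffer are
 * compared against the "MEGANIT" signature; anything else is treated as
 * the older mimd uioctl_t layout and the status byte is written into
 * mbox[17] of that structure instead.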
3511 */ 3512 if( copy_from_user(signature, arg, 7) ) 3513 return -EFAULT; 3514 3515 if( memcmp(signature, "MEGANIT", 7) == 0 ) { 3516 3517 uiocp = arg; 3518 3519 if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) ) 3520 return (-EFAULT); 3521 3522 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { 3523 3524 umc = MBOX_P(uiocp); 3525 3526 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) 3527 return -EFAULT; 3528 3529 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus)) 3530 return (-EFAULT); 3531 } 3532 } 3533 else { 3534 uioc_mimd = arg; 3535 3536 if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) ) 3537 return (-EFAULT); 3538 3539 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { 3540 3541 umc = (megacmd_t __user *)uioc_mimd->mbox; 3542 3543 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) 3544 return (-EFAULT); 3545 3546 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) ) 3547 return (-EFAULT); 3548 } 3549 } 3550 3551 return 0; 3552} 3553 3554 3555/* 3556 * MEGARAID 'FW' commands. 3557 */ 3558 3559/** 3560 * mega_is_bios_enabled() 3561 * @adapter: pointer to our soft state 3562 * 3563 * issue command to find out if the BIOS is enabled for this controller 3564 */ 3565static int 3566mega_is_bios_enabled(adapter_t *adapter) 3567{ 3568 struct mbox_out mbox; 3569 unsigned char *raw_mbox = (u8 *)&mbox; 3570 3571 memset(&mbox, 0, sizeof(mbox)); 3572 3573 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3574 3575 mbox.xferaddr = (u32)adapter->buf_dma_handle; 3576 3577 raw_mbox[0] = IS_BIOS_ENABLED; 3578 raw_mbox[2] = GET_BIOS; 3579 3580 issue_scb_block(adapter, raw_mbox); 3581 3582 return *(char *)adapter->mega_buffer; 3583} 3584 3585 3586/** 3587 * mega_enum_raid_scsi() 3588 * @adapter: pointer to our soft state 3589 * 3590 * Find out what channels are RAID/SCSI. This information is used to 3591 * differentiate the virtual channels and physical channels and to support 3592 * ROMB feature and non-disk devices. 3593 */ 3594static void 3595mega_enum_raid_scsi(adapter_t *adapter) 3596{ 3597 struct mbox_out mbox; 3598 unsigned char *raw_mbox = (u8 *)&mbox; 3599 int i; 3600 3601 memset(&mbox, 0, sizeof(mbox)); 3602 3603 /* 3604 * issue command to find out what channels are raid/scsi 3605 */ 3606 raw_mbox[0] = CHNL_CLASS; 3607 raw_mbox[2] = GET_CHNL_CLASS; 3608 3609 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3610 3611 mbox.xferaddr = (u32)adapter->buf_dma_handle; 3612 3613 /* 3614 * Non-ROMB firmware fail this command, so all channels 3615 * must be shown RAID 3616 */ 3617 adapter->mega_ch_class = 0xFF; 3618 3619 if(!issue_scb_block(adapter, raw_mbox)) { 3620 adapter->mega_ch_class = *((char *)adapter->mega_buffer); 3621 3622 } 3623 3624 for( i = 0; i < adapter->product_info.nchannels; i++ ) { 3625 if( (adapter->mega_ch_class >> i) & 0x01 ) { 3626 dev_info(&adapter->dev->dev, "channel[%d] is raid\n", 3627 i); 3628 } 3629 else { 3630 dev_info(&adapter->dev->dev, "channel[%d] is scsi\n", 3631 i); 3632 } 3633 } 3634 3635 return; 3636} 3637 3638 3639/** 3640 * mega_get_boot_drv() 3641 * @adapter: pointer to our soft state 3642 * 3643 * Find out which device is the boot device. Note, any logical drive or any 3644 * phyical device (e.g., a CDROM) can be designated as a boot device. 
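 *
 * The private BIOS data block is accepted only if its stored checksum is
 * the two's complement of the sum of its first 14 bytes.  The boot_drv
 * byte then encodes the selection: with the MSB set it names a physical
 * drive, decoded as channel = (boot_drv & 0x7F) / 16 and
 * target = (boot_drv & 0x7F) % 16; otherwise it is simply the boot
 * logical drive number.  (Illustrative example: a hypothetical boot_drv
 * of 0x92 would select channel 1, target 2.)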
3645 */ 3646static void 3647mega_get_boot_drv(adapter_t *adapter) 3648{ 3649 struct private_bios_data *prv_bios_data; 3650 struct mbox_out mbox; 3651 unsigned char *raw_mbox = (u8 *)&mbox; 3652 u16 cksum = 0; 3653 u8 *cksum_p; 3654 u8 boot_pdrv; 3655 int i; 3656 3657 memset(&mbox, 0, sizeof(mbox)); 3658 3659 raw_mbox[0] = BIOS_PVT_DATA; 3660 raw_mbox[2] = GET_BIOS_PVT_DATA; 3661 3662 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3663 3664 mbox.xferaddr = (u32)adapter->buf_dma_handle; 3665 3666 adapter->boot_ldrv_enabled = 0; 3667 adapter->boot_ldrv = 0; 3668 3669 adapter->boot_pdrv_enabled = 0; 3670 adapter->boot_pdrv_ch = 0; 3671 adapter->boot_pdrv_tgt = 0; 3672 3673 if(issue_scb_block(adapter, raw_mbox) == 0) { 3674 prv_bios_data = 3675 (struct private_bios_data *)adapter->mega_buffer; 3676 3677 cksum = 0; 3678 cksum_p = (char *)prv_bios_data; 3679 for (i = 0; i < 14; i++ ) { 3680 cksum += (u16)(*cksum_p++); 3681 } 3682 3683 if (prv_bios_data->cksum == (u16)(0-cksum) ) { 3684 3685 /* 3686 * If MSB is set, a physical drive is set as boot 3687 * device 3688 */ 3689 if( prv_bios_data->boot_drv & 0x80 ) { 3690 adapter->boot_pdrv_enabled = 1; 3691 boot_pdrv = prv_bios_data->boot_drv & 0x7F; 3692 adapter->boot_pdrv_ch = boot_pdrv / 16; 3693 adapter->boot_pdrv_tgt = boot_pdrv % 16; 3694 } 3695 else { 3696 adapter->boot_ldrv_enabled = 1; 3697 adapter->boot_ldrv = prv_bios_data->boot_drv; 3698 } 3699 } 3700 } 3701 3702} 3703 3704/** 3705 * mega_support_random_del() 3706 * @adapter: pointer to our soft state 3707 * 3708 * Find out if this controller supports random deletion and addition of 3709 * logical drives 3710 */ 3711static int 3712mega_support_random_del(adapter_t *adapter) 3713{ 3714 struct mbox_out mbox; 3715 unsigned char *raw_mbox = (u8 *)&mbox; 3716 int rval; 3717 3718 memset(&mbox, 0, sizeof(mbox)); 3719 3720 /* 3721 * issue command 3722 */ 3723 raw_mbox[0] = FC_DEL_LOGDRV; 3724 raw_mbox[2] = OP_SUP_DEL_LOGDRV; 3725 3726 rval = issue_scb_block(adapter, raw_mbox); 3727 3728 return !rval; 3729} 3730 3731 3732/** 3733 * mega_support_ext_cdb() 3734 * @adapter: pointer to our soft state 3735 * 3736 * Find out if this firmware support cdblen > 10 3737 */ 3738static int 3739mega_support_ext_cdb(adapter_t *adapter) 3740{ 3741 struct mbox_out mbox; 3742 unsigned char *raw_mbox = (u8 *)&mbox; 3743 int rval; 3744 3745 memset(&mbox, 0, sizeof(mbox)); 3746 /* 3747 * issue command to find out if controller supports extended CDBs. 3748 */ 3749 raw_mbox[0] = 0xA4; 3750 raw_mbox[2] = 0x16; 3751 3752 rval = issue_scb_block(adapter, raw_mbox); 3753 3754 return !rval; 3755} 3756 3757 3758/** 3759 * mega_del_logdrv() 3760 * @adapter: pointer to our soft state 3761 * @logdrv: logical drive to be deleted 3762 * 3763 * Delete the specified logical drive. It is the responsibility of the user 3764 * app to let the OS know about this operation. 3765 */ 3766static int 3767mega_del_logdrv(adapter_t *adapter, int logdrv) 3768{ 3769 unsigned long flags; 3770 scb_t *scb; 3771 int rval; 3772 3773 /* 3774 * Stop sending commands to the controller, queue them internally. 3775 * When deletion is complete, ISR will flush the queue. 
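 * Concretely: the quiescent flag is raised, the caller sleeps until the
 * outstanding-command counter and the pending list have both drained, the
 * delete is sent as an internal command and, if it succeeded, every
 * passthru command still on the pending list gets 0x80 added to its
 * logical drive id (see mega_do_del_logdrv()) before the pending queue is
 * run again.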
3776 */ 3777 atomic_set(&adapter->quiescent, 1); 3778 3779 /* 3780 * Wait till all the issued commands are complete and there are no 3781 * commands in the pending queue 3782 */ 3783 while (atomic_read(&adapter->pend_cmds) > 0 || 3784 !list_empty(&adapter->pending_list)) 3785 msleep(1000); /* sleep for 1s */ 3786 3787 rval = mega_do_del_logdrv(adapter, logdrv); 3788 3789 spin_lock_irqsave(&adapter->lock, flags); 3790 3791 /* 3792 * If delete operation was successful, add 0x80 to the logical drive 3793 * ids for commands in the pending queue. 3794 */ 3795 if (adapter->read_ldidmap) { 3796 struct list_head *pos; 3797 list_for_each(pos, &adapter->pending_list) { 3798 scb = list_entry(pos, scb_t, list); 3799 if (scb->pthru->logdrv < 0x80 ) 3800 scb->pthru->logdrv += 0x80; 3801 } 3802 } 3803 3804 atomic_set(&adapter->quiescent, 0); 3805 3806 mega_runpendq(adapter); 3807 3808 spin_unlock_irqrestore(&adapter->lock, flags); 3809 3810 return rval; 3811} 3812 3813 3814static int 3815mega_do_del_logdrv(adapter_t *adapter, int logdrv) 3816{ 3817 megacmd_t mc; 3818 int rval; 3819 3820 memset( &mc, 0, sizeof(megacmd_t)); 3821 3822 mc.cmd = FC_DEL_LOGDRV; 3823 mc.opcode = OP_DEL_LOGDRV; 3824 mc.subopcode = logdrv; 3825 3826 rval = mega_internal_command(adapter, &mc, NULL); 3827 3828 /* log this event */ 3829 if(rval) { 3830 dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv); 3831 return rval; 3832 } 3833 3834 /* 3835 * After deleting first logical drive, the logical drives must be 3836 * addressed by adding 0x80 to the logical drive id. 3837 */ 3838 adapter->read_ldidmap = 1; 3839 3840 return rval; 3841} 3842 3843 3844/** 3845 * mega_get_max_sgl() 3846 * @adapter: pointer to our soft state 3847 * 3848 * Find out the maximum number of scatter-gather elements supported by this 3849 * version of the firmware 3850 */ 3851static void 3852mega_get_max_sgl(adapter_t *adapter) 3853{ 3854 struct mbox_out mbox; 3855 unsigned char *raw_mbox = (u8 *)&mbox; 3856 3857 memset(&mbox, 0, sizeof(mbox)); 3858 3859 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3860 3861 mbox.xferaddr = (u32)adapter->buf_dma_handle; 3862 3863 raw_mbox[0] = MAIN_MISC_OPCODE; 3864 raw_mbox[2] = GET_MAX_SG_SUPPORT; 3865 3866 3867 if( issue_scb_block(adapter, raw_mbox) ) { 3868 /* 3869 * f/w does not support this command. Choose the default value 3870 */ 3871 adapter->sglen = MIN_SGLIST; 3872 } 3873 else { 3874 adapter->sglen = *((char *)adapter->mega_buffer); 3875 3876 /* 3877 * Make sure this is not more than the resources we are 3878 * planning to allocate 3879 */ 3880 if ( adapter->sglen > MAX_SGLIST ) 3881 adapter->sglen = MAX_SGLIST; 3882 } 3883 3884 return; 3885} 3886 3887 3888/** 3889 * mega_support_cluster() 3890 * @adapter: pointer to our soft state 3891 * 3892 * Find out if this firmware support cluster calls. 3893 */ 3894static int 3895mega_support_cluster(adapter_t *adapter) 3896{ 3897 struct mbox_out mbox; 3898 unsigned char *raw_mbox = (u8 *)&mbox; 3899 3900 memset(&mbox, 0, sizeof(mbox)); 3901 3902 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3903 3904 mbox.xferaddr = (u32)adapter->buf_dma_handle; 3905 3906 /* 3907 * Try to get the initiator id. This command will succeed iff the 3908 * clustering is available on this HBA. 3909 */ 3910 raw_mbox[0] = MEGA_GET_TARGET_ID; 3911 3912 if( issue_scb_block(adapter, raw_mbox) == 0 ) { 3913 3914 /* 3915 * Cluster support available. Get the initiator target id. 3916 * Tell our id to mid-layer too. 
3917 */ 3918 adapter->this_id = *(u32 *)adapter->mega_buffer; 3919 adapter->host->this_id = adapter->this_id; 3920 3921 return 1; 3922 } 3923 3924 return 0; 3925} 3926 3927#ifdef CONFIG_PROC_FS 3928/** 3929 * mega_adapinq() 3930 * @adapter: pointer to our soft state 3931 * @dma_handle: DMA address of the buffer 3932 * 3933 * Issue internal commands while interrupts are available. 3934 * We only issue direct mailbox commands from within the driver. ioctl() 3935 * interface using these routines can issue passthru commands. 3936 */ 3937static int 3938mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle) 3939{ 3940 megacmd_t mc; 3941 3942 memset(&mc, 0, sizeof(megacmd_t)); 3943 3944 if( adapter->flag & BOARD_40LD ) { 3945 mc.cmd = FC_NEW_CONFIG; 3946 mc.opcode = NC_SUBOP_ENQUIRY3; 3947 mc.subopcode = ENQ3_GET_SOLICITED_FULL; 3948 } 3949 else { 3950 mc.cmd = MEGA_MBOXCMD_ADPEXTINQ; 3951 } 3952 3953 mc.xferaddr = (u32)dma_handle; 3954 3955 if ( mega_internal_command(adapter, &mc, NULL) != 0 ) { 3956 return -1; 3957 } 3958 3959 return 0; 3960} 3961 3962 3963/** 3964 * mega_internal_dev_inquiry() 3965 * @adapter: pointer to our soft state 3966 * @ch: channel for this device 3967 * @tgt: ID of this device 3968 * @buf_dma_handle: DMA address of the buffer 3969 * 3970 * Issue the scsi inquiry for the specified device. 3971 */ 3972static int 3973mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt, 3974 dma_addr_t buf_dma_handle) 3975{ 3976 mega_passthru *pthru; 3977 dma_addr_t pthru_dma_handle; 3978 megacmd_t mc; 3979 int rval; 3980 struct pci_dev *pdev; 3981 3982 3983 /* 3984 * For all internal commands, the buffer must be allocated in <4GB 3985 * address range 3986 */ 3987 if( make_local_pdev(adapter, &pdev) != 0 ) return -1; 3988 3989 pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru), 3990 &pthru_dma_handle, GFP_KERNEL); 3991 3992 if( pthru == NULL ) { 3993 free_local_pdev(pdev); 3994 return -1; 3995 } 3996 3997 pthru->timeout = 2; 3998 pthru->ars = 1; 3999 pthru->reqsenselen = 14; 4000 pthru->islogical = 0; 4001 4002 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch; 4003 4004 pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt; 4005 4006 pthru->cdblen = 6; 4007 4008 pthru->cdb[0] = INQUIRY; 4009 pthru->cdb[1] = 0; 4010 pthru->cdb[2] = 0; 4011 pthru->cdb[3] = 0; 4012 pthru->cdb[4] = 255; 4013 pthru->cdb[5] = 0; 4014 4015 4016 pthru->dataxferaddr = (u32)buf_dma_handle; 4017 pthru->dataxferlen = 256; 4018 4019 memset(&mc, 0, sizeof(megacmd_t)); 4020 4021 mc.cmd = MEGA_MBOXCMD_PASSTHRU; 4022 mc.xferaddr = (u32)pthru_dma_handle; 4023 4024 rval = mega_internal_command(adapter, &mc, pthru); 4025 4026 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru, 4027 pthru_dma_handle); 4028 4029 free_local_pdev(pdev); 4030 4031 return rval; 4032} 4033#endif 4034 4035/** 4036 * mega_internal_command() 4037 * @adapter: pointer to our soft state 4038 * @mc: the mailbox command 4039 * @pthru: Passthru structure for DCDB commands 4040 * 4041 * Issue the internal commands in interrupt mode. 4042 * The last argument is the address of the passthru structure if the command 4043 * to be fired is a passthru command 4044 * 4045 * Note: parameter 'pthru' is null for non-passthru commands. 4046 */ 4047static int 4048mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) 4049{ 4050 unsigned long flags; 4051 scb_t *scb; 4052 int rval; 4053 4054 /* 4055 * The internal commands share one command id and hence are 4056 * serialized. 
This is so because we want to reserve maximum number of 4057 * available command ids for the I/O commands. 4058 */ 4059 mutex_lock(&adapter->int_mtx); 4060 4061 scb = &adapter->int_scb; 4062 memset(scb, 0, sizeof(scb_t)); 4063 4064 scb->idx = CMDID_INT_CMDS; 4065 scb->state |= SCB_ACTIVE | SCB_PENDQ; 4066 4067 memcpy(scb->raw_mbox, mc, sizeof(megacmd_t)); 4068 4069 /* 4070 * Is it a passthru command 4071 */ 4072 if (mc->cmd == MEGA_MBOXCMD_PASSTHRU) 4073 scb->pthru = pthru; 4074 4075 spin_lock_irqsave(&adapter->lock, flags); 4076 list_add_tail(&scb->list, &adapter->pending_list); 4077 /* 4078 * Check if the HBA is in quiescent state, e.g., during a 4079 * delete logical drive opertion. If it is, don't run 4080 * the pending_list. 4081 */ 4082 if (atomic_read(&adapter->quiescent) == 0) 4083 mega_runpendq(adapter); 4084 spin_unlock_irqrestore(&adapter->lock, flags); 4085 4086 wait_for_completion(&adapter->int_waitq); 4087 4088 mc->status = rval = adapter->int_status; 4089 4090 /* 4091 * Print a debug message for all failed commands. Applications can use 4092 * this information. 4093 */ 4094 if (rval && trace_level) { 4095 dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n", 4096 mc->cmd, mc->opcode, mc->subopcode, rval); 4097 } 4098 4099 mutex_unlock(&adapter->int_mtx); 4100 return rval; 4101} 4102 4103static struct scsi_host_template megaraid_template = { 4104 .module = THIS_MODULE, 4105 .name = "MegaRAID", 4106 .proc_name = "megaraid_legacy", 4107 .info = megaraid_info, 4108 .queuecommand = megaraid_queue, 4109 .bios_param = megaraid_biosparam, 4110 .max_sectors = MAX_SECTORS_PER_IO, 4111 .can_queue = MAX_COMMANDS, 4112 .this_id = DEFAULT_INITIATOR_ID, 4113 .sg_tablesize = MAX_SGLIST, 4114 .cmd_per_lun = DEF_CMD_PER_LUN, 4115 .eh_abort_handler = megaraid_abort, 4116 .eh_device_reset_handler = megaraid_reset, 4117 .eh_bus_reset_handler = megaraid_reset, 4118 .eh_host_reset_handler = megaraid_reset, 4119 .no_write_same = 1, 4120 .cmd_size = sizeof(struct megaraid_cmd_priv), 4121}; 4122 4123static int 4124megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 4125{ 4126 struct Scsi_Host *host; 4127 adapter_t *adapter; 4128 unsigned long mega_baseport, tbase, flag = 0; 4129 u16 subsysid, subsysvid; 4130 u8 pci_bus, pci_dev_func; 4131 int irq, i, j; 4132 int error = -ENODEV; 4133 4134 if (hba_count >= MAX_CONTROLLERS) 4135 goto out; 4136 4137 if (pci_enable_device(pdev)) 4138 goto out; 4139 pci_set_master(pdev); 4140 4141 pci_bus = pdev->bus->number; 4142 pci_dev_func = pdev->devfn; 4143 4144 /* 4145 * The megaraid3 stuff reports the ID of the Intel part which is not 4146 * remotely specific to the megaraid 4147 */ 4148 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 4149 u16 magic; 4150 /* 4151 * Don't fall over the Compaq management cards using the same 4152 * PCI identifier 4153 */ 4154 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ && 4155 pdev->subsystem_device == 0xC000) 4156 goto out_disable_device; 4157 /* Now check the magic signature byte */ 4158 pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic); 4159 if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE) 4160 goto out_disable_device; 4161 /* Ok it is probably a megaraid */ 4162 } 4163 4164 /* 4165 * For these vendor and device ids, signature offsets are not 4166 * valid and 64 bit is implicit 4167 */ 4168 if (id->driver_data & BOARD_64BIT) 4169 flag |= BOARD_64BIT; 4170 else { 4171 u32 magic64; 4172 4173 pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64); 4174 if (magic64 == HBA_SIGNATURE_64BIT) 
4175 flag |= BOARD_64BIT; 4176 } 4177 4178 subsysvid = pdev->subsystem_vendor; 4179 subsysid = pdev->subsystem_device; 4180 4181 dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n", 4182 id->vendor, id->device); 4183 4184 /* Read the base port and IRQ from PCI */ 4185 mega_baseport = pci_resource_start(pdev, 0); 4186 irq = pdev->irq; 4187 4188 tbase = mega_baseport; 4189 if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) { 4190 flag |= BOARD_MEMMAP; 4191 4192 if (!request_mem_region(mega_baseport, 128, "megaraid")) { 4193 dev_warn(&pdev->dev, "mem region busy!\n"); 4194 goto out_disable_device; 4195 } 4196 4197 mega_baseport = (unsigned long)ioremap(mega_baseport, 128); 4198 if (!mega_baseport) { 4199 dev_warn(&pdev->dev, "could not map hba memory\n"); 4200 goto out_release_region; 4201 } 4202 } else { 4203 flag |= BOARD_IOMAP; 4204 mega_baseport += 0x10; 4205 4206 if (!request_region(mega_baseport, 16, "megaraid")) 4207 goto out_disable_device; 4208 } 4209 4210 /* Initialize SCSI Host structure */ 4211 host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t)); 4212 if (!host) 4213 goto out_iounmap; 4214 4215 adapter = (adapter_t *)host->hostdata; 4216 memset(adapter, 0, sizeof(adapter_t)); 4217 4218 dev_notice(&pdev->dev, 4219 "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n", 4220 host->host_no, mega_baseport, irq); 4221 4222 adapter->base = mega_baseport; 4223 if (flag & BOARD_MEMMAP) 4224 adapter->mmio_base = (void __iomem *) mega_baseport; 4225 4226 INIT_LIST_HEAD(&adapter->free_list); 4227 INIT_LIST_HEAD(&adapter->pending_list); 4228 INIT_LIST_HEAD(&adapter->completed_list); 4229 4230 adapter->flag = flag; 4231 spin_lock_init(&adapter->lock); 4232 4233 host->cmd_per_lun = max_cmd_per_lun; 4234 host->max_sectors = max_sectors_per_io; 4235 4236 adapter->dev = pdev; 4237 adapter->host = host; 4238 4239 adapter->host->irq = irq; 4240 4241 if (flag & BOARD_MEMMAP) 4242 adapter->host->base = tbase; 4243 else { 4244 adapter->host->io_port = tbase; 4245 adapter->host->n_io_port = 16; 4246 } 4247 4248 adapter->host->unique_id = (pci_bus << 8) | pci_dev_func; 4249 4250 /* 4251 * Allocate buffer to issue internal commands. 4252 */ 4253 adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev, 4254 MEGA_BUFFER_SIZE, 4255 &adapter->buf_dma_handle, 4256 GFP_KERNEL); 4257 if (!adapter->mega_buffer) { 4258 dev_warn(&pdev->dev, "out of RAM\n"); 4259 goto out_host_put; 4260 } 4261 4262 adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t), 4263 GFP_KERNEL); 4264 if (!adapter->scb_list) { 4265 dev_warn(&pdev->dev, "out of RAM\n"); 4266 goto out_free_cmd_buffer; 4267 } 4268 4269 if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ? 4270 megaraid_isr_memmapped : megaraid_isr_iomapped, 4271 IRQF_SHARED, "megaraid", adapter)) { 4272 dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq); 4273 goto out_free_scb_list; 4274 } 4275 4276 if (mega_setup_mailbox(adapter)) 4277 goto out_free_irq; 4278 4279 if (mega_query_adapter(adapter)) 4280 goto out_free_mbox; 4281 4282 /* 4283 * Have checks for some buggy f/w 4284 */ 4285 if ((subsysid == 0x1111) && (subsysvid == 0x1111)) { 4286 /* 4287 * Which firmware 4288 */ 4289 if (!strcmp(adapter->fw_version, "3.00") || 4290 !strcmp(adapter->fw_version, "3.01")) { 4291 4292 dev_warn(&pdev->dev, 4293 "Your card is a Dell PERC " 4294 "2/SC RAID controller with " 4295 "firmware\nmegaraid: 3.00 or 3.01. " 4296 "This driver is known to have " 4297 "corruption issues\nmegaraid: with " 4298 "those firmware versions on this " 4299 "specific card. 
In order\nmegaraid: " 4300 "to protect your data, please upgrade " 4301 "your firmware to version\nmegaraid: " 4302 "3.10 or later, available from the " 4303 "Dell Technical Support web\n" 4304 "megaraid: site at\nhttp://support." 4305 "dell.com/us/en/filelib/download/" 4306 "index.asp?fileid=2940\n" 4307 ); 4308 } 4309 } 4310 4311 /* 4312 * If we have a HP 1M(0x60E7)/2M(0x60E8) controller with 4313 * firmware H.01.07, H.01.08, and H.01.09 disable 64 bit 4314 * support, since this firmware cannot handle 64 bit 4315 * addressing 4316 */ 4317 if ((subsysvid == PCI_VENDOR_ID_HP) && 4318 ((subsysid == 0x60E7) || (subsysid == 0x60E8))) { 4319 /* 4320 * which firmware 4321 */ 4322 if (!strcmp(adapter->fw_version, "H01.07") || 4323 !strcmp(adapter->fw_version, "H01.08") || 4324 !strcmp(adapter->fw_version, "H01.09") ) { 4325 dev_warn(&pdev->dev, 4326 "Firmware H.01.07, " 4327 "H.01.08, and H.01.09 on 1M/2M " 4328 "controllers\n" 4329 "do not support 64 bit " 4330 "addressing.\nDISABLING " 4331 "64 bit support.\n"); 4332 adapter->flag &= ~BOARD_64BIT; 4333 } 4334 } 4335 4336 if (mega_is_bios_enabled(adapter)) 4337 mega_hbas[hba_count].is_bios_enabled = 1; 4338 mega_hbas[hba_count].hostdata_addr = adapter; 4339 4340 /* 4341 * Find out which channel is raid and which is scsi. This is 4342 * for ROMB support. 4343 */ 4344 mega_enum_raid_scsi(adapter); 4345 4346 /* 4347 * Find out if a logical drive is set as the boot drive. If 4348 * there is one, will make that as the first logical drive. 4349 * ROMB: Do we have to boot from a physical drive. Then all 4350 * the physical drives would appear before the logical disks. 4351 * Else, all the physical drives would be exported to the mid 4352 * layer after logical drives. 4353 */ 4354 mega_get_boot_drv(adapter); 4355 4356 if (adapter->boot_pdrv_enabled) { 4357 j = adapter->product_info.nchannels; 4358 for( i = 0; i < j; i++ ) 4359 adapter->logdrv_chan[i] = 0; 4360 for( i = j; i < NVIRT_CHAN + j; i++ ) 4361 adapter->logdrv_chan[i] = 1; 4362 } else { 4363 for (i = 0; i < NVIRT_CHAN; i++) 4364 adapter->logdrv_chan[i] = 1; 4365 for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++) 4366 adapter->logdrv_chan[i] = 0; 4367 adapter->mega_ch_class <<= NVIRT_CHAN; 4368 } 4369 4370 /* 4371 * Do we support random deletion and addition of logical 4372 * drives 4373 */ 4374 adapter->read_ldidmap = 0; /* set it after first logdrv 4375 delete cmd */ 4376 adapter->support_random_del = mega_support_random_del(adapter); 4377 4378 /* Initialize SCBs */ 4379 if (mega_init_scb(adapter)) 4380 goto out_free_mbox; 4381 4382 /* 4383 * Reset the pending commands counter 4384 */ 4385 atomic_set(&adapter->pend_cmds, 0); 4386 4387 /* 4388 * Reset the adapter quiescent flag 4389 */ 4390 atomic_set(&adapter->quiescent, 0); 4391 4392 hba_soft_state[hba_count] = adapter; 4393 4394 /* 4395 * Fill in the structure which needs to be passed back to the 4396 * application when it does an ioctl() for controller related 4397 * information. 
4398 */ 4399 i = hba_count; 4400 4401 mcontroller[i].base = mega_baseport; 4402 mcontroller[i].irq = irq; 4403 mcontroller[i].numldrv = adapter->numldrv; 4404 mcontroller[i].pcibus = pci_bus; 4405 mcontroller[i].pcidev = id->device; 4406 mcontroller[i].pcifun = PCI_FUNC (pci_dev_func); 4407 mcontroller[i].pciid = -1; 4408 mcontroller[i].pcivendor = id->vendor; 4409 mcontroller[i].pcislot = PCI_SLOT(pci_dev_func); 4410 mcontroller[i].uid = (pci_bus << 8) | pci_dev_func; 4411 4412 4413 /* Set the Mode of addressing to 64 bit if we can */ 4414 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) { 4415 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 4416 adapter->has_64bit_addr = 1; 4417 } else { 4418 dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 4419 adapter->has_64bit_addr = 0; 4420 } 4421 4422 mutex_init(&adapter->int_mtx); 4423 init_completion(&adapter->int_waitq); 4424 4425 adapter->this_id = DEFAULT_INITIATOR_ID; 4426 adapter->host->this_id = DEFAULT_INITIATOR_ID; 4427 4428#if MEGA_HAVE_CLUSTERING 4429 /* 4430 * Is cluster support enabled on this controller 4431 * Note: In a cluster the HBAs ( the initiators ) will have 4432 * different target IDs and we cannot assume it to be 7. Call 4433 * to mega_support_cluster() will get the target ids also if 4434 * the cluster support is available 4435 */ 4436 adapter->has_cluster = mega_support_cluster(adapter); 4437 if (adapter->has_cluster) { 4438 dev_notice(&pdev->dev, 4439 "Cluster driver, initiator id:%d\n", 4440 adapter->this_id); 4441 } 4442#endif 4443 4444 pci_set_drvdata(pdev, host); 4445 4446 mega_create_proc_entry(hba_count, mega_proc_dir_entry); 4447 4448 error = scsi_add_host(host, &pdev->dev); 4449 if (error) 4450 goto out_free_mbox; 4451 4452 scsi_scan_host(host); 4453 hba_count++; 4454 return 0; 4455 4456 out_free_mbox: 4457 dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t), 4458 adapter->una_mbox64, adapter->una_mbox64_dma); 4459 out_free_irq: 4460 free_irq(adapter->host->irq, adapter); 4461 out_free_scb_list: 4462 kfree(adapter->scb_list); 4463 out_free_cmd_buffer: 4464 dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE, 4465 adapter->mega_buffer, adapter->buf_dma_handle); 4466 out_host_put: 4467 scsi_host_put(host); 4468 out_iounmap: 4469 if (flag & BOARD_MEMMAP) 4470 iounmap((void *)mega_baseport); 4471 out_release_region: 4472 if (flag & BOARD_MEMMAP) 4473 release_mem_region(tbase, 128); 4474 else 4475 release_region(mega_baseport, 16); 4476 out_disable_device: 4477 pci_disable_device(pdev); 4478 out: 4479 return error; 4480} 4481 4482static void 4483__megaraid_shutdown(adapter_t *adapter) 4484{ 4485 u_char raw_mbox[sizeof(struct mbox_out)]; 4486 mbox_t *mbox = (mbox_t *)raw_mbox; 4487 int i; 4488 4489 /* Flush adapter cache */ 4490 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 4491 raw_mbox[0] = FLUSH_ADAPTER; 4492 4493 free_irq(adapter->host->irq, adapter); 4494 4495 /* Issue a blocking (interrupts disabled) command to the card */ 4496 issue_scb_block(adapter, raw_mbox); 4497 4498 /* Flush disks cache */ 4499 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 4500 raw_mbox[0] = FLUSH_SYSTEM; 4501 4502 /* Issue a blocking (interrupts disabled) command to the card */ 4503 issue_scb_block(adapter, raw_mbox); 4504 4505 if (atomic_read(&adapter->pend_cmds) > 0) 4506 dev_warn(&adapter->dev->dev, "pending commands!!\n"); 4507 4508 /* 4509 * Have a delibrate delay to make sure all the caches are 4510 * actually flushed. 
4511 */ 4512 for (i = 0; i <= 10; i++) 4513 mdelay(1000); 4514} 4515 4516static void 4517megaraid_remove_one(struct pci_dev *pdev) 4518{ 4519 struct Scsi_Host *host = pci_get_drvdata(pdev); 4520 adapter_t *adapter = (adapter_t *)host->hostdata; 4521 char buf[12] = { 0 }; 4522 4523 scsi_remove_host(host); 4524 4525 __megaraid_shutdown(adapter); 4526 4527 /* Free our resources */ 4528 if (adapter->flag & BOARD_MEMMAP) { 4529 iounmap((void *)adapter->base); 4530 release_mem_region(adapter->host->base, 128); 4531 } else 4532 release_region(adapter->base, 16); 4533 4534 mega_free_sgl(adapter); 4535 4536 sprintf(buf, "hba%d", adapter->host->host_no); 4537 remove_proc_subtree(buf, mega_proc_dir_entry); 4538 4539 dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE, 4540 adapter->mega_buffer, adapter->buf_dma_handle); 4541 kfree(adapter->scb_list); 4542 dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t), 4543 adapter->una_mbox64, adapter->una_mbox64_dma); 4544 4545 scsi_host_put(host); 4546 pci_disable_device(pdev); 4547 4548 hba_count--; 4549} 4550 4551static void 4552megaraid_shutdown(struct pci_dev *pdev) 4553{ 4554 struct Scsi_Host *host = pci_get_drvdata(pdev); 4555 adapter_t *adapter = (adapter_t *)host->hostdata; 4556 4557 __megaraid_shutdown(adapter); 4558} 4559 4560static struct pci_device_id megaraid_pci_tbl[] = { 4561 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID, 4562 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 4563 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2, 4564 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 4565 {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3, 4566 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 4567 {0,} 4568}; 4569MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl); 4570 4571static struct pci_driver megaraid_pci_driver = { 4572 .name = "megaraid_legacy", 4573 .id_table = megaraid_pci_tbl, 4574 .probe = megaraid_probe_one, 4575 .remove = megaraid_remove_one, 4576 .shutdown = megaraid_shutdown, 4577}; 4578 4579static int __init megaraid_init(void) 4580{ 4581 int error; 4582 4583 if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN)) 4584 max_cmd_per_lun = MAX_CMD_PER_LUN; 4585 if (max_mbox_busy_wait > MBOX_BUSY_WAIT) 4586 max_mbox_busy_wait = MBOX_BUSY_WAIT; 4587 4588#ifdef CONFIG_PROC_FS 4589 mega_proc_dir_entry = proc_mkdir("megaraid", NULL); 4590 if (!mega_proc_dir_entry) { 4591 printk(KERN_WARNING 4592 "megaraid: failed to create megaraid root\n"); 4593 } 4594#endif 4595 error = pci_register_driver(&megaraid_pci_driver); 4596 if (error) { 4597#ifdef CONFIG_PROC_FS 4598 remove_proc_entry("megaraid", NULL); 4599#endif 4600 return error; 4601 } 4602 4603 /* 4604 * Register the driver as a character device, for applications 4605 * to access it for ioctls. 4606 * First argument (major) to register_chrdev implies a dynamic 4607 * major number allocation. 4608 */ 4609 major = register_chrdev(0, "megadev_legacy", &megadev_fops); 4610 if (major < 0) { 4611 printk(KERN_WARNING 4612 "megaraid: failed to register char device\n"); 4613 } 4614 4615 return 0; 4616} 4617 4618static void __exit megaraid_exit(void) 4619{ 4620 /* 4621 * Unregister the character device interface to the driver. 4622 */ 4623 unregister_chrdev(major, "megadev_legacy"); 4624 4625 pci_unregister_driver(&megaraid_pci_driver); 4626 4627#ifdef CONFIG_PROC_FS 4628 remove_proc_entry("megaraid", NULL); 4629#endif 4630} 4631 4632module_init(megaraid_init); 4633module_exit(megaraid_exit); 4634 4635/* vi: set ts=8 sw=8 tw=78: */