cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

linit.c (61936B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 *	Adaptec AAC series RAID controller driver
      4 *	(c) Copyright 2001 Red Hat Inc.
      5 *
      6 * based on the old aacraid driver that is..
      7 * Adaptec aacraid device driver for Linux.
      8 *
      9 * Copyright (c) 2000-2010 Adaptec, Inc.
     10 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
     11 *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
     12 *
     13 * Module Name:
     14 *   linit.c
     15 *
     16 * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
     17 */
     18
     19
     20#include <linux/compat.h>
     21#include <linux/blkdev.h>
     22#include <linux/completion.h>
     23#include <linux/init.h>
     24#include <linux/interrupt.h>
     25#include <linux/kernel.h>
     26#include <linux/module.h>
     27#include <linux/moduleparam.h>
     28#include <linux/pci.h>
     29#include <linux/aer.h>
     30#include <linux/slab.h>
     31#include <linux/mutex.h>
     32#include <linux/spinlock.h>
     33#include <linux/syscalls.h>
     34#include <linux/delay.h>
     35#include <linux/kthread.h>
     36#include <linux/msdos_partition.h>
     37
     38#include <scsi/scsi.h>
     39#include <scsi/scsi_cmnd.h>
     40#include <scsi/scsi_device.h>
     41#include <scsi/scsi_host.h>
     42#include <scsi/scsi_tcq.h>
     43#include <scsi/scsicam.h>
     44#include <scsi/scsi_eh.h>
     45
     46#include "aacraid.h"
     47
     48#define AAC_DRIVER_VERSION		"1.2.1"
     49#ifndef AAC_DRIVER_BRANCH
     50#define AAC_DRIVER_BRANCH		""
     51#endif
     52#define AAC_DRIVERNAME			"aacraid"
     53
     54#ifdef AAC_DRIVER_BUILD
     55#define _str(x) #x
     56#define str(x) _str(x)
     57#define AAC_DRIVER_FULL_VERSION	AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
     58#else
     59#define AAC_DRIVER_FULL_VERSION	AAC_DRIVER_VERSION AAC_DRIVER_BRANCH
     60#endif
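
/*
 * Editor's note (illustrative sketch, not part of the driver): the
 * _str()/str() pair above is the standard two-step preprocessor trick;
 * the outer macro expands its argument before the inner one stringifies
 * it.  The standalone demo below, with a purely hypothetical build
 * number, shows the shape of the resulting version string.  It is kept
 * out of the build by the #if 0 guard.
 */
#if 0
#include <stdio.h>

#define DEMO_VERSION	"1.2.1"
#define DEMO_BUILD	50983			/* hypothetical build number */
#define _demo_str(x)	#x
#define demo_str(x)	_demo_str(x)		/* expand first, then stringify */

int main(void)
{
	/* Adjacent string literals concatenate to "1.2.1[50983]". */
	printf("%s\n", DEMO_VERSION "[" demo_str(DEMO_BUILD) "]");
	return 0;
}
#endif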
     61
     62MODULE_AUTHOR("Red Hat Inc and Adaptec");
     63MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
     64		   "Adaptec Advanced Raid Products, "
     65		   "HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver");
     66MODULE_LICENSE("GPL");
     67MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
     68
     69static DEFINE_MUTEX(aac_mutex);
     70static LIST_HEAD(aac_devices);
     71static int aac_cfg_major = AAC_CHARDEV_UNREGISTERED;
     72char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
     73
     74/*
     75 * Because of the way Linux names scsi devices, the order in this table has
     76 * become important.  Check for on-board Raid first, add-in cards second.
     77 *
     78 * Note: The last field is used to index into aac_drivers below.
     79 */
     80static const struct pci_device_id aac_pci_tbl[] = {
     81	{ 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
     82	{ 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
      83	{ 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si) */
      84	{ 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF) */
     85	{ 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */
     86	{ 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */
     87	{ 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
     88	{ 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */
     89	{ 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */
     90	{ 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */
     91	{ 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */
     92	{ 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */
     93	{ 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */
     94	{ 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */
     95	{ 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */
     96	{ 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */
     97
     98	{ 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */
     99	{ 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */
    100	{ 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
    101	{ 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
    102	{ 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
    103	{ 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */
    104	{ 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */
    105	{ 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */
    106	{ 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */
    107	{ 0x9005, 0x0286, 0x9005, 0x029e, 0, 0, 25 }, /* ICP9024RO (Lancer) */
    108	{ 0x9005, 0x0286, 0x9005, 0x029f, 0, 0, 26 }, /* ICP9014RO (Lancer) */
    109	{ 0x9005, 0x0286, 0x9005, 0x02a0, 0, 0, 27 }, /* ICP9047MA (Lancer) */
    110	{ 0x9005, 0x0286, 0x9005, 0x02a1, 0, 0, 28 }, /* ICP9087MA (Lancer) */
    111	{ 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5445AU (Hurricane44) */
    112	{ 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */
    113	{ 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */
    114	{ 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */
    115	{ 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */
    116	{ 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */
    117	{ 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */
    118	{ 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
    119	{ 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
    120	{ 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
    121	{ 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
    122	{ 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
    123	{ 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
    124	{ 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
    125	{ 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */
    126	{ 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */
    127	{ 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005 */
    128	{ 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */
    129	{ 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */
    130	{ 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */
    131	{ 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */
    132	{ 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000 (BlackBird) */
    133	{ 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */
    134	{ 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */
    135	{ 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-3800 (Hurricane44) */
    136
    137	{ 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/
    138	{ 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/
    139	{ 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/
    140	{ 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */
    141	{ 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */
    142
    143	{ 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */
    144	{ 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
    145	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
    146	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
    147	{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
    148	{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
    149	{ 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
    150	{ 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
    151	{ 0,}
    152};
    153MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
    154
    155/*
    156 * dmb - For now we add the number of channels to this structure.
    157 * In the future we should add a fib that reports the number of channels
    158 * for the card.  At that time we can remove the channels from here
    159 */
    160static struct aac_driver_ident aac_drivers[] = {
    161	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */
    162	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */
     163	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si) */
     164	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF) */
    165	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */
    166	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */
    167	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
    168	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */
    169	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
    170	{ aac_rx_init, "aacraid",  "ADAPTEC ", "catapult        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
    171	{ aac_rx_init, "aacraid",  "ADAPTEC ", "tomcat          ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
    172	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2120S   ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG },		      /* Adaptec 2120S (Crusader) */
    173	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG },		      /* Adaptec 2200S (Vulcan) */
    174	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
    175	{ aac_rx_init, "aacraid",  "Legend  ", "Legend S220     ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
    176	{ aac_rx_init, "aacraid",  "Legend  ", "Legend S230     ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
    177
    178	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 3230S   ", 2 }, /* Adaptec 3230S (Harrier) */
    179	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 3240S   ", 2 }, /* Adaptec 3240S (Tornado) */
    180	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2020ZCR     ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
    181	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2025ZCR     ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
    182	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
    183	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */
    184	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2820SA      ", 1 }, /* AAR-2820SA (Intruder) */
    185	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2620SA      ", 1 }, /* AAR-2620SA (Intruder) */
    186	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2420SA      ", 1 }, /* AAR-2420SA (Intruder) */
    187	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9024RO       ", 2 }, /* ICP9024RO (Lancer) */
    188	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9014RO       ", 1 }, /* ICP9014RO (Lancer) */
    189	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9047MA       ", 1 }, /* ICP9047MA (Lancer) */
    190	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9087MA       ", 1 }, /* ICP9087MA (Lancer) */
    191	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP5445AU       ", 1 }, /* ICP5445AU (Hurricane44) */
    192	{ aac_rx_init, "aacraid",  "ICP     ", "ICP9085LI       ", 1 }, /* ICP9085LI (Marauder-X) */
    193	{ aac_rx_init, "aacraid",  "ICP     ", "ICP5085BR       ", 1 }, /* ICP5085BR (Marauder-E) */
    194	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9067MA       ", 1 }, /* ICP9067MA (Intruder-6) */
    195	{ NULL        , "aacraid",  "ADAPTEC ", "Themisto        ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */
    196	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "Callisto        ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
    197	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2020SA       ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
    198	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2025SA       ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
    199	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2410SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
    200	{ aac_rx_init, "aacraid",  "DELL    ", "CERC SR2        ", 1, AAC_QUIRK_17SG }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
    201	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2810SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
    202	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-21610SA SATA", 1, AAC_QUIRK_17SG }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
    203	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2026ZCR     ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
    204	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2610SA      ", 1 }, /* SATA 6Ch (Bearcat) */
    205	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2240S       ", 1 }, /* ASR-2240S (SabreExpress) */
    206	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4005        ", 1 }, /* ASR-4005 */
    207	{ aac_rx_init, "ServeRAID","IBM     ", "ServeRAID 8i    ", 1 }, /* IBM 8i (AvonPark) */
    208	{ aac_rkt_init, "ServeRAID","IBM     ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */
    209	{ aac_rkt_init, "ServeRAID","IBM     ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */
    210	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4000        ", 1 }, /* ASR-4000 (BlackBird & AvonPark) */
    211	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4800SAS     ", 1 }, /* ASR-4800SAS (Marauder-X) */
    212	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4805SAS     ", 1 }, /* ASR-4805SAS (Marauder-E) */
    213	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "ASR-3800        ", 1 }, /* ASR-3800 (Hurricane44) */
    214
    215	{ aac_rx_init, "percraid", "DELL    ", "PERC 320/DC     ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
    216	{ aac_sa_init, "aacraid",  "ADAPTEC ", "Adaptec 5400S   ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
    217	{ aac_sa_init, "aacraid",  "ADAPTEC ", "AAC-364         ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
    218	{ aac_sa_init, "percraid", "DELL    ", "PERCRAID        ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
    219	{ aac_sa_init, "hpnraid",  "HP      ", "NetRAID         ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
    220
    221	{ aac_rx_init, "aacraid",  "DELL    ", "RAID            ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */
    222	{ aac_rx_init, "aacraid",  "Legend  ", "RAID            ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
    223	{ aac_rx_init, "aacraid",  "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Catch All */
    224	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Rocket Catch All */
    225	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID           ", 2 }, /* Adaptec NEMER/ARK Catch All */
    226	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
    227	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
    228	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
    229};
    230
    231/**
    232 *	aac_queuecommand	-	queue a SCSI command
    233 *	@shost:		Scsi host to queue command on
    234 *	@cmd:		SCSI command to queue
    235 *
    236 *	Queues a command for execution by the associated Host Adapter.
    237 *
    238 *	TODO: unify with aac_scsi_cmd().
    239 */
    240
    241static int aac_queuecommand(struct Scsi_Host *shost,
    242			    struct scsi_cmnd *cmd)
    243{
    244	aac_priv(cmd)->owner = AAC_OWNER_LOWLEVEL;
    245
    246	return aac_scsi_cmd(cmd) ? FAILED : 0;
    247}
    248
    249/**
    250 *	aac_info		-	Returns the host adapter name
    251 *	@shost:		Scsi host to report on
    252 *
    253 *	Returns a static string describing the device in question
    254 */
    255
    256static const char *aac_info(struct Scsi_Host *shost)
    257{
    258	struct aac_dev *dev = (struct aac_dev *)shost->hostdata;
    259	return aac_drivers[dev->cardtype].name;
    260}
    261
    262/**
    263 *	aac_get_driver_ident
    264 *	@devtype: index into lookup table
    265 *
    266 *	Returns a pointer to the entry in the driver lookup table.
    267 */
    268
    269struct aac_driver_ident* aac_get_driver_ident(int devtype)
    270{
    271	return &aac_drivers[devtype];
    272}
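
/*
 * Editor's note (illustrative sketch): as the comment above aac_pci_tbl
 * states, the last field of each PCI table entry lands in
 * pci_device_id.driver_data and is an index into aac_drivers[].  A probe
 * routine would typically recover the per-board description as sketched
 * below; the actual call site is outside this excerpt, so the helper and
 * its use here are hypothetical.
 */
#if 0
static void demo_identify(const struct pci_device_id *id)
{
	/* driver_data was filled from the last column of aac_pci_tbl */
	struct aac_driver_ident *ident = aac_get_driver_ident(id->driver_data);

	pr_info("%s: matched %s %s\n", AAC_DRIVERNAME,
		ident->vname, ident->model);
}
#endif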
    273
    274/**
    275 *	aac_biosparm	-	return BIOS parameters for disk
    276 *	@sdev: The scsi device corresponding to the disk
    277 *	@bdev: the block device corresponding to the disk
    278 *	@capacity: the sector capacity of the disk
    279 *	@geom: geometry block to fill in
    280 *
    281 *	Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
    282 *	The default disk geometry is 64 heads, 32 sectors, and the appropriate
    283 *	number of cylinders so as not to exceed drive capacity.  In order for
    284 *	disks equal to or larger than 1 GB to be addressable by the BIOS
    285 *	without exceeding the BIOS limitation of 1024 cylinders, Extended
    286 *	Translation should be enabled.   With Extended Translation enabled,
    287 *	drives between 1 GB inclusive and 2 GB exclusive are given a disk
    288 *	geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
    289 *	are given a disk geometry of 255 heads and 63 sectors.  However, if
    290 *	the BIOS detects that the Extended Translation setting does not match
    291 *	the geometry in the partition table, then the translation inferred
    292 *	from the partition table will be used by the BIOS, and a warning may
    293 *	be displayed.
    294 */
    295
    296static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
    297			sector_t capacity, int *geom)
    298{
    299	struct diskparm *param = (struct diskparm *)geom;
    300	unsigned char *buf;
    301
    302	dprintk((KERN_DEBUG "aac_biosparm.\n"));
    303
    304	/*
    305	 *	Assuming extended translation is enabled - #REVISIT#
    306	 */
    307	if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */
    308		if(capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */
    309			param->heads = 255;
    310			param->sectors = 63;
    311		} else {
    312			param->heads = 128;
    313			param->sectors = 32;
    314		}
    315	} else {
    316		param->heads = 64;
    317		param->sectors = 32;
    318	}
    319
    320	param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
    321
    322	/*
    323	 *	Read the first 1024 bytes from the disk device, if the boot
    324	 *	sector partition table is valid, search for a partition table
    325	 *	entry whose end_head matches one of the standard geometry
    326	 *	translations ( 64/32, 128/32, 255/63 ).
    327	 */
    328	buf = scsi_bios_ptable(bdev);
    329	if (!buf)
    330		return 0;
    331	if (*(__le16 *)(buf + 0x40) == cpu_to_le16(MSDOS_LABEL_MAGIC)) {
    332		struct msdos_partition *first = (struct msdos_partition *)buf;
    333		struct msdos_partition *entry = first;
    334		int saved_cylinders = param->cylinders;
    335		int num;
    336		unsigned char end_head, end_sec;
    337
    338		for(num = 0; num < 4; num++) {
    339			end_head = entry->end_head;
    340			end_sec = entry->end_sector & 0x3f;
    341
    342			if(end_head == 63) {
    343				param->heads = 64;
    344				param->sectors = 32;
    345				break;
    346			} else if(end_head == 127) {
    347				param->heads = 128;
    348				param->sectors = 32;
    349				break;
    350			} else if(end_head == 254) {
    351				param->heads = 255;
    352				param->sectors = 63;
    353				break;
    354			}
    355			entry++;
    356		}
    357
    358		if (num == 4) {
    359			end_head = first->end_head;
    360			end_sec = first->end_sector & 0x3f;
    361		}
    362
    363		param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
    364		if (num < 4 && end_sec == param->sectors) {
    365			if (param->cylinders != saved_cylinders) {
    366				dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
    367					param->heads, param->sectors, num));
    368			}
    369		} else if (end_head > 0 || end_sec > 0) {
    370			dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
    371				end_head + 1, end_sec, num));
    372			dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
    373					param->heads, param->sectors));
    374		}
    375	}
    376	kfree(buf);
    377	return 0;
    378}
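
/*
 * Editor's note (illustrative sketch): the default geometry above follows
 * the thresholds described in the function comment, and the cylinder
 * count comes from dividing the capacity by heads * sectors (assumed here
 * to be what cap_to_cyls() does).  Worked example for a hypothetical
 * 4 GiB disk, i.e. 8388608 512-byte sectors: 255 heads, 63 sectors and
 * 8388608 / (255 * 63) = 522 cylinders.  The standalone demo below is
 * kept out of the build.
 */
#if 0
#include <stdio.h>

static void demo_geometry(unsigned long long capacity)	/* 512-byte sectors */
{
	unsigned int heads, sectors;

	if (capacity >= 4ULL * 1024 * 1024) {		/* >= 2 GB */
		heads = 255;
		sectors = 63;
	} else if (capacity >= 2ULL * 1024 * 1024) {	/* >= 1 GB */
		heads = 128;
		sectors = 32;
	} else {
		heads = 64;
		sectors = 32;
	}
	printf("C/H/S = %llu/%u/%u\n",
	       capacity / (heads * sectors), heads, sectors);
}

int main(void)
{
	demo_geometry(8388608ULL);	/* prints "C/H/S = 522/255/63" */
	return 0;
}
#endif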
    379
    380/**
    381 *	aac_slave_configure		-	compute queue depths
    382 *	@sdev:	SCSI device we are considering
    383 *
    384 *	Selects queue depths for each target device based on the host adapter's
    385 *	total capacity and the queue depth supported by the target device.
    386 *	A queue depth of one automatically disables tagged queueing.
    387 */
    388
    389static int aac_slave_configure(struct scsi_device *sdev)
    390{
    391	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
    392	int chn, tid;
    393	unsigned int depth = 0;
    394	unsigned int set_timeout = 0;
    395	int timeout = 0;
    396	bool set_qd_dev_type = false;
    397	u8 devtype = 0;
    398
    399	chn = aac_logical_to_phys(sdev_channel(sdev));
    400	tid = sdev_id(sdev);
    401	if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
    402		devtype = aac->hba_map[chn][tid].devtype;
    403
    404		if (devtype == AAC_DEVTYPE_NATIVE_RAW) {
    405			depth = aac->hba_map[chn][tid].qd_limit;
    406			set_timeout = 1;
    407			goto common_config;
    408		}
    409		if (devtype == AAC_DEVTYPE_ARC_RAW) {
    410			set_qd_dev_type = true;
    411			set_timeout = 1;
    412			goto common_config;
    413		}
    414	}
    415
    416	if (aac->jbod && (sdev->type == TYPE_DISK))
    417		sdev->removable = 1;
    418
    419	if (sdev->type == TYPE_DISK
    420	 && sdev_channel(sdev) != CONTAINER_CHANNEL
    421	 && (!aac->jbod || sdev->inq_periph_qual)
    422	 && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
    423
    424		if (expose_physicals == 0)
    425			return -ENXIO;
    426
    427		if (expose_physicals < 0)
    428			sdev->no_uld_attach = 1;
    429	}
    430
    431	if (sdev->tagged_supported
    432	 &&  sdev->type == TYPE_DISK
    433	 &&  (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
    434	 && !sdev->no_uld_attach) {
    435
    436		struct scsi_device * dev;
    437		struct Scsi_Host *host = sdev->host;
    438		unsigned num_lsu = 0;
    439		unsigned num_one = 0;
    440		unsigned cid;
    441
    442		set_timeout = 1;
    443
    444		for (cid = 0; cid < aac->maximum_num_containers; ++cid)
    445			if (aac->fsa_dev[cid].valid)
    446				++num_lsu;
    447
    448		__shost_for_each_device(dev, host) {
    449			if (dev->tagged_supported
    450			 && dev->type == TYPE_DISK
    451			 && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
    452			 && !dev->no_uld_attach) {
    453				if ((sdev_channel(dev) != CONTAINER_CHANNEL)
    454				 || !aac->fsa_dev[sdev_id(dev)].valid) {
    455					++num_lsu;
    456				}
    457			} else {
    458				++num_one;
    459			}
    460		}
    461
    462		if (num_lsu == 0)
    463			++num_lsu;
    464
    465		depth = (host->can_queue - num_one) / num_lsu;
    466
    467		if (sdev_channel(sdev) != NATIVE_CHANNEL)
    468			goto common_config;
    469
    470		set_qd_dev_type = true;
    471
    472	}
    473
    474common_config:
    475
    476	/*
    477	 * Check if SATA drive
    478	 */
    479	if (set_qd_dev_type) {
    480		if (strncmp(sdev->vendor, "ATA", 3) == 0)
    481			depth = 32;
    482		else
    483			depth = 64;
    484	}
    485
    486	/*
    487	 * Firmware has an individual device recovery time typically
    488	 * of 35 seconds, give us a margin. Thor devices can take longer in
    489	 * error recovery, hence different value.
    490	 */
    491	if (set_timeout) {
    492		timeout = aac->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT;
    493		blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
    494	}
    495
    496	if (depth > 256)
    497		depth = 256;
    498	else if (depth < 1)
    499		depth = 1;
    500
    501	scsi_change_queue_depth(sdev, depth);
    502
    503	sdev->tagged_supported = 1;
    504
    505	return 0;
    506}
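
/*
 * Editor's note (illustrative sketch): for tagged array containers the
 * code above shares the host's can_queue budget: each untagged device
 * (num_one) keeps one slot, the remainder is divided evenly across the
 * logical units (num_lsu), and the result is clamped to [1, 256].  With
 * hypothetical numbers, can_queue = 512, 2 untagged devices and 4 logical
 * units give (512 - 2) / 4 = 127 commands per device.
 */
#if 0
static unsigned int demo_split_depth(unsigned int can_queue,
				     unsigned int num_one,
				     unsigned int num_lsu)
{
	unsigned int depth;

	if (num_lsu == 0)			/* same guard as above */
		num_lsu = 1;
	depth = (can_queue - num_one) / num_lsu;
	if (depth > 256)
		depth = 256;
	else if (depth < 1)
		depth = 1;
	return depth;	/* e.g. demo_split_depth(512, 2, 4) == 127 */
}
#endif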
    507
    508/**
    509 *	aac_change_queue_depth		-	alter queue depths
    510 *	@sdev:	SCSI device we are considering
    511 *	@depth:	desired queue depth
    512 *
    513 *	Alters queue depths for target device based on the host adapter's
    514 *	total capacity and the queue depth supported by the target device.
    515 */
    516
    517static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
    518{
    519	struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
    520	int chn, tid, is_native_device = 0;
    521
    522	chn = aac_logical_to_phys(sdev_channel(sdev));
    523	tid = sdev_id(sdev);
    524	if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
    525		aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW)
    526		is_native_device = 1;
    527
    528	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
    529	    (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
    530		struct scsi_device * dev;
    531		struct Scsi_Host *host = sdev->host;
    532		unsigned num = 0;
    533
    534		__shost_for_each_device(dev, host) {
    535			if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
    536			    (sdev_channel(dev) == CONTAINER_CHANNEL))
    537				++num;
    538			++num;
    539		}
    540		if (num >= host->can_queue)
    541			num = host->can_queue - 1;
    542		if (depth > (host->can_queue - num))
    543			depth = host->can_queue - num;
    544		if (depth > 256)
    545			depth = 256;
    546		else if (depth < 2)
    547			depth = 2;
    548		return scsi_change_queue_depth(sdev, depth);
    549	} else if (is_native_device) {
    550		scsi_change_queue_depth(sdev, aac->hba_map[chn][tid].qd_limit);
    551	} else {
    552		scsi_change_queue_depth(sdev, 1);
    553	}
    554	return sdev->queue_depth;
    555}
    556
    557static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
    558{
    559	struct scsi_device *sdev = to_scsi_device(dev);
    560	struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
    561	if (sdev_channel(sdev) != CONTAINER_CHANNEL)
    562		return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
    563		  ? "Hidden\n" :
    564		  ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : ""));
    565	return snprintf(buf, PAGE_SIZE, "%s\n",
    566	  get_container_type(aac->fsa_dev[sdev_id(sdev)].type));
    567}
    568
    569static struct device_attribute aac_raid_level_attr = {
    570	.attr = {
    571		.name = "level",
    572		.mode = S_IRUGO,
    573	},
    574	.show = aac_show_raid_level
    575};
    576
    577static ssize_t aac_show_unique_id(struct device *dev,
    578	     struct device_attribute *attr, char *buf)
    579{
    580	struct scsi_device *sdev = to_scsi_device(dev);
    581	struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
    582	unsigned char sn[16];
    583
    584	memset(sn, 0, sizeof(sn));
    585
    586	if (sdev_channel(sdev) == CONTAINER_CHANNEL)
    587		memcpy(sn, aac->fsa_dev[sdev_id(sdev)].identifier, sizeof(sn));
    588
    589	return snprintf(buf, 16 * 2 + 2,
    590		"%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
    591		sn[0], sn[1], sn[2], sn[3],
    592		sn[4], sn[5], sn[6], sn[7],
    593		sn[8], sn[9], sn[10], sn[11],
    594		sn[12], sn[13], sn[14], sn[15]);
    595}
    596
    597static struct device_attribute aac_unique_id_attr = {
    598	.attr = {
    599		.name = "unique_id",
    600		.mode = 0444,
    601	},
    602	.show = aac_show_unique_id
    603};
    604
    605
    606
    607static struct attribute *aac_dev_attrs[] = {
    608	&aac_raid_level_attr.attr,
    609	&aac_unique_id_attr.attr,
    610	NULL,
    611};
    612
    613ATTRIBUTE_GROUPS(aac_dev);
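
/*
 * Editor's note: ATTRIBUTE_GROUPS(aac_dev) is the sysfs helper macro that
 * wraps aac_dev_attrs[] in an attribute_group and emits the
 * NULL-terminated aac_dev_groups[] array referenced by .sdev_groups in
 * the scsi_host_template further down; ATTRIBUTE_GROUPS(aac_host) later
 * in this file does the same for the host attributes.
 */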
    614
    615static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
    616		     void __user *arg)
    617{
    618	int retval;
    619	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
    620	if (!capable(CAP_SYS_RAWIO))
    621		return -EPERM;
    622	retval = aac_adapter_check_health(dev);
    623	if (retval)
    624		return -EBUSY;
    625	return aac_do_ioctl(dev, cmd, arg);
    626}
    627
    628struct fib_count_data {
    629	int mlcnt;
    630	int llcnt;
    631	int ehcnt;
    632	int fwcnt;
    633	int krlcnt;
    634};
    635
    636static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data, bool reserved)
    637{
    638	struct fib_count_data *fib_count = data;
    639
    640	switch (aac_priv(scmnd)->owner) {
    641	case AAC_OWNER_FIRMWARE:
    642		fib_count->fwcnt++;
    643		break;
    644	case AAC_OWNER_ERROR_HANDLER:
    645		fib_count->ehcnt++;
    646		break;
    647	case AAC_OWNER_LOWLEVEL:
    648		fib_count->llcnt++;
    649		break;
    650	case AAC_OWNER_MIDLEVEL:
    651		fib_count->mlcnt++;
    652		break;
    653	default:
    654		fib_count->krlcnt++;
    655		break;
    656	}
    657	return true;
    658}
    659
    660/* Called during SCSI EH, so we don't need to block requests */
    661static int get_num_of_incomplete_fibs(struct aac_dev *aac)
    662{
    663	struct Scsi_Host *shost = aac->scsi_host_ptr;
    664	struct device *ctrl_dev;
    665	struct fib_count_data fcnt = { };
    666
    667	scsi_host_busy_iter(shost, fib_count_iter, &fcnt);
    668
    669	ctrl_dev = &aac->pdev->dev;
    670
    671	dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", fcnt.mlcnt);
    672	dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", fcnt.llcnt);
    673	dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", fcnt.ehcnt);
    674	dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fcnt.fwcnt);
    675	dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", fcnt.krlcnt);
    676
    677	return fcnt.mlcnt + fcnt.llcnt + fcnt.ehcnt + fcnt.fwcnt;
    678}
    679
    680static int aac_eh_abort(struct scsi_cmnd* cmd)
    681{
    682	struct aac_cmd_priv *cmd_priv = aac_priv(cmd);
    683	struct scsi_device * dev = cmd->device;
    684	struct Scsi_Host * host = dev->host;
    685	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
    686	int count, found;
    687	u32 bus, cid;
    688	int ret = FAILED;
    689
    690	if (aac_adapter_check_health(aac))
    691		return ret;
    692
    693	bus = aac_logical_to_phys(scmd_channel(cmd));
    694	cid = scmd_id(cmd);
    695	if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
    696		struct fib *fib;
    697		struct aac_hba_tm_req *tmf;
    698		int status;
    699		u64 address;
    700
    701		pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
    702		 AAC_DRIVERNAME,
    703		 host->host_no, sdev_channel(dev), sdev_id(dev), (int)dev->lun);
    704
    705		found = 0;
    706		for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
    707			fib = &aac->fibs[count];
    708			if (*(u8 *)fib->hw_fib_va != 0 &&
    709				(fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
    710				(fib->callback_data == cmd)) {
    711				found = 1;
    712				break;
    713			}
    714		}
    715		if (!found)
    716			return ret;
    717
    718		/* start a HBA_TMF_ABORT_TASK TMF request */
    719		fib = aac_fib_alloc(aac);
    720		if (!fib)
    721			return ret;
    722
    723		tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
    724		memset(tmf, 0, sizeof(*tmf));
    725		tmf->tmf = HBA_TMF_ABORT_TASK;
    726		tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
    727		tmf->lun[1] = cmd->device->lun;
    728
    729		address = (u64)fib->hw_error_pa;
    730		tmf->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
    731		tmf->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
    732		tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
    733
    734		fib->hbacmd_size = sizeof(*tmf);
    735		cmd_priv->sent_command = 0;
    736
    737		status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
    738				  (fib_callback) aac_hba_callback,
    739				  (void *) cmd);
    740		if (status != -EINPROGRESS) {
    741			aac_fib_complete(fib);
    742			aac_fib_free(fib);
    743			return ret;
    744		}
    745		/* Wait up to 15 secs for completion */
    746		for (count = 0; count < 15; ++count) {
    747			if (cmd_priv->sent_command) {
    748				ret = SUCCESS;
    749				break;
    750			}
    751			msleep(1000);
    752		}
    753
    754		if (ret != SUCCESS)
    755			pr_err("%s: Host adapter abort request timed out\n",
    756			AAC_DRIVERNAME);
    757	} else {
    758		pr_err(
    759			"%s: Host adapter abort request.\n"
    760			"%s: Outstanding commands on (%d,%d,%d,%d):\n",
    761			AAC_DRIVERNAME, AAC_DRIVERNAME,
    762			host->host_no, sdev_channel(dev), sdev_id(dev),
    763			(int)dev->lun);
    764		switch (cmd->cmnd[0]) {
    765		case SERVICE_ACTION_IN_16:
    766			if (!(aac->raw_io_interface) ||
    767			    !(aac->raw_io_64) ||
    768			    ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
    769				break;
    770			fallthrough;
    771		case INQUIRY:
    772		case READ_CAPACITY:
    773			/*
    774			 * Mark associated FIB to not complete,
    775			 * eh handler does this
    776			 */
    777			for (count = 0;
    778				count < (host->can_queue + AAC_NUM_MGT_FIB);
    779				++count) {
    780				struct fib *fib = &aac->fibs[count];
    781
    782				if (fib->hw_fib_va->header.XferState &&
    783				(fib->flags & FIB_CONTEXT_FLAG) &&
    784				(fib->callback_data == cmd)) {
    785					fib->flags |=
    786						FIB_CONTEXT_FLAG_TIMED_OUT;
    787					cmd_priv->owner =
    788						AAC_OWNER_ERROR_HANDLER;
    789					ret = SUCCESS;
    790				}
    791			}
    792			break;
    793		case TEST_UNIT_READY:
    794			/*
    795			 * Mark associated FIB to not complete,
    796			 * eh handler does this
    797			 */
    798			for (count = 0;
    799				count < (host->can_queue + AAC_NUM_MGT_FIB);
    800				++count) {
    801				struct scsi_cmnd *command;
    802				struct fib *fib = &aac->fibs[count];
    803
    804				command = fib->callback_data;
    805
    806				if ((fib->hw_fib_va->header.XferState &
    807					cpu_to_le32
    808					(Async | NoResponseExpected)) &&
    809					(fib->flags & FIB_CONTEXT_FLAG) &&
    810					((command)) &&
    811					(command->device == cmd->device)) {
    812					fib->flags |=
    813						FIB_CONTEXT_FLAG_TIMED_OUT;
    814					aac_priv(command)->owner =
    815						AAC_OWNER_ERROR_HANDLER;
    816					if (command == cmd)
    817						ret = SUCCESS;
    818				}
    819			}
    820			break;
    821		}
    822	}
    823	return ret;
    824}
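
/*
 * Editor's note: aac_eh_abort() above takes one of two paths.  For
 * native-HBA (AAC_DEVTYPE_NATIVE_RAW) devices it locates the in-flight
 * FIB belonging to the command and issues an HBA_TMF_ABORT_TASK request,
 * then polls up to 15 seconds for the completion callback to set
 * sent_command.  For all other devices it cannot abort in hardware and
 * instead marks the matching FIBs FIB_CONTEXT_FLAG_TIMED_OUT so that the
 * error handler reclaims them.
 */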
    825
    826static u8 aac_eh_tmf_lun_reset_fib(struct aac_hba_map_info *info,
    827				   struct fib *fib, u64 tmf_lun)
    828{
    829	struct aac_hba_tm_req *tmf;
    830	u64 address;
    831
    832	/* start a HBA_TMF_LUN_RESET TMF request */
    833	tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
    834	memset(tmf, 0, sizeof(*tmf));
    835	tmf->tmf = HBA_TMF_LUN_RESET;
    836	tmf->it_nexus = info->rmw_nexus;
    837	int_to_scsilun(tmf_lun, (struct scsi_lun *)tmf->lun);
    838
    839	address = (u64)fib->hw_error_pa;
    840	tmf->error_ptr_hi = cpu_to_le32
    841		((u32)(address >> 32));
    842	tmf->error_ptr_lo = cpu_to_le32
    843		((u32)(address & 0xffffffff));
    844	tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
    845	fib->hbacmd_size = sizeof(*tmf);
    846
    847	return HBA_IU_TYPE_SCSI_TM_REQ;
    848}
    849
    850static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
    851				    struct fib *fib)
    852{
    853	struct aac_hba_reset_req *rst;
    854	u64 address;
    855
    856	/* already tried, start a hard reset now */
    857	rst = (struct aac_hba_reset_req *)fib->hw_fib_va;
    858	memset(rst, 0, sizeof(*rst));
    859	rst->it_nexus = info->rmw_nexus;
    860
    861	address = (u64)fib->hw_error_pa;
    862	rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
    863	rst->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
    864	rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
    865	fib->hbacmd_size = sizeof(*rst);
    866
    867	return HBA_IU_TYPE_SATA_REQ;
    868}
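
/*
 * Editor's note (illustrative sketch): both TMF builders above publish
 * the DMA address of the firmware error buffer as two 32-bit halves.
 * The split is plain shift-and-mask, as the standalone demo below shows
 * with a made-up address; in the driver the halves are additionally
 * passed through cpu_to_le32(), which is a no-op on little-endian hosts.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t address = 0x0000000123456789ULL;	/* hypothetical bus address */
	uint32_t hi = (uint32_t)(address >> 32);	/* 0x00000001 */
	uint32_t lo = (uint32_t)(address & 0xffffffff);	/* 0x23456789 */

	printf("hi=0x%08x lo=0x%08x\n", (unsigned int)hi, (unsigned int)lo);
	return 0;
}
#endif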
    869
    870static void aac_tmf_callback(void *context, struct fib *fibptr)
    871{
    872	struct aac_hba_resp *err =
    873		&((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
    874	struct aac_hba_map_info *info = context;
    875	int res;
    876
    877	switch (err->service_response) {
    878	case HBA_RESP_SVCRES_TMF_REJECTED:
    879		res = -1;
    880		break;
    881	case HBA_RESP_SVCRES_TMF_LUN_INVALID:
    882		res = 0;
    883		break;
    884	case HBA_RESP_SVCRES_TMF_COMPLETE:
    885	case HBA_RESP_SVCRES_TMF_SUCCEEDED:
    886		res = 0;
    887		break;
    888	default:
    889		res = -2;
    890		break;
    891	}
    892	aac_fib_complete(fibptr);
    893
    894	info->reset_state = res;
    895}
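
/*
 * Editor's note: reset_state acts as a small state machine shared with
 * the reset handlers below: they set it to a positive value (1 for a LUN
 * reset, 2 for a target reset) before sending the TMF, aac_tmf_callback()
 * above overwrites it with the outcome (0 on success, negative on
 * failure), and the handlers poll it for up to 15 seconds.
 */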
    896
    897/*
    898 *	aac_eh_dev_reset	- Device reset command handling
    899 *	@scsi_cmd:	SCSI command block causing the reset
    900 *
    901 */
    902static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
    903{
    904	struct scsi_device * dev = cmd->device;
    905	struct Scsi_Host * host = dev->host;
    906	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
    907	struct aac_hba_map_info *info;
    908	int count;
    909	u32 bus, cid;
    910	struct fib *fib;
    911	int ret = FAILED;
    912	int status;
    913	u8 command;
    914
    915	bus = aac_logical_to_phys(scmd_channel(cmd));
    916	cid = scmd_id(cmd);
    917
    918	if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
    919		return FAILED;
    920
    921	info = &aac->hba_map[bus][cid];
    922
    923	if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
    924	 !(info->reset_state > 0)))
    925		return FAILED;
    926
    927	pr_err("%s: Host device reset request. SCSI hang ?\n",
    928	       AAC_DRIVERNAME);
    929
    930	fib = aac_fib_alloc(aac);
    931	if (!fib)
    932		return ret;
    933
    934	/* start a HBA_TMF_LUN_RESET TMF request */
    935	command = aac_eh_tmf_lun_reset_fib(info, fib, dev->lun);
    936
    937	info->reset_state = 1;
    938
    939	status = aac_hba_send(command, fib,
    940			      (fib_callback) aac_tmf_callback,
    941			      (void *) info);
    942	if (status != -EINPROGRESS) {
    943		info->reset_state = 0;
    944		aac_fib_complete(fib);
    945		aac_fib_free(fib);
    946		return ret;
    947	}
    948	/* Wait up to 15 seconds for completion */
    949	for (count = 0; count < 15; ++count) {
    950		if (info->reset_state == 0) {
    951			ret = info->reset_state == 0 ? SUCCESS : FAILED;
    952			break;
    953		}
    954		msleep(1000);
    955	}
    956
    957	return ret;
    958}
    959
    960/*
    961 *	aac_eh_target_reset	- Target reset command handling
    962 *	@scsi_cmd:	SCSI command block causing the reset
    963 *
    964 */
    965static int aac_eh_target_reset(struct scsi_cmnd *cmd)
    966{
    967	struct scsi_device * dev = cmd->device;
    968	struct Scsi_Host * host = dev->host;
    969	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
    970	struct aac_hba_map_info *info;
    971	int count;
    972	u32 bus, cid;
    973	int ret = FAILED;
    974	struct fib *fib;
    975	int status;
    976	u8 command;
    977
    978	bus = aac_logical_to_phys(scmd_channel(cmd));
    979	cid = scmd_id(cmd);
    980
    981	if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
    982		return FAILED;
    983
    984	info = &aac->hba_map[bus][cid];
    985
    986	if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
    987	 !(info->reset_state > 0)))
    988		return FAILED;
    989
    990	pr_err("%s: Host target reset request. SCSI hang ?\n",
    991	       AAC_DRIVERNAME);
    992
    993	fib = aac_fib_alloc(aac);
    994	if (!fib)
    995		return ret;
    996
    997
    998	/* already tried, start a hard reset now */
    999	command = aac_eh_tmf_hard_reset_fib(info, fib);
   1000
   1001	info->reset_state = 2;
   1002
   1003	status = aac_hba_send(command, fib,
   1004			      (fib_callback) aac_tmf_callback,
   1005			      (void *) info);
   1006
   1007	if (status != -EINPROGRESS) {
   1008		info->reset_state = 0;
   1009		aac_fib_complete(fib);
   1010		aac_fib_free(fib);
   1011		return ret;
   1012	}
   1013
   1014	/* Wait up to 15 seconds for completion */
   1015	for (count = 0; count < 15; ++count) {
   1016		if (info->reset_state <= 0) {
   1017			ret = info->reset_state == 0 ? SUCCESS : FAILED;
   1018			break;
   1019		}
   1020		msleep(1000);
   1021	}
   1022
   1023	return ret;
   1024}
   1025
   1026/*
   1027 *	aac_eh_bus_reset	- Bus reset command handling
   1028 *	@scsi_cmd:	SCSI command block causing the reset
   1029 *
   1030 */
   1031static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
   1032{
   1033	struct scsi_device * dev = cmd->device;
   1034	struct Scsi_Host * host = dev->host;
   1035	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
   1036	int count;
   1037	u32 cmd_bus;
   1038	int status = 0;
   1039
   1040
   1041	cmd_bus = aac_logical_to_phys(scmd_channel(cmd));
   1042	/* Mark the assoc. FIB to not complete, eh handler does this */
   1043	for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
   1044		struct fib *fib = &aac->fibs[count];
   1045
   1046		if (fib->hw_fib_va->header.XferState &&
   1047		    (fib->flags & FIB_CONTEXT_FLAG) &&
   1048		    (fib->flags & FIB_CONTEXT_FLAG_SCSI_CMD)) {
   1049			struct aac_hba_map_info *info;
   1050			u32 bus, cid;
   1051
   1052			cmd = (struct scsi_cmnd *)fib->callback_data;
   1053			bus = aac_logical_to_phys(scmd_channel(cmd));
   1054			if (bus != cmd_bus)
   1055				continue;
   1056			cid = scmd_id(cmd);
   1057			info = &aac->hba_map[bus][cid];
   1058			if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
   1059			    info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
   1060				fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
   1061				aac_priv(cmd)->owner = AAC_OWNER_ERROR_HANDLER;
   1062			}
   1063		}
   1064	}
   1065
   1066	pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME);
   1067
   1068	/*
   1069	 * Check the health of the controller
   1070	 */
   1071	status = aac_adapter_check_health(aac);
   1072	if (status)
   1073		dev_err(&aac->pdev->dev, "Adapter health - %d\n", status);
   1074
   1075	count = get_num_of_incomplete_fibs(aac);
   1076	return (count == 0) ? SUCCESS : FAILED;
   1077}
   1078
   1079/*
   1080 *	aac_eh_host_reset	- Host reset command handling
   1081 *	@scsi_cmd:	SCSI command block causing the reset
   1082 *
   1083 */
   1084static int aac_eh_host_reset(struct scsi_cmnd *cmd)
   1085{
   1086	struct scsi_device * dev = cmd->device;
   1087	struct Scsi_Host * host = dev->host;
   1088	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
   1089	int ret = FAILED;
   1090	__le32 supported_options2 = 0;
   1091	bool is_mu_reset;
   1092	bool is_ignore_reset;
   1093	bool is_doorbell_reset;
   1094
   1095	/*
   1096	 * Check if reset is supported by the firmware
   1097	 */
   1098	supported_options2 = aac->supplement_adapter_info.supported_options2;
   1099	is_mu_reset = supported_options2 & AAC_OPTION_MU_RESET;
   1100	is_doorbell_reset = supported_options2 & AAC_OPTION_DOORBELL_RESET;
   1101	is_ignore_reset = supported_options2 & AAC_OPTION_IGNORE_RESET;
   1102	/*
   1103	 * This adapter needs a blind reset, only do so for
   1104	 * Adapters that support a register, instead of a commanded,
   1105	 * reset.
   1106	 */
   1107	if ((is_mu_reset || is_doorbell_reset)
   1108	 && aac_check_reset
   1109	 && (aac_check_reset != -1 || !is_ignore_reset)) {
   1110		/* Bypass wait for command quiesce */
   1111		if (aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET) == 0)
   1112			ret = SUCCESS;
   1113	}
   1114	/*
   1115	 * Reset EH state
   1116	 */
   1117	if (ret == SUCCESS) {
   1118		int bus, cid;
   1119		struct aac_hba_map_info *info;
   1120
   1121		for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
   1122			for (cid = 0; cid < AAC_MAX_TARGETS; cid++) {
   1123				info = &aac->hba_map[bus][cid];
   1124				if (info->devtype == AAC_DEVTYPE_NATIVE_RAW)
   1125					info->reset_state = 0;
   1126			}
   1127		}
   1128	}
   1129	return ret;
   1130}
   1131
   1132/**
   1133 *	aac_cfg_open		-	open a configuration file
   1134 *	@inode: inode being opened
   1135 *	@file: file handle attached
   1136 *
   1137 *	Called when the configuration device is opened. Does the needed
   1138 *	set up on the handle and then returns
   1139 *
   1140 *	Bugs: This needs extending to check a given adapter is present
   1141 *	so we can support hot plugging, and to ref count adapters.
   1142 */
   1143
   1144static int aac_cfg_open(struct inode *inode, struct file *file)
   1145{
   1146	struct aac_dev *aac;
   1147	unsigned minor_number = iminor(inode);
   1148	int err = -ENODEV;
   1149
   1150	mutex_lock(&aac_mutex);  /* BKL pushdown: nothing else protects this list */
   1151	list_for_each_entry(aac, &aac_devices, entry) {
   1152		if (aac->id == minor_number) {
   1153			file->private_data = aac;
   1154			err = 0;
   1155			break;
   1156		}
   1157	}
   1158	mutex_unlock(&aac_mutex);
   1159
   1160	return err;
   1161}
   1162
   1163/**
   1164 *	aac_cfg_ioctl		-	AAC configuration request
   1165 *	@file: file handle
   1166 *	@cmd: ioctl command code
   1167 *	@arg: argument
   1168 *
   1169 *	Handles a configuration ioctl. Currently this involves wrapping it
   1170 *	up and feeding it into the nasty windowsalike glue layer.
   1171 *
   1172 *	Bugs: Needs locking against parallel ioctls lower down
   1173 *	Bugs: Needs to handle hot plugging
   1174 */
   1175
   1176static long aac_cfg_ioctl(struct file *file,
   1177		unsigned int cmd, unsigned long arg)
   1178{
   1179	struct aac_dev *aac = (struct aac_dev *)file->private_data;
   1180
   1181	if (!capable(CAP_SYS_RAWIO))
   1182		return -EPERM;
   1183
   1184	return aac_do_ioctl(aac, cmd, (void __user *)arg);
   1185}
   1186
   1187static ssize_t aac_show_model(struct device *device,
   1188			      struct device_attribute *attr, char *buf)
   1189{
   1190	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
   1191	int len;
   1192
   1193	if (dev->supplement_adapter_info.adapter_type_text[0]) {
   1194		char *cp = dev->supplement_adapter_info.adapter_type_text;
   1195		while (*cp && *cp != ' ')
   1196			++cp;
   1197		while (*cp == ' ')
   1198			++cp;
   1199		len = snprintf(buf, PAGE_SIZE, "%s\n", cp);
   1200	} else
   1201		len = snprintf(buf, PAGE_SIZE, "%s\n",
   1202		  aac_drivers[dev->cardtype].model);
   1203	return len;
   1204}
   1205
   1206static ssize_t aac_show_vendor(struct device *device,
   1207			       struct device_attribute *attr, char *buf)
   1208{
   1209	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
   1210	struct aac_supplement_adapter_info *sup_adap_info;
   1211	int len;
   1212
   1213	sup_adap_info = &dev->supplement_adapter_info;
   1214	if (sup_adap_info->adapter_type_text[0]) {
   1215		char *cp = sup_adap_info->adapter_type_text;
   1216		while (*cp && *cp != ' ')
   1217			++cp;
   1218		len = snprintf(buf, PAGE_SIZE, "%.*s\n",
   1219			(int)(cp - (char *)sup_adap_info->adapter_type_text),
   1220					sup_adap_info->adapter_type_text);
   1221	} else
   1222		len = snprintf(buf, PAGE_SIZE, "%s\n",
   1223			aac_drivers[dev->cardtype].vname);
   1224	return len;
   1225}
   1226
   1227static ssize_t aac_show_flags(struct device *cdev,
   1228			      struct device_attribute *attr, char *buf)
   1229{
   1230	int len = 0;
   1231	struct aac_dev *dev = (struct aac_dev*)class_to_shost(cdev)->hostdata;
   1232
   1233	if (nblank(dprintk(x)))
   1234		len = snprintf(buf, PAGE_SIZE, "dprintk\n");
   1235#ifdef AAC_DETAILED_STATUS_INFO
   1236	len += scnprintf(buf + len, PAGE_SIZE - len,
   1237			 "AAC_DETAILED_STATUS_INFO\n");
   1238#endif
   1239	if (dev->raw_io_interface && dev->raw_io_64)
   1240		len += scnprintf(buf + len, PAGE_SIZE - len,
   1241				 "SAI_READ_CAPACITY_16\n");
   1242	if (dev->jbod)
   1243		len += scnprintf(buf + len, PAGE_SIZE - len,
   1244				 "SUPPORTED_JBOD\n");
   1245	if (dev->supplement_adapter_info.supported_options2 &
   1246		AAC_OPTION_POWER_MANAGEMENT)
   1247		len += scnprintf(buf + len, PAGE_SIZE - len,
   1248				 "SUPPORTED_POWER_MANAGEMENT\n");
   1249	if (dev->msi)
   1250		len += scnprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
   1251	return len;
   1252}
   1253
   1254static ssize_t aac_show_kernel_version(struct device *device,
   1255				       struct device_attribute *attr,
   1256				       char *buf)
   1257{
   1258	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
   1259	int len, tmp;
   1260
   1261	tmp = le32_to_cpu(dev->adapter_info.kernelrev);
   1262	len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
   1263	  tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
   1264	  le32_to_cpu(dev->adapter_info.kernelbuild));
   1265	return len;
   1266}
   1267
   1268static ssize_t aac_show_monitor_version(struct device *device,
   1269					struct device_attribute *attr,
   1270					char *buf)
   1271{
   1272	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
   1273	int len, tmp;
   1274
   1275	tmp = le32_to_cpu(dev->adapter_info.monitorrev);
   1276	len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
   1277	  tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
   1278	  le32_to_cpu(dev->adapter_info.monitorbuild));
   1279	return len;
   1280}
   1281
   1282static ssize_t aac_show_bios_version(struct device *device,
   1283				     struct device_attribute *attr,
   1284				     char *buf)
   1285{
   1286	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
   1287	int len, tmp;
   1288
   1289	tmp = le32_to_cpu(dev->adapter_info.biosrev);
   1290	len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
   1291	  tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
   1292	  le32_to_cpu(dev->adapter_info.biosbuild));
   1293	return len;
   1294}
   1295
   1296static ssize_t aac_show_driver_version(struct device *device,
   1297					struct device_attribute *attr,
   1298					char *buf)
   1299{
   1300	return snprintf(buf, PAGE_SIZE, "%s\n", aac_driver_version);
   1301}
   1302
   1303static ssize_t aac_show_serial_number(struct device *device,
   1304			       struct device_attribute *attr, char *buf)
   1305{
   1306	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
   1307	int len = 0;
   1308
   1309	if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
   1310		len = snprintf(buf, 16, "%06X\n",
   1311		  le32_to_cpu(dev->adapter_info.serial[0]));
   1312	if (len &&
   1313	  !memcmp(&dev->supplement_adapter_info.mfg_pcba_serial_no[
   1314	    sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no)-len],
   1315	  buf, len-1))
   1316		len = snprintf(buf, 16, "%.*s\n",
   1317		  (int)sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no),
   1318		  dev->supplement_adapter_info.mfg_pcba_serial_no);
   1319
   1320	return min(len, 16);
   1321}
   1322
   1323static ssize_t aac_show_max_channel(struct device *device,
   1324				    struct device_attribute *attr, char *buf)
   1325{
   1326	return snprintf(buf, PAGE_SIZE, "%d\n",
   1327	  class_to_shost(device)->max_channel);
   1328}
   1329
   1330static ssize_t aac_show_max_id(struct device *device,
   1331			       struct device_attribute *attr, char *buf)
   1332{
   1333	return snprintf(buf, PAGE_SIZE, "%d\n",
   1334	  class_to_shost(device)->max_id);
   1335}
   1336
   1337static ssize_t aac_store_reset_adapter(struct device *device,
   1338				       struct device_attribute *attr,
   1339				       const char *buf, size_t count)
   1340{
   1341	int retval = -EACCES;
   1342
   1343	if (!capable(CAP_SYS_ADMIN))
   1344		return retval;
   1345
   1346	retval = aac_reset_adapter(shost_priv(class_to_shost(device)),
   1347					buf[0] == '!', IOP_HWSOFT_RESET);
   1348	if (retval >= 0)
   1349		retval = count;
   1350
   1351	return retval;
   1352}
   1353
   1354static ssize_t aac_show_reset_adapter(struct device *device,
   1355				      struct device_attribute *attr,
   1356				      char *buf)
   1357{
   1358	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
   1359	int len, tmp;
   1360
   1361	tmp = aac_adapter_check_health(dev);
   1362	if ((tmp == 0) && dev->in_reset)
   1363		tmp = -EBUSY;
   1364	len = snprintf(buf, PAGE_SIZE, "0x%x\n", tmp);
   1365	return len;
   1366}
   1367
   1368static struct device_attribute aac_model = {
   1369	.attr = {
   1370		.name = "model",
   1371		.mode = S_IRUGO,
   1372	},
   1373	.show = aac_show_model,
   1374};
   1375static struct device_attribute aac_vendor = {
   1376	.attr = {
   1377		.name = "vendor",
   1378		.mode = S_IRUGO,
   1379	},
   1380	.show = aac_show_vendor,
   1381};
   1382static struct device_attribute aac_flags = {
   1383	.attr = {
   1384		.name = "flags",
   1385		.mode = S_IRUGO,
   1386	},
   1387	.show = aac_show_flags,
   1388};
   1389static struct device_attribute aac_kernel_version = {
   1390	.attr = {
   1391		.name = "hba_kernel_version",
   1392		.mode = S_IRUGO,
   1393	},
   1394	.show = aac_show_kernel_version,
   1395};
   1396static struct device_attribute aac_monitor_version = {
   1397	.attr = {
   1398		.name = "hba_monitor_version",
   1399		.mode = S_IRUGO,
   1400	},
   1401	.show = aac_show_monitor_version,
   1402};
   1403static struct device_attribute aac_bios_version = {
   1404	.attr = {
   1405		.name = "hba_bios_version",
   1406		.mode = S_IRUGO,
   1407	},
   1408	.show = aac_show_bios_version,
   1409};
   1410static struct device_attribute aac_lld_version = {
   1411	.attr = {
   1412		.name = "driver_version",
   1413		.mode = 0444,
   1414	},
   1415	.show = aac_show_driver_version,
   1416};
   1417static struct device_attribute aac_serial_number = {
   1418	.attr = {
   1419		.name = "serial_number",
   1420		.mode = S_IRUGO,
   1421	},
   1422	.show = aac_show_serial_number,
   1423};
   1424static struct device_attribute aac_max_channel = {
   1425	.attr = {
   1426		.name = "max_channel",
   1427		.mode = S_IRUGO,
   1428	},
   1429	.show = aac_show_max_channel,
   1430};
   1431static struct device_attribute aac_max_id = {
   1432	.attr = {
   1433		.name = "max_id",
   1434		.mode = S_IRUGO,
   1435	},
   1436	.show = aac_show_max_id,
   1437};
   1438static struct device_attribute aac_reset = {
   1439	.attr = {
   1440		.name = "reset_host",
   1441		.mode = S_IWUSR|S_IRUGO,
   1442	},
   1443	.store = aac_store_reset_adapter,
   1444	.show = aac_show_reset_adapter,
   1445};
   1446
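       /* Host-level sysfs attributes, exposed under /sys/class/scsi_host/host<n>/. */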
   1447static struct attribute *aac_host_attrs[] = {
   1448	&aac_model.attr,
   1449	&aac_vendor.attr,
   1450	&aac_flags.attr,
   1451	&aac_kernel_version.attr,
   1452	&aac_monitor_version.attr,
   1453	&aac_bios_version.attr,
   1454	&aac_lld_version.attr,
   1455	&aac_serial_number.attr,
   1456	&aac_max_channel.attr,
   1457	&aac_max_id.attr,
   1458	&aac_reset.attr,
   1459	NULL
   1460};
   1461
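       /* Generates aac_host_groups from aac_host_attrs for the host template below. */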
   1462ATTRIBUTE_GROUPS(aac_host);
   1463
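       /* Non-static wrapper so other parts of the driver can reuse the sysfs show routine. */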
   1464ssize_t aac_get_serial_number(struct device *device, char *buf)
   1465{
   1466	return aac_show_serial_number(device, &aac_serial_number, buf);
   1467}
   1468
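       /* File operations for the "aac" management character device (see aac_init_char()). */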
   1469static const struct file_operations aac_cfg_fops = {
   1470	.owner		= THIS_MODULE,
   1471	.unlocked_ioctl	= aac_cfg_ioctl,
   1472#ifdef CONFIG_COMPAT
   1473	.compat_ioctl   = aac_cfg_ioctl,
   1474#endif
   1475	.open		= aac_cfg_open,
   1476	.llseek		= noop_llseek,
   1477};
   1478
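       /*
        * SCSI host template shared by all supported adapters: error-handler
        * callbacks, queueing limits and per-command private data size
        * (struct aac_cmd_priv).
        */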
   1479static struct scsi_host_template aac_driver_template = {
   1480	.module				= THIS_MODULE,
   1481	.name				= "AAC",
   1482	.proc_name			= AAC_DRIVERNAME,
   1483	.info				= aac_info,
   1484	.ioctl				= aac_ioctl,
   1485#ifdef CONFIG_COMPAT
   1486	.compat_ioctl			= aac_ioctl,
   1487#endif
   1488	.queuecommand			= aac_queuecommand,
   1489	.bios_param			= aac_biosparm,
   1490	.shost_groups			= aac_host_groups,
   1491	.slave_configure		= aac_slave_configure,
   1492	.change_queue_depth		= aac_change_queue_depth,
   1493	.sdev_groups			= aac_dev_groups,
   1494	.eh_abort_handler		= aac_eh_abort,
   1495	.eh_device_reset_handler	= aac_eh_dev_reset,
   1496	.eh_target_reset_handler	= aac_eh_target_reset,
   1497	.eh_bus_reset_handler		= aac_eh_bus_reset,
   1498	.eh_host_reset_handler		= aac_eh_host_reset,
   1499	.can_queue			= AAC_NUM_IO_FIB,
   1500	.this_id			= MAXIMUM_NUM_CONTAINERS,
   1501	.sg_tablesize			= 16,
   1502	.max_sectors			= 128,
   1503#if (AAC_NUM_IO_FIB > 256)
   1504	.cmd_per_lun			= 256,
   1505#else
   1506	.cmd_per_lun			= AAC_NUM_IO_FIB,
   1507#endif
   1508	.emulated			= 1,
   1509	.no_write_same			= 1,
   1510	.cmd_size			= sizeof(struct aac_cmd_priv),
   1511};
   1512
   1513static void __aac_shutdown(struct aac_dev * aac)
   1514{
   1515	int i;
   1516
   1517	mutex_lock(&aac->ioctl_mutex);
   1518	aac->adapter_shutdown = 1;
   1519	mutex_unlock(&aac->ioctl_mutex);
   1520
   1521	if (aac->aif_thread) {
   1522		int i;
   1523		/* Clear out events first */
   1524		for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) {
   1525			struct fib *fib = &aac->fibs[i];
   1526			if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
   1527			    (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
   1528				complete(&fib->event_wait);
   1529		}
   1530		kthread_stop(aac->thread);
   1531		aac->thread = NULL;
   1532	}
   1533
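       	/* Ask the adapter firmware to shut down before interrupts are torn down. */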
   1534	aac_send_shutdown(aac);
   1535
   1536	aac_adapter_disable_int(aac);
   1537
   1538	if (aac_is_src(aac)) {
   1539		if (aac->max_msix > 1) {
   1540			for (i = 0; i < aac->max_msix; i++) {
   1541				free_irq(pci_irq_vector(aac->pdev, i),
   1542					 &(aac->aac_msix[i]));
   1543			}
   1544		} else {
   1545			free_irq(aac->pdev->irq,
   1546				 &(aac->aac_msix[0]));
   1547		}
   1548	} else {
   1549		free_irq(aac->pdev->irq, aac);
   1550	}
   1551	if (aac->msi)
   1552		pci_disable_msi(aac->pdev);
   1553	else if (aac->max_msix > 1)
   1554		pci_disable_msix(aac->pdev);
   1555}
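
       /*
        * Register the "aac" management character device; passing 0 to
        * register_chrdev() requests a dynamically allocated major number.
        */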
   1556static void aac_init_char(void)
   1557{
   1558	aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops);
   1559	if (aac_cfg_major < 0) {
   1560		pr_err("aacraid: unable to register \"aac\" device.\n");
   1561	}
   1562}
   1563
   1564void aac_reinit_aif(struct aac_dev *aac, unsigned int index)
   1565{
   1566	/*
   1567	 * Firmware may send AIF messages very early, before the driver is
   1568	 * fully ready to process them, in which case they are ignored. Send
   1569	 * an AIF to the firmware now so that any unprocessed events can be
   1570	 * handled.
   1571	 */
   1572	if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
   1573		aac_intr_normal(aac, 0, 2, 0, NULL);
   1574
   1575}
   1576
   1577static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
   1578{
   1579	unsigned index = id->driver_data;
   1580	struct Scsi_Host *shost;
   1581	struct aac_dev *aac;
   1582	struct list_head *insert = &aac_devices;
   1583	int error;
   1584	int unique_id = 0;
   1585	u64 dmamask;
   1586	int mask_bits = 0;
   1587	extern int aac_sync_mode;
   1588
   1589	/*
   1590	 * Only series 7 needs freset.
   1591	 */
   1592	if (pdev->device == PMC_DEVICE_S7)
   1593		pdev->needs_freset = 1;
   1594
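       	/*
       	 * Walk the ordered adapter list to pick the lowest free id and the
       	 * position at which to insert this adapter.
       	 */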
   1595	list_for_each_entry(aac, &aac_devices, entry) {
   1596		if (aac->id > unique_id)
   1597			break;
   1598		insert = &aac->entry;
   1599		unique_id++;
   1600	}
   1601
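       	/* Keep the PCIe link out of ASPM L0s/L1 and clock PM states. */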
   1602	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
   1603			       PCIE_LINK_STATE_CLKPM);
   1604
   1605	error = pci_enable_device(pdev);
   1606	if (error)
   1607		goto out;
   1608
   1609	if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
   1610		error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
   1611		if (error) {
   1612			dev_err(&pdev->dev, "PCI 32-bit dma mask set failed\n");
   1613			goto out_disable_pdev;
   1614		}
   1615	}
   1616
   1617	/*
   1618	 * If the 31-bit quirk is set, the adapter requires the adapter-to-driver
   1619	 * communication memory to be allocated below the 2 GB boundary.
   1620	 */
   1621	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) {
   1622		dmamask = DMA_BIT_MASK(31);
   1623		mask_bits = 31;
   1624	} else {
   1625		dmamask = DMA_BIT_MASK(32);
   1626		mask_bits = 32;
   1627	}
   1628
   1629	error = dma_set_coherent_mask(&pdev->dev, dmamask);
   1630	if (error) {
   1631		dev_err(&pdev->dev, "PCI %d-bit consistent dma mask set failed\n",
   1632				mask_bits);
   1633		goto out_disable_pdev;
   1634	}
   1635
   1636	pci_set_master(pdev);
   1637
   1638	shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
   1639	if (!shost) {
   1640		error = -ENOMEM;
   1641		goto out_disable_pdev;
   1642	}
   1643
   1644	shost->irq = pdev->irq;
   1645	shost->unique_id = unique_id;
   1646	shost->max_cmd_len = 16;
   1647
   1648	if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT)
   1649		aac_init_char();
   1650
   1651	aac = (struct aac_dev *)shost->hostdata;
   1652	aac->base_start = pci_resource_start(pdev, 0);
   1653	aac->scsi_host_ptr = shost;
   1654	aac->pdev = pdev;
   1655	aac->name = aac_driver_template.name;
   1656	aac->id = shost->unique_id;
   1657	aac->cardtype = index;
   1658	INIT_LIST_HEAD(&aac->entry);
   1659
   1660	if (aac_reset_devices || reset_devices)
   1661		aac->init_reset = true;
   1662
   1663	aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
   1664			    sizeof(struct fib),
   1665			    GFP_KERNEL);
   1666	if (!aac->fibs) {
   1667		error = -ENOMEM;
   1668		goto out_free_host;
   1669	}
   1670
   1671	spin_lock_init(&aac->fib_lock);
   1672
   1673	mutex_init(&aac->ioctl_mutex);
   1674	mutex_init(&aac->scan_mutex);
   1675
   1676	INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
   1677	INIT_DELAYED_WORK(&aac->src_reinit_aif_worker,
   1678				aac_src_reinit_aif_worker);
   1679	/*
   1680	 *	Map in the registers from the adapter.
   1681	 */
   1682	aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
   1683	if ((*aac_drivers[index].init)(aac)) {
   1684		error = -ENODEV;
   1685		goto out_unmap;
   1686	}
   1687
   1688	if (aac->sync_mode) {
   1689		if (aac_sync_mode)
   1690			printk(KERN_INFO "%s%d: Sync. mode enforced "
   1691				"by driver parameter. This will cause "
   1692				"a significant performance decrease!\n",
   1693				aac->name,
   1694				aac->id);
   1695		else
   1696			printk(KERN_INFO "%s%d: Async. mode not supported "
   1697				"by current driver, sync. mode enforced."
   1698				"\nPlease update driver to get full performance.\n",
   1699				aac->name,
   1700				aac->id);
   1701	}
   1702
   1703	/*
   1704	 *	Start any kernel threads needed
   1705	 */
   1706	aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME);
   1707	if (IS_ERR(aac->thread)) {
   1708		printk(KERN_ERR "aacraid: Unable to create command thread.\n");
   1709		error = PTR_ERR(aac->thread);
   1710		aac->thread = NULL;
   1711		goto out_deinit;
   1712	}
   1713
   1714	aac->maximum_num_channels = aac_drivers[index].channels;
   1715	error = aac_get_adapter_info(aac);
   1716	if (error < 0)
   1717		goto out_deinit;
   1718
   1719	/*
   1720	 * Let's override negotiations and drop the maximum SG limit to 34
   1721	 */
   1722	if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
   1723			(shost->sg_tablesize > 34)) {
   1724		shost->sg_tablesize = 34;
   1725		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
   1726	}
   1727
   1728	if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
   1729			(shost->sg_tablesize > 17)) {
   1730		shost->sg_tablesize = 17;
   1731		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
   1732	}
   1733
   1734	if (aac->adapter_info.options & AAC_OPT_NEW_COMM)
   1735		shost->max_segment_size = shost->max_sectors << 9;
   1736	else
   1737		shost->max_segment_size = 65536;
   1738
   1739	/*
   1740	 * Firmware printf works only with older firmware.
   1741	 */
   1742	if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
   1743		aac->printf_enabled = 1;
   1744	else
   1745		aac->printf_enabled = 0;
   1746
   1747	/*
   1748	 * max_channel is the number of physical channels plus one virtual channel;
   1749	 * all containers live on virtual channel 0 (CONTAINER_CHANNEL), and
   1750	 * physical channels are addressed by their actual physical number + 1.
   1751	 */
   1752	if (aac->nondasd_support || expose_physicals || aac->jbod)
   1753		shost->max_channel = aac->maximum_num_channels;
   1754	else
   1755		shost->max_channel = 0;
   1756
   1757	aac_get_config_status(aac, 0);
   1758	aac_get_containers(aac);
   1759	list_add(&aac->entry, insert);
   1760
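       	/*
       	 * Size the target id space to cover both containers and physical
       	 * devices, keeping this_id just past the last real target id.
       	 */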
   1761	shost->max_id = aac->maximum_num_containers;
   1762	if (shost->max_id < aac->maximum_num_physicals)
   1763		shost->max_id = aac->maximum_num_physicals;
   1764	if (shost->max_id < MAXIMUM_NUM_CONTAINERS)
   1765		shost->max_id = MAXIMUM_NUM_CONTAINERS;
   1766	else
   1767		shost->this_id = shost->max_id;
   1768
   1769	if (!aac->sa_firmware && aac_drivers[index].quirks & AAC_QUIRK_SRC)
   1770		aac_intr_normal(aac, 0, 2, 0, NULL);
   1771
   1772	/*
   1773	 * dmb - we may need to move the setting of these parameters somewhere
   1774	 * else once we get a FIB that can report the actual numbers.
   1775	 */
   1776	shost->max_lun = AAC_MAX_LUN;
   1777
   1778	pci_set_drvdata(pdev, shost);
   1779
   1780	error = scsi_add_host(shost, &pdev->dev);
   1781	if (error)
   1782		goto out_deinit;
   1783
   1784	aac_scan_host(aac);
   1785
   1786	pci_enable_pcie_error_reporting(pdev);
   1787	pci_save_state(pdev);
   1788
   1789	return 0;
   1790
   1791 out_deinit:
   1792	__aac_shutdown(aac);
   1793 out_unmap:
   1794	aac_fib_map_free(aac);
   1795	if (aac->comm_addr)
   1796		dma_free_coherent(&aac->pdev->dev, aac->comm_size,
   1797				  aac->comm_addr, aac->comm_phys);
   1798	kfree(aac->queues);
   1799	aac_adapter_ioremap(aac, 0);
   1800	kfree(aac->fibs);
   1801	kfree(aac->fsa_dev);
   1802 out_free_host:
   1803	scsi_host_put(shost);
   1804 out_disable_pdev:
   1805	pci_disable_device(pdev);
   1806 out:
   1807	return error;
   1808}
   1809
   1810static void aac_release_resources(struct aac_dev *aac)
   1811{
   1812	aac_adapter_disable_int(aac);
   1813	aac_free_irq(aac);
   1814}
   1815
   1816static int aac_acquire_resources(struct aac_dev *dev)
   1817{
   1818	unsigned long status;
   1819	/*
   1820	 *	First clear out all interrupts.  Then enable the ones that we
   1821	 *	can handle.
   1822	 */
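       	/* A read of 0xffffffff indicates the adapter is not yet responding on the bus. */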
   1823	while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)
   1824		|| status == 0xffffffff)
   1825			msleep(20);
   1826
   1827	aac_adapter_disable_int(dev);
   1828	aac_adapter_enable_int(dev);
   1829
   1830
   1831	if (aac_is_src(dev))
   1832		aac_define_int_mode(dev);
   1833
   1834	if (dev->msi_enabled)
   1835		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
   1836
   1837	if (aac_acquire_irq(dev))
   1838		goto error_iounmap;
   1839
   1840	aac_adapter_enable_int(dev);
   1841
   1842	/* max_msix may change after EEH;
   1843	 * re-assign vectors to the fibs.
   1844	 */
   1845	aac_fib_vector_assign(dev);
   1846
   1847	if (!dev->sync_mode) {
   1848	/* After EEH recovery or suspend/resume, the max_msix count
   1849	 * may change, so update it in the init structure as well.
   1850		 */
   1851		dev->init->r7.no_of_msix_vectors = cpu_to_le32(dev->max_msix);
   1852		aac_adapter_start(dev);
   1853	}
   1854	return 0;
   1855
   1856error_iounmap:
   1857	return -1;
   1858
   1859}
   1860
   1861static int __maybe_unused aac_suspend(struct device *dev)
   1862{
   1863	struct Scsi_Host *shost = dev_get_drvdata(dev);
   1864	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
   1865
   1866	scsi_host_block(shost);
   1867	aac_cancel_rescan_worker(aac);
   1868	aac_send_shutdown(aac);
   1869
   1870	aac_release_resources(aac);
   1871
   1872	return 0;
   1873}
   1874
   1875static int __maybe_unused aac_resume(struct device *dev)
   1876{
   1877	struct Scsi_Host *shost = dev_get_drvdata(dev);
   1878	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
   1879
   1880	if (aac_acquire_resources(aac))
   1881		goto fail_device;
   1882	/*
   1883	 * Reset this flag to unblock ioctl(); it was set in
   1884	 * aac_send_shutdown() to block ioctls from the upper layer.
   1885	 */
   1886	aac->adapter_shutdown = 0;
   1887	scsi_host_unblock(shost, SDEV_RUNNING);
   1888
   1889	return 0;
   1890
   1891fail_device:
   1892	printk(KERN_INFO "%s%d: resume failed.\n", aac->name, aac->id);
   1893	scsi_host_put(shost);
   1894	return -ENODEV;
   1895}
   1896
   1897static void aac_shutdown(struct pci_dev *dev)
   1898{
   1899	struct Scsi_Host *shost = pci_get_drvdata(dev);
   1900
   1901	scsi_host_block(shost);
   1902	__aac_shutdown((struct aac_dev *)shost->hostdata);
   1903}
   1904
   1905static void aac_remove_one(struct pci_dev *pdev)
   1906{
   1907	struct Scsi_Host *shost = pci_get_drvdata(pdev);
   1908	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
   1909
   1910	aac_cancel_rescan_worker(aac);
   1911	scsi_remove_host(shost);
   1912
   1913	__aac_shutdown(aac);
   1914	aac_fib_map_free(aac);
   1915	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
   1916			  aac->comm_phys);
   1917	kfree(aac->queues);
   1918
   1919	aac_adapter_ioremap(aac, 0);
   1920
   1921	kfree(aac->fibs);
   1922	kfree(aac->fsa_dev);
   1923
   1924	list_del(&aac->entry);
   1925	scsi_host_put(shost);
   1926	pci_disable_device(pdev);
   1927	if (list_empty(&aac_devices)) {
   1928		unregister_chrdev(aac_cfg_major, "aac");
   1929		aac_cfg_major = AAC_CHARDEV_NEEDS_REINIT;
   1930	}
   1931}
   1932
   1933static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
   1934					pci_channel_state_t error)
   1935{
   1936	struct Scsi_Host *shost = pci_get_drvdata(pdev);
   1937	struct aac_dev *aac = shost_priv(shost);
   1938
   1939	dev_err(&pdev->dev, "aacraid: PCI error detected %x\n", error);
   1940
   1941	switch (error) {
   1942	case pci_channel_io_normal:
   1943		return PCI_ERS_RESULT_CAN_RECOVER;
   1944	case pci_channel_io_frozen:
   1945		aac->handle_pci_error = 1;
   1946
   1947		scsi_host_block(shost);
   1948		aac_cancel_rescan_worker(aac);
   1949		scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
   1950		aac_release_resources(aac);
   1951
   1952		pci_disable_pcie_error_reporting(pdev);
   1953		aac_adapter_ioremap(aac, 0);
   1954
   1955		return PCI_ERS_RESULT_NEED_RESET;
   1956	case pci_channel_io_perm_failure:
   1957		aac->handle_pci_error = 1;
   1958
   1959		scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
   1960		return PCI_ERS_RESULT_DISCONNECT;
   1961	}
   1962
   1963	return PCI_ERS_RESULT_NEED_RESET;
   1964}
   1965
   1966static pci_ers_result_t aac_pci_mmio_enabled(struct pci_dev *pdev)
   1967{
   1968	dev_err(&pdev->dev, "aacraid: PCI error - mmio enabled\n");
   1969	return PCI_ERS_RESULT_NEED_RESET;
   1970}
   1971
   1972static pci_ers_result_t aac_pci_slot_reset(struct pci_dev *pdev)
   1973{
   1974	dev_err(&pdev->dev, "aacraid: PCI error - slot reset\n");
   1975	pci_restore_state(pdev);
   1976	if (pci_enable_device(pdev)) {
   1977		dev_warn(&pdev->dev,
   1978			"aacraid: failed to enable slave\n");
   1979		goto fail_device;
   1980	}
   1981
   1982	pci_set_master(pdev);
   1983
   1984	if (pci_enable_device_mem(pdev)) {
   1985		dev_err(&pdev->dev, "pci_enable_device_mem failed\n");
   1986		goto fail_device;
   1987	}
   1988
   1989	return PCI_ERS_RESULT_RECOVERED;
   1990
   1991fail_device:
   1992	dev_err(&pdev->dev, "aacraid: PCI error - slot reset failed\n");
   1993	return PCI_ERS_RESULT_DISCONNECT;
   1994}
   1995
   1996
   1997static void aac_pci_resume(struct pci_dev *pdev)
   1998{
   1999	struct Scsi_Host *shost = pci_get_drvdata(pdev);
   2000	struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);
   2001
   2002	if (aac_adapter_ioremap(aac, aac->base_size)) {
   2003
   2004		dev_err(&pdev->dev, "aacraid: ioremap failed\n");
   2005		/* remap failed, go back ... */
   2006		aac->comm_interface = AAC_COMM_PRODUCER;
   2007		if (aac_adapter_ioremap(aac, AAC_MIN_FOOTPRINT_SIZE)) {
   2008			dev_warn(&pdev->dev,
   2009				"aacraid: unable to map adapter.\n");
   2010
   2011			return;
   2012		}
   2013	}
   2014
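       	/* Fixed delay, presumably to let the controller firmware restart after the slot reset. */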
   2015	msleep(10000);
   2016
   2017	aac_acquire_resources(aac);
   2018
   2019	/*
   2020	 * Reset this flag to unblock ioctl(); it was set in
   2021	 * aac_send_shutdown() to block ioctls from the upper layer.
   2022	 */
   2023	aac->adapter_shutdown = 0;
   2024	aac->handle_pci_error = 0;
   2025
   2026	scsi_host_unblock(shost, SDEV_RUNNING);
   2027	aac_scan_host(aac);
   2028	pci_save_state(pdev);
   2029
   2030	dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
   2031}
   2032
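       /* PCI Advanced Error Reporting (AER) recovery callbacks. */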
   2033static struct pci_error_handlers aac_pci_err_handler = {
   2034	.error_detected		= aac_pci_error_detected,
   2035	.mmio_enabled		= aac_pci_mmio_enabled,
   2036	.slot_reset		= aac_pci_slot_reset,
   2037	.resume			= aac_pci_resume,
   2038};
   2039
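       /* System suspend/resume hooks, handed to the PCI core via driver.pm below. */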
   2040static SIMPLE_DEV_PM_OPS(aac_pm_ops, aac_suspend, aac_resume);
   2041
   2042static struct pci_driver aac_pci_driver = {
   2043	.name		= AAC_DRIVERNAME,
   2044	.id_table	= aac_pci_tbl,
   2045	.probe		= aac_probe_one,
   2046	.remove		= aac_remove_one,
   2047	.driver.pm      = &aac_pm_ops,
   2048	.shutdown	= aac_shutdown,
   2049	.err_handler    = &aac_pci_err_handler,
   2050};
   2051
   2052static int __init aac_init(void)
   2053{
   2054	int error;
   2055
   2056	printk(KERN_INFO "Adaptec %s driver %s\n",
   2057	  AAC_DRIVERNAME, aac_driver_version);
   2058
   2059	error = pci_register_driver(&aac_pci_driver);
   2060	if (error < 0)
   2061		return error;
   2062
   2063	aac_init_char();
   2064
   2065
   2066	return 0;
   2067}
   2068
   2069static void __exit aac_exit(void)
   2070{
   2071	if (aac_cfg_major > -1)
   2072		unregister_chrdev(aac_cfg_major, "aac");
   2073	pci_unregister_driver(&aac_pci_driver);
   2074}
   2075
   2076module_init(aac_init);
   2077module_exit(aac_exit);