cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ibmvscsi.c (71744B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/* ------------------------------------------------------------
      3 * ibmvscsi.c
      4 * (C) Copyright IBM Corporation 1994, 2004
      5 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
      6 *          Santiago Leon (santil@us.ibm.com)
      7 *          Dave Boutcher (sleddog@us.ibm.com)
      8 *
      9 * ------------------------------------------------------------
     10 * Emulation of a SCSI host adapter for Virtual I/O devices
     11 *
     12 * This driver supports the SCSI adapter implemented by the IBM
     13 * Power5 firmware.  That SCSI adapter is not a physical adapter,
     14 * but allows Linux SCSI peripheral drivers to directly
     15 * access devices in another logical partition on the physical system.
     16 *
     17 * The virtual adapter(s) are present in the open firmware device
     18 * tree just like real adapters.
     19 *
     20 * One of the capabilities provided on these systems is the ability
     21 * to DMA between partitions.  The architecture states that for VSCSI,
     22 * the server side is allowed to DMA to and from the client.  The client
     23 * is never trusted to DMA to or from the server directly.
     24 *
     25 * Messages are sent between partitions on a "Command/Response Queue" 
      26 * (CRQ), which is just a buffer of 16 byte entries in the receiver's memory.
     27 * Senders cannot access the buffer directly, but send messages by
     28 * making a hypervisor call and passing in the 16 bytes.  The hypervisor
     29 * puts the message in the next 16 byte space in round-robin fashion,
     30 * turns on the high order bit of the message (the valid bit), and 
     31 * generates an interrupt to the receiver (if interrupts are turned on.) 
     32 * The receiver just turns off the valid bit when they have copied out
     33 * the message.
     34 *
     35 * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
     36 * (IU) (as defined in the T10 standard available at www.t10.org), gets 
     37 * a DMA address for the message, and sends it to the server as the
     38 * payload of a CRQ message.  The server DMAs the SRP IU and processes it,
     39 * including doing any additional data transfers.  When it is done, it
     40 * DMAs the SRP response back to the same address as the request came from,
     41 * and sends a CRQ message back to inform the client that the request has
     42 * completed.
     43 *
     44 * TODO: This is currently pretty tied to the IBM pSeries hypervisor
     45 * interfaces.  It would be really nice to abstract this above an RDMA
     46 * layer.
     47 */
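        /*
         * Informal sketch of the request lifecycle as implemented below: the
         * client allocates an srp_event_struct from the event pool, fills in
         * an SRP IU plus a viosrp_crq header carrying the IU's DMA address
         * and length, and hands that 16-byte CRQ entry to the hypervisor via
         * H_SEND_CRQ.  The server DMAs the IU, performs the I/O, DMAs an SRP
         * response back into the same IU buffer, and sends a completion CRQ
         * entry carrying the request's tag, which ibmvscsi_handle_crq() uses
         * to find the srp_event_struct and invoke its done callback.
         */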
     48
     49#include <linux/module.h>
     50#include <linux/moduleparam.h>
     51#include <linux/dma-mapping.h>
     52#include <linux/delay.h>
     53#include <linux/slab.h>
     54#include <linux/of.h>
     55#include <linux/pm.h>
     56#include <linux/kthread.h>
     57#include <asm/firmware.h>
     58#include <asm/vio.h>
     59#include <scsi/scsi.h>
     60#include <scsi/scsi_cmnd.h>
     61#include <scsi/scsi_host.h>
     62#include <scsi/scsi_device.h>
     63#include <scsi/scsi_transport_srp.h>
     64#include "ibmvscsi.h"
     65
     66/* The values below are somewhat arbitrary default values, but 
     67 * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
     68 * Note that there are 3 bits of channel value, 6 bits of id, and
     69 * 5 bits of LUN.
     70 */
     71static int max_id = 64;
     72static int max_channel = 3;
     73static int init_timeout = 300;
     74static int login_timeout = 60;
     75static int info_timeout = 30;
     76static int abort_timeout = 60;
     77static int reset_timeout = 60;
     78static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
     79static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
     80static int fast_fail = 1;
     81static int client_reserve = 1;
     82static char partition_name[96] = "UNKNOWN";
     83static unsigned int partition_number = -1;
     84static LIST_HEAD(ibmvscsi_head);
     85static DEFINE_SPINLOCK(ibmvscsi_driver_lock);
     86
     87static struct scsi_transport_template *ibmvscsi_transport_template;
     88
     89#define IBMVSCSI_VERSION "1.5.9"
     90
     91MODULE_DESCRIPTION("IBM Virtual SCSI");
     92MODULE_AUTHOR("Dave Boutcher");
     93MODULE_LICENSE("GPL");
     94MODULE_VERSION(IBMVSCSI_VERSION);
     95
     96module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
     97MODULE_PARM_DESC(max_id, "Largest ID value for each channel [Default=64]");
     98module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
     99MODULE_PARM_DESC(max_channel, "Largest channel value [Default=3]");
    100module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
    101MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
    102module_param_named(max_requests, max_requests, int, S_IRUGO);
    103MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
    104module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
    105MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
    106module_param_named(client_reserve, client_reserve, int, S_IRUGO );
    107MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
    108
    109static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
    110				struct ibmvscsi_host_data *hostdata);
    111
    112/* ------------------------------------------------------------
    113 * Routines for managing the command/response queue
    114 */
    115/**
    116 * ibmvscsi_handle_event: - Interrupt handler for crq events
    117 * @irq:	number of irq to handle, not used
    118 * @dev_instance: ibmvscsi_host_data of host that received interrupt
    119 *
    120 * Disables interrupts and schedules srp_task
    121 * Always returns IRQ_HANDLED
    122 */
    123static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
    124{
    125	struct ibmvscsi_host_data *hostdata =
    126	    (struct ibmvscsi_host_data *)dev_instance;
    127	vio_disable_interrupts(to_vio_dev(hostdata->dev));
    128	tasklet_schedule(&hostdata->srp_task);
    129	return IRQ_HANDLED;
    130}
    131
    132/**
    133 * ibmvscsi_release_crq_queue() - Deallocates data and unregisters CRQ
     134 * @queue:		crq_queue to be released and unregistered
    135 * @hostdata:		ibmvscsi_host_data of host
    136 * @max_requests:	maximum requests (unused)
    137 *
    138 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
    139 * the crq with the hypervisor.
    140 */
    141static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
    142				       struct ibmvscsi_host_data *hostdata,
    143				       int max_requests)
    144{
    145	long rc = 0;
    146	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
    147	free_irq(vdev->irq, (void *)hostdata);
    148	tasklet_kill(&hostdata->srp_task);
    149	do {
    150		if (rc)
    151			msleep(100);
    152		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
    153	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
    154	dma_unmap_single(hostdata->dev,
    155			 queue->msg_token,
    156			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
    157	free_page((unsigned long)queue->msgs);
    158}
    159
    160/**
    161 * crq_queue_next_crq: - Returns the next entry in message queue
    162 * @queue:	crq_queue to use
    163 *
    164 * Returns pointer to next entry in queue, or NULL if there are no new
     165 * entries in the CRQ.
    166 */
    167static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
    168{
    169	struct viosrp_crq *crq;
    170	unsigned long flags;
    171
    172	spin_lock_irqsave(&queue->lock, flags);
    173	crq = &queue->msgs[queue->cur];
    174	if (crq->valid != VIOSRP_CRQ_FREE) {
    175		if (++queue->cur == queue->size)
    176			queue->cur = 0;
    177
    178		/* Ensure the read of the valid bit occurs before reading any
    179		 * other bits of the CRQ entry
    180		 */
    181		rmb();
    182	} else
    183		crq = NULL;
    184	spin_unlock_irqrestore(&queue->lock, flags);
    185
    186	return crq;
    187}
    188
    189/**
    190 * ibmvscsi_send_crq: - Send a CRQ
    191 * @hostdata:	the adapter
    192 * @word1:	the first 64 bits of the data
    193 * @word2:	the second 64 bits of the data
    194 */
    195static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
    196			     u64 word1, u64 word2)
    197{
    198	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
    199
    200	/*
    201	 * Ensure the command buffer is flushed to memory before handing it
    202	 * over to the VIOS to prevent it from fetching any stale data.
    203	 */
    204	mb();
    205	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
    206}
    207
    208/**
    209 * ibmvscsi_task: - Process srps asynchronously
    210 * @data:	ibmvscsi_host_data of host
    211 */
    212static void ibmvscsi_task(void *data)
    213{
    214	struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
    215	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
    216	struct viosrp_crq *crq;
    217	int done = 0;
    218
    219	while (!done) {
    220		/* Pull all the valid messages off the CRQ */
    221		while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
    222			ibmvscsi_handle_crq(crq, hostdata);
    223			crq->valid = VIOSRP_CRQ_FREE;
    224			wmb();
    225		}
    226
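        		/* Re-enable interrupts and then poll the queue once more: an
        		 * entry that arrived after the drain loop above but before
        		 * interrupts were re-enabled would otherwise sit unhandled
        		 * until the next interrupt.
        		 */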
    227		vio_enable_interrupts(vdev);
    228		crq = crq_queue_next_crq(&hostdata->queue);
    229		if (crq != NULL) {
    230			vio_disable_interrupts(vdev);
    231			ibmvscsi_handle_crq(crq, hostdata);
    232			crq->valid = VIOSRP_CRQ_FREE;
    233			wmb();
    234		} else {
    235			done = 1;
    236		}
    237	}
    238}
    239
    240static void gather_partition_info(void)
    241{
    242	const char *ppartition_name;
    243	const __be32 *p_number_ptr;
    244
    245	/* Retrieve information about this partition */
    246	if (!of_root)
    247		return;
    248
    249	of_node_get(of_root);
    250
    251	ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL);
    252	if (ppartition_name)
    253		strlcpy(partition_name, ppartition_name,
    254				sizeof(partition_name));
    255	p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL);
    256	if (p_number_ptr)
    257		partition_number = of_read_number(p_number_ptr, 1);
    258	of_node_put(of_root);
    259}
    260
    261static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
    262{
    263	memset(&hostdata->madapter_info, 0x00,
    264			sizeof(hostdata->madapter_info));
    265
    266	dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
    267	strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
    268
    269	strncpy(hostdata->madapter_info.partition_name, partition_name,
    270			sizeof(hostdata->madapter_info.partition_name));
    271
    272	hostdata->madapter_info.partition_number =
    273					cpu_to_be32(partition_number);
    274
    275	hostdata->madapter_info.mad_version = cpu_to_be32(SRP_MAD_VERSION_1);
    276	hostdata->madapter_info.os_type = cpu_to_be32(SRP_MAD_OS_LINUX);
    277}
    278
    279/**
    280 * ibmvscsi_reset_crq_queue() - resets a crq after a failure
     281 * @queue:	crq_queue to be reset
    282 * @hostdata:	ibmvscsi_host_data of host
    283 */
    284static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
    285				    struct ibmvscsi_host_data *hostdata)
    286{
    287	int rc = 0;
    288	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
    289
    290	/* Close the CRQ */
    291	do {
    292		if (rc)
    293			msleep(100);
    294		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
    295	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
    296
    297	/* Clean out the queue */
    298	memset(queue->msgs, 0x00, PAGE_SIZE);
    299	queue->cur = 0;
    300
    301	set_adapter_info(hostdata);
    302
    303	/* And re-open it again */
    304	rc = plpar_hcall_norets(H_REG_CRQ,
    305				vdev->unit_address,
    306				queue->msg_token, PAGE_SIZE);
    307	if (rc == H_CLOSED) {
    308		/* Adapter is good, but other end is not ready */
    309		dev_warn(hostdata->dev, "Partner adapter not ready\n");
    310	} else if (rc != 0) {
    311		dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
    312	}
    313	return rc;
    314}
    315
    316/**
    317 * ibmvscsi_init_crq_queue() - Initializes and registers CRQ with hypervisor
    318 * @queue:		crq_queue to initialize and register
    319 * @hostdata:		ibmvscsi_host_data of host
    320 * @max_requests:	maximum requests (unused)
    321 *
    322 * Allocates a page for messages, maps it for dma, and registers
    323 * the crq with the hypervisor.
    324 * Returns zero on success.
    325 */
    326static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
    327				   struct ibmvscsi_host_data *hostdata,
    328				   int max_requests)
    329{
    330	int rc;
    331	int retrc;
    332	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
    333
    334	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
    335
    336	if (!queue->msgs)
    337		goto malloc_failed;
    338	queue->size = PAGE_SIZE / sizeof(*queue->msgs);
    339
    340	queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
    341					  queue->size * sizeof(*queue->msgs),
    342					  DMA_BIDIRECTIONAL);
    343
    344	if (dma_mapping_error(hostdata->dev, queue->msg_token))
    345		goto map_failed;
    346
    347	gather_partition_info();
    348	set_adapter_info(hostdata);
    349
    350	retrc = rc = plpar_hcall_norets(H_REG_CRQ,
    351				vdev->unit_address,
    352				queue->msg_token, PAGE_SIZE);
    353	if (rc == H_RESOURCE)
    354		/* maybe kexecing and resource is busy. try a reset */
    355		rc = ibmvscsi_reset_crq_queue(queue,
    356					      hostdata);
    357
    358	if (rc == H_CLOSED) {
    359		/* Adapter is good, but other end is not ready */
    360		dev_warn(hostdata->dev, "Partner adapter not ready\n");
    361		retrc = 0;
    362	} else if (rc != 0) {
    363		dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
    364		goto reg_crq_failed;
    365	}
    366
    367	queue->cur = 0;
    368	spin_lock_init(&queue->lock);
    369
    370	tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
    371		     (unsigned long)hostdata);
    372
    373	if (request_irq(vdev->irq,
    374			ibmvscsi_handle_event,
    375			0, "ibmvscsi", (void *)hostdata) != 0) {
    376		dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
    377			vdev->irq);
    378		goto req_irq_failed;
    379	}
    380
    381	rc = vio_enable_interrupts(vdev);
    382	if (rc != 0) {
    383		dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
    384		goto req_irq_failed;
    385	}
    386
    387	return retrc;
    388
    389      req_irq_failed:
    390	tasklet_kill(&hostdata->srp_task);
    391	rc = 0;
    392	do {
    393		if (rc)
    394			msleep(100);
    395		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
    396	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
    397      reg_crq_failed:
    398	dma_unmap_single(hostdata->dev,
    399			 queue->msg_token,
    400			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
    401      map_failed:
    402	free_page((unsigned long)queue->msgs);
    403      malloc_failed:
    404	return -1;
    405}
    406
    407/**
     408 * ibmvscsi_reenable_crq_queue() - reenables a crq after it has been disabled
     409 * @queue:	crq_queue to be re-enabled
    410 * @hostdata:	ibmvscsi_host_data of host
    411 */
    412static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
    413				       struct ibmvscsi_host_data *hostdata)
    414{
    415	int rc = 0;
    416	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
    417
    418	set_adapter_info(hostdata);
    419
    420	/* Re-enable the CRQ */
    421	do {
    422		if (rc)
    423			msleep(100);
    424		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
    425	} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
    426
    427	if (rc)
    428		dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
    429	return rc;
    430}
    431
    432/* ------------------------------------------------------------
    433 * Routines for the event pool and event structs
    434 */
    435/**
    436 * initialize_event_pool: - Allocates and initializes the event pool for a host
    437 * @pool:	event_pool to be initialized
    438 * @size:	Number of events in pool
    439 * @hostdata:	ibmvscsi_host_data who owns the event pool
    440 *
    441 * Returns zero on success.
    442 */
    443static int initialize_event_pool(struct event_pool *pool,
    444				 int size, struct ibmvscsi_host_data *hostdata)
    445{
    446	int i;
    447
    448	pool->size = size;
    449	pool->next = 0;
    450	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
    451	if (!pool->events)
    452		return -ENOMEM;
    453
    454	pool->iu_storage =
    455	    dma_alloc_coherent(hostdata->dev,
    456			       pool->size * sizeof(*pool->iu_storage),
    457			       &pool->iu_token, GFP_KERNEL);
    458	if (!pool->iu_storage) {
    459		kfree(pool->events);
    460		return -ENOMEM;
    461	}
    462
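        	/* Pre-build each event's CRQ header: the valid type, IU length and
        	 * the DMA address of the event's slot in iu_storage never change,
        	 * so a send only has to fill in the format, timeout and IU payload.
        	 */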
    463	for (i = 0; i < pool->size; ++i) {
    464		struct srp_event_struct *evt = &pool->events[i];
    465		memset(&evt->crq, 0x00, sizeof(evt->crq));
    466		atomic_set(&evt->free, 1);
    467		evt->crq.valid = VIOSRP_CRQ_CMD_RSP;
    468		evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
    469		evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
    470			sizeof(*evt->xfer_iu) * i);
    471		evt->xfer_iu = pool->iu_storage + i;
    472		evt->hostdata = hostdata;
    473		evt->ext_list = NULL;
    474		evt->ext_list_token = 0;
    475	}
    476
    477	return 0;
    478}
    479
    480/**
    481 * release_event_pool() - Frees memory of an event pool of a host
    482 * @pool:	event_pool to be released
     483 * @hostdata:	ibmvscsi_host_data who owns the event pool
    484 *
     485 * Warns if any events in the pool are still marked in use.
    486 */
    487static void release_event_pool(struct event_pool *pool,
    488			       struct ibmvscsi_host_data *hostdata)
    489{
    490	int i, in_use = 0;
    491	for (i = 0; i < pool->size; ++i) {
    492		if (atomic_read(&pool->events[i].free) != 1)
    493			++in_use;
    494		if (pool->events[i].ext_list) {
    495			dma_free_coherent(hostdata->dev,
    496				  SG_ALL * sizeof(struct srp_direct_buf),
    497				  pool->events[i].ext_list,
    498				  pool->events[i].ext_list_token);
    499		}
    500	}
    501	if (in_use)
    502		dev_warn(hostdata->dev, "releasing event pool with %d "
    503			 "events still in use?\n", in_use);
    504	kfree(pool->events);
    505	dma_free_coherent(hostdata->dev,
    506			  pool->size * sizeof(*pool->iu_storage),
    507			  pool->iu_storage, pool->iu_token);
    508}
    509
    510/**
    511 * valid_event_struct: - Determines if event is valid.
    512 * @pool:	event_pool that contains the event
    513 * @evt:	srp_event_struct to be checked for validity
    514 *
    515 * Returns zero if event is invalid, one otherwise.
    516*/
    517static int valid_event_struct(struct event_pool *pool,
    518				struct srp_event_struct *evt)
    519{
    520	int index = evt - pool->events;
    521	if (index < 0 || index >= pool->size)	/* outside of bounds */
    522		return 0;
    523	if (evt != pool->events + index)	/* unaligned */
    524		return 0;
    525	return 1;
    526}
    527
    528/**
    529 * free_event_struct() - Changes status of event to "free"
    530 * @pool:	event_pool that contains the event
    531 * @evt:	srp_event_struct to be modified
    532 */
    533static void free_event_struct(struct event_pool *pool,
    534				       struct srp_event_struct *evt)
    535{
    536	if (!valid_event_struct(pool, evt)) {
    537		dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
    538			"(not in pool %p)\n", evt, pool->events);
    539		return;
    540	}
    541	if (atomic_inc_return(&evt->free) != 1) {
    542		dev_err(evt->hostdata->dev, "Freeing event_struct %p "
    543			"which is not in use!\n", evt);
    544		return;
    545	}
    546}
    547
    548/**
    549 * get_event_struct() - Gets the next free event in pool
    550 * @pool:	event_pool that contains the events to be searched
    551 *
    552 * Returns the next event in "free" state, and NULL if none are free.
     553 * Note that no synchronization is done here; we assume the host_lock
     554 * will synchronize things.
    555*/
    556static struct srp_event_struct *get_event_struct(struct event_pool *pool)
    557{
    558	int i;
    559	int poolsize = pool->size;
    560	int offset = pool->next;
    561
    562	for (i = 0; i < poolsize; i++) {
    563		offset = (offset + 1) % poolsize;
    564		if (!atomic_dec_if_positive(&pool->events[offset].free)) {
    565			pool->next = offset;
    566			return &pool->events[offset];
    567		}
    568	}
    569
    570	printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
    571	return NULL;
    572}
    573
    574/**
    575 * init_event_struct: Initialize fields in an event struct that are always 
    576 *                    required.
    577 * @evt_struct: The event
    578 * @done:       Routine to call when the event is responded to
    579 * @format:     SRP or MAD format
    580 * @timeout:    timeout value set in the CRQ
    581 */
    582static void init_event_struct(struct srp_event_struct *evt_struct,
    583			      void (*done) (struct srp_event_struct *),
    584			      u8 format,
    585			      int timeout)
    586{
    587	evt_struct->cmnd = NULL;
    588	evt_struct->cmnd_done = NULL;
    589	evt_struct->sync_srp = NULL;
    590	evt_struct->crq.format = format;
    591	evt_struct->crq.timeout = cpu_to_be16(timeout);
    592	evt_struct->done = done;
    593}
    594
    595/* ------------------------------------------------------------
    596 * Routines for receiving SCSI responses from the hosting partition
    597 */
    598
    599/*
    600 * set_srp_direction: Set the fields in the srp related to data
    601 *     direction and number of buffers based on the direction in
    602 *     the scsi_cmnd and the number of buffers
    603 */
    604static void set_srp_direction(struct scsi_cmnd *cmd,
    605			      struct srp_cmd *srp_cmd, 
    606			      int numbuf)
    607{
    608	u8 fmt;
    609
    610	if (numbuf == 0)
    611		return;
    612	
    613	if (numbuf == 1)
    614		fmt = SRP_DATA_DESC_DIRECT;
    615	else {
    616		fmt = SRP_DATA_DESC_INDIRECT;
    617		numbuf = min(numbuf, MAX_INDIRECT_BUFS);
    618
    619		if (cmd->sc_data_direction == DMA_TO_DEVICE)
    620			srp_cmd->data_out_desc_cnt = numbuf;
    621		else
    622			srp_cmd->data_in_desc_cnt = numbuf;
    623	}
    624
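        	/* The SRP buffer-format byte packs the data-out descriptor format
        	 * in the high nibble and the data-in format in the low nibble;
        	 * unmap_cmd_data() below splits buf_fmt the same way.
        	 */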
    625	if (cmd->sc_data_direction == DMA_TO_DEVICE)
    626		srp_cmd->buf_fmt = fmt << 4;
    627	else
    628		srp_cmd->buf_fmt = fmt;
    629}
    630
    631/**
    632 * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
    633 * @cmd:	srp_cmd whose additional_data member will be unmapped
    634 * @evt_struct: the event
    635 * @dev:	device for which the memory is mapped
    636 */
    637static void unmap_cmd_data(struct srp_cmd *cmd,
    638			   struct srp_event_struct *evt_struct,
    639			   struct device *dev)
    640{
    641	u8 out_fmt, in_fmt;
    642
    643	out_fmt = cmd->buf_fmt >> 4;
    644	in_fmt = cmd->buf_fmt & ((1U << 4) - 1);
    645
    646	if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
    647		return;
    648
    649	if (evt_struct->cmnd)
    650		scsi_dma_unmap(evt_struct->cmnd);
    651}
    652
    653static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
    654		       struct srp_direct_buf *md)
    655{
    656	int i;
    657	struct scatterlist *sg;
    658	u64 total_length = 0;
    659
    660	scsi_for_each_sg(cmd, sg, nseg, i) {
    661		struct srp_direct_buf *descr = md + i;
    662		descr->va = cpu_to_be64(sg_dma_address(sg));
    663		descr->len = cpu_to_be32(sg_dma_len(sg));
    664		descr->key = 0;
    665		total_length += sg_dma_len(sg);
    666 	}
    667	return total_length;
    668}
    669
    670/**
    671 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
    672 * @cmd:	struct scsi_cmnd with the scatterlist
    673 * @evt_struct:	struct srp_event_struct to map
    674 * @srp_cmd:	srp_cmd that contains the memory descriptor
    675 * @dev:	device for which to map dma memory
    676 *
    677 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
    678 * Returns 1 on success.
    679*/
    680static int map_sg_data(struct scsi_cmnd *cmd,
    681		       struct srp_event_struct *evt_struct,
    682		       struct srp_cmd *srp_cmd, struct device *dev)
    683{
    684
    685	int sg_mapped;
    686	u64 total_length = 0;
    687	struct srp_direct_buf *data =
    688		(struct srp_direct_buf *) srp_cmd->add_data;
    689	struct srp_indirect_buf *indirect =
    690		(struct srp_indirect_buf *) data;
    691
    692	sg_mapped = scsi_dma_map(cmd);
    693	if (!sg_mapped)
    694		return 1;
    695	else if (sg_mapped < 0)
    696		return 0;
    697
    698	set_srp_direction(cmd, srp_cmd, sg_mapped);
    699
    700	/* special case; we can use a single direct descriptor */
    701	if (sg_mapped == 1) {
    702		map_sg_list(cmd, sg_mapped, data);
    703		return 1;
    704	}
    705
    706	indirect->table_desc.va = 0;
    707	indirect->table_desc.len = cpu_to_be32(sg_mapped *
    708					       sizeof(struct srp_direct_buf));
    709	indirect->table_desc.key = 0;
    710
    711	if (sg_mapped <= MAX_INDIRECT_BUFS) {
    712		total_length = map_sg_list(cmd, sg_mapped,
    713					   &indirect->desc_list[0]);
    714		indirect->len = cpu_to_be32(total_length);
    715		return 1;
    716	}
    717
    718	/* get indirect table */
    719	if (!evt_struct->ext_list) {
    720		evt_struct->ext_list = dma_alloc_coherent(dev,
    721					   SG_ALL * sizeof(struct srp_direct_buf),
    722					   &evt_struct->ext_list_token, 0);
    723		if (!evt_struct->ext_list) {
    724			if (!firmware_has_feature(FW_FEATURE_CMO))
    725				sdev_printk(KERN_ERR, cmd->device,
    726				            "Can't allocate memory "
    727				            "for indirect table\n");
    728			scsi_dma_unmap(cmd);
    729			return 0;
    730		}
    731	}
    732
    733	total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
    734
    735	indirect->len = cpu_to_be32(total_length);
    736	indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token);
    737	indirect->table_desc.len = cpu_to_be32(sg_mapped *
    738					       sizeof(indirect->desc_list[0]));
    739	memcpy(indirect->desc_list, evt_struct->ext_list,
    740	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
    741 	return 1;
    742}
    743
    744/**
    745 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
    746 * @cmd:	struct scsi_cmnd with the memory to be mapped
    747 * @evt_struct:	struct srp_event_struct to map
    748 * @srp_cmd:	srp_cmd that contains the memory descriptor
    749 * @dev:	dma device for which to map dma memory
    750 *
    751 * Called by scsi_cmd_to_srp_cmd() when converting scsi cmds to srp cmds 
    752 * Returns 1 on success.
    753*/
    754static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
    755				struct srp_event_struct *evt_struct,
    756				struct srp_cmd *srp_cmd, struct device *dev)
    757{
    758	switch (cmd->sc_data_direction) {
    759	case DMA_FROM_DEVICE:
    760	case DMA_TO_DEVICE:
    761		break;
    762	case DMA_NONE:
    763		return 1;
    764	case DMA_BIDIRECTIONAL:
    765		sdev_printk(KERN_ERR, cmd->device,
    766			    "Can't map DMA_BIDIRECTIONAL to read/write\n");
    767		return 0;
    768	default:
    769		sdev_printk(KERN_ERR, cmd->device,
    770			    "Unknown data direction 0x%02x; can't map!\n",
    771			    cmd->sc_data_direction);
    772		return 0;
    773	}
    774
    775	return map_sg_data(cmd, evt_struct, srp_cmd, dev);
    776}
    777
    778/**
     779 * purge_requests: Our virtual adapter just shut down.  Purge any sent requests
    780 * @hostdata:    the adapter
    781 * @error_code:  error code to return as the 'result'
    782 */
    783static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
    784{
    785	struct srp_event_struct *evt;
    786	unsigned long flags;
    787
    788	spin_lock_irqsave(hostdata->host->host_lock, flags);
    789	while (!list_empty(&hostdata->sent)) {
    790		evt = list_first_entry(&hostdata->sent, struct srp_event_struct, list);
    791		list_del(&evt->list);
    792		del_timer(&evt->timer);
    793
    794		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
    795		if (evt->cmnd) {
    796			evt->cmnd->result = (error_code << 16);
    797			unmap_cmd_data(&evt->iu.srp.cmd, evt,
    798				       evt->hostdata->dev);
    799			if (evt->cmnd_done)
    800				evt->cmnd_done(evt->cmnd);
    801		} else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
    802			   evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
    803			evt->done(evt);
    804		free_event_struct(&evt->hostdata->pool, evt);
    805		spin_lock_irqsave(hostdata->host->host_lock, flags);
    806	}
    807	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
    808}
    809
    810/**
    811 * ibmvscsi_set_request_limit - Set the adapter request_limit in response to
    812 * an adapter failure, reset, or SRP Login. Done under host lock to prevent
    813 * race with SCSI command submission.
    814 * @hostdata:	adapter to adjust
    815 * @limit:	new request limit
    816 */
    817static void ibmvscsi_set_request_limit(struct ibmvscsi_host_data *hostdata, int limit)
    818{
    819	unsigned long flags;
    820
    821	spin_lock_irqsave(hostdata->host->host_lock, flags);
    822	atomic_set(&hostdata->request_limit, limit);
    823	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
    824}
    825
    826/**
    827 * ibmvscsi_reset_host - Reset the connection to the server
    828 * @hostdata:	struct ibmvscsi_host_data to reset
    829*/
    830static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
    831{
    832	scsi_block_requests(hostdata->host);
    833	ibmvscsi_set_request_limit(hostdata, 0);
    834
    835	purge_requests(hostdata, DID_ERROR);
    836	hostdata->action = IBMVSCSI_HOST_ACTION_RESET;
    837	wake_up(&hostdata->work_wait_q);
    838}
    839
    840/**
    841 * ibmvscsi_timeout - Internal command timeout handler
    842 * @t:	struct srp_event_struct that timed out
    843 *
    844 * Called when an internally generated command times out
    845*/
    846static void ibmvscsi_timeout(struct timer_list *t)
    847{
    848	struct srp_event_struct *evt_struct = from_timer(evt_struct, t, timer);
    849	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
    850
    851	dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
    852		evt_struct->iu.srp.cmd.opcode);
    853
    854	ibmvscsi_reset_host(hostdata);
    855}
    856
    857
    858/* ------------------------------------------------------------
    859 * Routines for sending and receiving SRPs
    860 */
    861/**
    862 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
    863 * @evt_struct:	evt_struct to be sent
    864 * @hostdata:	ibmvscsi_host_data of host
    865 * @timeout:	timeout in seconds - 0 means do not time command
    866 *
    867 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
    868 * Note that this routine assumes that host_lock is held for synchronization
    869*/
    870static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
    871				   struct ibmvscsi_host_data *hostdata,
    872				   unsigned long timeout)
    873{
    874	__be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
    875	int request_status = 0;
    876	int rc;
    877	int srp_req = 0;
    878
    879	/* If we have exhausted our request limit, just fail this request,
    880	 * unless it is for a reset or abort.
    881	 * Note that there are rare cases involving driver generated requests 
     882 * (such as task management requests) where the mid layer may think we
     883 * can handle more requests (can_queue) than we actually can
    884	 */
    885	if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
    886		srp_req = 1;
    887		request_status =
    888			atomic_dec_if_positive(&hostdata->request_limit);
    889		/* If request limit was -1 when we started, it is now even
    890		 * less than that
    891		 */
    892		if (request_status < -1)
    893			goto send_error;
    894		/* Otherwise, we may have run out of requests. */
    895		/* If request limit was 0 when we started the adapter is in the
    896		 * process of performing a login with the server adapter, or
    897		 * we may have run out of requests.
    898		 */
    899		else if (request_status == -1 &&
    900		         evt_struct->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
    901			goto send_busy;
    902		/* Abort and reset calls should make it through.
    903		 * Nothing except abort and reset should use the last two
    904		 * slots unless we had two or less to begin with.
    905		 */
    906		else if (request_status < 2 &&
    907		         evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
    908			/* In the case that we have less than two requests
    909			 * available, check the server limit as a combination
    910			 * of the request limit and the number of requests
    911			 * in-flight (the size of the send list).  If the
    912			 * server limit is greater than 2, return busy so
    913			 * that the last two are reserved for reset and abort.
    914			 */
    915			int server_limit = request_status;
    916			struct srp_event_struct *tmp_evt;
    917
    918			list_for_each_entry(tmp_evt, &hostdata->sent, list) {
    919				server_limit++;
    920			}
    921
    922			if (server_limit > 2)
    923				goto send_busy;
    924		}
    925	}
    926
    927	/* Copy the IU into the transfer area */
    928	*evt_struct->xfer_iu = evt_struct->iu;
    929	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;
    930
    931	/* Add this to the sent list.  We need to do this 
    932	 * before we actually send 
    933	 * in case it comes back REALLY fast
    934	 */
    935	list_add_tail(&evt_struct->list, &hostdata->sent);
    936
    937	timer_setup(&evt_struct->timer, ibmvscsi_timeout, 0);
    938	if (timeout) {
    939		evt_struct->timer.expires = jiffies + (timeout * HZ);
    940		add_timer(&evt_struct->timer);
    941	}
    942
    943	rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]),
    944			       be64_to_cpu(crq_as_u64[1]));
    945	if (rc != 0) {
    946		list_del(&evt_struct->list);
    947		del_timer(&evt_struct->timer);
    948
    949		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
    950		 * Firmware will send a CRQ with a transport event (0xFF) to
    951		 * tell this client what has happened to the transport.  This
    952		 * will be handled in ibmvscsi_handle_crq()
    953		 */
    954		if (rc == H_CLOSED) {
    955			dev_warn(hostdata->dev, "send warning. "
    956			         "Receive queue closed, will retry.\n");
    957			goto send_busy;
    958		}
    959		dev_err(hostdata->dev, "send error %d\n", rc);
    960		if (srp_req)
    961			atomic_inc(&hostdata->request_limit);
    962		goto send_error;
    963	}
    964
    965	return 0;
    966
    967 send_busy:
    968	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
    969
    970	free_event_struct(&hostdata->pool, evt_struct);
    971	if (srp_req && request_status != -1)
    972		atomic_inc(&hostdata->request_limit);
    973	return SCSI_MLQUEUE_HOST_BUSY;
    974
    975 send_error:
    976	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
    977
    978	if (evt_struct->cmnd != NULL) {
    979		evt_struct->cmnd->result = DID_ERROR << 16;
    980		evt_struct->cmnd_done(evt_struct->cmnd);
    981	} else if (evt_struct->done)
    982		evt_struct->done(evt_struct);
    983
    984	free_event_struct(&hostdata->pool, evt_struct);
    985	return 0;
    986}
    987
    988/**
    989 * handle_cmd_rsp: -  Handle responses from commands
    990 * @evt_struct:	srp_event_struct to be handled
    991 *
     992 * Used as a callback when sending scsi cmds.
    993 * Gets called by ibmvscsi_handle_crq()
    994*/
    995static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
    996{
    997	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
    998	struct scsi_cmnd *cmnd = evt_struct->cmnd;
    999
   1000	if (unlikely(rsp->opcode != SRP_RSP)) {
   1001		if (printk_ratelimit())
   1002			dev_warn(evt_struct->hostdata->dev,
   1003				 "bad SRP RSP type %#02x\n", rsp->opcode);
   1004	}
   1005	
   1006	if (cmnd) {
   1007		cmnd->result |= rsp->status;
   1008		if (scsi_status_is_check_condition(cmnd->result))
   1009			memcpy(cmnd->sense_buffer,
   1010			       rsp->data,
   1011			       be32_to_cpu(rsp->sense_data_len));
   1012		unmap_cmd_data(&evt_struct->iu.srp.cmd, 
   1013			       evt_struct, 
   1014			       evt_struct->hostdata->dev);
   1015
   1016		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
   1017			scsi_set_resid(cmnd,
   1018				       be32_to_cpu(rsp->data_out_res_cnt));
   1019		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
   1020			scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt));
   1021	}
   1022
   1023	if (evt_struct->cmnd_done)
   1024		evt_struct->cmnd_done(cmnd);
   1025}
   1026
   1027/**
   1028 * lun_from_dev: - Returns the lun of the scsi device
   1029 * @dev:	struct scsi_device
   1030 *
   1031*/
   1032static inline u16 lun_from_dev(struct scsi_device *dev)
   1033{
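        	/* Flat LUN encoding matching the note at the top of this file:
        	 * bits 15:14 = 0b10 (addressing method), bits 13:8 = target id
        	 * (6 bits), bits 7:5 = channel (3 bits), bits 4:0 = LUN (5 bits).
        	 */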
   1034	return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
   1035}
   1036
   1037/**
   1038 * ibmvscsi_queuecommand_lck() - The queuecommand function of the scsi template
   1039 * @cmnd:	struct scsi_cmnd to be executed
   1040 * @done:	Callback function to be called when cmd is completed
   1041*/
   1042static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd)
   1043{
   1044	void (*done)(struct scsi_cmnd *) = scsi_done;
   1045	struct srp_cmd *srp_cmd;
   1046	struct srp_event_struct *evt_struct;
   1047	struct srp_indirect_buf *indirect;
   1048	struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host);
   1049	u16 lun = lun_from_dev(cmnd->device);
   1050	u8 out_fmt, in_fmt;
   1051
   1052	cmnd->result = (DID_OK << 16);
   1053	evt_struct = get_event_struct(&hostdata->pool);
   1054	if (!evt_struct)
   1055		return SCSI_MLQUEUE_HOST_BUSY;
   1056
   1057	/* Set up the actual SRP IU */
   1058	BUILD_BUG_ON(sizeof(evt_struct->iu.srp) != SRP_MAX_IU_LEN);
   1059	memset(&evt_struct->iu.srp, 0x00, sizeof(evt_struct->iu.srp));
   1060	srp_cmd = &evt_struct->iu.srp.cmd;
   1061	srp_cmd->opcode = SRP_CMD;
   1062	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
   1063	int_to_scsilun(lun, &srp_cmd->lun);
   1064
   1065	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
   1066		if (!firmware_has_feature(FW_FEATURE_CMO))
   1067			sdev_printk(KERN_ERR, cmnd->device,
   1068			            "couldn't convert cmd to srp_cmd\n");
   1069		free_event_struct(&hostdata->pool, evt_struct);
   1070		return SCSI_MLQUEUE_HOST_BUSY;
   1071	}
   1072
   1073	init_event_struct(evt_struct,
   1074			  handle_cmd_rsp,
   1075			  VIOSRP_SRP_FORMAT,
   1076			  scsi_cmd_to_rq(cmnd)->timeout / HZ);
   1077
   1078	evt_struct->cmnd = cmnd;
   1079	evt_struct->cmnd_done = done;
   1080
   1081	/* Fix up dma address of the buffer itself */
   1082	indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
   1083	out_fmt = srp_cmd->buf_fmt >> 4;
   1084	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
   1085	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
   1086	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
   1087	    indirect->table_desc.va == 0) {
   1088		indirect->table_desc.va =
   1089			cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) +
   1090			offsetof(struct srp_cmd, add_data) +
   1091			offsetof(struct srp_indirect_buf, desc_list));
   1092	}
   1093
   1094	return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
   1095}
   1096
   1097static DEF_SCSI_QCMD(ibmvscsi_queuecommand)
   1098
   1099/* ------------------------------------------------------------
   1100 * Routines for driver initialization
   1101 */
   1102
   1103/**
   1104 * map_persist_bufs: - Pre-map persistent data for adapter logins
   1105 * @hostdata:   ibmvscsi_host_data of host
   1106 *
   1107 * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
   1108 * Return 1 on error, 0 on success.
   1109 */
   1110static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
   1111{
   1112
   1113	hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
   1114					     sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
   1115
   1116	if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
   1117		dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
   1118		return 1;
   1119	}
   1120
   1121	hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
   1122						     &hostdata->madapter_info,
   1123						     sizeof(hostdata->madapter_info),
   1124						     DMA_BIDIRECTIONAL);
   1125	if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
   1126		dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
   1127		dma_unmap_single(hostdata->dev, hostdata->caps_addr,
   1128				 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
   1129		return 1;
   1130	}
   1131
   1132	return 0;
   1133}
   1134
   1135/**
   1136 * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
   1137 * @hostdata:   ibmvscsi_host_data of host
   1138 *
   1139 * Unmap the capabilities and adapter info DMA buffers
   1140 */
   1141static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
   1142{
   1143	dma_unmap_single(hostdata->dev, hostdata->caps_addr,
   1144			 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
   1145
   1146	dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
   1147			 sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
   1148}
   1149
   1150/**
   1151 * login_rsp: - Handle response to SRP login request
   1152 * @evt_struct:	srp_event_struct with the response
   1153 *
    1154 * Used as a "done" callback when sending srp_login. Gets called
   1155 * by ibmvscsi_handle_crq()
   1156*/
   1157static void login_rsp(struct srp_event_struct *evt_struct)
   1158{
   1159	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
   1160	switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
   1161	case SRP_LOGIN_RSP:	/* it worked! */
   1162		break;
   1163	case SRP_LOGIN_REJ:	/* refused! */
   1164		dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
   1165			 evt_struct->xfer_iu->srp.login_rej.reason);
   1166		/* Login failed.  */
   1167		ibmvscsi_set_request_limit(hostdata, -1);
   1168		return;
   1169	default:
   1170		dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
   1171			evt_struct->xfer_iu->srp.login_rsp.opcode);
   1172		/* Login failed.  */
   1173		ibmvscsi_set_request_limit(hostdata, -1);
   1174		return;
   1175	}
   1176
   1177	dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
   1178	hostdata->client_migrated = 0;
   1179
   1180	/* Now we know what the real request-limit is.
   1181	 * This value is set rather than added to request_limit because
   1182	 * request_limit could have been set to -1 by this client.
   1183	 */
   1184	ibmvscsi_set_request_limit(hostdata,
   1185		   be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
   1186
   1187	/* If we had any pending I/Os, kick them */
   1188	hostdata->action = IBMVSCSI_HOST_ACTION_UNBLOCK;
   1189	wake_up(&hostdata->work_wait_q);
   1190}
   1191
   1192/**
   1193 * send_srp_login: - Sends the srp login
   1194 * @hostdata:	ibmvscsi_host_data of host
   1195 *
   1196 * Returns zero if successful.
   1197*/
   1198static int send_srp_login(struct ibmvscsi_host_data *hostdata)
   1199{
   1200	int rc;
   1201	unsigned long flags;
   1202	struct srp_login_req *login;
   1203	struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
   1204
   1205	BUG_ON(!evt_struct);
   1206	init_event_struct(evt_struct, login_rsp,
   1207			  VIOSRP_SRP_FORMAT, login_timeout);
   1208
   1209	login = &evt_struct->iu.srp.login_req;
   1210	memset(login, 0, sizeof(*login));
   1211	login->opcode = SRP_LOGIN_REQ;
   1212	login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
   1213	login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
   1214					 SRP_BUF_FORMAT_INDIRECT);
   1215
   1216	/* Start out with a request limit of 0, since this is negotiated in
   1217	 * the login request we are just sending and login requests always
   1218	 * get sent by the driver regardless of request_limit.
   1219	 */
   1220	ibmvscsi_set_request_limit(hostdata, 0);
   1221
   1222	spin_lock_irqsave(hostdata->host->host_lock, flags);
   1223	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
   1224	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   1225	dev_info(hostdata->dev, "sent SRP login\n");
   1226	return rc;
   1227};
   1228
   1229/**
   1230 * capabilities_rsp: - Handle response to MAD adapter capabilities request
   1231 * @evt_struct:	srp_event_struct with the response
   1232 *
    1233 * Used as a "done" callback when sending the capabilities request.
   1234 */
   1235static void capabilities_rsp(struct srp_event_struct *evt_struct)
   1236{
   1237	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
   1238
   1239	if (evt_struct->xfer_iu->mad.capabilities.common.status) {
   1240		dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
   1241			evt_struct->xfer_iu->mad.capabilities.common.status);
   1242	} else {
   1243		if (hostdata->caps.migration.common.server_support !=
   1244		    cpu_to_be16(SERVER_SUPPORTS_CAP))
   1245			dev_info(hostdata->dev, "Partition migration not supported\n");
   1246
   1247		if (client_reserve) {
   1248			if (hostdata->caps.reserve.common.server_support ==
   1249			    cpu_to_be16(SERVER_SUPPORTS_CAP))
   1250				dev_info(hostdata->dev, "Client reserve enabled\n");
   1251			else
   1252				dev_info(hostdata->dev, "Client reserve not supported\n");
   1253		}
   1254	}
   1255
   1256	send_srp_login(hostdata);
   1257}
   1258
   1259/**
   1260 * send_mad_capabilities: - Sends the mad capabilities request
    1261 *      and stores the result in the adapter's capabilities buffer.
   1262 * @hostdata:	ibmvscsi_host_data of host
   1263 */
   1264static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
   1265{
   1266	struct viosrp_capabilities *req;
   1267	struct srp_event_struct *evt_struct;
   1268	unsigned long flags;
   1269	struct device_node *of_node = hostdata->dev->of_node;
   1270	const char *location;
   1271
   1272	evt_struct = get_event_struct(&hostdata->pool);
   1273	BUG_ON(!evt_struct);
   1274
   1275	init_event_struct(evt_struct, capabilities_rsp,
   1276			  VIOSRP_MAD_FORMAT, info_timeout);
   1277
   1278	req = &evt_struct->iu.mad.capabilities;
   1279	memset(req, 0, sizeof(*req));
   1280
   1281	hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED);
   1282	if (hostdata->client_migrated)
   1283		hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);
   1284
   1285	strlcpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
   1286		sizeof(hostdata->caps.name));
   1287
   1288	location = of_get_property(of_node, "ibm,loc-code", NULL);
   1289	location = location ? location : dev_name(hostdata->dev);
   1290	strlcpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
   1291
   1292	req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
   1293	req->buffer = cpu_to_be64(hostdata->caps_addr);
   1294
   1295	hostdata->caps.migration.common.cap_type =
   1296				cpu_to_be32(MIGRATION_CAPABILITIES);
   1297	hostdata->caps.migration.common.length =
   1298				cpu_to_be16(sizeof(hostdata->caps.migration));
   1299	hostdata->caps.migration.common.server_support =
   1300				cpu_to_be16(SERVER_SUPPORTS_CAP);
   1301	hostdata->caps.migration.ecl = cpu_to_be32(1);
   1302
   1303	if (client_reserve) {
   1304		hostdata->caps.reserve.common.cap_type =
   1305					cpu_to_be32(RESERVATION_CAPABILITIES);
   1306		hostdata->caps.reserve.common.length =
   1307				cpu_to_be16(sizeof(hostdata->caps.reserve));
   1308		hostdata->caps.reserve.common.server_support =
   1309				cpu_to_be16(SERVER_SUPPORTS_CAP);
   1310		hostdata->caps.reserve.type =
   1311				cpu_to_be32(CLIENT_RESERVE_SCSI_2);
   1312		req->common.length =
   1313				cpu_to_be16(sizeof(hostdata->caps));
   1314	} else
   1315		req->common.length = cpu_to_be16(sizeof(hostdata->caps) -
   1316						sizeof(hostdata->caps.reserve));
   1317
   1318	spin_lock_irqsave(hostdata->host->host_lock, flags);
   1319	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
   1320		dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
   1321	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   1322};
   1323
   1324/**
   1325 * fast_fail_rsp: - Handle response to MAD enable fast fail
   1326 * @evt_struct:	srp_event_struct with the response
   1327 *
    1328 * Used as a "done" callback when sending enable fast fail. Gets called
   1329 * by ibmvscsi_handle_crq()
   1330 */
   1331static void fast_fail_rsp(struct srp_event_struct *evt_struct)
   1332{
   1333	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
   1334	u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status);
   1335
   1336	if (status == VIOSRP_MAD_NOT_SUPPORTED)
   1337		dev_err(hostdata->dev, "fast_fail not supported in server\n");
   1338	else if (status == VIOSRP_MAD_FAILED)
   1339		dev_err(hostdata->dev, "fast_fail request failed\n");
   1340	else if (status != VIOSRP_MAD_SUCCESS)
   1341		dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
   1342
   1343	send_mad_capabilities(hostdata);
   1344}
   1345
   1346/**
   1347 * enable_fast_fail() - Start host initialization
   1348 * @hostdata:	ibmvscsi_host_data of host
   1349 *
   1350 * Returns zero if successful.
   1351 */
   1352static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
   1353{
   1354	int rc;
   1355	unsigned long flags;
   1356	struct viosrp_fast_fail *fast_fail_mad;
   1357	struct srp_event_struct *evt_struct;
   1358
   1359	if (!fast_fail) {
   1360		send_mad_capabilities(hostdata);
   1361		return 0;
   1362	}
   1363
   1364	evt_struct = get_event_struct(&hostdata->pool);
   1365	BUG_ON(!evt_struct);
   1366
   1367	init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
   1368
   1369	fast_fail_mad = &evt_struct->iu.mad.fast_fail;
   1370	memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
   1371	fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL);
   1372	fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad));
   1373
   1374	spin_lock_irqsave(hostdata->host->host_lock, flags);
   1375	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
   1376	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   1377	return rc;
   1378}
   1379
   1380/**
   1381 * adapter_info_rsp: - Handle response to MAD adapter info request
   1382 * @evt_struct:	srp_event_struct with the response
   1383 *
    1385 * Used as a "done" callback when sending adapter_info. Gets called
   1385 * by ibmvscsi_handle_crq()
   1386*/
   1387static void adapter_info_rsp(struct srp_event_struct *evt_struct)
   1388{
   1389	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
   1390
   1391	if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
   1392		dev_err(hostdata->dev, "error %d getting adapter info\n",
   1393			evt_struct->xfer_iu->mad.adapter_info.common.status);
   1394	} else {
   1395		dev_info(hostdata->dev, "host srp version: %s, "
   1396			 "host partition %s (%d), OS %d, max io %u\n",
   1397			 hostdata->madapter_info.srp_version,
   1398			 hostdata->madapter_info.partition_name,
   1399			 be32_to_cpu(hostdata->madapter_info.partition_number),
   1400			 be32_to_cpu(hostdata->madapter_info.os_type),
   1401			 be32_to_cpu(hostdata->madapter_info.port_max_txu[0]));
   1402		
   1403		if (hostdata->madapter_info.port_max_txu[0]) 
   1404			hostdata->host->max_sectors = 
   1405				be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;
   1406		
   1407		if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX &&
   1408		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
   1409			dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
   1410				hostdata->madapter_info.srp_version);
   1411			dev_err(hostdata->dev, "limiting scatterlists to %d\n",
   1412				MAX_INDIRECT_BUFS);
   1413			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
   1414		}
   1415
   1416		if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX) {
   1417			enable_fast_fail(hostdata);
   1418			return;
   1419		}
   1420	}
   1421
   1422	send_srp_login(hostdata);
   1423}
   1424
   1425/**
   1426 * send_mad_adapter_info: - Sends the mad adapter info request
   1427 *      and stores the result so it can be retrieved with
   1428 *      sysfs.  We COULD consider causing a failure if the
   1429 *      returned SRP version doesn't match ours.
   1430 * @hostdata:	ibmvscsi_host_data of host
   1431 * 
    1432 * The response is handled by adapter_info_rsp().
   1433*/
   1434static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
   1435{
   1436	struct viosrp_adapter_info *req;
   1437	struct srp_event_struct *evt_struct;
   1438	unsigned long flags;
   1439
   1440	evt_struct = get_event_struct(&hostdata->pool);
   1441	BUG_ON(!evt_struct);
   1442
   1443	init_event_struct(evt_struct,
   1444			  adapter_info_rsp,
   1445			  VIOSRP_MAD_FORMAT,
   1446			  info_timeout);
   1447	
   1448	req = &evt_struct->iu.mad.adapter_info;
   1449	memset(req, 0x00, sizeof(*req));
   1450	
   1451	req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE);
   1452	req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info));
   1453	req->buffer = cpu_to_be64(hostdata->adapter_info_addr);
   1454
   1455	spin_lock_irqsave(hostdata->host->host_lock, flags);
   1456	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
   1457		dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
   1458	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   1459};
   1460
   1461/*
   1462 * init_adapter() - Start virtual adapter initialization sequence
   1463 */
   1464static void init_adapter(struct ibmvscsi_host_data *hostdata)
   1465{
   1466	send_mad_adapter_info(hostdata);
   1467}
   1468
   1469/*
   1470 * sync_completion: Signal that a synchronous command has completed
   1471 * Note that after returning from this call, the evt_struct is freed.
   1472 * the caller waiting on this completion shouldn't touch the evt_struct
   1473 * again.
   1474 */
   1475static void sync_completion(struct srp_event_struct *evt_struct)
   1476{
   1477	/* copy the response back */
   1478	if (evt_struct->sync_srp)
   1479		*evt_struct->sync_srp = *evt_struct->xfer_iu;
   1480	
   1481	complete(&evt_struct->comp);
   1482}
   1483
   1484/*
   1485 * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template
   1486 * send this over to the server and wait synchronously for the response
   1487 */
   1488static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
   1489{
   1490	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
   1491	struct srp_tsk_mgmt *tsk_mgmt;
   1492	struct srp_event_struct *evt;
   1493	struct srp_event_struct *tmp_evt, *found_evt;
   1494	union viosrp_iu srp_rsp;
   1495	int rsp_rc;
   1496	unsigned long flags;
   1497	u16 lun = lun_from_dev(cmd->device);
   1498	unsigned long wait_switch = 0;
   1499
   1500	/* First, find this command in our sent list so we can figure
   1501	 * out the correct tag
   1502	 */
   1503	spin_lock_irqsave(hostdata->host->host_lock, flags);
   1504	wait_switch = jiffies + (init_timeout * HZ);
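	/*
	 * Retry while the send path reports SCSI_MLQUEUE_HOST_BUSY, for at
	 * most init_timeout seconds.
	 */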
   1505	do {
   1506		found_evt = NULL;
   1507		list_for_each_entry(tmp_evt, &hostdata->sent, list) {
   1508			if (tmp_evt->cmnd == cmd) {
   1509				found_evt = tmp_evt;
   1510				break;
   1511			}
   1512		}
   1513
   1514		if (!found_evt) {
   1515			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   1516			return SUCCESS;
   1517		}
   1518
   1519		evt = get_event_struct(&hostdata->pool);
   1520		if (evt == NULL) {
   1521			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   1522			sdev_printk(KERN_ERR, cmd->device,
   1523				"failed to allocate abort event\n");
   1524			return FAILED;
   1525		}
   1526	
   1527		init_event_struct(evt,
   1528				  sync_completion,
   1529				  VIOSRP_SRP_FORMAT,
   1530				  abort_timeout);
   1531
   1532		tsk_mgmt = &evt->iu.srp.tsk_mgmt;
   1533	
   1534		/* Set up an abort SRP command */
   1535		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
   1536		tsk_mgmt->opcode = SRP_TSK_MGMT;
   1537		int_to_scsilun(lun, &tsk_mgmt->lun);
   1538		tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
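		/*
		 * The tag must identify the outstanding command; this driver
		 * tags commands with the address of their srp_event_struct
		 * when they are queued (see ibmvscsi_queuecommand), so reuse
		 * that pointer here.
		 */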
   1539		tsk_mgmt->task_tag = (u64) found_evt;
   1540
   1541		evt->sync_srp = &srp_rsp;
   1542
   1543		init_completion(&evt->comp);
   1544		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
   1545
   1546		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
   1547			break;
   1548
   1549		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   1550		msleep(10);
   1551		spin_lock_irqsave(hostdata->host->host_lock, flags);
   1552	} while (time_before(jiffies, wait_switch));
   1553
   1554	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   1555
   1556	if (rsp_rc != 0) {
   1557		sdev_printk(KERN_ERR, cmd->device,
   1558			    "failed to send abort() event. rc=%d\n", rsp_rc);
   1559		return FAILED;
   1560	}
   1561
   1562	sdev_printk(KERN_INFO, cmd->device,
   1563                    "aborting command. lun 0x%llx, tag 0x%llx\n",
   1564		    (((u64) lun) << 48), (u64) found_evt);
   1565
   1566	wait_for_completion(&evt->comp);
   1567
   1568	/* make sure we got a good response */
   1569	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
   1570		if (printk_ratelimit())
   1571			sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
   1572				    srp_rsp.srp.rsp.opcode);
   1573		return FAILED;
   1574	}
   1575
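	/*
	 * Per SRP, a task management response carries its status in the
	 * response data area when RSPVALID is set; otherwise fall back to
	 * the status byte.
	 */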
   1576	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
   1577		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
   1578	else
   1579		rsp_rc = srp_rsp.srp.rsp.status;
   1580
   1581	if (rsp_rc) {
   1582		if (printk_ratelimit())
   1583			sdev_printk(KERN_WARNING, cmd->device,
   1584				    "abort code %d for task tag 0x%llx\n",
   1585				    rsp_rc, tsk_mgmt->task_tag);
   1586		return FAILED;
   1587	}
   1588
   1589	/* Because we dropped the spinlock above, it's possible
   1590	 * The event is no longer in our list.  Make sure it didn't
   1591	 * complete while we were aborting
   1592	 */
   1593	spin_lock_irqsave(hostdata->host->host_lock, flags);
   1594	found_evt = NULL;
   1595	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
   1596		if (tmp_evt->cmnd == cmd) {
   1597			found_evt = tmp_evt;
   1598			break;
   1599		}
   1600	}
   1601
   1602	if (found_evt == NULL) {
   1603		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   1604		sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n",
   1605			    tsk_mgmt->task_tag);
   1606		return SUCCESS;
   1607	}
   1608
   1609	sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n",
   1610		    tsk_mgmt->task_tag);
   1611
   1612	cmd->result = (DID_ABORT << 16);
   1613	list_del(&found_evt->list);
   1614	unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
   1615		       found_evt->hostdata->dev);
   1616	free_event_struct(&found_evt->hostdata->pool, found_evt);
   1617	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   1618	atomic_inc(&hostdata->request_limit);
   1619	return SUCCESS;
   1620}
   1621
   1622/*
    1623	 * ibmvscsi_eh_device_reset_handler: Reset a single LUN (from the scsi host
    1624	 * template); send the reset over to the server and wait synchronously for
    1625	 * the response
   1626 */
   1627static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
   1628{
   1629	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
   1630	struct srp_tsk_mgmt *tsk_mgmt;
   1631	struct srp_event_struct *evt;
   1632	struct srp_event_struct *tmp_evt, *pos;
   1633	union viosrp_iu srp_rsp;
   1634	int rsp_rc;
   1635	unsigned long flags;
   1636	u16 lun = lun_from_dev(cmd->device);
   1637	unsigned long wait_switch = 0;
   1638
   1639	spin_lock_irqsave(hostdata->host->host_lock, flags);
   1640	wait_switch = jiffies + (init_timeout * HZ);
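	/*
	 * Retry while the send path reports SCSI_MLQUEUE_HOST_BUSY, for at
	 * most init_timeout seconds.
	 */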
   1641	do {
   1642		evt = get_event_struct(&hostdata->pool);
   1643		if (evt == NULL) {
   1644			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   1645			sdev_printk(KERN_ERR, cmd->device,
   1646				"failed to allocate reset event\n");
   1647			return FAILED;
   1648		}
   1649	
   1650		init_event_struct(evt,
   1651				  sync_completion,
   1652				  VIOSRP_SRP_FORMAT,
   1653				  reset_timeout);
   1654
   1655		tsk_mgmt = &evt->iu.srp.tsk_mgmt;
   1656
   1657		/* Set up a lun reset SRP command */
   1658		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
   1659		tsk_mgmt->opcode = SRP_TSK_MGMT;
   1660		int_to_scsilun(lun, &tsk_mgmt->lun);
   1661		tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
   1662
   1663		evt->sync_srp = &srp_rsp;
   1664
   1665		init_completion(&evt->comp);
   1666		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
   1667
   1668		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
   1669			break;
   1670
   1671		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   1672		msleep(10);
   1673		spin_lock_irqsave(hostdata->host->host_lock, flags);
   1674	} while (time_before(jiffies, wait_switch));
   1675
   1676	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   1677
   1678	if (rsp_rc != 0) {
   1679		sdev_printk(KERN_ERR, cmd->device,
   1680			    "failed to send reset event. rc=%d\n", rsp_rc);
   1681		return FAILED;
   1682	}
   1683
   1684	sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n",
   1685		    (((u64) lun) << 48));
   1686
   1687	wait_for_completion(&evt->comp);
   1688
   1689	/* make sure we got a good response */
   1690	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
   1691		if (printk_ratelimit())
   1692			sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
   1693				    srp_rsp.srp.rsp.opcode);
   1694		return FAILED;
   1695	}
   1696
   1697	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
   1698		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
   1699	else
   1700		rsp_rc = srp_rsp.srp.rsp.status;
   1701
   1702	if (rsp_rc) {
   1703		if (printk_ratelimit())
   1704			sdev_printk(KERN_WARNING, cmd->device,
   1705				    "reset code %d for task tag 0x%llx\n",
   1706				    rsp_rc, tsk_mgmt->task_tag);
   1707		return FAILED;
   1708	}
   1709
   1710	/* We need to find all commands for this LUN that have not yet been
   1711	 * responded to, and fail them with DID_RESET
   1712	 */
   1713	spin_lock_irqsave(hostdata->host->host_lock, flags);
   1714	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
   1715		if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
   1716			if (tmp_evt->cmnd)
   1717				tmp_evt->cmnd->result = (DID_RESET << 16);
   1718			list_del(&tmp_evt->list);
   1719			unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
   1720				       tmp_evt->hostdata->dev);
   1721			free_event_struct(&tmp_evt->hostdata->pool,
   1722						   tmp_evt);
   1723			atomic_inc(&hostdata->request_limit);
   1724			if (tmp_evt->cmnd_done)
   1725				tmp_evt->cmnd_done(tmp_evt->cmnd);
   1726			else if (tmp_evt->done)
   1727				tmp_evt->done(tmp_evt);
   1728		}
   1729	}
   1730	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   1731	return SUCCESS;
   1732}
   1733
   1734/**
   1735 * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
   1736 * @cmd:	struct scsi_cmnd having problems
    1737 */
   1738static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
   1739{
   1740	unsigned long wait_switch = 0;
   1741	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
   1742
   1743	dev_err(hostdata->dev, "Resetting connection due to error recovery\n");
   1744
   1745	ibmvscsi_reset_host(hostdata);
   1746
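	/*
	 * Wait up to init_timeout seconds for the reset and re-login to
	 * complete; success is indicated by request_limit becoming positive
	 * again.
	 */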
   1747	for (wait_switch = jiffies + (init_timeout * HZ);
   1748	     time_before(jiffies, wait_switch) &&
   1749		     atomic_read(&hostdata->request_limit) < 2;) {
   1750
   1751		msleep(10);
   1752	}
   1753
   1754	if (atomic_read(&hostdata->request_limit) <= 0)
   1755		return FAILED;
   1756
   1757	return SUCCESS;
   1758}
   1759
   1760/**
   1761 * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
   1762 * @crq:	Command/Response queue
   1763 * @hostdata:	ibmvscsi_host_data of host
   1764 *
    1765 */
   1766static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
   1767				struct ibmvscsi_host_data *hostdata)
   1768{
   1769	long rc;
   1770	unsigned long flags;
   1771	/* The hypervisor copies our tag value here so no byteswapping */
   1772	struct srp_event_struct *evt_struct =
   1773			(__force struct srp_event_struct *)crq->IU_data_ptr;
   1774	switch (crq->valid) {
   1775	case VIOSRP_CRQ_INIT_RSP:		/* initialization */
   1776		switch (crq->format) {
   1777		case VIOSRP_CRQ_INIT:	/* Initialization message */
   1778			dev_info(hostdata->dev, "partner initialized\n");
   1779			/* Send back a response */
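			/*
			 * 0xC002... is the INIT_COMPLETE form of the CRQ
			 * initialization message, answering the partner's
			 * INIT.
			 */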
   1780			rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
   1781			if (rc == 0) {
   1782				/* Now login */
   1783				init_adapter(hostdata);
   1784			} else {
   1785				dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
   1786			}
   1787
   1788			break;
   1789		case VIOSRP_CRQ_INIT_COMPLETE:	/* Initialization response */
   1790			dev_info(hostdata->dev, "partner initialization complete\n");
   1791
   1792			/* Now login */
   1793			init_adapter(hostdata);
   1794			break;
   1795		default:
   1796			dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
   1797		}
   1798		return;
   1799	case VIOSRP_CRQ_XPORT_EVENT:	/* Hypervisor telling us the connection is closed */
   1800		scsi_block_requests(hostdata->host);
   1801		ibmvscsi_set_request_limit(hostdata, 0);
   1802		if (crq->format == 0x06) {
   1803			/* We need to re-setup the interpartition connection */
   1804			dev_info(hostdata->dev, "Re-enabling adapter!\n");
   1805			hostdata->client_migrated = 1;
   1806			hostdata->action = IBMVSCSI_HOST_ACTION_REENABLE;
   1807			purge_requests(hostdata, DID_REQUEUE);
   1808			wake_up(&hostdata->work_wait_q);
   1809		} else {
   1810			dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
   1811				crq->format);
   1812			ibmvscsi_reset_host(hostdata);
   1813		}
   1814		return;
   1815	case VIOSRP_CRQ_CMD_RSP:		/* real payload */
   1816		break;
   1817	default:
   1818		dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
   1819			crq->valid);
   1820		return;
   1821	}
   1822
   1823	/* The only kind of payload CRQs we should get are responses to
   1824	 * things we send. Make sure this response is to something we
   1825	 * actually sent
   1826	 */
   1827	if (!valid_event_struct(&hostdata->pool, evt_struct)) {
   1828		dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
   1829		       evt_struct);
   1830		return;
   1831	}
   1832
   1833	if (atomic_read(&evt_struct->free)) {
   1834		dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
   1835			evt_struct);
   1836		return;
   1837	}
   1838
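	/*
	 * SRP responses include a request-limit delta; fold it into our
	 * local credit count so new commands can be issued.
	 */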
   1839	if (crq->format == VIOSRP_SRP_FORMAT)
   1840		atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),
   1841			   &hostdata->request_limit);
   1842
   1843	del_timer(&evt_struct->timer);
   1844
   1845	if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd)
   1846		evt_struct->cmnd->result = DID_ERROR << 16;
   1847	if (evt_struct->done)
   1848		evt_struct->done(evt_struct);
   1849	else
   1850		dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");
   1851
   1852	/*
   1853	 * Lock the host_lock before messing with these structures, since we
   1854	 * are running in a task context
   1855	 */
   1856	spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
   1857	list_del(&evt_struct->list);
   1858	free_event_struct(&evt_struct->hostdata->pool, evt_struct);
   1859	spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
   1860}
   1861
   1862/**
   1863 * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
   1864 * @sdev:	struct scsi_device device to configure
   1865 *
   1866 * Enable allow_restart for a device if it is a disk.  Adjust the
   1867 * queue_depth here also as is required by the documentation for
   1868 * struct scsi_host_template.
   1869 */
   1870static int ibmvscsi_slave_configure(struct scsi_device *sdev)
   1871{
   1872	struct Scsi_Host *shost = sdev->host;
   1873	unsigned long lock_flags = 0;
   1874
   1875	spin_lock_irqsave(shost->host_lock, lock_flags);
   1876	if (sdev->type == TYPE_DISK) {
   1877		sdev->allow_restart = 1;
   1878		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
   1879	}
   1880	spin_unlock_irqrestore(shost->host_lock, lock_flags);
   1881	return 0;
   1882}
   1883
   1884/**
   1885 * ibmvscsi_change_queue_depth - Change the device's queue depth
   1886 * @sdev:	scsi device struct
   1887 * @qdepth:	depth to set
   1888 *
   1889 * Return value:
   1890 * 	actual depth set
   1891 **/
   1892static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
   1893{
   1894	if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
   1895		qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
   1896	return scsi_change_queue_depth(sdev, qdepth);
   1897}
   1898
   1899/* ------------------------------------------------------------
   1900 * sysfs attributes
   1901 */
   1902static ssize_t show_host_vhost_loc(struct device *dev,
   1903				   struct device_attribute *attr, char *buf)
   1904{
   1905	struct Scsi_Host *shost = class_to_shost(dev);
   1906	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
   1907	int len;
   1908
   1909	len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
   1910		       hostdata->caps.loc);
   1911	return len;
   1912}
   1913
   1914static struct device_attribute ibmvscsi_host_vhost_loc = {
   1915	.attr = {
   1916		 .name = "vhost_loc",
   1917		 .mode = S_IRUGO,
   1918		 },
   1919	.show = show_host_vhost_loc,
   1920};
   1921
   1922static ssize_t show_host_vhost_name(struct device *dev,
   1923				    struct device_attribute *attr, char *buf)
   1924{
   1925	struct Scsi_Host *shost = class_to_shost(dev);
   1926	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
   1927	int len;
   1928
   1929	len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
   1930		       hostdata->caps.name);
   1931	return len;
   1932}
   1933
   1934static struct device_attribute ibmvscsi_host_vhost_name = {
   1935	.attr = {
   1936		 .name = "vhost_name",
   1937		 .mode = S_IRUGO,
   1938		 },
   1939	.show = show_host_vhost_name,
   1940};
   1941
   1942static ssize_t show_host_srp_version(struct device *dev,
   1943				     struct device_attribute *attr, char *buf)
   1944{
   1945	struct Scsi_Host *shost = class_to_shost(dev);
   1946	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
   1947	int len;
   1948
   1949	len = snprintf(buf, PAGE_SIZE, "%s\n",
   1950		       hostdata->madapter_info.srp_version);
   1951	return len;
   1952}
   1953
   1954static struct device_attribute ibmvscsi_host_srp_version = {
   1955	.attr = {
   1956		 .name = "srp_version",
   1957		 .mode = S_IRUGO,
   1958		 },
   1959	.show = show_host_srp_version,
   1960};
   1961
   1962static ssize_t show_host_partition_name(struct device *dev,
   1963					struct device_attribute *attr,
   1964					char *buf)
   1965{
   1966	struct Scsi_Host *shost = class_to_shost(dev);
   1967	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
   1968	int len;
   1969
   1970	len = snprintf(buf, PAGE_SIZE, "%s\n",
   1971		       hostdata->madapter_info.partition_name);
   1972	return len;
   1973}
   1974
   1975static struct device_attribute ibmvscsi_host_partition_name = {
   1976	.attr = {
   1977		 .name = "partition_name",
   1978		 .mode = S_IRUGO,
   1979		 },
   1980	.show = show_host_partition_name,
   1981};
   1982
   1983static ssize_t show_host_partition_number(struct device *dev,
   1984					  struct device_attribute *attr,
   1985					  char *buf)
   1986{
   1987	struct Scsi_Host *shost = class_to_shost(dev);
   1988	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
   1989	int len;
   1990
   1991	len = snprintf(buf, PAGE_SIZE, "%d\n",
   1992		       be32_to_cpu(hostdata->madapter_info.partition_number));
   1993	return len;
   1994}
   1995
   1996static struct device_attribute ibmvscsi_host_partition_number = {
   1997	.attr = {
   1998		 .name = "partition_number",
   1999		 .mode = S_IRUGO,
   2000		 },
   2001	.show = show_host_partition_number,
   2002};
   2003
   2004static ssize_t show_host_mad_version(struct device *dev,
   2005				     struct device_attribute *attr, char *buf)
   2006{
   2007	struct Scsi_Host *shost = class_to_shost(dev);
   2008	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
   2009	int len;
   2010
   2011	len = snprintf(buf, PAGE_SIZE, "%d\n",
   2012		       be32_to_cpu(hostdata->madapter_info.mad_version));
   2013	return len;
   2014}
   2015
   2016static struct device_attribute ibmvscsi_host_mad_version = {
   2017	.attr = {
   2018		 .name = "mad_version",
   2019		 .mode = S_IRUGO,
   2020		 },
   2021	.show = show_host_mad_version,
   2022};
   2023
   2024static ssize_t show_host_os_type(struct device *dev,
   2025				 struct device_attribute *attr, char *buf)
   2026{
   2027	struct Scsi_Host *shost = class_to_shost(dev);
   2028	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
   2029	int len;
   2030
   2031	len = snprintf(buf, PAGE_SIZE, "%d\n",
   2032		       be32_to_cpu(hostdata->madapter_info.os_type));
   2033	return len;
   2034}
   2035
   2036static struct device_attribute ibmvscsi_host_os_type = {
   2037	.attr = {
   2038		 .name = "os_type",
   2039		 .mode = S_IRUGO,
   2040		 },
   2041	.show = show_host_os_type,
   2042};
   2043
   2044static ssize_t show_host_config(struct device *dev,
   2045				struct device_attribute *attr, char *buf)
   2046{
   2047	return 0;
   2048}
   2049
   2050static struct device_attribute ibmvscsi_host_config = {
   2051	.attr = {
   2052		.name = "config",
   2053		.mode = S_IRUGO,
   2054		},
   2055	.show = show_host_config,
   2056};
   2057
   2058static int ibmvscsi_host_reset(struct Scsi_Host *shost, int reset_type)
   2059{
   2060	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
   2061
   2062	dev_info(hostdata->dev, "Initiating adapter reset!\n");
   2063	ibmvscsi_reset_host(hostdata);
   2064
   2065	return 0;
   2066}
   2067
   2068static struct attribute *ibmvscsi_host_attrs[] = {
   2069	&ibmvscsi_host_vhost_loc.attr,
   2070	&ibmvscsi_host_vhost_name.attr,
   2071	&ibmvscsi_host_srp_version.attr,
   2072	&ibmvscsi_host_partition_name.attr,
   2073	&ibmvscsi_host_partition_number.attr,
   2074	&ibmvscsi_host_mad_version.attr,
   2075	&ibmvscsi_host_os_type.attr,
   2076	&ibmvscsi_host_config.attr,
   2077	NULL
   2078};
   2079
   2080ATTRIBUTE_GROUPS(ibmvscsi_host);
   2081
   2082/* ------------------------------------------------------------
   2083 * SCSI driver registration
   2084 */
   2085static struct scsi_host_template driver_template = {
   2086	.module = THIS_MODULE,
   2087	.name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
   2088	.proc_name = "ibmvscsi",
   2089	.queuecommand = ibmvscsi_queuecommand,
   2090	.eh_timed_out = srp_timed_out,
   2091	.eh_abort_handler = ibmvscsi_eh_abort_handler,
   2092	.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
   2093	.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
   2094	.slave_configure = ibmvscsi_slave_configure,
   2095	.change_queue_depth = ibmvscsi_change_queue_depth,
   2096	.host_reset = ibmvscsi_host_reset,
   2097	.cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
   2098	.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
   2099	.this_id = -1,
   2100	.sg_tablesize = SG_ALL,
   2101	.shost_groups = ibmvscsi_host_groups,
   2102};
   2103
   2104/**
   2105 * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver
   2106 *
   2107 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
   2108 *
   2109 * Return value:
   2110 *	Number of bytes of IO data the driver will need to perform well.
   2111 */
   2112static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
   2113{
   2114	/* iu_storage data allocated in initialize_event_pool */
   2115	unsigned long desired_io = max_events * sizeof(union viosrp_iu);
   2116
   2117	/* add io space for sg data */
   2118	desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
   2119	                     IBMVSCSI_CMDS_PER_LUN_DEFAULT);
   2120
   2121	return desired_io;
   2122}
   2123
   2124static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
   2125{
   2126	unsigned long flags;
   2127	int rc;
   2128	char *action = "reset";
   2129
   2130	spin_lock_irqsave(hostdata->host->host_lock, flags);
   2131	switch (hostdata->action) {
   2132	case IBMVSCSI_HOST_ACTION_UNBLOCK:
   2133		rc = 0;
   2134		break;
   2135	case IBMVSCSI_HOST_ACTION_RESET:
   2136		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   2137		rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
   2138		spin_lock_irqsave(hostdata->host->host_lock, flags);
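		/* restart the CRQ handshake with an initialization message */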
   2139		if (!rc)
   2140			rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
   2141		vio_enable_interrupts(to_vio_dev(hostdata->dev));
   2142		break;
   2143	case IBMVSCSI_HOST_ACTION_REENABLE:
   2144		action = "enable";
   2145		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   2146		rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
   2147		spin_lock_irqsave(hostdata->host->host_lock, flags);
   2148		if (!rc)
   2149			rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
   2150		break;
   2151	case IBMVSCSI_HOST_ACTION_NONE:
   2152	default:
   2153		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   2154		return;
   2155	}
   2156
   2157	hostdata->action = IBMVSCSI_HOST_ACTION_NONE;
   2158	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   2159
   2160	if (rc) {
   2161		ibmvscsi_set_request_limit(hostdata, -1);
   2162		dev_err(hostdata->dev, "error after %s\n", action);
   2163	}
   2164
   2165	scsi_unblock_requests(hostdata->host);
   2166}
   2167
   2168static int __ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
   2169{
   2170	if (kthread_should_stop())
   2171		return 1;
   2172	switch (hostdata->action) {
   2173	case IBMVSCSI_HOST_ACTION_NONE:
   2174		return 0;
   2175	case IBMVSCSI_HOST_ACTION_RESET:
   2176	case IBMVSCSI_HOST_ACTION_REENABLE:
   2177	case IBMVSCSI_HOST_ACTION_UNBLOCK:
   2178	default:
   2179		break;
   2180	}
   2181
   2182	return 1;
   2183}
   2184
   2185static int ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
   2186{
   2187	unsigned long flags;
   2188	int rc;
   2189
   2190	spin_lock_irqsave(hostdata->host->host_lock, flags);
   2191	rc = __ibmvscsi_work_to_do(hostdata);
   2192	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
   2193
   2194	return rc;
   2195}
   2196
   2197static int ibmvscsi_work(void *data)
   2198{
   2199	struct ibmvscsi_host_data *hostdata = data;
   2200	int rc;
   2201
   2202	set_user_nice(current, MIN_NICE);
   2203
   2204	while (1) {
   2205		rc = wait_event_interruptible(hostdata->work_wait_q,
   2206					      ibmvscsi_work_to_do(hostdata));
   2207
   2208		BUG_ON(rc);
   2209
   2210		if (kthread_should_stop())
   2211			break;
   2212
   2213		ibmvscsi_do_work(hostdata);
   2214	}
   2215
   2216	return 0;
   2217}
   2218
   2219/*
   2220 * Called by bus code for each adapter
   2221 */
   2222static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
   2223{
   2224	struct ibmvscsi_host_data *hostdata;
   2225	struct Scsi_Host *host;
   2226	struct device *dev = &vdev->dev;
   2227	struct srp_rport_identifiers ids;
   2228	struct srp_rport *rport;
   2229	unsigned long wait_switch = 0;
   2230	int rc;
   2231
   2232	dev_set_drvdata(&vdev->dev, NULL);
   2233
   2234	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
   2235	if (!host) {
   2236		dev_err(&vdev->dev, "couldn't allocate host data\n");
   2237		goto scsi_host_alloc_failed;
   2238	}
   2239
   2240	host->transportt = ibmvscsi_transport_template;
   2241	hostdata = shost_priv(host);
   2242	memset(hostdata, 0x00, sizeof(*hostdata));
   2243	INIT_LIST_HEAD(&hostdata->sent);
   2244	init_waitqueue_head(&hostdata->work_wait_q);
   2245	hostdata->host = host;
   2246	hostdata->dev = dev;
   2247	ibmvscsi_set_request_limit(hostdata, -1);
   2248	hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
   2249
   2250	if (map_persist_bufs(hostdata)) {
   2251		dev_err(&vdev->dev, "couldn't map persistent buffers\n");
   2252		goto persist_bufs_failed;
   2253	}
   2254
   2255	hostdata->work_thread = kthread_run(ibmvscsi_work, hostdata, "%s_%d",
   2256					    "ibmvscsi", host->host_no);
   2257
   2258	if (IS_ERR(hostdata->work_thread)) {
   2259		dev_err(&vdev->dev, "couldn't initialize kthread. rc=%ld\n",
   2260			PTR_ERR(hostdata->work_thread));
   2261		goto init_crq_failed;
   2262	}
   2263
   2264	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
   2265	if (rc != 0 && rc != H_RESOURCE) {
   2266		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
   2267		goto kill_kthread;
   2268	}
   2269	if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
   2270		dev_err(&vdev->dev, "couldn't initialize event pool\n");
   2271		goto init_pool_failed;
   2272	}
   2273
   2274	host->max_lun = IBMVSCSI_MAX_LUN;
   2275	host->max_id = max_id;
   2276	host->max_channel = max_channel;
   2277	host->max_cmd_len = 16;
   2278
   2279	dev_info(dev,
   2280		 "Maximum ID: %d Maximum LUN: %llu Maximum Channel: %d\n",
   2281		 host->max_id, host->max_lun, host->max_channel);
   2282
   2283	if (scsi_add_host(hostdata->host, hostdata->dev))
   2284		goto add_host_failed;
   2285
   2286	/* we don't have a proper target_port_id so let's use the fake one */
   2287	memcpy(ids.port_id, hostdata->madapter_info.partition_name,
   2288	       sizeof(ids.port_id));
   2289	ids.roles = SRP_RPORT_ROLE_TARGET;
   2290	rport = srp_rport_add(host, &ids);
   2291	if (IS_ERR(rport))
   2292		goto add_srp_port_failed;
   2293
   2294	/* Try to send an initialization message.  Note that this is allowed
   2295	 * to fail if the other end is not acive.  In that case we don't
   2296	 * want to scan
   2297	 */
   2298	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
   2299	    || rc == H_RESOURCE) {
   2300		/*
   2301		 * Wait around max init_timeout secs for the adapter to finish
   2302		 * initializing. When we are done initializing, we will have a
   2303		 * valid request_limit.  We don't want Linux scanning before
   2304		 * we are ready.
   2305		 */
   2306		for (wait_switch = jiffies + (init_timeout * HZ);
   2307		     time_before(jiffies, wait_switch) &&
   2308		     atomic_read(&hostdata->request_limit) < 2;) {
   2309
   2310			msleep(10);
   2311		}
   2312
   2313		/* if we now have a valid request_limit, initiate a scan */
   2314		if (atomic_read(&hostdata->request_limit) > 0)
   2315			scsi_scan_host(host);
   2316	}
   2317
   2318	dev_set_drvdata(&vdev->dev, hostdata);
   2319	spin_lock(&ibmvscsi_driver_lock);
   2320	list_add_tail(&hostdata->host_list, &ibmvscsi_head);
   2321	spin_unlock(&ibmvscsi_driver_lock);
   2322	return 0;
   2323
   2324      add_srp_port_failed:
   2325	scsi_remove_host(hostdata->host);
   2326      add_host_failed:
   2327	release_event_pool(&hostdata->pool, hostdata);
   2328      init_pool_failed:
   2329	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
   2330      kill_kthread:
    2331	kthread_stop(hostdata->work_thread);
   2332      init_crq_failed:
   2333	unmap_persist_bufs(hostdata);
   2334      persist_bufs_failed:
   2335	scsi_host_put(host);
   2336      scsi_host_alloc_failed:
   2337	return -1;
   2338}
   2339
   2340static void ibmvscsi_remove(struct vio_dev *vdev)
   2341{
   2342	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
   2343
   2344	srp_remove_host(hostdata->host);
   2345	scsi_remove_host(hostdata->host);
   2346
   2347	purge_requests(hostdata, DID_ERROR);
   2348	release_event_pool(&hostdata->pool, hostdata);
   2349
   2350	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
   2351					max_events);
   2352
   2353	kthread_stop(hostdata->work_thread);
   2354	unmap_persist_bufs(hostdata);
   2355
   2356	spin_lock(&ibmvscsi_driver_lock);
   2357	list_del(&hostdata->host_list);
   2358	spin_unlock(&ibmvscsi_driver_lock);
   2359
   2360	scsi_host_put(hostdata->host);
   2361}
   2362
   2363/**
   2364 * ibmvscsi_resume: Resume from suspend
   2365 * @dev:	device struct
   2366 *
   2367 * We may have lost an interrupt across suspend/resume, so kick the
   2368 * interrupt handler
   2369 */
   2370static int ibmvscsi_resume(struct device *dev)
   2371{
   2372	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
   2373	vio_disable_interrupts(to_vio_dev(hostdata->dev));
   2374	tasklet_schedule(&hostdata->srp_task);
   2375
   2376	return 0;
   2377}
   2378
   2379/*
   2380 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we 
   2381 * support.
   2382 */
   2383static const struct vio_device_id ibmvscsi_device_table[] = {
   2384	{"vscsi", "IBM,v-scsi"},
   2385	{ "", "" }
   2386};
   2387MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
   2388
   2389static const struct dev_pm_ops ibmvscsi_pm_ops = {
   2390	.resume = ibmvscsi_resume
   2391};
   2392
   2393static struct vio_driver ibmvscsi_driver = {
   2394	.id_table = ibmvscsi_device_table,
   2395	.probe = ibmvscsi_probe,
   2396	.remove = ibmvscsi_remove,
   2397	.get_desired_dma = ibmvscsi_get_desired_dma,
   2398	.name = "ibmvscsi",
   2399	.pm = &ibmvscsi_pm_ops,
   2400};
   2401
   2402static struct srp_function_template ibmvscsi_transport_functions = {
   2403};
   2404
   2405static int __init ibmvscsi_module_init(void)
   2406{
   2407	int ret;
   2408
   2409	/* Ensure we have two requests to do error recovery */
   2410	driver_template.can_queue = max_requests;
   2411	max_events = max_requests + 2;
   2412
   2413	if (!firmware_has_feature(FW_FEATURE_VIO))
   2414		return -ENODEV;
   2415
   2416	ibmvscsi_transport_template =
   2417		srp_attach_transport(&ibmvscsi_transport_functions);
   2418	if (!ibmvscsi_transport_template)
   2419		return -ENOMEM;
   2420
   2421	ret = vio_register_driver(&ibmvscsi_driver);
   2422	if (ret)
   2423		srp_release_transport(ibmvscsi_transport_template);
   2424	return ret;
   2425}
   2426
   2427static void __exit ibmvscsi_module_exit(void)
   2428{
   2429	vio_unregister_driver(&ibmvscsi_driver);
   2430	srp_release_transport(ibmvscsi_transport_template);
   2431}
   2432
   2433module_init(ibmvscsi_module_init);
   2434module_exit(ibmvscsi_module_exit);