cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ibmvfc.c (182094B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
 *
 * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) IBM Corporation, 2008
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/stringify.h>
#include <linux/bsg-lib.h>
#include <asm/firmware.h>
#include <asm/irq.h>
#include <asm/rtas.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include "ibmvfc.h"

static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
static unsigned int mq_enabled = IBMVFC_MQ;
static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;

static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;

MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);

module_param_named(mq, mq_enabled, uint, S_IRUGO);
MODULE_PARM_DESC(mq, "Enable multiqueue support. "
		 "[Default=" __stringify(IBMVFC_MQ) "]");
module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
		 "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. "
		 "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
MODULE_PARM_DESC(mig_channels_only, "Prevent migration to non-channelized system. "
		 "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to system with less channels. "
		 "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");

module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_timeout,
		 "Default timeout in seconds for initialization and EH commands. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
module_param_named(max_lun, max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
module_param_named(max_targets, max_targets, uint, S_IRUGO);
MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable driver debug information. "
		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
module_param_named(cls3_error, cls3_error, uint, 0);
MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");

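/*
 * Illustrative example (not part of the driver): all of the above are
 * module load time options, e.g.
 *
 *   modprobe ibmvfc mq=1 scsi_host_queues=8 debug=1
 *
 * Parameters declared with S_IRUGO can be read back at runtime from
 * /sys/module/ibmvfc/parameters/<name>.
 */
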
static const struct {
	u16 status;
	u16 error;
	u8 result;
	u8 retry;
	int log;
	char *name;
} cmd_status [] = {
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },

	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },

	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },

	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};
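
/*
 * Example of how the table reads (illustrative): a response carrying
 * status IBMVFC_FABRIC_MAPPED and error IBMVFC_CMD_TIMEOUT maps to the
 * SCSI result DID_TIME_OUT, is retryable (retry == 1) and is logged
 * (log == 1). See ibmvfc_get_err_index() below for the lookup rules.
 */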

static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);

static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *);
static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *);

static const char *unknown_error = "unknown error";

static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
			  unsigned long length, unsigned long *cookie,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

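	/*
	 * H_REG_SUB_CRQ returns two values in the hcall return buffer: the
	 * cookie identifying the new sub-CRQ and the interrupt number the
	 * hypervisor assigned to it.
	 */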
	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length);
	*cookie = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
{
	u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);

	return (host_caps & cap_flags) ? 1 : 0;
}

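/*
 * The FCP command layout depends on whether the VIOS advertised the
 * IBMVFC_HANDLE_VF_WWPN capability in its NPIV login response; the two
 * helpers below pick the v2 or v1 view of the command IU and response
 * accordingly.
 */
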
static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
						   struct ibmvfc_cmd *vfc_cmd)
{
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
		return &vfc_cmd->v2.iu;
	else
		return &vfc_cmd->v1.iu;
}

static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
						 struct ibmvfc_cmd *vfc_cmd)
{
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
		return &vfc_cmd->v2.rsp;
	else
		return &vfc_cmd->v1.rsp;
}

#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_trc_start - Log a start trace entry
 * @evt:		ibmvfc event struct
 *
 **/
static void ibmvfc_trc_start(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_trace_entry *entry;
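	/*
	 * The trace log is a power-of-two sized ring buffer: masking the
	 * ever-increasing index wraps it, so the newest entries overwrite
	 * the oldest.
	 */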
	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;

	entry = &vhost->trace[index];
	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_START;

	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = iu->cdb[0];
		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
		entry->lun = scsilun_to_int(&iu->lun);
		entry->tmf_flags = iu->tmf_flags;
		entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = be32_to_cpu(mad->opcode);
		break;
	default:
		break;
	}
}

/**
 * ibmvfc_trc_end - Log an end trace entry
 * @evt:		ibmvfc event struct
 *
 **/
static void ibmvfc_trc_end(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	struct ibmvfc_trace_entry *entry;
	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;

	entry = &vhost->trace[index];
	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_END;

	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = iu->cdb[0];
		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
		entry->lun = scsilun_to_int(&iu->lun);
		entry->tmf_flags = iu->tmf_flags;
		entry->u.end.status = be16_to_cpu(vfc_cmd->status);
		entry->u.end.error = be16_to_cpu(vfc_cmd->error);
		entry->u.end.fcp_rsp_flags = rsp->flags;
		entry->u.end.rsp_code = rsp->data.info.rsp_code;
		entry->u.end.scsi_status = rsp->scsi_status;
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = be32_to_cpu(mad->opcode);
		entry->u.end.status = be16_to_cpu(mad->status);
		break;
	default:
		break;
	}
}

#else
#define ibmvfc_trc_start(evt) do { } while (0)
#define ibmvfc_trc_end(evt) do { } while (0)
#endif

/**
 * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
 * @status:		status / error class
 * @error:		error
 *
 * Return value:
 *	index into cmd_status / -EINVAL on failure
 **/
static int ibmvfc_get_err_index(u16 status, u16 error)
{
	int i;

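	/*
	 * An entry matches when every bit of its status class is set in
	 * the reported status (status is treated as a mask, not compared
	 * for equality) and the error codes are equal.
	 */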
	for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
		if ((cmd_status[i].status & status) == cmd_status[i].status &&
		    cmd_status[i].error == error)
			return i;

	return -EINVAL;
}

/**
 * ibmvfc_get_cmd_error - Find the error description for the fcp response
 * @status:		status / error class
 * @error:		error
 *
 * Return value:
 *	error description string
 **/
static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
{
	int rc = ibmvfc_get_err_index(status, error);
	if (rc >= 0)
		return cmd_status[rc].name;
	return unknown_error;
}

/**
 * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
 * @vhost:      ibmvfc host struct
 * @vfc_cmd:	ibmvfc command struct
 *
 * Return value:
 *	SCSI result value to return for completed command
 **/
static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
{
	int err;
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);

	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
	     rsp->data.info.rsp_code))
		return DID_ERROR << 16;

	err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
	if (err >= 0)
		return rsp->scsi_status | (cmd_status[err].result << 16);
	return rsp->scsi_status | (DID_ERROR << 16);
}

/**
 * ibmvfc_retry_cmd - Determine if error status is retryable
 * @status:		status / error class
 * @error:		error
 *
 * Return value:
 *	1 if error should be retried / 0 if it should not
 **/
static int ibmvfc_retry_cmd(u16 status, u16 error)
{
	int rc = ibmvfc_get_err_index(status, error);

	if (rc >= 0)
		return cmd_status[rc].retry;
	return 1;
}

static const char *unknown_fc_explain = "unknown fc explain";

static const struct {
	u16 fc_explain;
	char *name;
} ls_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "service parameter error - options" },
	{ 0x03, "service parameter error - initiator control" },
	{ 0x05, "service parameter error - recipient control" },
	{ 0x07, "service parameter error - received data field size" },
	{ 0x09, "service parameter error - concurrent seq" },
	{ 0x0B, "service parameter error - credit" },
	{ 0x0D, "invalid N_Port/F_Port_Name" },
	{ 0x0E, "invalid node/Fabric Name" },
	{ 0x0F, "invalid common service parameters" },
	{ 0x11, "invalid association header" },
	{ 0x13, "association header required" },
	{ 0x15, "invalid originator S_ID" },
	{ 0x17, "invalid OX_ID-RX-ID combination" },
	{ 0x19, "command (request) already in progress" },
	{ 0x1E, "N_Port Login requested" },
	{ 0x1F, "Invalid N_Port_ID" },
};

static const struct {
	u16 fc_explain;
	char *name;
} gs_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "port identifier not registered" },
	{ 0x02, "port name not registered" },
	{ 0x03, "node name not registered" },
	{ 0x04, "class of service not registered" },
	{ 0x06, "initial process associator not registered" },
	{ 0x07, "FC-4 TYPEs not registered" },
	{ 0x08, "symbolic port name not registered" },
	{ 0x09, "symbolic node name not registered" },
	{ 0x0A, "port type not registered" },
	{ 0xF0, "authorization exception" },
	{ 0xF1, "authentication exception" },
	{ 0xF2, "data base full" },
	{ 0xF3, "data base empty" },
	{ 0xF4, "processing request" },
	{ 0xF5, "unable to verify connection" },
	{ 0xF6, "devices not in a common zone" },
};

/**
 * ibmvfc_get_ls_explain - Return the FC Explain description text
 * @status:	FC Explain status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_ls_explain(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
		if (ls_explain[i].fc_explain == status)
			return ls_explain[i].name;

	return unknown_fc_explain;
}

/**
 * ibmvfc_get_gs_explain - Return the FC Explain description text
 * @status:	FC Explain status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_gs_explain(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
		if (gs_explain[i].fc_explain == status)
			return gs_explain[i].name;

	return unknown_fc_explain;
}

static const struct {
	enum ibmvfc_fc_type fc_type;
	char *name;
} fc_type [] = {
	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
	{ IBMVFC_PORT_REJECT, "port reject" },
	{ IBMVFC_LS_REJECT, "ELS reject" },
	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
	{ IBMVFC_PORT_BUSY, "port busy" },
	{ IBMVFC_BASIC_REJECT, "basic reject" },
};

static const char *unknown_fc_type = "unknown fc type";

/**
 * ibmvfc_get_fc_type - Return the FC Type description text
 * @status:	FC Type error status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_fc_type(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fc_type); i++)
		if (fc_type[i].fc_type == status)
			return fc_type[i].name;

	return unknown_fc_type;
}

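/*
 * Target actions form a one-way teardown state machine, encoded in the
 * switch below; only forward transitions are permitted, e.g.
 *
 *   LOGOUT_RPORT -> LOGOUT_RPORT_WAIT -> DEL_RPORT -> DELETED_RPORT
 *
 * so a target that has started going away cannot be resurrected by a
 * stale caller.
 */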
/**
 * ibmvfc_set_tgt_action - Set the next init action for the target
 * @tgt:		ibmvfc target struct
 * @action:		action to perform
 *
 * Returns:
 *	0 if action changed / non-zero if not changed
 **/
static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
				  enum ibmvfc_target_action action)
{
	int rc = -EINVAL;

	switch (tgt->action) {
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
		    action == IBMVFC_TGT_ACTION_DEL_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
		if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
		    action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_RPORT:
		if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DELETED_RPORT:
		break;
	default:
		tgt->action = action;
		rc = 0;
		break;
	}

	if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
		tgt->add_rport = 0;

	return rc;
}

/**
 * ibmvfc_set_host_state - Set the state for the host
 * @vhost:		ibmvfc host struct
 * @state:		state to set host to
 *
 * Returns:
 *	0 if state changed / non-zero if not changed
 **/
static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
				  enum ibmvfc_host_state state)
{
	int rc = 0;

	switch (vhost->state) {
	case IBMVFC_HOST_OFFLINE:
		rc = -EINVAL;
		break;
	default:
		vhost->state = state;
		break;
	}

	return rc;
}

/**
 * ibmvfc_set_host_action - Set the next init action for the host
 * @vhost:		ibmvfc host struct
 * @action:		action to perform
 *
 **/
static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
				   enum ibmvfc_host_action action)
{
	switch (action) {
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_INIT_WAIT:
		case IBMVFC_HOST_ACTION_NONE:
		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
			vhost->action = action;
			break;
		default:
			break;
		}
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_REENABLE:
	case IBMVFC_HOST_ACTION_RESET:
		vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
	case IBMVFC_HOST_ACTION_NONE:
	default:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_RESET:
		case IBMVFC_HOST_ACTION_REENABLE:
			break;
		default:
			vhost->action = action;
			break;
		}
		break;
	}
}

/**
 * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
 * @vhost:		ibmvfc host struct
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
	if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
	    vhost->state == IBMVFC_ACTIVE) {
		if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
			scsi_block_requests(vhost->host);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
		}
	} else
		vhost->reinit = 1;

	wake_up(&vhost->work_wait_q);
}

/**
 * ibmvfc_del_tgt - Schedule cleanup and removal of the target
 * @tgt:		ibmvfc target struct
 **/
static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
{
	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) {
		tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
		tgt->init_retries = 0;
	}
	wake_up(&tgt->vhost->work_wait_q);
}

/**
 * ibmvfc_link_down - Handle a link down event from the adapter
 * @vhost:	ibmvfc host struct
 * @state:	ibmvfc host state to enter
 *
 **/
static void ibmvfc_link_down(struct ibmvfc_host *vhost,
			     enum ibmvfc_host_state state)
{
	struct ibmvfc_target *tgt;

	ENTER;
	scsi_block_requests(vhost->host);
	list_for_each_entry(tgt, &vhost->targets, queue)
		ibmvfc_del_tgt(tgt);
	ibmvfc_set_host_state(vhost, state);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
	wake_up(&vhost->work_wait_q);
	LEAVE;
}

/**
 * ibmvfc_init_host - Start host initialization
 * @vhost:		ibmvfc host struct
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_init_host(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
			return;
		}
	}

	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
		vhost->async_crq.cur = 0;

		list_for_each_entry(tgt, &vhost->targets, queue)
			ibmvfc_del_tgt(tgt);
		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
		vhost->job_step = ibmvfc_npiv_login;
		wake_up(&vhost->work_wait_q);
	}
}

/**
 * ibmvfc_send_crq - Send a CRQ
 * @vhost:	ibmvfc host struct
 * @word1:	the first 64 bits of the data
 * @word2:	the second 64 bits of the data
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}

static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
			       u64 word2, u64 word3, u64 word4)
{
	struct vio_dev *vdev = to_vio_dev(vhost->dev);

	return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
				  word1, word2, word3, word4);
}

/**
 * ibmvfc_send_crq_init - Send a CRQ init message
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init\n");
	return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
}

/**
 * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
	return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
}
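
/*
 * Background note (CRQ transport convention, as used by other VIO
 * clients such as ibmvscsi): a leading byte of 0xC0 marks a transport
 * initialization message, and the second byte selects init (0x01) or
 * init complete (0x02), which is where the 0xC001.../0xC002...
 * constants above come from.
 */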

/**
 * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
 * @vhost:	ibmvfc host who owns the event pool
 * @queue:      ibmvfc queue struct
 * @size:       pool size
 *
 * Returns zero on success.
 **/
static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
				  struct ibmvfc_queue *queue,
				  unsigned int size)
{
	int i;
	struct ibmvfc_event_pool *pool = &queue->evt_pool;

	ENTER;
	if (!size)
		return 0;

	pool->size = size;
	pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	pool->iu_storage = dma_alloc_coherent(vhost->dev,
					      size * sizeof(*pool->iu_storage),
					      &pool->iu_token, 0);

	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&queue->sent);
	INIT_LIST_HEAD(&queue->free);
	spin_lock_init(&queue->l_lock);

	for (i = 0; i < size; ++i) {
		struct ibmvfc_event *evt = &pool->events[i];

		/*
		 * evt->active states
		 *  1 = in flight
		 *  0 = being completed
		 * -1 = free/freed
		 */
		atomic_set(&evt->active, -1);
		atomic_set(&evt->free, 1);
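		/* 0x80 in the first byte marks a valid command entry in the CRQ protocol */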
		evt->crq.valid = 0x80;
		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
		evt->xfer_iu = pool->iu_storage + i;
		evt->vhost = vhost;
		evt->queue = queue;
		evt->ext_list = NULL;
		list_add_tail(&evt->queue_list, &queue->free);
	}

	LEAVE;
	return 0;
}

/**
 * ibmvfc_free_event_pool - Frees memory of the event pool of a host
 * @vhost:	ibmvfc host who owns the event pool
 * @queue:      ibmvfc queue struct
 *
 **/
static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
				   struct ibmvfc_queue *queue)
{
	int i;
	struct ibmvfc_event_pool *pool = &queue->evt_pool;

	ENTER;
	for (i = 0; i < pool->size; ++i) {
		list_del(&pool->events[i].queue_list);
		BUG_ON(atomic_read(&pool->events[i].free) != 1);
		if (pool->events[i].ext_list)
			dma_pool_free(vhost->sg_pool,
				      pool->events[i].ext_list,
				      pool->events[i].ext_list_token);
	}

	kfree(pool->events);
	dma_free_coherent(vhost->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
	LEAVE;
}

/**
 * ibmvfc_free_queue - Deallocate queue
 * @vhost:	ibmvfc host struct
 * @queue:	ibmvfc queue struct
 *
 * Unmaps dma and deallocates page for messages
 **/
static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
			      struct ibmvfc_queue *queue)
{
	struct device *dev = vhost->dev;

	dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs.handle);
	queue->msgs.handle = NULL;

	ibmvfc_free_event_pool(vhost, queue);
}

/**
 * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
 * @vhost:	ibmvfc host struct
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor.
 **/
static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
{
	long rc = 0;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_queue *crq = &vhost->crq;

	ibmvfc_dbg(vhost, "Releasing CRQ\n");
	free_irq(vdev->irq, vhost);
	tasklet_kill(&vhost->tasklet);
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;

	ibmvfc_free_queue(vhost, crq);
}

/**
 * ibmvfc_reenable_crq_queue - reenables the CRQ
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
{
	int rc = 0;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	unsigned long flags;

	ibmvfc_dereg_sub_crqs(vhost);

	/* Re-enable the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(vhost->crq.q_lock);
	vhost->do_enquiry = 1;
	vhost->using_channels = 0;
	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_reg_sub_crqs(vhost);

	return rc;
}

/**
 * ibmvfc_reset_crq - resets a crq after a failure
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
{
	int rc = 0;
	unsigned long flags;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_queue *crq = &vhost->crq;

	ibmvfc_dereg_sub_crqs(vhost);

	/* Close the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(vhost->crq.q_lock);
	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;
	vhost->do_enquiry = 1;
	vhost->using_channels = 0;

	/* Clean out the queue */
	memset(crq->msgs.crq, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(vhost->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);

	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_reg_sub_crqs(vhost);

	return rc;
}

/**
 * ibmvfc_valid_event - Determines if event is valid.
 * @pool:	event_pool that contains the event
 * @evt:	ibmvfc event to be checked for validity
 *
 * Return value:
 *	1 if event is valid / 0 if event is not valid
 **/
static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
			      struct ibmvfc_event *evt)
{
	int index = evt - pool->events;
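	/*
	 * Pointer subtraction truncates, so re-deriving the element
	 * address below catches pointers that land inside an element
	 * ("unaligned") as well as those outside the pool.
	 */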
	if (index < 0 || index >= pool->size)	/* outside of bounds */
		return 0;
	if (evt != pool->events + index)	/* unaligned */
		return 0;
	return 1;
}

/**
 * ibmvfc_free_event - Free the specified event
 * @evt:	ibmvfc_event to be freed
 *
 **/
static void ibmvfc_free_event(struct ibmvfc_event *evt)
{
	struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
	unsigned long flags;

	BUG_ON(!ibmvfc_valid_event(pool, evt));
	BUG_ON(atomic_inc_return(&evt->free) != 1);
	BUG_ON(atomic_dec_and_test(&evt->active));

	spin_lock_irqsave(&evt->queue->l_lock, flags);
	list_add_tail(&evt->queue_list, &evt->queue->free);
	if (evt->eh_comp)
		complete(evt->eh_comp);
	spin_unlock_irqrestore(&evt->queue->l_lock, flags);
}

/**
 * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
 * @evt:	ibmvfc event struct
 *
 * This function does not setup any error status, that must be done
 * before this function gets called.
 **/
static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
{
	struct scsi_cmnd *cmnd = evt->cmnd;

	if (cmnd) {
		scsi_dma_unmap(cmnd);
		scsi_done(cmnd);
	}

	ibmvfc_free_event(evt);
}

/**
 * ibmvfc_complete_purge - Complete failed command list
 * @purge_list:		list head of failed commands
 *
 * This function runs completions on commands to fail as a result of a
 * host reset or platform migration.
 **/
static void ibmvfc_complete_purge(struct list_head *purge_list)
{
	struct ibmvfc_event *evt, *pos;

	list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
		list_del(&evt->queue_list);
		ibmvfc_trc_end(evt);
		evt->done(evt);
	}
}

/**
 * ibmvfc_fail_request - Fail request with specified error code
 * @evt:		ibmvfc event struct
 * @error_code:	error code to fail request with
 *
 * Return value:
 *	none
 **/
static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{
	/*
	 * Anything we are failing should still be active. Otherwise, it
	 * implies we already got a response for the command and are doing
	 * something bad like double completing it.
	 */
	BUG_ON(!atomic_dec_and_test(&evt->active));
	if (evt->cmnd) {
		evt->cmnd->result = (error_code << 16);
		evt->done = ibmvfc_scsi_eh_done;
	} else
		evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);

	del_timer(&evt->timer);
}

/**
 * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
 * @vhost:		ibmvfc host struct
 * @error_code:	error code to fail requests with
 *
 * Return value:
 *	none
 **/
static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
{
	struct ibmvfc_event *evt, *pos;
	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
	unsigned long flags;
	int hwqs = 0;
	int i;

	if (vhost->using_channels)
		hwqs = vhost->scsi_scrqs.active_queues;

	ibmvfc_dbg(vhost, "Purging all requests\n");
	spin_lock_irqsave(&vhost->crq.l_lock, flags);
	list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
		ibmvfc_fail_request(evt, error_code);
	list_splice_init(&vhost->crq.sent, &vhost->purge);
	spin_unlock_irqrestore(&vhost->crq.l_lock, flags);

	for (i = 0; i < hwqs; i++) {
		spin_lock_irqsave(queues[i].q_lock, flags);
		spin_lock(&queues[i].l_lock);
		list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
			ibmvfc_fail_request(evt, error_code);
		list_splice_init(&queues[i].sent, &vhost->purge);
		spin_unlock(&queues[i].l_lock);
		spin_unlock_irqrestore(queues[i].q_lock, flags);
	}
}

/**
 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
 * @vhost:	struct ibmvfc host to reset
 **/
static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
{
	ibmvfc_purge_requests(vhost, DID_ERROR);
	ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
}

/**
 * __ibmvfc_reset_host - Reset the connection to the server (no locking)
 * @vhost:	struct ibmvfc host to reset
 **/
static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
	if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
	    !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
		vhost->job_step = ibmvfc_npiv_logout;
		wake_up(&vhost->work_wait_q);
	} else
		ibmvfc_hard_reset_host(vhost);
}

/**
 * ibmvfc_reset_host - Reset the connection to the server
 * @vhost:	ibmvfc host struct
 **/
static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
	unsigned long flags;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	__ibmvfc_reset_host(vhost);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}

/**
 * ibmvfc_retry_host_init - Retry host initialization if allowed
 * @vhost:	ibmvfc host struct
 *
 * Returns: 1 if init will be retried / 0 if not
 *
 **/
static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
{
	int retry = 0;

	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		vhost->delay_init = 1;
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
		} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
			__ibmvfc_reset_host(vhost);
		else {
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
			retry = 1;
		}
	}

	wake_up(&vhost->work_wait_q);
	return retry;
}

/**
 * __ibmvfc_get_target - Find the specified scsi_target (no locking)
 * @starget:	scsi target struct
 *
 * Return value:
 *	ibmvfc_target struct / NULL if not found
 **/
static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ibmvfc_host *vhost = shost_priv(shost);
	struct ibmvfc_target *tgt;

	list_for_each_entry(tgt, &vhost->targets, queue)
		if (tgt->target_id == starget->id) {
			kref_get(&tgt->kref);
			return tgt;
		}
	return NULL;
}

/**
 * ibmvfc_get_target - Find the specified scsi_target
 * @starget:	scsi target struct
 *
 * Return value:
 *	ibmvfc_target struct / NULL if not found
 **/
static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ibmvfc_target *tgt;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	tgt = __ibmvfc_get_target(starget);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return tgt;
}

/**
 * ibmvfc_get_host_speed - Get host port speed
 * @shost:		scsi host struct
 *
 * Return value:
 * 	none
 **/
static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	if (vhost->state == IBMVFC_ACTIVE) {
		switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
		case 1:
			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
			break;
		case 2:
			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
			break;
		case 4:
			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
			break;
		case 8:
			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
			break;
		case 10:
			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
			break;
		case 16:
			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
			break;
		default:
			ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
				   be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
			break;
		}
	} else
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * ibmvfc_get_host_port_state - Get host port state
 * @shost:		scsi host struct
 *
 * Return value:
 * 	none
 **/
static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	switch (vhost->state) {
	case IBMVFC_INITIALIZING:
	case IBMVFC_ACTIVE:
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
		break;
	case IBMVFC_LINK_DOWN:
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
		break;
	case IBMVFC_LINK_DEAD:
	case IBMVFC_HOST_OFFLINE:
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		break;
	case IBMVFC_HALTED:
		fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
		break;
	case IBMVFC_NO_CRQ:
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	default:
		ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
 * @rport:		rport struct
 * @timeout:	timeout value
 *
 * Return value:
 * 	none
 **/
static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
}

/**
 * ibmvfc_release_tgt - Free memory allocated for a target
 * @kref:		kref struct
 *
 **/
static void ibmvfc_release_tgt(struct kref *kref)
{
	struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
	kfree(tgt);
}

/**
 * ibmvfc_get_starget_node_name - Get SCSI target's node name
 * @starget:	scsi target struct
 *
 * Return value:
 * 	none
 **/
static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
{
	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
	fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
   1365	if (tgt)
   1366		kref_put(&tgt->kref, ibmvfc_release_tgt);
   1367}
   1368
   1369/**
   1370 * ibmvfc_get_starget_port_name - Get SCSI target's port name
   1371 * @starget:	scsi target struct
   1372 *
   1373 * Return value:
   1374 * 	none
   1375 **/
   1376static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
   1377{
   1378	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
   1379	fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
   1380	if (tgt)
   1381		kref_put(&tgt->kref, ibmvfc_release_tgt);
   1382}
   1383
   1384/**
   1385 * ibmvfc_get_starget_port_id - Get SCSI target's port ID
   1386 * @starget:	scsi target struct
   1387 *
   1388 * Return value:
   1389 * 	none
   1390 **/
   1391static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
   1392{
   1393	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
   1394	fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
   1395	if (tgt)
   1396		kref_put(&tgt->kref, ibmvfc_release_tgt);
   1397}
   1398
   1399/**
   1400 * ibmvfc_wait_while_resetting - Wait while the host resets
   1401 * @vhost:		ibmvfc host struct
   1402 *
   1403 * Return value:
   1404 * 	0 on success / other on failure
   1405 **/
   1406static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
   1407{
   1408	long timeout = wait_event_timeout(vhost->init_wait_q,
   1409					  ((vhost->state == IBMVFC_ACTIVE ||
   1410					    vhost->state == IBMVFC_HOST_OFFLINE ||
   1411					    vhost->state == IBMVFC_LINK_DEAD) &&
   1412					   vhost->action == IBMVFC_HOST_ACTION_NONE),
   1413					  (init_timeout * HZ));
   1414
   1415	return timeout ? 0 : -EIO;
   1416}
   1417
   1418/**
   1419 * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
   1420 * @shost:		scsi host struct
   1421 *
   1422 * Return value:
   1423 * 	0 on success / other on failure
   1424 **/
   1425static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
   1426{
   1427	struct ibmvfc_host *vhost = shost_priv(shost);
   1428
   1429	dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
   1430	ibmvfc_reset_host(vhost);
   1431	return ibmvfc_wait_while_resetting(vhost);
   1432}
   1433
   1434/**
   1435 * ibmvfc_gather_partition_info - Gather info about the LPAR
   1436 * @vhost:      ibmvfc host struct
   1437 *
   1438 * Return value:
   1439 *	none
   1440 **/
   1441static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
   1442{
   1443	struct device_node *rootdn;
   1444	const char *name;
   1445	const unsigned int *num;
   1446
   1447	rootdn = of_find_node_by_path("/");
   1448	if (!rootdn)
   1449		return;
   1450
   1451	name = of_get_property(rootdn, "ibm,partition-name", NULL);
   1452	if (name)
   1453		strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
   1454	num = of_get_property(rootdn, "ibm,partition-no", NULL);
   1455	if (num)
   1456		vhost->partition_number = *num;
   1457	of_node_put(rootdn);
   1458}
   1459
   1460/**
   1461 * ibmvfc_set_login_info - Setup info for NPIV login
   1462 * @vhost:	ibmvfc host struct
   1463 *
   1464 * Return value:
   1465 *	none
   1466 **/
   1467static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
   1468{
   1469	struct ibmvfc_npiv_login *login_info = &vhost->login_info;
   1470	struct ibmvfc_queue *async_crq = &vhost->async_crq;
   1471	struct device_node *of_node = vhost->dev->of_node;
   1472	const char *location;
   1473
   1474	memset(login_info, 0, sizeof(*login_info));
   1475
   1476	login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
   1477	login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
   1478	login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
   1479	login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
   1480	login_info->partition_num = cpu_to_be32(vhost->partition_number);
   1481	login_info->vfc_frame_version = cpu_to_be32(1);
   1482	login_info->fcp_version = cpu_to_be16(3);
   1483	login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
   1484	if (vhost->client_migrated)
   1485		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
   1486
   1487	login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
   1488	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
   1489
   1490	if (vhost->mq_enabled || vhost->using_channels)
   1491		login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
   1492
   1493	login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
   1494	login_info->async.len = cpu_to_be32(async_crq->size *
   1495					    sizeof(*async_crq->msgs.async));
   1496	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
   1497	strncpy(login_info->device_name,
   1498		dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
   1499
   1500	location = of_get_property(of_node, "ibm,loc-code", NULL);
   1501	location = location ? location : dev_name(vhost->dev);
   1502	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
   1503}
   1504
   1505/**
   1506 * ibmvfc_get_event - Gets the next free event in pool
   1507 * @queue:      ibmvfc queue struct
   1508 *
   1509 * Returns a free event from the pool.
   1510 **/
   1511static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
   1512{
   1513	struct ibmvfc_event *evt;
   1514	unsigned long flags;
   1515
   1516	spin_lock_irqsave(&queue->l_lock, flags);
   1517	BUG_ON(list_empty(&queue->free));
   1518	evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
   1519	atomic_set(&evt->free, 0);
   1520	list_del(&evt->queue_list);
   1521	spin_unlock_irqrestore(&queue->l_lock, flags);
   1522	return evt;
   1523}
   1524
   1525/**
   1526 * ibmvfc_locked_done - Calls evt completion with host_lock held
   1527 * @evt:	ibmvfc evt to complete
   1528 *
   1529 * All non-scsi command completion callbacks have the expectation that the
   1530 * host_lock is held. This callback is used by ibmvfc_init_event to wrap a
   1531 * MAD evt with the host_lock.
   1532 **/
   1533static void ibmvfc_locked_done(struct ibmvfc_event *evt)
   1534{
   1535	unsigned long flags;
   1536
   1537	spin_lock_irqsave(evt->vhost->host->host_lock, flags);
   1538	evt->_done(evt);
   1539	spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
   1540}
   1541
   1542/**
   1543 * ibmvfc_init_event - Initialize fields in an event struct that are always
   1544 *				required.
   1545 * @evt:	The event
   1546 * @done:	Routine to call when the event is responded to
   1547 * @format:	SRP or MAD format
   1548 **/
   1549static void ibmvfc_init_event(struct ibmvfc_event *evt,
   1550			      void (*done) (struct ibmvfc_event *), u8 format)
   1551{
   1552	evt->cmnd = NULL;
   1553	evt->sync_iu = NULL;
   1554	evt->eh_comp = NULL;
   1555	evt->crq.format = format;
   1556	if (format == IBMVFC_CMD_FORMAT)
   1557		evt->done = done;
   1558	else {
   1559		evt->_done = done;
   1560		evt->done = ibmvfc_locked_done;
   1561	}
   1562	evt->hwq = 0;
   1563}
   1564
   1565/**
   1566 * ibmvfc_map_sg_list - Initialize scatterlist
   1567 * @scmd:	scsi command struct
   1568 * @nseg:	number of scatterlist segments
   1569 * @md:	memory descriptor list to initialize
   1570 **/
   1571static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
   1572			       struct srp_direct_buf *md)
   1573{
   1574	int i;
   1575	struct scatterlist *sg;
   1576
   1577	scsi_for_each_sg(scmd, sg, nseg, i) {
   1578		md[i].va = cpu_to_be64(sg_dma_address(sg));
   1579		md[i].len = cpu_to_be32(sg_dma_len(sg));
   1580		md[i].key = 0;
   1581	}
   1582}
   1583
   1584/**
   1585 * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
   1586 * @scmd:		struct scsi_cmnd with the scatterlist
   1587 * @evt:		ibmvfc event struct
   1588 * @vfc_cmd:	vfc_cmd that contains the memory descriptor
   1589 * @dev:		device for which to map dma memory
   1590 *
   1591 * Returns:
   1592 *	0 on success / non-zero on failure
   1593 **/
   1594static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
   1595			      struct ibmvfc_event *evt,
   1596			      struct ibmvfc_cmd *vfc_cmd, struct device *dev)
   1597{
   1599	int sg_mapped;
   1600	struct srp_direct_buf *data = &vfc_cmd->ioba;
   1601	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
   1602	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);
   1603
   1604	if (cls3_error)
   1605		vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
   1606
   1607	sg_mapped = scsi_dma_map(scmd);
   1608	if (!sg_mapped) {
   1609		vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
   1610		return 0;
   1611	} else if (unlikely(sg_mapped < 0)) {
   1612		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
   1613			scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
   1614		return sg_mapped;
   1615	}
   1616
   1617	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
   1618		vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
   1619		iu->add_cdb_len |= IBMVFC_WRDATA;
   1620	} else {
   1621		vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
   1622		iu->add_cdb_len |= IBMVFC_RDDATA;
   1623	}
   1624
   1625	if (sg_mapped == 1) {
   1626		ibmvfc_map_sg_list(scmd, sg_mapped, data);
   1627		return 0;
   1628	}
   1629
   1630	vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);
   1631
   1632	if (!evt->ext_list) {
   1633		evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
   1634					       &evt->ext_list_token);
   1635
   1636		if (!evt->ext_list) {
   1637			scsi_dma_unmap(scmd);
   1638			if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
   1639				scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
   1640			return -ENOMEM;
   1641		}
   1642	}
   1643
   1644	ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
   1645
   1646	data->va = cpu_to_be64(evt->ext_list_token);
   1647	data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
   1648	data->key = 0;
   1649	return 0;
   1650}
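
        /*
         * Two descriptor shapes are used above: a single mapped segment is
         * described directly in vfc_cmd->ioba, while multiple segments are
         * written to an external srp_direct_buf array from sg_pool, with
         * IBMVFC_SCATTERLIST telling the VIOS to indirect through it.
         */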
   1651
   1652/**
   1653 * ibmvfc_timeout - Internal command timeout handler
    1654 * @t:	timer embedded in the struct ibmvfc_event that timed out
   1655 *
   1656 * Called when an internally generated command times out
   1657 **/
   1658static void ibmvfc_timeout(struct timer_list *t)
   1659{
   1660	struct ibmvfc_event *evt = from_timer(evt, t, timer);
   1661	struct ibmvfc_host *vhost = evt->vhost;
   1662	dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
   1663	ibmvfc_reset_host(vhost);
   1664}
   1665
   1666/**
   1667 * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
   1668 * @evt:		event to be sent
   1669 * @vhost:		ibmvfc host struct
    1670 * @timeout:	timeout in seconds; 0 means do not time the command
   1671 *
   1672 * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
   1673 **/
   1674static int ibmvfc_send_event(struct ibmvfc_event *evt,
   1675			     struct ibmvfc_host *vhost, unsigned long timeout)
   1676{
   1677	__be64 *crq_as_u64 = (__be64 *) &evt->crq;
   1678	unsigned long flags;
   1679	int rc;
   1680
   1681	/* Copy the IU into the transfer area */
   1682	*evt->xfer_iu = evt->iu;
   1683	if (evt->crq.format == IBMVFC_CMD_FORMAT)
   1684		evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
   1685	else if (evt->crq.format == IBMVFC_MAD_FORMAT)
   1686		evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
   1687	else
   1688		BUG();
   1689
   1690	timer_setup(&evt->timer, ibmvfc_timeout, 0);
   1691
   1692	if (timeout) {
   1693		evt->timer.expires = jiffies + (timeout * HZ);
   1694		add_timer(&evt->timer);
   1695	}
   1696
   1697	spin_lock_irqsave(&evt->queue->l_lock, flags);
   1698	list_add_tail(&evt->queue_list, &evt->queue->sent);
   1699	atomic_set(&evt->active, 1);
   1700
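        	/*
        	 * Make the sent-list addition and active flag above visible
        	 * before the event is handed to the hypervisor, so a fast
        	 * response cannot observe stale event state.
        	 */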
   1701	mb();
   1702
   1703	if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
   1704		rc = ibmvfc_send_sub_crq(vhost,
   1705					 evt->queue->vios_cookie,
   1706					 be64_to_cpu(crq_as_u64[0]),
   1707					 be64_to_cpu(crq_as_u64[1]),
   1708					 0, 0);
   1709	else
   1710		rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
   1711				     be64_to_cpu(crq_as_u64[1]));
   1712
   1713	if (rc) {
   1714		atomic_set(&evt->active, 0);
   1715		list_del(&evt->queue_list);
   1716		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
   1717		del_timer(&evt->timer);
   1718
   1719		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
   1720		 * Firmware will send a CRQ with a transport event (0xFF) to
   1721		 * tell this client what has happened to the transport. This
   1722		 * will be handled in ibmvfc_handle_crq()
   1723		 */
   1724		if (rc == H_CLOSED) {
   1725			if (printk_ratelimit())
   1726				dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
   1727			if (evt->cmnd)
   1728				scsi_dma_unmap(evt->cmnd);
   1729			ibmvfc_free_event(evt);
   1730			return SCSI_MLQUEUE_HOST_BUSY;
   1731		}
   1732
   1733		dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
   1734		if (evt->cmnd) {
   1735			evt->cmnd->result = DID_ERROR << 16;
   1736			evt->done = ibmvfc_scsi_eh_done;
   1737		} else
   1738			evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
   1739
   1740		evt->done(evt);
   1741	} else {
   1742		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
   1743		ibmvfc_trc_start(evt);
   1744	}
   1745
   1746	return 0;
   1747}
   1748
   1749/**
   1750 * ibmvfc_log_error - Log an error for the failed command if appropriate
   1751 * @evt:	ibmvfc event to log
   1752 *
   1753 **/
   1754static void ibmvfc_log_error(struct ibmvfc_event *evt)
   1755{
   1756	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
   1757	struct ibmvfc_host *vhost = evt->vhost;
   1758	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
   1759	struct scsi_cmnd *cmnd = evt->cmnd;
   1760	const char *err = unknown_error;
   1761	int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
   1762	int logerr = 0;
   1763	int rsp_code = 0;
   1764
   1765	if (index >= 0) {
   1766		logerr = cmd_status[index].log;
   1767		err = cmd_status[index].name;
   1768	}
   1769
   1770	if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
   1771		return;
   1772
   1773	if (rsp->flags & FCP_RSP_LEN_VALID)
   1774		rsp_code = rsp->data.info.rsp_code;
   1775
   1776	scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
   1777		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
   1778		    cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
   1779		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
   1780}
   1781
   1782/**
   1783 * ibmvfc_relogin - Log back into the specified device
   1784 * @sdev:	scsi device struct
   1785 *
   1786 **/
   1787static void ibmvfc_relogin(struct scsi_device *sdev)
   1788{
   1789	struct ibmvfc_host *vhost = shost_priv(sdev->host);
   1790	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
   1791	struct ibmvfc_target *tgt;
   1792	unsigned long flags;
   1793
   1794	spin_lock_irqsave(vhost->host->host_lock, flags);
   1795	list_for_each_entry(tgt, &vhost->targets, queue) {
   1796		if (rport == tgt->rport) {
   1797			ibmvfc_del_tgt(tgt);
   1798			break;
   1799		}
   1800	}
   1801
   1802	ibmvfc_reinit_host(vhost);
   1803	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   1804}
   1805
   1806/**
   1807 * ibmvfc_scsi_done - Handle responses from commands
   1808 * @evt:	ibmvfc event to be handled
   1809 *
   1810 * Used as a callback when sending scsi cmds.
   1811 **/
   1812static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
   1813{
   1814	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
   1815	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
   1816	struct scsi_cmnd *cmnd = evt->cmnd;
   1817	u32 rsp_len = 0;
   1818	u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
   1819
   1820	if (cmnd) {
   1821		if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
   1822			scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
   1823		else if (rsp->flags & FCP_RESID_UNDER)
   1824			scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
   1825		else
   1826			scsi_set_resid(cmnd, 0);
   1827
   1828		if (vfc_cmd->status) {
   1829			cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);
   1830
   1831			if (rsp->flags & FCP_RSP_LEN_VALID)
   1832				rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
   1833			if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
   1834				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
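        			/*
        			 * Sense data follows any FCP response info in rsp->data,
        			 * so skip rsp_len bytes; response info is at most 8 bytes,
        			 * hence the rsp_len sanity check.
        			 */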
   1835			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
   1836				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
   1837			if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
   1838			    (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
   1839				ibmvfc_relogin(cmnd->device);
   1840
   1841			if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
   1842				cmnd->result = (DID_ERROR << 16);
   1843
   1844			ibmvfc_log_error(evt);
   1845		}
   1846
   1847		if (!cmnd->result &&
   1848		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
   1849			cmnd->result = (DID_ERROR << 16);
   1850
   1851		scsi_dma_unmap(cmnd);
   1852		scsi_done(cmnd);
   1853	}
   1854
   1855	ibmvfc_free_event(evt);
   1856}
   1857
   1858/**
   1859 * ibmvfc_host_chkready - Check if the host can accept commands
   1860 * @vhost:	 struct ibmvfc host
   1861 *
   1862 * Returns:
    1863 *	0 if the host can accept commands / non-zero DID_* result if not
   1864 **/
   1865static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
   1866{
   1867	int result = 0;
   1868
   1869	switch (vhost->state) {
   1870	case IBMVFC_LINK_DEAD:
   1871	case IBMVFC_HOST_OFFLINE:
   1872		result = DID_NO_CONNECT << 16;
   1873		break;
   1874	case IBMVFC_NO_CRQ:
   1875	case IBMVFC_INITIALIZING:
   1876	case IBMVFC_HALTED:
   1877	case IBMVFC_LINK_DOWN:
   1878		result = DID_REQUEUE << 16;
   1879		break;
   1880	case IBMVFC_ACTIVE:
   1881		result = 0;
   1882		break;
   1883	}
   1884
   1885	return result;
   1886}
   1887
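        /**
         * ibmvfc_init_vfc_cmd - Initialize a vfc_cmd within an event
         * @evt:	ibmvfc event struct
         * @sdev:	scsi device the command is destined for
         *
         * Returns:
         *	pointer to the initialized vfc_cmd in the event's IU
         **/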
   1888static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
   1889{
   1890	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
   1891	struct ibmvfc_host *vhost = evt->vhost;
   1892	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
   1893	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
   1894	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
   1895	size_t offset;
   1896
   1897	memset(vfc_cmd, 0, sizeof(*vfc_cmd));
   1898	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
   1899		offset = offsetof(struct ibmvfc_cmd, v2.rsp);
   1900		vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
   1901	} else
   1902		offset = offsetof(struct ibmvfc_cmd, v1.rsp);
   1903	vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
   1904	vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
   1905	vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
   1906	vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
   1907	vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
   1908	vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
   1909	vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
   1910	int_to_scsilun(sdev->lun, &iu->lun);
   1911
   1912	return vfc_cmd;
   1913}
   1914
   1915/**
   1916 * ibmvfc_queuecommand - The queuecommand function of the scsi template
   1917 * @shost:	scsi host struct
   1918 * @cmnd:	struct scsi_cmnd to be executed
   1919 *
   1920 * Returns:
   1921 *	0 on success / other on failure
   1922 **/
   1923static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
   1924{
   1925	struct ibmvfc_host *vhost = shost_priv(shost);
   1926	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
   1927	struct ibmvfc_cmd *vfc_cmd;
   1928	struct ibmvfc_fcp_cmd_iu *iu;
   1929	struct ibmvfc_event *evt;
   1930	u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
   1931	u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
   1932	u16 scsi_channel;
   1933	int rc;
   1934
   1935	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
   1936	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
   1937		cmnd->result = rc;
   1938		scsi_done(cmnd);
   1939		return 0;
   1940	}
   1941
   1942	cmnd->result = (DID_OK << 16);
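        	/*
        	 * When channels are in use, spread commands across the sub-CRQs
        	 * by folding the block layer hardware queue index onto the
        	 * number of queues the VIOS actually activated.
        	 */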
   1943	if (vhost->using_channels) {
   1944		scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
   1945		evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
    1946		evt->hwq = scsi_channel;
   1947	} else
   1948		evt = ibmvfc_get_event(&vhost->crq);
   1949
   1950	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
   1951	evt->cmnd = cmnd;
   1952
   1953	vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
   1954	iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
   1955
   1956	iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
   1957	memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
   1958
   1959	if (cmnd->flags & SCMD_TAGGED) {
   1960		vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag);
   1961		iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
   1962	}
   1963
   1964	vfc_cmd->correlation = cpu_to_be64((u64)evt);
   1965
   1966	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
   1967		return ibmvfc_send_event(evt, vhost, 0);
   1968
   1969	ibmvfc_free_event(evt);
   1970	if (rc == -ENOMEM)
   1971		return SCSI_MLQUEUE_HOST_BUSY;
   1972
   1973	if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
   1974		scmd_printk(KERN_ERR, cmnd,
   1975			    "Failed to map DMA buffer for command. rc=%d\n", rc);
   1976
   1977	cmnd->result = DID_ERROR << 16;
   1978	scsi_done(cmnd);
   1979	return 0;
   1980}
   1981
   1982/**
   1983 * ibmvfc_sync_completion - Signal that a synchronous command has completed
   1984 * @evt:	ibmvfc event struct
   1985 *
   1986 **/
   1987static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
   1988{
   1989	/* copy the response back */
   1990	if (evt->sync_iu)
   1991		*evt->sync_iu = *evt->xfer_iu;
   1992
   1993	complete(&evt->comp);
   1994}
   1995
   1996/**
   1997 * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
   1998 * @evt:	struct ibmvfc_event
   1999 *
   2000 **/
   2001static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
   2002{
   2003	struct ibmvfc_host *vhost = evt->vhost;
   2004
   2005	ibmvfc_free_event(evt);
   2006	vhost->aborting_passthru = 0;
   2007	dev_info(vhost->dev, "Passthru command cancelled\n");
   2008}
   2009
   2010/**
   2011 * ibmvfc_bsg_timeout - Handle a BSG timeout
   2012 * @job:	struct bsg_job that timed out
   2013 *
   2014 * Returns:
   2015 *	0 on success / other on failure
   2016 **/
   2017static int ibmvfc_bsg_timeout(struct bsg_job *job)
   2018{
   2019	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
   2020	unsigned long port_id = (unsigned long)job->dd_data;
   2021	struct ibmvfc_event *evt;
   2022	struct ibmvfc_tmf *tmf;
   2023	unsigned long flags;
   2024	int rc;
   2025
   2026	ENTER;
   2027	spin_lock_irqsave(vhost->host->host_lock, flags);
   2028	if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
   2029		__ibmvfc_reset_host(vhost);
   2030		spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2031		return 0;
   2032	}
   2033
   2034	vhost->aborting_passthru = 1;
   2035	evt = ibmvfc_get_event(&vhost->crq);
   2036	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
   2037
   2038	tmf = &evt->iu.tmf;
   2039	memset(tmf, 0, sizeof(*tmf));
   2040	tmf->common.version = cpu_to_be32(1);
   2041	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
   2042	tmf->common.length = cpu_to_be16(sizeof(*tmf));
   2043	tmf->scsi_id = cpu_to_be64(port_id);
   2044	tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
   2045	tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
   2046	rc = ibmvfc_send_event(evt, vhost, default_timeout);
   2047
   2048	if (rc != 0) {
   2049		vhost->aborting_passthru = 0;
   2050		dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
   2051		rc = -EIO;
   2052	} else
   2053		dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
   2054			 port_id);
   2055
   2056	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2057
   2058	LEAVE;
   2059	return rc;
   2060}
   2061
   2062/**
   2063 * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
    2064 * @vhost:		struct ibmvfc_host to send the command on
    2065 * @port_id:	port ID to PLOGI into
   2066 *
   2067 * Returns:
   2068 *	0 on success / other on failure
   2069 **/
   2070static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
   2071{
   2072	struct ibmvfc_port_login *plogi;
   2073	struct ibmvfc_target *tgt;
   2074	struct ibmvfc_event *evt;
   2075	union ibmvfc_iu rsp_iu;
   2076	unsigned long flags;
   2077	int rc = 0, issue_login = 1;
   2078
   2079	ENTER;
   2080	spin_lock_irqsave(vhost->host->host_lock, flags);
   2081	list_for_each_entry(tgt, &vhost->targets, queue) {
   2082		if (tgt->scsi_id == port_id) {
   2083			issue_login = 0;
   2084			break;
   2085		}
   2086	}
   2087
   2088	if (!issue_login)
   2089		goto unlock_out;
   2090	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
   2091		goto unlock_out;
   2092
   2093	evt = ibmvfc_get_event(&vhost->crq);
   2094	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
   2095	plogi = &evt->iu.plogi;
   2096	memset(plogi, 0, sizeof(*plogi));
   2097	plogi->common.version = cpu_to_be32(1);
   2098	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
   2099	plogi->common.length = cpu_to_be16(sizeof(*plogi));
   2100	plogi->scsi_id = cpu_to_be64(port_id);
   2101	evt->sync_iu = &rsp_iu;
   2102	init_completion(&evt->comp);
   2103
   2104	rc = ibmvfc_send_event(evt, vhost, default_timeout);
   2105	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2106
   2107	if (rc)
   2108		return -EIO;
   2109
   2110	wait_for_completion(&evt->comp);
   2111
   2112	if (rsp_iu.plogi.common.status)
   2113		rc = -EIO;
   2114
   2115	spin_lock_irqsave(vhost->host->host_lock, flags);
   2116	ibmvfc_free_event(evt);
   2117unlock_out:
   2118	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2119	LEAVE;
   2120	return rc;
   2121}
   2122
   2123/**
   2124 * ibmvfc_bsg_request - Handle a BSG request
   2125 * @job:	struct bsg_job to be executed
   2126 *
   2127 * Returns:
   2128 *	0 on success / other on failure
   2129 **/
   2130static int ibmvfc_bsg_request(struct bsg_job *job)
   2131{
   2132	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
   2133	struct fc_rport *rport = fc_bsg_to_rport(job);
   2134	struct ibmvfc_passthru_mad *mad;
   2135	struct ibmvfc_event *evt;
   2136	union ibmvfc_iu rsp_iu;
   2137	unsigned long flags, port_id = -1;
   2138	struct fc_bsg_request *bsg_request = job->request;
   2139	struct fc_bsg_reply *bsg_reply = job->reply;
   2140	unsigned int code = bsg_request->msgcode;
   2141	int rc = 0, req_seg, rsp_seg, issue_login = 0;
   2142	u32 fc_flags, rsp_len;
   2143
   2144	ENTER;
   2145	bsg_reply->reply_payload_rcv_len = 0;
   2146	if (rport)
   2147		port_id = rport->port_id;
   2148
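        	/*
        	 * For host-initiated ELS/CT requests the destination is a 24-bit
        	 * FC port ID (D_ID) split across three bytes; reassemble it
        	 * MSB-first below.
        	 */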
   2149	switch (code) {
   2150	case FC_BSG_HST_ELS_NOLOGIN:
   2151		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
   2152			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
   2153			bsg_request->rqst_data.h_els.port_id[2];
   2154		fallthrough;
   2155	case FC_BSG_RPT_ELS:
   2156		fc_flags = IBMVFC_FC_ELS;
   2157		break;
   2158	case FC_BSG_HST_CT:
   2159		issue_login = 1;
   2160		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
   2161			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
   2162			bsg_request->rqst_data.h_ct.port_id[2];
   2163		fallthrough;
   2164	case FC_BSG_RPT_CT:
   2165		fc_flags = IBMVFC_FC_CT_IU;
   2166		break;
   2167	default:
   2168		return -ENOTSUPP;
   2169	}
   2170
   2171	if (port_id == -1)
   2172		return -EINVAL;
   2173	if (!mutex_trylock(&vhost->passthru_mutex))
   2174		return -EBUSY;
   2175
   2176	job->dd_data = (void *)port_id;
   2177	req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
   2178			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
   2179
   2180	if (!req_seg) {
   2181		mutex_unlock(&vhost->passthru_mutex);
   2182		return -ENOMEM;
   2183	}
   2184
   2185	rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
   2186			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
   2187
   2188	if (!rsp_seg) {
   2189		dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
   2190			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
   2191		mutex_unlock(&vhost->passthru_mutex);
   2192		return -ENOMEM;
   2193	}
   2194
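        	/*
        	 * The passthru MAD carries a single direct descriptor for the
        	 * command and one for the response, so reject payloads that
        	 * mapped to more than one DMA segment each.
        	 */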
   2195	if (req_seg > 1 || rsp_seg > 1) {
   2196		rc = -EINVAL;
   2197		goto out;
   2198	}
   2199
   2200	if (issue_login)
   2201		rc = ibmvfc_bsg_plogi(vhost, port_id);
   2202
   2203	spin_lock_irqsave(vhost->host->host_lock, flags);
   2204
   2205	if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
   2206	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
   2207		spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2208		goto out;
   2209	}
   2210
   2211	evt = ibmvfc_get_event(&vhost->crq);
   2212	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
   2213	mad = &evt->iu.passthru;
   2214
   2215	memset(mad, 0, sizeof(*mad));
   2216	mad->common.version = cpu_to_be32(1);
   2217	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
   2218	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
   2219
   2220	mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
   2221		offsetof(struct ibmvfc_passthru_mad, iu));
   2222	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
   2223
   2224	mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
   2225	mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
   2226	mad->iu.flags = cpu_to_be32(fc_flags);
   2227	mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
   2228
   2229	mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
   2230	mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
   2231	mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
   2232	mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
   2233	mad->iu.scsi_id = cpu_to_be64(port_id);
   2234	mad->iu.tag = cpu_to_be64((u64)evt);
   2235	rsp_len = be32_to_cpu(mad->iu.rsp.len);
   2236
   2237	evt->sync_iu = &rsp_iu;
   2238	init_completion(&evt->comp);
   2239	rc = ibmvfc_send_event(evt, vhost, 0);
   2240	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2241
   2242	if (rc) {
   2243		rc = -EIO;
   2244		goto out;
   2245	}
   2246
   2247	wait_for_completion(&evt->comp);
   2248
   2249	if (rsp_iu.passthru.common.status)
   2250		rc = -EIO;
   2251	else
   2252		bsg_reply->reply_payload_rcv_len = rsp_len;
   2253
   2254	spin_lock_irqsave(vhost->host->host_lock, flags);
   2255	ibmvfc_free_event(evt);
   2256	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2257	bsg_reply->result = rc;
   2258	bsg_job_done(job, bsg_reply->result,
   2259		       bsg_reply->reply_payload_rcv_len);
   2260	rc = 0;
   2261out:
   2262	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
   2263		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
   2264	dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
   2265		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
   2266	mutex_unlock(&vhost->passthru_mutex);
   2267	LEAVE;
   2268	return rc;
   2269}
   2270
   2271/**
   2272 * ibmvfc_reset_device - Reset the device with the specified reset type
   2273 * @sdev:	scsi device to reset
   2274 * @type:	reset type
   2275 * @desc:	reset type description for log messages
   2276 *
   2277 * Returns:
   2278 *	0 on success / other on failure
   2279 **/
   2280static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
   2281{
   2282	struct ibmvfc_host *vhost = shost_priv(sdev->host);
   2283	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
   2284	struct ibmvfc_cmd *tmf;
   2285	struct ibmvfc_event *evt = NULL;
   2286	union ibmvfc_iu rsp_iu;
   2287	struct ibmvfc_fcp_cmd_iu *iu;
   2288	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
   2289	int rsp_rc = -EBUSY;
   2290	unsigned long flags;
   2291	int rsp_code = 0;
   2292
   2293	spin_lock_irqsave(vhost->host->host_lock, flags);
   2294	if (vhost->state == IBMVFC_ACTIVE) {
   2295		if (vhost->using_channels)
   2296			evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
   2297		else
   2298			evt = ibmvfc_get_event(&vhost->crq);
   2299
   2300		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
   2301		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
   2302		iu = ibmvfc_get_fcp_iu(vhost, tmf);
   2303
   2304		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
   2305		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
   2306			tmf->target_wwpn = cpu_to_be64(rport->port_name);
   2307		iu->tmf_flags = type;
   2308		evt->sync_iu = &rsp_iu;
   2309
   2310		init_completion(&evt->comp);
   2311		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
   2312	}
   2313	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2314
   2315	if (rsp_rc != 0) {
   2316		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
   2317			    desc, rsp_rc);
   2318		return -EIO;
   2319	}
   2320
   2321	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
   2322	wait_for_completion(&evt->comp);
   2323
   2324	if (rsp_iu.cmd.status)
   2325		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
   2326
   2327	if (rsp_code) {
   2328		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
   2329			rsp_code = fc_rsp->data.info.rsp_code;
   2330
   2331		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
   2332			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
   2333			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
   2334			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
   2335			    fc_rsp->scsi_status);
   2336		rsp_rc = -EIO;
   2337	} else
   2338		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
   2339
   2340	spin_lock_irqsave(vhost->host->host_lock, flags);
   2341	ibmvfc_free_event(evt);
   2342	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2343	return rsp_rc;
   2344}
   2345
   2346/**
   2347 * ibmvfc_match_rport - Match function for specified remote port
   2348 * @evt:	ibmvfc event struct
   2349 * @rport:	device to match
   2350 *
   2351 * Returns:
   2352 *	1 if event matches rport / 0 if event does not match rport
   2353 **/
   2354static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
   2355{
   2356	struct fc_rport *cmd_rport;
   2357
   2358	if (evt->cmnd) {
   2359		cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
   2360		if (cmd_rport == rport)
   2361			return 1;
   2362	}
   2363	return 0;
   2364}
   2365
   2366/**
   2367 * ibmvfc_match_target - Match function for specified target
   2368 * @evt:	ibmvfc event struct
   2369 * @device:	device to match (starget)
   2370 *
   2371 * Returns:
   2372 *	1 if event matches starget / 0 if event does not match starget
   2373 **/
   2374static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
   2375{
   2376	if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
   2377		return 1;
   2378	return 0;
   2379}
   2380
   2381/**
   2382 * ibmvfc_match_lun - Match function for specified LUN
   2383 * @evt:	ibmvfc event struct
   2384 * @device:	device to match (sdev)
   2385 *
   2386 * Returns:
   2387 *	1 if event matches sdev / 0 if event does not match sdev
   2388 **/
   2389static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
   2390{
   2391	if (evt->cmnd && evt->cmnd->device == device)
   2392		return 1;
   2393	return 0;
   2394}
   2395
   2396/**
   2397 * ibmvfc_event_is_free - Check if event is free or not
   2398 * @evt:	ibmvfc event struct
   2399 *
   2400 * Returns:
    2401 *	true if the event is on its queue's free list / false otherwise
   2402 **/
   2403static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
   2404{
   2405	struct ibmvfc_event *loop_evt;
   2406
   2407	list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
   2408		if (loop_evt == evt)
   2409			return true;
   2410
   2411	return false;
   2412}
   2413
   2414/**
   2415 * ibmvfc_wait_for_ops - Wait for ops to complete
   2416 * @vhost:	ibmvfc host struct
   2417 * @device:	device to match (starget or sdev)
   2418 * @match:	match function
   2419 *
   2420 * Returns:
   2421 *	SUCCESS / FAILED
   2422 **/
   2423static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
   2424			       int (*match) (struct ibmvfc_event *, void *))
   2425{
   2426	struct ibmvfc_event *evt;
   2427	DECLARE_COMPLETION_ONSTACK(comp);
   2428	int wait, i, q_index, q_size;
   2429	unsigned long flags;
   2430	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
   2431	struct ibmvfc_queue *queues;
   2432
   2433	ENTER;
   2434	if (vhost->mq_enabled && vhost->using_channels) {
   2435		queues = vhost->scsi_scrqs.scrqs;
   2436		q_size = vhost->scsi_scrqs.active_queues;
   2437	} else {
   2438		queues = &vhost->crq;
   2439		q_size = 1;
   2440	}
   2441
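        	/*
        	 * Scan every event pool for outstanding events that match,
        	 * attach the shared completion to each and wait. If the wait
        	 * times out, a second pass detaches the completion from any
        	 * still-outstanding events before reporting failure.
        	 */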
   2442	do {
   2443		wait = 0;
   2444		spin_lock_irqsave(vhost->host->host_lock, flags);
   2445		for (q_index = 0; q_index < q_size; q_index++) {
   2446			spin_lock(&queues[q_index].l_lock);
   2447			for (i = 0; i < queues[q_index].evt_pool.size; i++) {
   2448				evt = &queues[q_index].evt_pool.events[i];
   2449				if (!ibmvfc_event_is_free(evt)) {
   2450					if (match(evt, device)) {
   2451						evt->eh_comp = &comp;
   2452						wait++;
   2453					}
   2454				}
   2455			}
   2456			spin_unlock(&queues[q_index].l_lock);
   2457		}
   2458		spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2459
   2460		if (wait) {
   2461			timeout = wait_for_completion_timeout(&comp, timeout);
   2462
   2463			if (!timeout) {
   2464				wait = 0;
   2465				spin_lock_irqsave(vhost->host->host_lock, flags);
   2466				for (q_index = 0; q_index < q_size; q_index++) {
   2467					spin_lock(&queues[q_index].l_lock);
   2468					for (i = 0; i < queues[q_index].evt_pool.size; i++) {
   2469						evt = &queues[q_index].evt_pool.events[i];
   2470						if (!ibmvfc_event_is_free(evt)) {
   2471							if (match(evt, device)) {
   2472								evt->eh_comp = NULL;
   2473								wait++;
   2474							}
   2475						}
   2476					}
   2477					spin_unlock(&queues[q_index].l_lock);
   2478				}
   2479				spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2480				if (wait)
   2481					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
   2482				LEAVE;
   2483				return wait ? FAILED : SUCCESS;
   2484			}
   2485		}
   2486	} while (wait);
   2487
   2488	LEAVE;
   2489	return SUCCESS;
   2490}
   2491
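        /**
         * ibmvfc_init_tmf - Allocate and initialize a task management event
         * @queue:	ibmvfc queue the TMF will be sent on
         * @sdev:	scsi device the TMF is aimed at
         * @type:	TMF flags (for example IBMVFC_TMF_ABORT_TASK_SET)
         *
         * Returns:
         *	an initialized ibmvfc event with its completion ready to wait on
         **/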
   2492static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
   2493					    struct scsi_device *sdev,
   2494					    int type)
   2495{
   2496	struct ibmvfc_host *vhost = shost_priv(sdev->host);
   2497	struct scsi_target *starget = scsi_target(sdev);
   2498	struct fc_rport *rport = starget_to_rport(starget);
   2499	struct ibmvfc_event *evt;
   2500	struct ibmvfc_tmf *tmf;
   2501
   2502	evt = ibmvfc_get_event(queue);
   2503	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
   2504
   2505	tmf = &evt->iu.tmf;
   2506	memset(tmf, 0, sizeof(*tmf));
   2507	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
   2508		tmf->common.version = cpu_to_be32(2);
   2509		tmf->target_wwpn = cpu_to_be64(rport->port_name);
   2510	} else {
   2511		tmf->common.version = cpu_to_be32(1);
   2512	}
   2513	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
   2514	tmf->common.length = cpu_to_be16(sizeof(*tmf));
   2515	tmf->scsi_id = cpu_to_be64(rport->port_id);
   2516	int_to_scsilun(sdev->lun, &tmf->lun);
   2517	if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
   2518		type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
   2519	if (vhost->state == IBMVFC_ACTIVE)
   2520		tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
   2521	else
   2522		tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
   2523	tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
   2524	tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
   2525
   2526	init_completion(&evt->comp);
   2527
   2528	return evt;
   2529}
   2530
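        /**
         * ibmvfc_cancel_all_mq - Cancel outstanding commands on all sub-CRQs
         * @sdev:	scsi device to cancel commands for
         * @type:	type of error recovery being performed
         *
         * Sends a cancel TMF on each active hardware queue with commands
         * outstanding for @sdev, then waits for every cancel to complete.
         *
         * Returns:
         *	0 on success / other on failure
         **/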
   2531static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
   2532{
   2533	struct ibmvfc_host *vhost = shost_priv(sdev->host);
   2534	struct ibmvfc_event *evt, *found_evt, *temp;
   2535	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
   2536	unsigned long flags;
   2537	int num_hwq, i;
   2538	int fail = 0;
   2539	LIST_HEAD(cancelq);
   2540	u16 status;
   2541
   2542	ENTER;
   2543	spin_lock_irqsave(vhost->host->host_lock, flags);
   2544	num_hwq = vhost->scsi_scrqs.active_queues;
   2545	for (i = 0; i < num_hwq; i++) {
   2546		spin_lock(queues[i].q_lock);
   2547		spin_lock(&queues[i].l_lock);
   2548		found_evt = NULL;
   2549		list_for_each_entry(evt, &queues[i].sent, queue_list) {
   2550			if (evt->cmnd && evt->cmnd->device == sdev) {
   2551				found_evt = evt;
   2552				break;
   2553			}
   2554		}
   2555		spin_unlock(&queues[i].l_lock);
   2556
   2557		if (found_evt && vhost->logged_in) {
   2558			evt = ibmvfc_init_tmf(&queues[i], sdev, type);
   2559			evt->sync_iu = &queues[i].cancel_rsp;
   2560			ibmvfc_send_event(evt, vhost, default_timeout);
   2561			list_add_tail(&evt->cancel, &cancelq);
   2562		}
   2563
   2564		spin_unlock(queues[i].q_lock);
   2565	}
   2566	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2567
   2568	if (list_empty(&cancelq)) {
   2569		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
   2570			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
   2571		return 0;
   2572	}
   2573
   2574	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
   2575
   2576	list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
   2577		wait_for_completion(&evt->comp);
   2578		status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
   2579		list_del(&evt->cancel);
   2580		ibmvfc_free_event(evt);
   2581
   2582		if (status != IBMVFC_MAD_SUCCESS) {
   2583			sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
   2584			switch (status) {
   2585			case IBMVFC_MAD_DRIVER_FAILED:
   2586			case IBMVFC_MAD_CRQ_ERROR:
    2587			/* Host adapter is most likely going through reset; return success so
    2588			 * the caller will wait for the command being cancelled to get returned.
    2589			 */
   2590				break;
   2591			default:
   2592				fail = 1;
   2593				break;
   2594			}
   2595		}
   2596	}
   2597
   2598	if (fail)
   2599		return -EIO;
   2600
   2601	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
   2602	LEAVE;
   2603	return 0;
   2604}
   2605
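        /**
         * ibmvfc_cancel_all_sq - Cancel outstanding commands on the single CRQ
         * @sdev:	scsi device to cancel commands for
         * @type:	type of error recovery being performed
         *
         * Returns:
         *	0 on success / other on failure
         **/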
   2606static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
   2607{
   2608	struct ibmvfc_host *vhost = shost_priv(sdev->host);
   2609	struct ibmvfc_event *evt, *found_evt;
   2610	union ibmvfc_iu rsp;
   2611	int rsp_rc = -EBUSY;
   2612	unsigned long flags;
   2613	u16 status;
   2614
   2615	ENTER;
   2616	found_evt = NULL;
   2617	spin_lock_irqsave(vhost->host->host_lock, flags);
   2618	spin_lock(&vhost->crq.l_lock);
   2619	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
   2620		if (evt->cmnd && evt->cmnd->device == sdev) {
   2621			found_evt = evt;
   2622			break;
   2623		}
   2624	}
   2625	spin_unlock(&vhost->crq.l_lock);
   2626
   2627	if (!found_evt) {
   2628		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
   2629			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
   2630		spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2631		return 0;
   2632	}
   2633
   2634	if (vhost->logged_in) {
   2635		evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
   2636		evt->sync_iu = &rsp;
   2637		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
   2638	}
   2639
   2640	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2641
   2642	if (rsp_rc != 0) {
   2643		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
    2644		/* If failure is received, the host adapter is most likely going
    2645		 * through reset; return success so the caller will wait for the
    2646		 * command being cancelled to get returned. */
   2647		return 0;
   2648	}
   2649
   2650	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
   2651
   2652	wait_for_completion(&evt->comp);
   2653	status = be16_to_cpu(rsp.mad_common.status);
   2654	spin_lock_irqsave(vhost->host->host_lock, flags);
   2655	ibmvfc_free_event(evt);
   2656	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2657
   2658	if (status != IBMVFC_MAD_SUCCESS) {
   2659		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
   2660		switch (status) {
   2661		case IBMVFC_MAD_DRIVER_FAILED:
   2662		case IBMVFC_MAD_CRQ_ERROR:
    2663			/* Host adapter is most likely going through reset; return success so
    2664			 * the caller will wait for the command being cancelled to get returned. */
   2665			return 0;
   2666		default:
   2667			return -EIO;
    2668		}
   2669	}
   2670
   2671	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
   2672	return 0;
   2673}
   2674
   2675/**
   2676 * ibmvfc_cancel_all - Cancel all outstanding commands to the device
   2677 * @sdev:	scsi device to cancel commands
   2678 * @type:	type of error recovery being performed
   2679 *
   2680 * This sends a cancel to the VIOS for the specified device. This does
   2681 * NOT send any abort to the actual device. That must be done separately.
   2682 *
   2683 * Returns:
   2684 *	0 on success / other on failure
   2685 **/
   2686static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
   2687{
   2688	struct ibmvfc_host *vhost = shost_priv(sdev->host);
   2689
   2690	if (vhost->mq_enabled && vhost->using_channels)
   2691		return ibmvfc_cancel_all_mq(sdev, type);
   2692	else
   2693		return ibmvfc_cancel_all_sq(sdev, type);
   2694}
   2695
   2696/**
   2697 * ibmvfc_match_key - Match function for specified cancel key
   2698 * @evt:	ibmvfc event struct
   2699 * @key:	cancel key to match
   2700 *
   2701 * Returns:
   2702 *	1 if event matches key / 0 if event does not match key
   2703 **/
   2704static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
   2705{
   2706	unsigned long cancel_key = (unsigned long)key;
   2707
   2708	if (evt->crq.format == IBMVFC_CMD_FORMAT &&
   2709	    be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
   2710		return 1;
   2711	return 0;
   2712}
   2713
   2714/**
   2715 * ibmvfc_match_evt - Match function for specified event
   2716 * @evt:	ibmvfc event struct
   2717 * @match:	event to match
   2718 *
   2719 * Returns:
    2720 *	1 if event matches the specified event / 0 if it does not
   2721 **/
   2722static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
   2723{
   2724	if (evt == match)
   2725		return 1;
   2726	return 0;
   2727}
   2728
   2729/**
   2730 * ibmvfc_abort_task_set - Abort outstanding commands to the device
   2731 * @sdev:	scsi device to abort commands
   2732 *
   2733 * This sends an Abort Task Set to the VIOS for the specified device. This does
   2734 * NOT send any cancel to the VIOS. That must be done separately.
   2735 *
   2736 * Returns:
   2737 *	0 on success / other on failure
   2738 **/
   2739static int ibmvfc_abort_task_set(struct scsi_device *sdev)
   2740{
   2741	struct ibmvfc_host *vhost = shost_priv(sdev->host);
   2742	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
   2743	struct ibmvfc_cmd *tmf;
   2744	struct ibmvfc_event *evt, *found_evt;
   2745	union ibmvfc_iu rsp_iu;
   2746	struct ibmvfc_fcp_cmd_iu *iu;
   2747	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
   2748	int rc, rsp_rc = -EBUSY;
   2749	unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
   2750	int rsp_code = 0;
   2751
   2752	found_evt = NULL;
   2753	spin_lock_irqsave(vhost->host->host_lock, flags);
   2754	spin_lock(&vhost->crq.l_lock);
   2755	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
   2756		if (evt->cmnd && evt->cmnd->device == sdev) {
   2757			found_evt = evt;
   2758			break;
   2759		}
   2760	}
   2761	spin_unlock(&vhost->crq.l_lock);
   2762
   2763	if (!found_evt) {
   2764		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
   2765			sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
   2766		spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2767		return 0;
   2768	}
   2769
   2770	if (vhost->state == IBMVFC_ACTIVE) {
   2771		evt = ibmvfc_get_event(&vhost->crq);
   2772		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
   2773		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
   2774		iu = ibmvfc_get_fcp_iu(vhost, tmf);
   2775
   2776		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
   2777			tmf->target_wwpn = cpu_to_be64(rport->port_name);
   2778		iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
   2779		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
   2780		evt->sync_iu = &rsp_iu;
   2781
   2782		tmf->correlation = cpu_to_be64((u64)evt);
   2783
   2784		init_completion(&evt->comp);
   2785		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
   2786	}
   2787
   2788	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2789
   2790	if (rsp_rc != 0) {
   2791		sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
   2792		return -EIO;
   2793	}
   2794
   2795	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
   2796	timeout = wait_for_completion_timeout(&evt->comp, timeout);
   2797
   2798	if (!timeout) {
   2799		rc = ibmvfc_cancel_all(sdev, 0);
   2800		if (!rc) {
   2801			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
   2802			if (rc == SUCCESS)
   2803				rc = 0;
   2804		}
   2805
   2806		if (rc) {
   2807			sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
   2808			ibmvfc_reset_host(vhost);
   2809			rsp_rc = -EIO;
   2810			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
   2811
   2812			if (rc == SUCCESS)
   2813				rsp_rc = 0;
   2814
   2815			rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
   2816			if (rc != SUCCESS) {
   2817				spin_lock_irqsave(vhost->host->host_lock, flags);
   2818				ibmvfc_hard_reset_host(vhost);
   2819				spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2820				rsp_rc = 0;
   2821			}
   2822
   2823			goto out;
   2824		}
   2825	}
   2826
   2827	if (rsp_iu.cmd.status)
   2828		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
   2829
   2830	if (rsp_code) {
   2831		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
   2832			rsp_code = fc_rsp->data.info.rsp_code;
   2833
   2834		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
   2835			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
   2836			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
   2837			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
   2838			    fc_rsp->scsi_status);
   2839		rsp_rc = -EIO;
   2840	} else
   2841		sdev_printk(KERN_INFO, sdev, "Abort successful\n");
   2842
   2843out:
   2844	spin_lock_irqsave(vhost->host->host_lock, flags);
   2845	ibmvfc_free_event(evt);
   2846	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   2847	return rsp_rc;
   2848}
   2849
   2850/**
   2851 * ibmvfc_eh_abort_handler - Abort a command
   2852 * @cmd:	scsi command to abort
   2853 *
   2854 * Returns:
   2855 *	SUCCESS / FAST_IO_FAIL / FAILED
   2856 **/
   2857static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
   2858{
   2859	struct scsi_device *sdev = cmd->device;
   2860	struct ibmvfc_host *vhost = shost_priv(sdev->host);
   2861	int cancel_rc, block_rc;
   2862	int rc = FAILED;
   2863
   2864	ENTER;
   2865	block_rc = fc_block_scsi_eh(cmd);
   2866	ibmvfc_wait_while_resetting(vhost);
   2867	if (block_rc != FAST_IO_FAIL) {
   2868		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
   2869		ibmvfc_abort_task_set(sdev);
   2870	} else
   2871		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
   2872
   2873	if (!cancel_rc)
   2874		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
   2875
   2876	if (block_rc == FAST_IO_FAIL && rc != FAILED)
   2877		rc = FAST_IO_FAIL;
   2878
   2879	LEAVE;
   2880	return rc;
   2881}
   2882
   2883/**
   2884 * ibmvfc_eh_device_reset_handler - Reset a single LUN
   2885 * @cmd:	scsi command struct
   2886 *
   2887 * Returns:
   2888 *	SUCCESS / FAST_IO_FAIL / FAILED
   2889 **/
   2890static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
   2891{
   2892	struct scsi_device *sdev = cmd->device;
   2893	struct ibmvfc_host *vhost = shost_priv(sdev->host);
   2894	int cancel_rc, block_rc, reset_rc = 0;
   2895	int rc = FAILED;
   2896
   2897	ENTER;
   2898	block_rc = fc_block_scsi_eh(cmd);
   2899	ibmvfc_wait_while_resetting(vhost);
   2900	if (block_rc != FAST_IO_FAIL) {
   2901		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
   2902		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
   2903	} else
   2904		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
   2905
   2906	if (!cancel_rc && !reset_rc)
   2907		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
   2908
   2909	if (block_rc == FAST_IO_FAIL && rc != FAILED)
   2910		rc = FAST_IO_FAIL;
   2911
   2912	LEAVE;
   2913	return rc;
   2914}
   2915
   2916/**
    2917 * ibmvfc_dev_cancel_all_noreset - Cancel all commands on a device, suppressing ABTS
    2918 * @sdev:	scsi device struct
    2919 * @data:	accumulated return code
   2920 *
   2921 **/
   2922static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
   2923{
   2924	unsigned long *rc = data;
   2925	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
   2926}
   2927
   2928/**
    2929 * ibmvfc_dev_cancel_all_reset - Cancel all commands on a device for a target reset
    2930 * @sdev:	scsi device struct
    2931 * @data:	accumulated return code
   2932 *
   2933 **/
   2934static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
   2935{
   2936	unsigned long *rc = data;
   2937	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
   2938}
   2939
   2940/**
   2941 * ibmvfc_eh_target_reset_handler - Reset the target
   2942 * @cmd:	scsi command struct
   2943 *
   2944 * Returns:
   2945 *	SUCCESS / FAST_IO_FAIL / FAILED
   2946 **/
   2947static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
   2948{
   2949	struct scsi_device *sdev = cmd->device;
   2950	struct ibmvfc_host *vhost = shost_priv(sdev->host);
   2951	struct scsi_target *starget = scsi_target(sdev);
   2952	int block_rc;
   2953	int reset_rc = 0;
   2954	int rc = FAILED;
   2955	unsigned long cancel_rc = 0;
   2956
   2957	ENTER;
   2958	block_rc = fc_block_scsi_eh(cmd);
   2959	ibmvfc_wait_while_resetting(vhost);
   2960	if (block_rc != FAST_IO_FAIL) {
   2961		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
   2962		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
   2963	} else
   2964		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
   2965
   2966	if (!cancel_rc && !reset_rc)
   2967		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
   2968
   2969	if (block_rc == FAST_IO_FAIL && rc != FAILED)
   2970		rc = FAST_IO_FAIL;
   2971
   2972	LEAVE;
   2973	return rc;
   2974}
   2975
   2976/**
   2977 * ibmvfc_eh_host_reset_handler - Reset the connection to the server
   2978 * @cmd:	struct scsi_cmnd having problems
   2979 *
   2980 **/
   2981static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
   2982{
   2983	int rc;
   2984	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
   2985
   2986	dev_err(vhost->dev, "Resetting connection due to error recovery\n");
   2987	rc = ibmvfc_issue_fc_host_lip(vhost->host);
   2988
   2989	return rc ? FAILED : SUCCESS;
   2990}
   2991
   2992/**
   2993 * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
   2994 * @rport:		rport struct
   2995 *
   2996 * Return value:
   2997 * 	none
   2998 **/
   2999static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
   3000{
   3001	struct Scsi_Host *shost = rport_to_shost(rport);
   3002	struct ibmvfc_host *vhost = shost_priv(shost);
   3003	struct fc_rport *dev_rport;
   3004	struct scsi_device *sdev;
   3005	struct ibmvfc_target *tgt;
   3006	unsigned long rc, flags;
   3007	unsigned int found;
   3008
   3009	ENTER;
   3010	shost_for_each_device(sdev, shost) {
   3011		dev_rport = starget_to_rport(scsi_target(sdev));
   3012		if (dev_rport != rport)
   3013			continue;
   3014		ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
   3015	}
   3016
   3017	rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
   3018
   3019	if (rc == FAILED)
   3020		ibmvfc_issue_fc_host_lip(shost);
   3021
   3022	spin_lock_irqsave(shost->host_lock, flags);
   3023	found = 0;
   3024	list_for_each_entry(tgt, &vhost->targets, queue) {
   3025		if (tgt->scsi_id == rport->port_id) {
   3026			found++;
   3027			break;
   3028		}
   3029	}
   3030
   3031	if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
   3032		/*
   3033		 * If we get here, that means we previously attempted to send
   3034		 * an implicit logout to the target but it failed, most likely
   3035		 * due to I/O being pending, so we need to send it again
   3036		 */
   3037		ibmvfc_del_tgt(tgt);
   3038		ibmvfc_reinit_host(vhost);
   3039	}
   3040
   3041	spin_unlock_irqrestore(shost->host_lock, flags);
   3042	LEAVE;
   3043}
   3044
   3045static const struct ibmvfc_async_desc ae_desc [] = {
   3046	{ "PLOGI",	IBMVFC_AE_ELS_PLOGI,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
   3047	{ "LOGO",	IBMVFC_AE_ELS_LOGO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
   3048	{ "PRLO",	IBMVFC_AE_ELS_PRLO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
   3049	{ "N-Port SCN",	IBMVFC_AE_SCN_NPORT,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
   3050	{ "Group SCN",	IBMVFC_AE_SCN_GROUP,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
   3051	{ "Domain SCN",	IBMVFC_AE_SCN_DOMAIN,	IBMVFC_DEFAULT_LOG_LEVEL },
   3052	{ "Fabric SCN",	IBMVFC_AE_SCN_FABRIC,	IBMVFC_DEFAULT_LOG_LEVEL },
   3053	{ "Link Up",	IBMVFC_AE_LINK_UP,	IBMVFC_DEFAULT_LOG_LEVEL },
   3054	{ "Link Down",	IBMVFC_AE_LINK_DOWN,	IBMVFC_DEFAULT_LOG_LEVEL },
   3055	{ "Link Dead",	IBMVFC_AE_LINK_DEAD,	IBMVFC_DEFAULT_LOG_LEVEL },
   3056	{ "Halt",	IBMVFC_AE_HALT,		IBMVFC_DEFAULT_LOG_LEVEL },
   3057	{ "Resume",	IBMVFC_AE_RESUME,	IBMVFC_DEFAULT_LOG_LEVEL },
   3058	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
   3059};
   3060
   3061static const struct ibmvfc_async_desc unknown_ae = {
   3062	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
   3063};
   3064
   3065/**
   3066 * ibmvfc_get_ae_desc - Get text description for async event
   3067 * @ae:	async event
   3068 *
   3069 **/
   3070static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
   3071{
   3072	int i;
   3073
   3074	for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
   3075		if (ae_desc[i].ae == ae)
   3076			return &ae_desc[i];
   3077
   3078	return &unknown_ae;
   3079}
   3080
   3081static const struct {
   3082	enum ibmvfc_ae_link_state state;
   3083	const char *desc;
   3084} link_desc [] = {
   3085	{ IBMVFC_AE_LS_LINK_UP,		" link up" },
   3086	{ IBMVFC_AE_LS_LINK_BOUNCED,	" link bounced" },
   3087	{ IBMVFC_AE_LS_LINK_DOWN,	" link down" },
   3088	{ IBMVFC_AE_LS_LINK_DEAD,	" link dead" },
   3089};
   3090
   3091/**
   3092 * ibmvfc_get_link_state - Get text description for link state
   3093 * @state:	link state
   3094 *
   3095 **/
   3096static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
   3097{
   3098	int i;
   3099
   3100	for (i = 0; i < ARRAY_SIZE(link_desc); i++)
   3101		if (link_desc[i].state == state)
   3102			return link_desc[i].desc;
   3103
   3104	return "";
   3105}
   3106
   3107/**
   3108 * ibmvfc_handle_async - Handle an async event from the adapter
   3109 * @crq:	crq to process
   3110 * @vhost:	ibmvfc host struct
   3111 *
   3112 **/
   3113static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
   3114				struct ibmvfc_host *vhost)
   3115{
   3116	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
   3117	struct ibmvfc_target *tgt;
   3118
   3119	ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
   3120		   " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
   3121		   be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
   3122		   ibmvfc_get_link_state(crq->link_state));
   3123
   3124	switch (be64_to_cpu(crq->event)) {
   3125	case IBMVFC_AE_RESUME:
   3126		switch (crq->link_state) {
   3127		case IBMVFC_AE_LS_LINK_DOWN:
   3128			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
   3129			break;
   3130		case IBMVFC_AE_LS_LINK_DEAD:
   3131			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   3132			break;
   3133		case IBMVFC_AE_LS_LINK_UP:
   3134		case IBMVFC_AE_LS_LINK_BOUNCED:
   3135		default:
   3136			vhost->events_to_log |= IBMVFC_AE_LINKUP;
   3137			vhost->delay_init = 1;
   3138			__ibmvfc_reset_host(vhost);
   3139			break;
   3140		}
   3141
   3142		break;
   3143	case IBMVFC_AE_LINK_UP:
   3144		vhost->events_to_log |= IBMVFC_AE_LINKUP;
   3145		vhost->delay_init = 1;
   3146		__ibmvfc_reset_host(vhost);
   3147		break;
   3148	case IBMVFC_AE_SCN_FABRIC:
   3149	case IBMVFC_AE_SCN_DOMAIN:
   3150		vhost->events_to_log |= IBMVFC_AE_RSCN;
   3151		if (vhost->state < IBMVFC_HALTED) {
   3152			vhost->delay_init = 1;
   3153			__ibmvfc_reset_host(vhost);
   3154		}
   3155		break;
   3156	case IBMVFC_AE_SCN_NPORT:
   3157	case IBMVFC_AE_SCN_GROUP:
   3158		vhost->events_to_log |= IBMVFC_AE_RSCN;
   3159		ibmvfc_reinit_host(vhost);
   3160		break;
   3161	case IBMVFC_AE_ELS_LOGO:
   3162	case IBMVFC_AE_ELS_PRLO:
   3163	case IBMVFC_AE_ELS_PLOGI:
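        		/*
        		 * An event carrying no scsi_id, wwpn, or node_name cannot
        		 * be matched to a target and is ignored; otherwise each
        		 * populated field must match the target.
        		 */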
   3164		list_for_each_entry(tgt, &vhost->targets, queue) {
   3165			if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
   3166				break;
   3167			if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
   3168				continue;
   3169			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
   3170				continue;
   3171			if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
   3172				continue;
   3173			if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
   3174				tgt->logo_rcvd = 1;
   3175			if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
   3176				ibmvfc_del_tgt(tgt);
   3177				ibmvfc_reinit_host(vhost);
   3178			}
   3179		}
   3180		break;
   3181	case IBMVFC_AE_LINK_DOWN:
   3182	case IBMVFC_AE_ADAPTER_FAILED:
   3183		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
   3184		break;
   3185	case IBMVFC_AE_LINK_DEAD:
   3186		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   3187		break;
   3188	case IBMVFC_AE_HALT:
   3189		ibmvfc_link_down(vhost, IBMVFC_HALTED);
   3190		break;
   3191	default:
    3192		dev_err(vhost->dev, "Unknown async event received: %lld\n", be64_to_cpu(crq->event));
   3193		break;
   3194	}
   3195}
   3196
   3197/**
   3198 * ibmvfc_handle_crq - Handles and frees received events in the CRQ
   3199 * @crq:	Command/Response queue
   3200 * @vhost:	ibmvfc host struct
   3201 * @evt_doneq:	Event done queue
   3202 *
    3203 **/
   3204static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
   3205			      struct list_head *evt_doneq)
   3206{
   3207	long rc;
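        	/*
        	 * For command responses, ioba carries back the correlation tag
        	 * set in ibmvfc_send_event(): the event pointer itself.
        	 */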
   3208	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
   3209
   3210	switch (crq->valid) {
   3211	case IBMVFC_CRQ_INIT_RSP:
   3212		switch (crq->format) {
   3213		case IBMVFC_CRQ_INIT:
   3214			dev_info(vhost->dev, "Partner initialized\n");
   3215			/* Send back a response */
   3216			rc = ibmvfc_send_crq_init_complete(vhost);
   3217			if (rc == 0)
   3218				ibmvfc_init_host(vhost);
   3219			else
   3220				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
   3221			break;
   3222		case IBMVFC_CRQ_INIT_COMPLETE:
   3223			dev_info(vhost->dev, "Partner initialization complete\n");
   3224			ibmvfc_init_host(vhost);
   3225			break;
   3226		default:
   3227			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
   3228		}
   3229		return;
   3230	case IBMVFC_CRQ_XPORT_EVENT:
   3231		vhost->state = IBMVFC_NO_CRQ;
   3232		vhost->logged_in = 0;
   3233		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
   3234		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
   3235			/* We need to re-setup the interpartition connection */
   3236			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
   3237			vhost->client_migrated = 1;
   3238			ibmvfc_purge_requests(vhost, DID_REQUEUE);
   3239			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
   3240			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
   3241		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
   3242			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
   3243			ibmvfc_purge_requests(vhost, DID_ERROR);
   3244			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
   3245			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
   3246		} else {
   3247			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
   3248		}
   3249		return;
   3250	case IBMVFC_CRQ_CMD_RSP:
   3251		break;
   3252	default:
   3253		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
   3254		return;
   3255	}
   3256
   3257	if (crq->format == IBMVFC_ASYNC_EVENT)
   3258		return;
   3259
   3260	/* The only kind of payload CRQs we should get are responses to
   3261	 * things we send. Make sure this response is to something we
    3262	 * actually sent.
   3263	 */
   3264	if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
   3265		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
    3266			be64_to_cpu(crq->ioba));
   3267		return;
   3268	}
   3269
   3270	if (unlikely(atomic_dec_if_positive(&evt->active))) {
   3271		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
    3272			be64_to_cpu(crq->ioba));
   3273		return;
   3274	}
   3275
   3276	spin_lock(&evt->queue->l_lock);
   3277	list_move_tail(&evt->queue_list, evt_doneq);
   3278	spin_unlock(&evt->queue->l_lock);
   3279}
   3280
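        /*
         * The ioba field of a command-response CRQ carries back the event
         * pointer we handed to the VIOS at submit time, so it doubles as a
         * correlation token. ibmvfc_valid_event() verifies the pointer lies
         * within the event pool, and atomic_dec_if_positive(&evt->active)
         * passes only for the 1 -> 0 transition, so stale or duplicate
         * completions are logged and dropped rather than completing an
         * event twice.
         */
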
   3281/**
   3282 * ibmvfc_scan_finished - Check if the device scan is done.
   3283 * @shost:	scsi host struct
   3284 * @time:	current elapsed time
   3285 *
   3286 * Returns:
   3287 *	0 if scan is not done / 1 if scan is done
   3288 **/
   3289static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
   3290{
   3291	unsigned long flags;
   3292	struct ibmvfc_host *vhost = shost_priv(shost);
   3293	int done = 0;
   3294
   3295	spin_lock_irqsave(shost->host_lock, flags);
   3296	if (!vhost->scan_timeout)
   3297		done = 1;
   3298	else if (time >= (vhost->scan_timeout * HZ)) {
   3299		dev_info(vhost->dev, "Scan taking longer than %d seconds, "
   3300			 "continuing initialization\n", vhost->scan_timeout);
   3301		done = 1;
   3302	}
   3303
   3304	if (vhost->scan_complete) {
   3305		vhost->scan_timeout = init_timeout;
   3306		done = 1;
   3307	}
   3308	spin_unlock_irqrestore(shost->host_lock, flags);
   3309	return done;
   3310}
   3311
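        /*
         * The midlayer polls this during an asynchronous scan. A
         * scan_timeout of zero means don't hold up initialization at all;
         * otherwise the scan may block progress for at most scan_timeout
         * seconds. Once a scan completes, scan_timeout is reset to
         * init_timeout for any subsequent rescans.
         */
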
   3312/**
   3313 * ibmvfc_slave_alloc - Setup the device's task set value
   3314 * @sdev:	struct scsi_device device to configure
   3315 *
   3316 * Set the device's task set value so that error handling works as
   3317 * expected.
   3318 *
   3319 * Returns:
   3320 *	0 on success / -ENXIO if device does not exist
   3321 **/
   3322static int ibmvfc_slave_alloc(struct scsi_device *sdev)
   3323{
   3324	struct Scsi_Host *shost = sdev->host;
   3325	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
   3326	struct ibmvfc_host *vhost = shost_priv(shost);
   3327	unsigned long flags = 0;
   3328
   3329	if (!rport || fc_remote_port_chkready(rport))
   3330		return -ENXIO;
   3331
   3332	spin_lock_irqsave(shost->host_lock, flags);
   3333	sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
   3334	spin_unlock_irqrestore(shost->host_lock, flags);
   3335	return 0;
   3336}
   3337
   3338/**
   3339 * ibmvfc_target_alloc - Setup the target's task set value
   3340 * @starget:	struct scsi_target
   3341 *
   3342 * Set the target's task set value so that error handling works as
   3343 * expected.
   3344 *
   3345 * Returns:
   3346 *	0 on success / -ENXIO if device does not exist
   3347 **/
   3348static int ibmvfc_target_alloc(struct scsi_target *starget)
   3349{
   3350	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
   3351	struct ibmvfc_host *vhost = shost_priv(shost);
   3352	unsigned long flags = 0;
   3353
   3354	spin_lock_irqsave(shost->host_lock, flags);
   3355	starget->hostdata = (void *)(unsigned long)vhost->task_set++;
   3356	spin_unlock_irqrestore(shost->host_lock, flags);
   3357	return 0;
   3358}
   3359
   3360/**
   3361 * ibmvfc_slave_configure - Configure the device
   3362 * @sdev:	struct scsi_device device to configure
   3363 *
   3364 * Enable allow_restart for a device if it is a disk. Adjust the
   3365 * queue_depth here also.
   3366 *
   3367 * Returns:
   3368 *	0
   3369 **/
   3370static int ibmvfc_slave_configure(struct scsi_device *sdev)
   3371{
   3372	struct Scsi_Host *shost = sdev->host;
   3373	unsigned long flags = 0;
   3374
   3375	spin_lock_irqsave(shost->host_lock, flags);
   3376	if (sdev->type == TYPE_DISK) {
   3377		sdev->allow_restart = 1;
   3378		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
   3379	}
   3380	spin_unlock_irqrestore(shost->host_lock, flags);
   3381	return 0;
   3382}
   3383
   3384/**
   3385 * ibmvfc_change_queue_depth - Change the device's queue depth
   3386 * @sdev:	scsi device struct
   3387 * @qdepth:	depth to set
   3388 *
   3389 * Return value:
   3390 * 	actual depth set
   3391 **/
   3392static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
   3393{
   3394	if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
   3395		qdepth = IBMVFC_MAX_CMDS_PER_LUN;
   3396
   3397	return scsi_change_queue_depth(sdev, qdepth);
   3398}
   3399
   3400static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
   3401						 struct device_attribute *attr, char *buf)
   3402{
   3403	struct Scsi_Host *shost = class_to_shost(dev);
   3404	struct ibmvfc_host *vhost = shost_priv(shost);
   3405
   3406	return snprintf(buf, PAGE_SIZE, "%s\n",
   3407			vhost->login_buf->resp.partition_name);
   3408}
   3409
   3410static ssize_t ibmvfc_show_host_device_name(struct device *dev,
   3411					    struct device_attribute *attr, char *buf)
   3412{
   3413	struct Scsi_Host *shost = class_to_shost(dev);
   3414	struct ibmvfc_host *vhost = shost_priv(shost);
   3415
   3416	return snprintf(buf, PAGE_SIZE, "%s\n",
   3417			vhost->login_buf->resp.device_name);
   3418}
   3419
   3420static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
   3421					 struct device_attribute *attr, char *buf)
   3422{
   3423	struct Scsi_Host *shost = class_to_shost(dev);
   3424	struct ibmvfc_host *vhost = shost_priv(shost);
   3425
   3426	return snprintf(buf, PAGE_SIZE, "%s\n",
   3427			vhost->login_buf->resp.port_loc_code);
   3428}
   3429
   3430static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
   3431					 struct device_attribute *attr, char *buf)
   3432{
   3433	struct Scsi_Host *shost = class_to_shost(dev);
   3434	struct ibmvfc_host *vhost = shost_priv(shost);
   3435
   3436	return snprintf(buf, PAGE_SIZE, "%s\n",
   3437			vhost->login_buf->resp.drc_name);
   3438}
   3439
   3440static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
   3441					     struct device_attribute *attr, char *buf)
   3442{
   3443	struct Scsi_Host *shost = class_to_shost(dev);
   3444	struct ibmvfc_host *vhost = shost_priv(shost);
   3445	return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
   3446}
   3447
   3448static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
   3449					     struct device_attribute *attr, char *buf)
   3450{
   3451	struct Scsi_Host *shost = class_to_shost(dev);
   3452	struct ibmvfc_host *vhost = shost_priv(shost);
   3453	return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
   3454}
   3455
   3456/**
   3457 * ibmvfc_show_log_level - Show the adapter's error logging level
   3458 * @dev:	class device struct
   3459 * @attr:	unused
   3460 * @buf:	buffer
   3461 *
   3462 * Return value:
   3463 * 	number of bytes printed to buffer
   3464 **/
   3465static ssize_t ibmvfc_show_log_level(struct device *dev,
   3466				     struct device_attribute *attr, char *buf)
   3467{
   3468	struct Scsi_Host *shost = class_to_shost(dev);
   3469	struct ibmvfc_host *vhost = shost_priv(shost);
   3470	unsigned long flags = 0;
   3471	int len;
   3472
   3473	spin_lock_irqsave(shost->host_lock, flags);
   3474	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
   3475	spin_unlock_irqrestore(shost->host_lock, flags);
   3476	return len;
   3477}
   3478
   3479/**
   3480 * ibmvfc_store_log_level - Change the adapter's error logging level
   3481 * @dev:	class device struct
   3482 * @attr:	unused
   3483 * @buf:	buffer
   3484 * @count:      buffer size
   3485 *
   3486 * Return value:
   3487 * 	number of bytes printed to buffer
   3488 **/
   3489static ssize_t ibmvfc_store_log_level(struct device *dev,
   3490				      struct device_attribute *attr,
   3491				      const char *buf, size_t count)
   3492{
   3493	struct Scsi_Host *shost = class_to_shost(dev);
   3494	struct ibmvfc_host *vhost = shost_priv(shost);
   3495	unsigned long flags = 0;
   3496
   3497	spin_lock_irqsave(shost->host_lock, flags);
   3498	vhost->log_level = simple_strtoul(buf, NULL, 10);
   3499	spin_unlock_irqrestore(shost->host_lock, flags);
   3500	return strlen(buf);
   3501}
   3502
   3503static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
   3504					 struct device_attribute *attr, char *buf)
   3505{
   3506	struct Scsi_Host *shost = class_to_shost(dev);
   3507	struct ibmvfc_host *vhost = shost_priv(shost);
   3508	unsigned long flags = 0;
   3509	int len;
   3510
   3511	spin_lock_irqsave(shost->host_lock, flags);
   3512	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->client_scsi_channels);
   3513	spin_unlock_irqrestore(shost->host_lock, flags);
   3514	return len;
   3515}
   3516
   3517static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
   3518					 struct device_attribute *attr,
   3519					 const char *buf, size_t count)
   3520{
   3521	struct Scsi_Host *shost = class_to_shost(dev);
   3522	struct ibmvfc_host *vhost = shost_priv(shost);
   3523	unsigned long flags = 0;
   3524	unsigned int channels;
   3525
   3526	spin_lock_irqsave(shost->host_lock, flags);
   3527	channels = simple_strtoul(buf, NULL, 10);
   3528	vhost->client_scsi_channels = min(channels, nr_scsi_hw_queues);
   3529	ibmvfc_hard_reset_host(vhost);
   3530	spin_unlock_irqrestore(shost->host_lock, flags);
   3531	return strlen(buf);
   3532}
   3533
   3534static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
   3535static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
   3536static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
   3537static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
   3538static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
   3539static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
   3540static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
   3541		   ibmvfc_show_log_level, ibmvfc_store_log_level);
   3542static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
   3543		   ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels);
   3544
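        /*
         * These attributes appear under the Scsi_Host's sysfs directory,
         * e.g. /sys/class/scsi_host/host<N>/partition_name (illustrative
         * path). Note that writing nr_scsi_channels triggers a hard reset
         * of the host so the new channel count can be renegotiated.
         */
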
   3545#ifdef CONFIG_SCSI_IBMVFC_TRACE
   3546/**
   3547 * ibmvfc_read_trace - Dump the adapter trace
   3548 * @filp:		open sysfs file
   3549 * @kobj:		kobject struct
   3550 * @bin_attr:	bin_attribute struct
   3551 * @buf:		buffer
   3552 * @off:		offset
   3553 * @count:		buffer size
   3554 *
   3555 * Return value:
   3556 *	number of bytes printed to buffer
   3557 **/
   3558static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
   3559				 struct bin_attribute *bin_attr,
   3560				 char *buf, loff_t off, size_t count)
   3561{
   3562	struct device *dev = kobj_to_dev(kobj);
   3563	struct Scsi_Host *shost = class_to_shost(dev);
   3564	struct ibmvfc_host *vhost = shost_priv(shost);
   3565	unsigned long flags = 0;
   3566	int size = IBMVFC_TRACE_SIZE;
   3567	char *src = (char *)vhost->trace;
   3568
   3569	if (off > size)
   3570		return 0;
   3571	if (off + count > size) {
   3572		size -= off;
   3573		count = size;
   3574	}
   3575
   3576	spin_lock_irqsave(shost->host_lock, flags);
   3577	memcpy(buf, &src[off], count);
   3578	spin_unlock_irqrestore(shost->host_lock, flags);
   3579	return count;
   3580}
   3581
   3582static struct bin_attribute ibmvfc_trace_attr = {
   3583	.attr =	{
   3584		.name = "trace",
   3585		.mode = S_IRUGO,
   3586	},
   3587	.size = 0,
   3588	.read = ibmvfc_read_trace,
   3589};
   3590#endif
   3591
   3592static struct attribute *ibmvfc_host_attrs[] = {
   3593	&dev_attr_partition_name.attr,
   3594	&dev_attr_device_name.attr,
   3595	&dev_attr_port_loc_code.attr,
   3596	&dev_attr_drc_name.attr,
   3597	&dev_attr_npiv_version.attr,
   3598	&dev_attr_capabilities.attr,
   3599	&dev_attr_log_level.attr,
   3600	&dev_attr_nr_scsi_channels.attr,
   3601	NULL
   3602};
   3603
   3604ATTRIBUTE_GROUPS(ibmvfc_host);
   3605
   3606static struct scsi_host_template driver_template = {
   3607	.module = THIS_MODULE,
   3608	.name = "IBM POWER Virtual FC Adapter",
   3609	.proc_name = IBMVFC_NAME,
   3610	.queuecommand = ibmvfc_queuecommand,
   3611	.eh_timed_out = fc_eh_timed_out,
   3612	.eh_abort_handler = ibmvfc_eh_abort_handler,
   3613	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
   3614	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
   3615	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
   3616	.slave_alloc = ibmvfc_slave_alloc,
   3617	.slave_configure = ibmvfc_slave_configure,
   3618	.target_alloc = ibmvfc_target_alloc,
   3619	.scan_finished = ibmvfc_scan_finished,
   3620	.change_queue_depth = ibmvfc_change_queue_depth,
   3621	.cmd_per_lun = 16,
   3622	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
   3623	.this_id = -1,
   3624	.sg_tablesize = SG_ALL,
   3625	.max_sectors = IBMVFC_MAX_SECTORS,
   3626	.shost_groups = ibmvfc_host_groups,
   3627	.track_queue_depth = 1,
   3628	.host_tagset = 1,
   3629};
   3630
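        /*
         * host_tagset = 1 tells blk-mq that all hardware queues share one
         * tag space sized by can_queue, which fits this adapter: the
         * request limit negotiated with the VIOS is global rather than
         * per-queue.
         */
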
   3631/**
   3632 * ibmvfc_next_async_crq - Returns the next entry in async queue
   3633 * @vhost:	ibmvfc host struct
   3634 *
   3635 * Returns:
   3636 *	Pointer to next entry in queue / NULL if empty
   3637 **/
   3638static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
   3639{
   3640	struct ibmvfc_queue *async_crq = &vhost->async_crq;
   3641	struct ibmvfc_async_crq *crq;
   3642
   3643	crq = &async_crq->msgs.async[async_crq->cur];
   3644	if (crq->valid & 0x80) {
   3645		if (++async_crq->cur == async_crq->size)
   3646			async_crq->cur = 0;
   3647		rmb();
   3648	} else
   3649		crq = NULL;
   3650
   3651	return crq;
   3652}
   3653
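        /*
         * A queue entry is valid when the top bit of its first byte (0x80)
         * is set; the VIOS writes entries in place and the consumer clears
         * valid once done with one. The rmb() orders the valid-bit check
         * against the subsequent reads of the entry payload so we never
         * parse a half-written entry. ibmvfc_next_crq() and
         * ibmvfc_next_scrq() below follow the same protocol.
         */
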
   3654/**
   3655 * ibmvfc_next_crq - Returns the next entry in message queue
   3656 * @vhost:	ibmvfc host struct
   3657 *
   3658 * Returns:
   3659 *	Pointer to next entry in queue / NULL if empty
   3660 **/
   3661static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
   3662{
   3663	struct ibmvfc_queue *queue = &vhost->crq;
   3664	struct ibmvfc_crq *crq;
   3665
   3666	crq = &queue->msgs.crq[queue->cur];
   3667	if (crq->valid & 0x80) {
   3668		if (++queue->cur == queue->size)
   3669			queue->cur = 0;
   3670		rmb();
   3671	} else
   3672		crq = NULL;
   3673
   3674	return crq;
   3675}
   3676
   3677/**
   3678 * ibmvfc_interrupt - Interrupt handler
   3679 * @irq:		number of irq to handle, not used
   3680 * @dev_instance: ibmvfc_host that received interrupt
   3681 *
   3682 * Returns:
   3683 *	IRQ_HANDLED
   3684 **/
   3685static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
   3686{
   3687	struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
   3688	unsigned long flags;
   3689
   3690	spin_lock_irqsave(vhost->host->host_lock, flags);
   3691	vio_disable_interrupts(to_vio_dev(vhost->dev));
   3692	tasklet_schedule(&vhost->tasklet);
   3693	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   3694	return IRQ_HANDLED;
   3695}
   3696
   3697/**
   3698 * ibmvfc_tasklet - Interrupt handler tasklet
   3699 * @data:		ibmvfc host struct
   3700 *
   3701 * Returns:
   3702 *	Nothing
   3703 **/
   3704static void ibmvfc_tasklet(void *data)
   3705{
   3706	struct ibmvfc_host *vhost = data;
   3707	struct vio_dev *vdev = to_vio_dev(vhost->dev);
   3708	struct ibmvfc_crq *crq;
   3709	struct ibmvfc_async_crq *async;
   3710	struct ibmvfc_event *evt, *temp;
   3711	unsigned long flags;
   3712	int done = 0;
   3713	LIST_HEAD(evt_doneq);
   3714
   3715	spin_lock_irqsave(vhost->host->host_lock, flags);
   3716	spin_lock(vhost->crq.q_lock);
   3717	while (!done) {
   3718		/* Pull all the valid messages off the async CRQ */
   3719		while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
   3720			ibmvfc_handle_async(async, vhost);
   3721			async->valid = 0;
   3722			wmb();
   3723		}
   3724
   3725		/* Pull all the valid messages off the CRQ */
   3726		while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
   3727			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
   3728			crq->valid = 0;
   3729			wmb();
   3730		}
   3731
   3732		vio_enable_interrupts(vdev);
   3733		if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
   3734			vio_disable_interrupts(vdev);
   3735			ibmvfc_handle_async(async, vhost);
   3736			async->valid = 0;
   3737			wmb();
   3738		} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
   3739			vio_disable_interrupts(vdev);
   3740			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
   3741			crq->valid = 0;
   3742			wmb();
   3743		} else
   3744			done = 1;
   3745	}
   3746
   3747	spin_unlock(vhost->crq.q_lock);
   3748	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   3749
   3750	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
   3751		del_timer(&evt->timer);
   3752		list_del(&evt->queue_list);
   3753		ibmvfc_trc_end(evt);
   3754		evt->done(evt);
   3755	}
   3756}
   3757
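        /*
         * The loop above is the usual lost-interrupt avoidance pattern:
         * drain both queues with interrupts disabled, re-enable interrupts,
         * then peek once more. If an entry slipped in during that window we
         * disable interrupts and go around again; otherwise we exit with
         * interrupts armed. Completions run only after both locks are
         * dropped, since evt->done() may take other locks.
         */
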
   3758static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
   3759{
   3760	struct device *dev = scrq->vhost->dev;
   3761	struct vio_dev *vdev = to_vio_dev(dev);
   3762	unsigned long rc;
   3763	int irq_action = H_ENABLE_VIO_INTERRUPT;
   3764
   3765	if (!enable)
   3766		irq_action = H_DISABLE_VIO_INTERRUPT;
   3767
   3768	rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action,
   3769				scrq->hw_irq, 0, 0);
   3770
   3771	if (rc)
   3772		dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n",
   3773			enable ? "enable" : "disable", scrq->hwq_id, rc);
   3774
   3775	return rc;
   3776}
   3777
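        /*
         * Sub-CRQ interrupts have their own hardware IRQ per queue, toggled
         * through the H_VIOCTL hypercall; vio_enable_interrupts() and
         * vio_disable_interrupts() only cover the device's primary CRQ
         * interrupt.
         */
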
   3778static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
   3779			       struct list_head *evt_doneq)
   3780{
   3781	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
   3782
   3783	switch (crq->valid) {
   3784	case IBMVFC_CRQ_CMD_RSP:
   3785		break;
   3786	case IBMVFC_CRQ_XPORT_EVENT:
   3787		return;
   3788	default:
    3789		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
   3790		return;
   3791	}
   3792
   3793	/* The only kind of payload CRQs we should get are responses to
   3794	 * things we send. Make sure this response is to something we
    3795	 * actually sent.
   3796	 */
   3797	if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
   3798		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
    3799			be64_to_cpu(crq->ioba));
   3800		return;
   3801	}
   3802
   3803	if (unlikely(atomic_dec_if_positive(&evt->active))) {
   3804		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
    3805			be64_to_cpu(crq->ioba));
   3806		return;
   3807	}
   3808
   3809	spin_lock(&evt->queue->l_lock);
   3810	list_move_tail(&evt->queue_list, evt_doneq);
   3811	spin_unlock(&evt->queue->l_lock);
   3812}
   3813
   3814static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
   3815{
   3816	struct ibmvfc_crq *crq;
   3817
   3818	crq = &scrq->msgs.scrq[scrq->cur].crq;
   3819	if (crq->valid & 0x80) {
   3820		if (++scrq->cur == scrq->size)
   3821			scrq->cur = 0;
   3822		rmb();
   3823	} else
   3824		crq = NULL;
   3825
   3826	return crq;
   3827}
   3828
   3829static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
   3830{
   3831	struct ibmvfc_crq *crq;
   3832	struct ibmvfc_event *evt, *temp;
   3833	unsigned long flags;
   3834	int done = 0;
   3835	LIST_HEAD(evt_doneq);
   3836
   3837	spin_lock_irqsave(scrq->q_lock, flags);
   3838	while (!done) {
   3839		while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
   3840			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
   3841			crq->valid = 0;
   3842			wmb();
   3843		}
   3844
   3845		ibmvfc_toggle_scrq_irq(scrq, 1);
   3846		if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
   3847			ibmvfc_toggle_scrq_irq(scrq, 0);
   3848			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
   3849			crq->valid = 0;
   3850			wmb();
   3851		} else
   3852			done = 1;
   3853	}
   3854	spin_unlock_irqrestore(scrq->q_lock, flags);
   3855
   3856	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
   3857		del_timer(&evt->timer);
   3858		list_del(&evt->queue_list);
   3859		ibmvfc_trc_end(evt);
   3860		evt->done(evt);
   3861	}
   3862}
   3863
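        /*
         * Same drain/re-arm/peek pattern as ibmvfc_tasklet(), applied per
         * sub-CRQ under its own q_lock with the queue's interrupt masked,
         * and again completions are only run once the lock is dropped.
         */
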
   3864static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scrq_instance)
   3865{
   3866	struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
   3867
   3868	ibmvfc_toggle_scrq_irq(scrq, 0);
   3869	ibmvfc_drain_sub_crq(scrq);
   3870
   3871	return IRQ_HANDLED;
   3872}
   3873
   3874/**
   3875 * ibmvfc_init_tgt - Set the next init job step for the target
   3876 * @tgt:		ibmvfc target struct
   3877 * @job_step:	job step to perform
   3878 *
   3879 **/
   3880static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
   3881			    void (*job_step) (struct ibmvfc_target *))
   3882{
   3883	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
   3884		tgt->job_step = job_step;
   3885	wake_up(&tgt->vhost->work_wait_q);
   3886}
   3887
   3888/**
   3889 * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
   3890 * @tgt:		ibmvfc target struct
   3891 * @job_step:	initialization job step
   3892 *
   3893 * Returns: 1 if step will be retried / 0 if not
   3894 *
   3895 **/
   3896static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
   3897				  void (*job_step) (struct ibmvfc_target *))
   3898{
   3899	if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
   3900		ibmvfc_del_tgt(tgt);
   3901		wake_up(&tgt->vhost->work_wait_q);
   3902		return 0;
   3903	} else
   3904		ibmvfc_init_tgt(tgt, job_step);
   3905	return 1;
   3906}
   3907
   3908/* Defined in FC-LS */
   3909static const struct {
   3910	int code;
   3911	int retry;
   3912	int logged_in;
   3913} prli_rsp [] = {
   3914	{ 0, 1, 0 },
   3915	{ 1, 0, 1 },
   3916	{ 2, 1, 0 },
   3917	{ 3, 1, 0 },
   3918	{ 4, 0, 0 },
   3919	{ 5, 0, 0 },
   3920	{ 6, 0, 1 },
   3921	{ 7, 0, 0 },
   3922	{ 8, 1, 0 },
   3923};
   3924
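        /*
         * prli_rsp[] maps the 4-bit response code from the PRLI service
         * parameter page (bits 11:8 of the flags word, extracted by
         * ibmvfc_get_prli_rsp() below) to whether the PRLI should be
         * retried and whether it left us logged in. Per FC-LS, code 1 is
         * "request executed"; the remaining codes are failures, some of
         * them retryable.
         */
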
   3925/**
   3926 * ibmvfc_get_prli_rsp - Find PRLI response index
   3927 * @flags:	PRLI response flags
   3928 *
   3929 **/
   3930static int ibmvfc_get_prli_rsp(u16 flags)
   3931{
   3932	int i;
   3933	int code = (flags & 0x0f00) >> 8;
   3934
   3935	for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
   3936		if (prli_rsp[i].code == code)
   3937			return i;
   3938
   3939	return 0;
   3940}
   3941
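        /*
         * Worked example: a response flags word of 0x2100 yields code
         * (0x2100 & 0x0f00) >> 8 == 1, selecting prli_rsp[1] = { 1, 0, 1 },
         * i.e. logged in, no retry. If the image-pair bit is also set
         * (assuming IBMVFC_PRLI_EST_IMG_PAIR is the 0x2000 bit, as this
         * example presumes), ibmvfc_tgt_prli_done() treats the target as
         * fully logged in.
         */
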
   3942/**
   3943 * ibmvfc_tgt_prli_done - Completion handler for Process Login
   3944 * @evt:	ibmvfc event struct
   3945 *
   3946 **/
   3947static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
   3948{
   3949	struct ibmvfc_target *tgt = evt->tgt;
   3950	struct ibmvfc_host *vhost = evt->vhost;
   3951	struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
   3952	struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
   3953	u32 status = be16_to_cpu(rsp->common.status);
   3954	int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
   3955
   3956	vhost->discovery_threads--;
   3957	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
   3958	switch (status) {
   3959	case IBMVFC_MAD_SUCCESS:
    3960		tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n", parms->type,
    3961			be16_to_cpu(parms->flags), be32_to_cpu(parms->service_parms));
   3962
   3963		if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
   3964			index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
   3965			if (prli_rsp[index].logged_in) {
   3966				if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
   3967					tgt->need_login = 0;
   3968					tgt->ids.roles = 0;
   3969					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
   3970						tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
   3971					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
   3972						tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
   3973					tgt->add_rport = 1;
   3974				} else
   3975					ibmvfc_del_tgt(tgt);
   3976			} else if (prli_rsp[index].retry)
   3977				ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
   3978			else
   3979				ibmvfc_del_tgt(tgt);
   3980		} else
   3981			ibmvfc_del_tgt(tgt);
   3982		break;
   3983	case IBMVFC_MAD_DRIVER_FAILED:
   3984		break;
   3985	case IBMVFC_MAD_CRQ_ERROR:
   3986		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
   3987		break;
   3988	case IBMVFC_MAD_FAILED:
   3989	default:
   3990		if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
   3991		     be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
   3992			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
   3993		else if (tgt->logo_rcvd)
   3994			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
   3995		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
   3996			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
   3997		else
   3998			ibmvfc_del_tgt(tgt);
   3999
   4000		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
   4001			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
   4002			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
   4003		break;
   4004	}
   4005
   4006	kref_put(&tgt->kref, ibmvfc_release_tgt);
   4007	ibmvfc_free_event(evt);
   4008	wake_up(&vhost->work_wait_q);
   4009}
   4010
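        /*
         * The "level += ibmvfc_retry_tgt_init(...)" idiom above raises the
         * required verbosity by one when the step will be retried: tgt_log()
         * only emits if the host's log_level is at least the given level,
         * so transient failures that are being retried stay quiet unless
         * logging has been turned up.
         */
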
   4011/**
   4012 * ibmvfc_tgt_send_prli - Send a process login
   4013 * @tgt:	ibmvfc target struct
   4014 *
   4015 **/
   4016static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
   4017{
   4018	struct ibmvfc_process_login *prli;
   4019	struct ibmvfc_host *vhost = tgt->vhost;
   4020	struct ibmvfc_event *evt;
   4021
   4022	if (vhost->discovery_threads >= disc_threads)
   4023		return;
   4024
   4025	kref_get(&tgt->kref);
   4026	evt = ibmvfc_get_event(&vhost->crq);
   4027	vhost->discovery_threads++;
   4028	ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
   4029	evt->tgt = tgt;
   4030	prli = &evt->iu.prli;
   4031	memset(prli, 0, sizeof(*prli));
   4032	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
   4033		prli->common.version = cpu_to_be32(2);
   4034		prli->target_wwpn = cpu_to_be64(tgt->wwpn);
   4035	} else {
   4036		prli->common.version = cpu_to_be32(1);
   4037	}
   4038	prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
   4039	prli->common.length = cpu_to_be16(sizeof(*prli));
   4040	prli->scsi_id = cpu_to_be64(tgt->scsi_id);
   4041
   4042	prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
   4043	prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
   4044	prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
   4045	prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
   4046
   4047	if (cls3_error)
   4048		prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
   4049
   4050	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
   4051	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
   4052		vhost->discovery_threads--;
   4053		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
   4054		kref_put(&tgt->kref, ibmvfc_release_tgt);
   4055	} else
   4056		tgt_dbg(tgt, "Sent process login\n");
   4057}
   4058
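        /*
         * MADs are built as version 2 with an explicit target_wwpn only
         * when the VIOS advertised IBMVFC_HANDLE_VF_WWPN in its NPIV login
         * response (see ibmvfc_check_caps()); otherwise the version 1
         * layout, addressed purely by scsi_id, is used. PLOGI and the ADISC
         * cancel TMF below do the same dance.
         */
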
   4059/**
   4060 * ibmvfc_tgt_plogi_done - Completion handler for Port Login
   4061 * @evt:	ibmvfc event struct
   4062 *
   4063 **/
   4064static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
   4065{
   4066	struct ibmvfc_target *tgt = evt->tgt;
   4067	struct ibmvfc_host *vhost = evt->vhost;
   4068	struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
   4069	u32 status = be16_to_cpu(rsp->common.status);
   4070	int level = IBMVFC_DEFAULT_LOG_LEVEL;
   4071
   4072	vhost->discovery_threads--;
   4073	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
   4074	switch (status) {
   4075	case IBMVFC_MAD_SUCCESS:
   4076		tgt_dbg(tgt, "Port Login succeeded\n");
   4077		if (tgt->ids.port_name &&
   4078		    tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
   4079			vhost->reinit = 1;
   4080			tgt_dbg(tgt, "Port re-init required\n");
   4081			break;
   4082		}
   4083		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
   4084		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
   4085		tgt->ids.port_id = tgt->scsi_id;
   4086		memcpy(&tgt->service_parms, &rsp->service_parms,
   4087		       sizeof(tgt->service_parms));
   4088		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
   4089		       sizeof(tgt->service_parms_change));
   4090		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
   4091		break;
   4092	case IBMVFC_MAD_DRIVER_FAILED:
   4093		break;
   4094	case IBMVFC_MAD_CRQ_ERROR:
   4095		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
   4096		break;
   4097	case IBMVFC_MAD_FAILED:
   4098	default:
   4099		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
   4100			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
   4101		else
   4102			ibmvfc_del_tgt(tgt);
   4103
   4104		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
   4105			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
   4106					     be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
   4107			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
   4108			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
   4109		break;
   4110	}
   4111
   4112	kref_put(&tgt->kref, ibmvfc_release_tgt);
   4113	ibmvfc_free_event(evt);
   4114	wake_up(&vhost->work_wait_q);
   4115}
   4116
   4117/**
   4118 * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
   4119 * @tgt:	ibmvfc target struct
   4120 *
   4121 **/
   4122static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
   4123{
   4124	struct ibmvfc_port_login *plogi;
   4125	struct ibmvfc_host *vhost = tgt->vhost;
   4126	struct ibmvfc_event *evt;
   4127
   4128	if (vhost->discovery_threads >= disc_threads)
   4129		return;
   4130
   4131	kref_get(&tgt->kref);
   4132	tgt->logo_rcvd = 0;
   4133	evt = ibmvfc_get_event(&vhost->crq);
   4134	vhost->discovery_threads++;
   4135	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
   4136	ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
   4137	evt->tgt = tgt;
   4138	plogi = &evt->iu.plogi;
   4139	memset(plogi, 0, sizeof(*plogi));
   4140	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
   4141		plogi->common.version = cpu_to_be32(2);
   4142		plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
   4143	} else {
   4144		plogi->common.version = cpu_to_be32(1);
   4145	}
   4146	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
   4147	plogi->common.length = cpu_to_be16(sizeof(*plogi));
   4148	plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
   4149
   4150	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
   4151		vhost->discovery_threads--;
   4152		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
   4153		kref_put(&tgt->kref, ibmvfc_release_tgt);
   4154	} else
   4155		tgt_dbg(tgt, "Sent port login\n");
   4156}
   4157
   4158/**
   4159 * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
   4160 * @evt:	ibmvfc event struct
   4161 *
   4162 **/
   4163static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
   4164{
   4165	struct ibmvfc_target *tgt = evt->tgt;
   4166	struct ibmvfc_host *vhost = evt->vhost;
   4167	struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
   4168	u32 status = be16_to_cpu(rsp->common.status);
   4169
   4170	vhost->discovery_threads--;
   4171	ibmvfc_free_event(evt);
   4172	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
   4173
   4174	switch (status) {
   4175	case IBMVFC_MAD_SUCCESS:
   4176		tgt_dbg(tgt, "Implicit Logout succeeded\n");
   4177		break;
   4178	case IBMVFC_MAD_DRIVER_FAILED:
   4179		kref_put(&tgt->kref, ibmvfc_release_tgt);
   4180		wake_up(&vhost->work_wait_q);
   4181		return;
   4182	case IBMVFC_MAD_FAILED:
   4183	default:
   4184		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
   4185		break;
   4186	}
   4187
   4188	ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
   4189	kref_put(&tgt->kref, ibmvfc_release_tgt);
   4190	wake_up(&vhost->work_wait_q);
   4191}
   4192
   4193/**
   4194 * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
   4195 * @tgt:		ibmvfc target struct
   4196 * @done:		Routine to call when the event is responded to
   4197 *
   4198 * Returns:
   4199 *	Allocated and initialized ibmvfc_event struct
   4200 **/
   4201static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
   4202								 void (*done) (struct ibmvfc_event *))
   4203{
   4204	struct ibmvfc_implicit_logout *mad;
   4205	struct ibmvfc_host *vhost = tgt->vhost;
   4206	struct ibmvfc_event *evt;
   4207
   4208	kref_get(&tgt->kref);
   4209	evt = ibmvfc_get_event(&vhost->crq);
   4210	ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
   4211	evt->tgt = tgt;
   4212	mad = &evt->iu.implicit_logout;
   4213	memset(mad, 0, sizeof(*mad));
   4214	mad->common.version = cpu_to_be32(1);
   4215	mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
   4216	mad->common.length = cpu_to_be16(sizeof(*mad));
   4217	mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
   4218	return evt;
   4219}
   4220
   4221/**
   4222 * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
   4223 * @tgt:		ibmvfc target struct
   4224 *
   4225 **/
   4226static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
   4227{
   4228	struct ibmvfc_host *vhost = tgt->vhost;
   4229	struct ibmvfc_event *evt;
   4230
   4231	if (vhost->discovery_threads >= disc_threads)
   4232		return;
   4233
   4234	vhost->discovery_threads++;
   4235	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
   4236						   ibmvfc_tgt_implicit_logout_done);
   4237
   4238	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
   4239	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
   4240		vhost->discovery_threads--;
   4241		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
   4242		kref_put(&tgt->kref, ibmvfc_release_tgt);
   4243	} else
   4244		tgt_dbg(tgt, "Sent Implicit Logout\n");
   4245}
   4246
   4247/**
   4248 * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
   4249 * @evt:	ibmvfc event struct
   4250 *
   4251 **/
   4252static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
   4253{
   4254	struct ibmvfc_target *tgt = evt->tgt;
   4255	struct ibmvfc_host *vhost = evt->vhost;
   4256	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
   4257	u32 status = be16_to_cpu(mad->common.status);
   4258
   4259	vhost->discovery_threads--;
   4260	ibmvfc_free_event(evt);
   4261
   4262	/*
   4263	 * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
   4264	 * driver in which case we need to free up all the targets. If we are
   4265	 * not unloading, we will still go through a hard reset to get out of
   4266	 * offline state, so there is no need to track the old targets in that
   4267	 * case.
   4268	 */
   4269	if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
   4270		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
   4271	else
   4272		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
   4273
   4274	tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
   4275	kref_put(&tgt->kref, ibmvfc_release_tgt);
   4276	wake_up(&vhost->work_wait_q);
   4277}
   4278
   4279/**
   4280 * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
   4281 * @tgt:		ibmvfc target struct
   4282 *
   4283 **/
   4284static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
   4285{
   4286	struct ibmvfc_host *vhost = tgt->vhost;
   4287	struct ibmvfc_event *evt;
   4288
   4289	if (!vhost->logged_in) {
   4290		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
   4291		return;
   4292	}
   4293
   4294	if (vhost->discovery_threads >= disc_threads)
   4295		return;
   4296
   4297	vhost->discovery_threads++;
   4298	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
   4299						   ibmvfc_tgt_implicit_logout_and_del_done);
   4300
   4301	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
   4302	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
   4303		vhost->discovery_threads--;
   4304		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
   4305		kref_put(&tgt->kref, ibmvfc_release_tgt);
   4306	} else
   4307		tgt_dbg(tgt, "Sent Implicit Logout\n");
   4308}
   4309
   4310/**
   4311 * ibmvfc_tgt_move_login_done - Completion handler for Move Login
   4312 * @evt:	ibmvfc event struct
   4313 *
   4314 **/
   4315static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
   4316{
   4317	struct ibmvfc_target *tgt = evt->tgt;
   4318	struct ibmvfc_host *vhost = evt->vhost;
   4319	struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
   4320	u32 status = be16_to_cpu(rsp->common.status);
   4321	int level = IBMVFC_DEFAULT_LOG_LEVEL;
   4322
   4323	vhost->discovery_threads--;
   4324	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
   4325	switch (status) {
   4326	case IBMVFC_MAD_SUCCESS:
   4327		tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id);
   4328		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
   4329		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
   4330		tgt->scsi_id = tgt->new_scsi_id;
   4331		tgt->ids.port_id = tgt->scsi_id;
   4332		memcpy(&tgt->service_parms, &rsp->service_parms,
   4333		       sizeof(tgt->service_parms));
   4334		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
   4335		       sizeof(tgt->service_parms_change));
   4336		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
   4337		break;
   4338	case IBMVFC_MAD_DRIVER_FAILED:
   4339		break;
   4340	case IBMVFC_MAD_CRQ_ERROR:
   4341		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
   4342		break;
   4343	case IBMVFC_MAD_FAILED:
   4344	default:
   4345		level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
   4346
   4347		tgt_log(tgt, level,
   4348			"Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
   4349			tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
   4350			status);
   4351		break;
   4352	}
   4353
   4354	kref_put(&tgt->kref, ibmvfc_release_tgt);
   4355	ibmvfc_free_event(evt);
   4356	wake_up(&vhost->work_wait_q);
   4357}
   4358
   4359
   4360/**
   4361 * ibmvfc_tgt_move_login - Initiate a move login for specified target
   4362 * @tgt:		ibmvfc target struct
   4363 *
   4364 **/
   4365static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
   4366{
   4367	struct ibmvfc_host *vhost = tgt->vhost;
   4368	struct ibmvfc_move_login *move;
   4369	struct ibmvfc_event *evt;
   4370
   4371	if (vhost->discovery_threads >= disc_threads)
   4372		return;
   4373
   4374	kref_get(&tgt->kref);
   4375	evt = ibmvfc_get_event(&vhost->crq);
   4376	vhost->discovery_threads++;
   4377	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
   4378	ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
   4379	evt->tgt = tgt;
   4380	move = &evt->iu.move_login;
   4381	memset(move, 0, sizeof(*move));
   4382	move->common.version = cpu_to_be32(1);
   4383	move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
   4384	move->common.length = cpu_to_be16(sizeof(*move));
   4385
   4386	move->old_scsi_id = cpu_to_be64(tgt->scsi_id);
   4387	move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id);
   4388	move->wwpn = cpu_to_be64(tgt->wwpn);
   4389	move->node_name = cpu_to_be64(tgt->ids.node_name);
   4390
   4391	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
   4392		vhost->discovery_threads--;
   4393		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
   4394		kref_put(&tgt->kref, ibmvfc_release_tgt);
   4395	} else
   4396		tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id);
   4397}
   4398
   4399/**
   4400 * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
   4401 * @mad:	ibmvfc passthru mad struct
   4402 * @tgt:	ibmvfc target struct
   4403 *
   4404 * Returns:
   4405 *	1 if PLOGI needed / 0 if PLOGI not needed
   4406 **/
   4407static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
   4408				    struct ibmvfc_target *tgt)
   4409{
   4410	if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
   4411		return 1;
   4412	if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
   4413		return 1;
   4414	if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
   4415		return 1;
   4416	return 0;
   4417}
   4418
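        /*
         * The ADISC accept payload is viewed as 32-bit words: words 2-3
         * carry the port name, words 4-5 the node name, and word 6 the
         * N_Port ID (24 bits, upper byte zero). If any of these no longer
         * match what we have cached, the remote port has changed identity
         * and a fresh PLOGI is required.
         */
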
   4419/**
   4420 * ibmvfc_tgt_adisc_done - Completion handler for ADISC
   4421 * @evt:	ibmvfc event struct
   4422 *
   4423 **/
   4424static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
   4425{
   4426	struct ibmvfc_target *tgt = evt->tgt;
   4427	struct ibmvfc_host *vhost = evt->vhost;
   4428	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
   4429	u32 status = be16_to_cpu(mad->common.status);
   4430	u8 fc_reason, fc_explain;
   4431
   4432	vhost->discovery_threads--;
   4433	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
   4434	del_timer(&tgt->timer);
   4435
   4436	switch (status) {
   4437	case IBMVFC_MAD_SUCCESS:
   4438		tgt_dbg(tgt, "ADISC succeeded\n");
   4439		if (ibmvfc_adisc_needs_plogi(mad, tgt))
   4440			ibmvfc_del_tgt(tgt);
   4441		break;
   4442	case IBMVFC_MAD_DRIVER_FAILED:
   4443		break;
   4444	case IBMVFC_MAD_FAILED:
   4445	default:
   4446		ibmvfc_del_tgt(tgt);
   4447		fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
   4448		fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
   4449		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
   4450			 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
   4451			 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
   4452			 ibmvfc_get_fc_type(fc_reason), fc_reason,
   4453			 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
   4454		break;
   4455	}
   4456
   4457	kref_put(&tgt->kref, ibmvfc_release_tgt);
   4458	ibmvfc_free_event(evt);
   4459	wake_up(&vhost->work_wait_q);
   4460}
   4461
   4462/**
   4463 * ibmvfc_init_passthru - Initialize an event struct for FC passthru
   4464 * @evt:		ibmvfc event struct
   4465 *
   4466 **/
   4467static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
   4468{
   4469	struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
   4470
   4471	memset(mad, 0, sizeof(*mad));
   4472	mad->common.version = cpu_to_be32(1);
   4473	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
   4474	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
   4475	mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
   4476		offsetof(struct ibmvfc_passthru_mad, iu));
   4477	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
   4478	mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
   4479	mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
   4480	mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
   4481		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
   4482		offsetof(struct ibmvfc_passthru_fc_iu, payload));
   4483	mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
   4484	mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
   4485		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
   4486		offsetof(struct ibmvfc_passthru_fc_iu, response));
   4487	mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
   4488}
   4489
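        /*
         * The passthru MAD describes its own payload: cmd_ioba points back
         * into the event's DMA-mapped buffer at the offset of the embedded
         * iu, which in turn points at the fc_iu payload and response areas.
         * Everything lives in the single per-event mapping set up when the
         * event pool was allocated, so an ADISC needs no extra DMA
         * allocation.
         */
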
   4490/**
   4491 * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
   4492 * @evt:		ibmvfc event struct
   4493 *
   4494 * Just cleanup this event struct. Everything else is handled by
   4495 * the ADISC completion handler. If the ADISC never actually comes
   4496 * back, we still have the timer running on the ADISC event struct
   4497 * which will fire and cause the CRQ to get reset.
   4498 *
   4499 **/
   4500static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
   4501{
   4502	struct ibmvfc_host *vhost = evt->vhost;
   4503	struct ibmvfc_target *tgt = evt->tgt;
   4504
   4505	tgt_dbg(tgt, "ADISC cancel complete\n");
   4506	vhost->abort_threads--;
   4507	ibmvfc_free_event(evt);
   4508	kref_put(&tgt->kref, ibmvfc_release_tgt);
   4509	wake_up(&vhost->work_wait_q);
   4510}
   4511
   4512/**
   4513 * ibmvfc_adisc_timeout - Handle an ADISC timeout
    4514 * @t:		timer embedded in the ibmvfc target struct
   4515 *
   4516 * If an ADISC times out, send a cancel. If the cancel times
   4517 * out, reset the CRQ. When the ADISC comes back as cancelled,
   4518 * log back into the target.
   4519 **/
   4520static void ibmvfc_adisc_timeout(struct timer_list *t)
   4521{
   4522	struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
   4523	struct ibmvfc_host *vhost = tgt->vhost;
   4524	struct ibmvfc_event *evt;
   4525	struct ibmvfc_tmf *tmf;
   4526	unsigned long flags;
   4527	int rc;
   4528
   4529	tgt_dbg(tgt, "ADISC timeout\n");
   4530	spin_lock_irqsave(vhost->host->host_lock, flags);
   4531	if (vhost->abort_threads >= disc_threads ||
   4532	    tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
   4533	    vhost->state != IBMVFC_INITIALIZING ||
   4534	    vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
   4535		spin_unlock_irqrestore(vhost->host->host_lock, flags);
   4536		return;
   4537	}
   4538
   4539	vhost->abort_threads++;
   4540	kref_get(&tgt->kref);
   4541	evt = ibmvfc_get_event(&vhost->crq);
   4542	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
   4543
   4544	evt->tgt = tgt;
   4545	tmf = &evt->iu.tmf;
   4546	memset(tmf, 0, sizeof(*tmf));
   4547	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
   4548		tmf->common.version = cpu_to_be32(2);
   4549		tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
   4550	} else {
   4551		tmf->common.version = cpu_to_be32(1);
   4552	}
   4553	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
   4554	tmf->common.length = cpu_to_be16(sizeof(*tmf));
   4555	tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
   4556	tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
   4557
   4558	rc = ibmvfc_send_event(evt, vhost, default_timeout);
   4559
   4560	if (rc) {
   4561		tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
   4562		vhost->abort_threads--;
   4563		kref_put(&tgt->kref, ibmvfc_release_tgt);
   4564		__ibmvfc_reset_host(vhost);
   4565	} else
   4566		tgt_dbg(tgt, "Attempting to cancel ADISC\n");
   4567	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   4568}
   4569
   4570/**
   4571 * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
   4572 * @tgt:		ibmvfc target struct
   4573 *
   4574 * When sending an ADISC we end up with two timers running. The
   4575 * first timer is the timer in the ibmvfc target struct. If this
   4576 * fires, we send a cancel to the target. The second timer is the
   4577 * timer on the ibmvfc event for the ADISC, which is longer. If that
   4578 * fires, it means the ADISC timed out and our attempt to cancel it
   4579 * also failed, so we need to reset the CRQ.
   4580 **/
   4581static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
   4582{
   4583	struct ibmvfc_passthru_mad *mad;
   4584	struct ibmvfc_host *vhost = tgt->vhost;
   4585	struct ibmvfc_event *evt;
   4586
   4587	if (vhost->discovery_threads >= disc_threads)
   4588		return;
   4589
   4590	kref_get(&tgt->kref);
   4591	evt = ibmvfc_get_event(&vhost->crq);
   4592	vhost->discovery_threads++;
   4593	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
   4594	evt->tgt = tgt;
   4595
   4596	ibmvfc_init_passthru(evt);
   4597	mad = &evt->iu.passthru;
   4598	mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
   4599	mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
   4600	mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
   4601
   4602	mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
   4603	memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
   4604	       sizeof(vhost->login_buf->resp.port_name));
   4605	memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
   4606	       sizeof(vhost->login_buf->resp.node_name));
   4607	mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);
   4608
   4609	if (timer_pending(&tgt->timer))
   4610		mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
   4611	else {
   4612		tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
   4613		add_timer(&tgt->timer);
   4614	}
   4615
   4616	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
   4617	if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
   4618		vhost->discovery_threads--;
   4619		del_timer(&tgt->timer);
   4620		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
   4621		kref_put(&tgt->kref, ibmvfc_release_tgt);
   4622	} else
   4623		tgt_dbg(tgt, "Sent ADISC\n");
   4624}
   4625
   4626/**
   4627 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
   4628 * @evt:	ibmvfc event struct
   4629 *
   4630 **/
   4631static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
   4632{
   4633	struct ibmvfc_target *tgt = evt->tgt;
   4634	struct ibmvfc_host *vhost = evt->vhost;
   4635	struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
   4636	u32 status = be16_to_cpu(rsp->common.status);
   4637	int level = IBMVFC_DEFAULT_LOG_LEVEL;
   4638
   4639	vhost->discovery_threads--;
   4640	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
   4641	switch (status) {
   4642	case IBMVFC_MAD_SUCCESS:
   4643		tgt_dbg(tgt, "Query Target succeeded\n");
   4644		if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
   4645			ibmvfc_del_tgt(tgt);
   4646		else
   4647			ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
   4648		break;
   4649	case IBMVFC_MAD_DRIVER_FAILED:
   4650		break;
   4651	case IBMVFC_MAD_CRQ_ERROR:
   4652		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
   4653		break;
   4654	case IBMVFC_MAD_FAILED:
   4655	default:
   4656		if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
   4657		    be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
   4658		    be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
   4659			ibmvfc_del_tgt(tgt);
   4660		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
   4661			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
   4662		else
   4663			ibmvfc_del_tgt(tgt);
   4664
   4665		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
   4666			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
   4667			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
   4668			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
   4669			ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
   4670			status);
   4671		break;
   4672	}
   4673
   4674	kref_put(&tgt->kref, ibmvfc_release_tgt);
   4675	ibmvfc_free_event(evt);
   4676	wake_up(&vhost->work_wait_q);
   4677}
   4678
   4679/**
   4680 * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
   4681 * @tgt:	ibmvfc target struct
   4682 *
   4683 **/
   4684static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
   4685{
   4686	struct ibmvfc_query_tgt *query_tgt;
   4687	struct ibmvfc_host *vhost = tgt->vhost;
   4688	struct ibmvfc_event *evt;
   4689
   4690	if (vhost->discovery_threads >= disc_threads)
   4691		return;
   4692
   4693	kref_get(&tgt->kref);
   4694	evt = ibmvfc_get_event(&vhost->crq);
   4695	vhost->discovery_threads++;
   4696	evt->tgt = tgt;
   4697	ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
   4698	query_tgt = &evt->iu.query_tgt;
   4699	memset(query_tgt, 0, sizeof(*query_tgt));
   4700	query_tgt->common.version = cpu_to_be32(1);
   4701	query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
   4702	query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
   4703	query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
   4704
   4705	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
   4706	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
   4707		vhost->discovery_threads--;
   4708		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
   4709		kref_put(&tgt->kref, ibmvfc_release_tgt);
   4710	} else
   4711		tgt_dbg(tgt, "Sent Query Target\n");
   4712}
   4713
   4714/**
   4715 * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
   4716 * @vhost:		ibmvfc host struct
    4717 * @target:		Holds the SCSI ID to allocate the target for and the WWPN
   4718 *
   4719 * Returns:
   4720 *	0 on success / other on failure
   4721 **/
   4722static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
   4723			       struct ibmvfc_discover_targets_entry *target)
   4724{
   4725	struct ibmvfc_target *stgt = NULL;
   4726	struct ibmvfc_target *wtgt = NULL;
   4727	struct ibmvfc_target *tgt;
   4728	unsigned long flags;
   4729	u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
   4730	u64 wwpn = be64_to_cpu(target->wwpn);
   4731
   4732	/* Look to see if we already have a target allocated for this SCSI ID or WWPN */
   4733	spin_lock_irqsave(vhost->host->host_lock, flags);
   4734	list_for_each_entry(tgt, &vhost->targets, queue) {
   4735		if (tgt->wwpn == wwpn) {
   4736			wtgt = tgt;
   4737			break;
   4738		}
   4739	}
   4740
   4741	list_for_each_entry(tgt, &vhost->targets, queue) {
   4742		if (tgt->scsi_id == scsi_id) {
   4743			stgt = tgt;
   4744			break;
   4745		}
   4746	}
   4747
   4748	if (wtgt && !stgt) {
   4749		/*
   4750		 * A WWPN target has moved and we still are tracking the old
   4751		 * SCSI ID.  The only way we should be able to get here is if
   4752		 * we attempted to send an implicit logout for the old SCSI ID
   4753		 * and it failed for some reason, such as there being I/O
   4754		 * pending to the target. In this case, we will have already
   4755		 * deleted the rport from the FC transport so we do a move
    4756		 * login, which works even with I/O pending. However, if
    4757		 * there is still I/O pending, it will stay outstanding, so
    4758		 * we only do this if fast fail is disabled for the rport;
    4759		 * otherwise we let terminate_rport_io clean up the port
    4760		 * before we log in at the new location.
   4761		 */
   4762		if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
   4763			if (wtgt->move_login) {
   4764				/*
   4765				 * Do a move login here. The old target is no longer
    4766				 * known to the transport layer. We don't use the
   4767				 * normal ibmvfc_set_tgt_action to set this, as we
   4768				 * don't normally want to allow this state change.
   4769				 */
   4770				wtgt->new_scsi_id = scsi_id;
   4771				wtgt->action = IBMVFC_TGT_ACTION_INIT;
   4772				wtgt->init_retries = 0;
   4773				ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
   4774			}
   4775			goto unlock_out;
   4776		} else {
   4777			tgt_err(wtgt, "Unexpected target state: %d, %p\n",
   4778				wtgt->action, wtgt->rport);
   4779		}
   4780	} else if (stgt) {
    4781		if (stgt->need_login)
    4782			ibmvfc_init_tgt(stgt, ibmvfc_tgt_implicit_logout);
   4783		goto unlock_out;
   4784	}
   4785	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   4786
   4787	tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
   4788	memset(tgt, 0, sizeof(*tgt));
   4789	tgt->scsi_id = scsi_id;
   4790	tgt->wwpn = wwpn;
   4791	tgt->vhost = vhost;
   4792	tgt->need_login = 1;
   4793	timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
   4794	kref_init(&tgt->kref);
   4795	ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
   4796	spin_lock_irqsave(vhost->host->host_lock, flags);
   4797	tgt->cancel_key = vhost->task_set++;
   4798	list_add_tail(&tgt->queue, &vhost->targets);
   4799
   4800unlock_out:
   4801	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   4802	return 0;
   4803}
   4804
   4805/**
   4806 * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
   4807 * @vhost:		ibmvfc host struct
   4808 *
   4809 * Returns:
   4810 *	0 on success / other on failure
   4811 **/
   4812static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
   4813{
   4814	int i, rc;
   4815
   4816	for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
   4817		rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
   4818
   4819	return rc;
   4820}
   4821
   4822/**
   4823 * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
   4824 * @evt:	ibmvfc event struct
   4825 *
   4826 **/
   4827static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
   4828{
   4829	struct ibmvfc_host *vhost = evt->vhost;
   4830	struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
   4831	u32 mad_status = be16_to_cpu(rsp->common.status);
   4832	int level = IBMVFC_DEFAULT_LOG_LEVEL;
   4833
   4834	switch (mad_status) {
   4835	case IBMVFC_MAD_SUCCESS:
   4836		ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
   4837		vhost->num_targets = be32_to_cpu(rsp->num_written);
   4838		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
   4839		break;
   4840	case IBMVFC_MAD_FAILED:
   4841		level += ibmvfc_retry_host_init(vhost);
   4842		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
   4843			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
   4844			   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
   4845		break;
   4846	case IBMVFC_MAD_DRIVER_FAILED:
   4847		break;
   4848	default:
   4849		dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
   4850		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   4851		break;
   4852	}
   4853
   4854	ibmvfc_free_event(evt);
   4855	wake_up(&vhost->work_wait_q);
   4856}
   4857
   4858/**
   4859 * ibmvfc_discover_targets - Send Discover Targets MAD
   4860 * @vhost:	ibmvfc host struct
   4861 *
   4862 **/
   4863static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
   4864{
   4865	struct ibmvfc_discover_targets *mad;
   4866	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
   4867
   4868	ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
   4869	mad = &evt->iu.discover_targets;
   4870	memset(mad, 0, sizeof(*mad));
   4871	mad->common.version = cpu_to_be32(1);
   4872	mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
   4873	mad->common.length = cpu_to_be16(sizeof(*mad));
   4874	mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
   4875	mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
   4876	mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
   4877	mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
   4878	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
   4879
   4880	if (!ibmvfc_send_event(evt, vhost, default_timeout))
   4881		ibmvfc_dbg(vhost, "Sent discover targets\n");
   4882	else
   4883		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   4884}
   4885
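        /**
         * ibmvfc_channel_setup_done - Completion handler for Channel Setup MAD
         * @evt:	ibmvfc event struct
         *
         **/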
   4886static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
   4887{
   4888	struct ibmvfc_host *vhost = evt->vhost;
   4889	struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
   4890	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
   4891	u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
   4892	int level = IBMVFC_DEFAULT_LOG_LEVEL;
   4893	int flags, active_queues, i;
   4894
   4895	ibmvfc_free_event(evt);
   4896
   4897	switch (mad_status) {
   4898	case IBMVFC_MAD_SUCCESS:
   4899		ibmvfc_dbg(vhost, "Channel Setup succeeded\n");
   4900		flags = be32_to_cpu(setup->flags);
   4901		vhost->do_enquiry = 0;
   4902		active_queues = be32_to_cpu(setup->num_scsi_subq_channels);
   4903		scrqs->active_queues = active_queues;
   4904
   4905		if (flags & IBMVFC_CHANNELS_CANCELED) {
   4906			ibmvfc_dbg(vhost, "Channels Canceled\n");
   4907			vhost->using_channels = 0;
   4908		} else {
   4909			if (active_queues)
   4910				vhost->using_channels = 1;
   4911			for (i = 0; i < active_queues; i++)
   4912				scrqs->scrqs[i].vios_cookie =
   4913					be64_to_cpu(setup->channel_handles[i]);
   4914
   4915			ibmvfc_dbg(vhost, "Using %u channels\n",
   4916				   vhost->scsi_scrqs.active_queues);
   4917		}
   4918		break;
   4919	case IBMVFC_MAD_FAILED:
   4920		level += ibmvfc_retry_host_init(vhost);
   4921		ibmvfc_log(vhost, level, "Channel Setup failed\n");
   4922		fallthrough;
   4923	case IBMVFC_MAD_DRIVER_FAILED:
   4924		return;
   4925	default:
   4926		dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n",
   4927			mad_status);
   4928		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   4929		return;
   4930	}
   4931
   4932	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
   4933	wake_up(&vhost->work_wait_q);
   4934}
   4935
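        /**
         * ibmvfc_channel_setup - Send Channel Setup MAD
         * @vhost:	ibmvfc host struct
         *
         **/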
   4936static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
   4937{
   4938	struct ibmvfc_channel_setup_mad *mad;
   4939	struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
   4940	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
   4941	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
   4942	unsigned int num_channels =
   4943		min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
   4944	int i;
   4945
   4946	memset(setup_buf, 0, sizeof(*setup_buf));
   4947	if (num_channels == 0)
   4948		setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
   4949	else {
   4950		setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
   4951		for (i = 0; i < num_channels; i++)
   4952			setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
   4953	}
   4954
   4955	ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
   4956	mad = &evt->iu.channel_setup;
   4957	memset(mad, 0, sizeof(*mad));
   4958	mad->common.version = cpu_to_be32(1);
   4959	mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP);
   4960	mad->common.length = cpu_to_be16(sizeof(*mad));
   4961	mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma);
   4962	mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf));
   4963
   4964	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
   4965
   4966	if (!ibmvfc_send_event(evt, vhost, default_timeout))
   4967		ibmvfc_dbg(vhost, "Sent channel setup\n");
   4968	else
   4969		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
   4970}
   4971
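        /**
         * ibmvfc_channel_enquiry_done - Completion handler for Channel Enquiry MAD
         * @evt:	ibmvfc event struct
         *
         **/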
   4972static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
   4973{
   4974	struct ibmvfc_host *vhost = evt->vhost;
   4975	struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
   4976	u32 mad_status = be16_to_cpu(rsp->common.status);
   4977	int level = IBMVFC_DEFAULT_LOG_LEVEL;
   4978
   4979	switch (mad_status) {
   4980	case IBMVFC_MAD_SUCCESS:
   4981		ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n");
   4982		vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels);
   4983		ibmvfc_free_event(evt);
   4984		break;
   4985	case IBMVFC_MAD_FAILED:
   4986		level += ibmvfc_retry_host_init(vhost);
   4987		ibmvfc_log(vhost, level, "Channel Enquiry failed\n");
   4988		fallthrough;
   4989	case IBMVFC_MAD_DRIVER_FAILED:
   4990		ibmvfc_free_event(evt);
   4991		return;
   4992	default:
   4993		dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n",
   4994			mad_status);
   4995		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   4996		ibmvfc_free_event(evt);
   4997		return;
   4998	}
   4999
   5000	ibmvfc_channel_setup(vhost);
   5001}
   5002
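        /**
         * ibmvfc_channel_enquiry - Send Channel Enquiry MAD
         * @vhost:	ibmvfc host struct
         *
         **/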
   5003static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
   5004{
   5005	struct ibmvfc_channel_enquiry *mad;
   5006	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
   5007
   5008	ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
   5009	mad = &evt->iu.channel_enquiry;
   5010	memset(mad, 0, sizeof(*mad));
   5011	mad->common.version = cpu_to_be32(1);
   5012	mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY);
   5013	mad->common.length = cpu_to_be16(sizeof(*mad));
   5014
   5015	if (mig_channels_only)
   5016		mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT);
   5017	if (mig_no_less_channels)
   5018		mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT);
   5019
   5020	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
   5021
   5022	if (!ibmvfc_send_event(evt, vhost, default_timeout))
    5023		ibmvfc_dbg(vhost, "Sent channel enquiry\n");
   5024	else
   5025		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   5026}
   5027
   5028/**
   5029 * ibmvfc_npiv_login_done - Completion handler for NPIV Login
   5030 * @evt:	ibmvfc event struct
   5031 *
   5032 **/
   5033static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
   5034{
   5035	struct ibmvfc_host *vhost = evt->vhost;
   5036	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
   5037	struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
   5038	unsigned int npiv_max_sectors;
   5039	int level = IBMVFC_DEFAULT_LOG_LEVEL;
   5040
   5041	switch (mad_status) {
   5042	case IBMVFC_MAD_SUCCESS:
   5043		ibmvfc_free_event(evt);
   5044		break;
   5045	case IBMVFC_MAD_FAILED:
   5046		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
   5047			level += ibmvfc_retry_host_init(vhost);
   5048		else
   5049			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   5050		ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
   5051			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
   5052						be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
   5053		ibmvfc_free_event(evt);
   5054		return;
   5055	case IBMVFC_MAD_CRQ_ERROR:
   5056		ibmvfc_retry_host_init(vhost);
   5057		fallthrough;
   5058	case IBMVFC_MAD_DRIVER_FAILED:
   5059		ibmvfc_free_event(evt);
   5060		return;
   5061	default:
   5062		dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
   5063		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   5064		ibmvfc_free_event(evt);
   5065		return;
   5066	}
   5067
   5068	vhost->client_migrated = 0;
   5069
   5070	if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
   5071		dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
    5072			be32_to_cpu(rsp->flags));
   5073		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   5074		wake_up(&vhost->work_wait_q);
   5075		return;
   5076	}
   5077
   5078	if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
   5079		dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
    5080			be32_to_cpu(rsp->max_cmds));
   5081		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   5082		wake_up(&vhost->work_wait_q);
   5083		return;
   5084	}
   5085
   5086	vhost->logged_in = 1;
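        	/* max_dma_len is in bytes; convert to 512-byte sectors and clamp */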
   5087	npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
   5088	dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
   5089		 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
   5090		 rsp->drc_name, npiv_max_sectors);
   5091
   5092	fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
   5093	fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
   5094	fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
   5095	fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
   5096	fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
   5097	fc_host_supported_classes(vhost->host) = 0;
   5098	if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
   5099		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
   5100	if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
   5101		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
   5102	if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
   5103		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
   5104	fc_host_maxframe_size(vhost->host) =
   5105		be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
   5106
   5107	vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
   5108	vhost->host->max_sectors = npiv_max_sectors;
   5109
   5110	if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
   5111		ibmvfc_channel_enquiry(vhost);
   5112	} else {
   5113		vhost->do_enquiry = 0;
   5114		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
   5115		wake_up(&vhost->work_wait_q);
   5116	}
   5117}
   5118
   5119/**
   5120 * ibmvfc_npiv_login - Sends NPIV login
   5121 * @vhost:	ibmvfc host struct
   5122 *
   5123 **/
   5124static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
   5125{
   5126	struct ibmvfc_npiv_login_mad *mad;
   5127	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
   5128
   5129	ibmvfc_gather_partition_info(vhost);
   5130	ibmvfc_set_login_info(vhost);
   5131	ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
   5132
   5133	memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
   5134	mad = &evt->iu.npiv_login;
   5135	memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
   5136	mad->common.version = cpu_to_be32(1);
   5137	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
   5138	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
   5139	mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
   5140	mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
   5141
   5142	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
   5143
   5144	if (!ibmvfc_send_event(evt, vhost, default_timeout))
   5145		ibmvfc_dbg(vhost, "Sent NPIV login\n");
   5146	else
   5147		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   5148}
   5149
   5150/**
   5151 * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
   5152 * @evt:		ibmvfc event struct
   5153 *
   5154 **/
   5155static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
   5156{
   5157	struct ibmvfc_host *vhost = evt->vhost;
   5158	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
   5159
   5160	ibmvfc_free_event(evt);
   5161
   5162	switch (mad_status) {
   5163	case IBMVFC_MAD_SUCCESS:
   5164		if (list_empty(&vhost->crq.sent) &&
   5165		    vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
   5166			ibmvfc_init_host(vhost);
   5167			return;
   5168		}
   5169		break;
   5170	case IBMVFC_MAD_FAILED:
   5171	case IBMVFC_MAD_NOT_SUPPORTED:
   5172	case IBMVFC_MAD_CRQ_ERROR:
   5173	case IBMVFC_MAD_DRIVER_FAILED:
   5174	default:
   5175		ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
   5176		break;
   5177	}
   5178
   5179	ibmvfc_hard_reset_host(vhost);
   5180}
   5181
   5182/**
   5183 * ibmvfc_npiv_logout - Issue an NPIV Logout
   5184 * @vhost:		ibmvfc host struct
   5185 *
   5186 **/
   5187static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
   5188{
   5189	struct ibmvfc_npiv_logout_mad *mad;
   5190	struct ibmvfc_event *evt;
   5191
   5192	evt = ibmvfc_get_event(&vhost->crq);
   5193	ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
   5194
   5195	mad = &evt->iu.npiv_logout;
   5196	memset(mad, 0, sizeof(*mad));
   5197	mad->common.version = cpu_to_be32(1);
   5198	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
   5199	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
   5200
   5201	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
   5202
   5203	if (!ibmvfc_send_event(evt, vhost, default_timeout))
   5204		ibmvfc_dbg(vhost, "Sent NPIV logout\n");
   5205	else
   5206		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   5207}
   5208
   5209/**
   5210 * ibmvfc_dev_init_to_do - Is there target initialization work to do?
   5211 * @vhost:		ibmvfc host struct
   5212 *
   5213 * Returns:
   5214 *	1 if work to do / 0 if not
   5215 **/
   5216static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
   5217{
   5218	struct ibmvfc_target *tgt;
   5219
   5220	list_for_each_entry(tgt, &vhost->targets, queue) {
   5221		if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
   5222		    tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
   5223			return 1;
   5224	}
   5225
   5226	return 0;
   5227}
   5228
   5229/**
   5230 * ibmvfc_dev_logo_to_do - Is there target logout work to do?
   5231 * @vhost:		ibmvfc host struct
   5232 *
   5233 * Returns:
   5234 *	1 if work to do / 0 if not
   5235 **/
   5236static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
   5237{
   5238	struct ibmvfc_target *tgt;
   5239
   5240	list_for_each_entry(tgt, &vhost->targets, queue) {
   5241		if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
   5242		    tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
   5243			return 1;
   5244	}
   5245	return 0;
   5246}
   5247
   5248/**
   5249 * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
   5250 * @vhost:		ibmvfc host struct
   5251 *
   5252 * Returns:
   5253 *	1 if work to do / 0 if not
   5254 **/
   5255static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
   5256{
   5257	struct ibmvfc_target *tgt;
   5258
   5259	if (kthread_should_stop())
   5260		return 1;
   5261	switch (vhost->action) {
   5262	case IBMVFC_HOST_ACTION_NONE:
   5263	case IBMVFC_HOST_ACTION_INIT_WAIT:
   5264	case IBMVFC_HOST_ACTION_LOGO_WAIT:
   5265		return 0;
   5266	case IBMVFC_HOST_ACTION_TGT_INIT:
   5267	case IBMVFC_HOST_ACTION_QUERY_TGTS:
   5268		if (vhost->discovery_threads == disc_threads)
   5269			return 0;
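        		/*
        		 * There is work if any target still needs its init step
        		 * started. If the only remaining targets are waiting on
        		 * outstanding responses, there is nothing to do yet; if
        		 * none are left in either state, the host state machine
        		 * needs to advance.
        		 */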
   5270		list_for_each_entry(tgt, &vhost->targets, queue)
   5271			if (tgt->action == IBMVFC_TGT_ACTION_INIT)
   5272				return 1;
   5273		list_for_each_entry(tgt, &vhost->targets, queue)
   5274			if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
   5275				return 0;
   5276		return 1;
   5277	case IBMVFC_HOST_ACTION_TGT_DEL:
   5278	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
   5279		if (vhost->discovery_threads == disc_threads)
   5280			return 0;
   5281		list_for_each_entry(tgt, &vhost->targets, queue)
   5282			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
   5283				return 1;
   5284		list_for_each_entry(tgt, &vhost->targets, queue)
   5285			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
   5286				return 0;
   5287		return 1;
   5288	case IBMVFC_HOST_ACTION_LOGO:
   5289	case IBMVFC_HOST_ACTION_INIT:
   5290	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
   5291	case IBMVFC_HOST_ACTION_QUERY:
   5292	case IBMVFC_HOST_ACTION_RESET:
   5293	case IBMVFC_HOST_ACTION_REENABLE:
   5294	default:
   5295		break;
   5296	}
   5297
   5298	return 1;
   5299}
   5300
   5301/**
   5302 * ibmvfc_work_to_do - Is there task level work to do?
   5303 * @vhost:		ibmvfc host struct
   5304 *
   5305 * Returns:
   5306 *	1 if work to do / 0 if not
   5307 **/
   5308static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
   5309{
   5310	unsigned long flags;
   5311	int rc;
   5312
   5313	spin_lock_irqsave(vhost->host->host_lock, flags);
   5314	rc = __ibmvfc_work_to_do(vhost);
   5315	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5316	return rc;
   5317}
   5318
   5319/**
   5320 * ibmvfc_log_ae - Log async events if necessary
   5321 * @vhost:		ibmvfc host struct
   5322 * @events:		events to log
   5323 *
   5324 **/
   5325static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
   5326{
   5327	if (events & IBMVFC_AE_RSCN)
   5328		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
   5329	if ((events & IBMVFC_AE_LINKDOWN) &&
   5330	    vhost->state >= IBMVFC_HALTED)
   5331		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
   5332	if ((events & IBMVFC_AE_LINKUP) &&
   5333	    vhost->state == IBMVFC_INITIALIZING)
   5334		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
   5335}
   5336
   5337/**
   5338 * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
   5339 * @tgt:		ibmvfc target struct
   5340 *
   5341 **/
   5342static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
   5343{
   5344	struct ibmvfc_host *vhost = tgt->vhost;
   5345	struct fc_rport *rport;
   5346	unsigned long flags;
   5347
   5348	tgt_dbg(tgt, "Adding rport\n");
   5349	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
   5350	spin_lock_irqsave(vhost->host->host_lock, flags);
   5351
   5352	if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
   5353		tgt_dbg(tgt, "Deleting rport\n");
   5354		list_del(&tgt->queue);
   5355		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
   5356		spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5357		fc_remote_port_delete(rport);
   5358		del_timer_sync(&tgt->timer);
   5359		kref_put(&tgt->kref, ibmvfc_release_tgt);
   5360		return;
   5361	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
   5362		tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
   5363		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
   5364		tgt->rport = NULL;
   5365		tgt->init_retries = 0;
   5366		spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5367		fc_remote_port_delete(rport);
   5368		return;
   5369	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
   5370		spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5371		return;
   5372	}
   5373
   5374	if (rport) {
   5375		tgt_dbg(tgt, "rport add succeeded\n");
   5376		tgt->rport = rport;
   5377		rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
   5378		rport->supported_classes = 0;
   5379		tgt->target_id = rport->scsi_target_id;
   5380		if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
   5381			rport->supported_classes |= FC_COS_CLASS1;
   5382		if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
   5383			rport->supported_classes |= FC_COS_CLASS2;
   5384		if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
   5385			rport->supported_classes |= FC_COS_CLASS3;
   5386		if (rport->rqst_q)
   5387			blk_queue_max_segments(rport->rqst_q, 1);
   5388	} else
   5389		tgt_dbg(tgt, "rport add failed\n");
   5390	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5391}
   5392
   5393/**
   5394 * ibmvfc_do_work - Do task level work
   5395 * @vhost:		ibmvfc host struct
   5396 *
   5397 **/
   5398static void ibmvfc_do_work(struct ibmvfc_host *vhost)
   5399{
   5400	struct ibmvfc_target *tgt;
   5401	unsigned long flags;
   5402	struct fc_rport *rport;
   5403	LIST_HEAD(purge);
   5404	int rc;
   5405
   5406	ibmvfc_log_ae(vhost, vhost->events_to_log);
   5407	spin_lock_irqsave(vhost->host->host_lock, flags);
   5408	vhost->events_to_log = 0;
   5409	switch (vhost->action) {
   5410	case IBMVFC_HOST_ACTION_NONE:
   5411	case IBMVFC_HOST_ACTION_LOGO_WAIT:
   5412	case IBMVFC_HOST_ACTION_INIT_WAIT:
   5413		break;
   5414	case IBMVFC_HOST_ACTION_RESET:
   5415		list_splice_init(&vhost->purge, &purge);
   5416		spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5417		ibmvfc_complete_purge(&purge);
   5418		rc = ibmvfc_reset_crq(vhost);
   5419
   5420		spin_lock_irqsave(vhost->host->host_lock, flags);
   5421		if (!rc || rc == H_CLOSED)
   5422			vio_enable_interrupts(to_vio_dev(vhost->dev));
   5423		if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
   5424			/*
   5425			 * The only action we could have changed to would have
   5426			 * been reenable, in which case, we skip the rest of
   5427			 * this path and wait until we've done the re-enable
   5428			 * before sending the crq init.
   5429			 */
   5430			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
   5431
   5432			if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
   5433			    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
   5434				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   5435				dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
   5436			}
   5437		}
   5438		break;
   5439	case IBMVFC_HOST_ACTION_REENABLE:
   5440		list_splice_init(&vhost->purge, &purge);
   5441		spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5442		ibmvfc_complete_purge(&purge);
   5443		rc = ibmvfc_reenable_crq_queue(vhost);
   5444
   5445		spin_lock_irqsave(vhost->host->host_lock, flags);
   5446		if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
   5447			/*
   5448			 * The only action we could have changed to would have
   5449			 * been reset, in which case, we skip the rest of this
   5450			 * path and wait until we've done the reset before
   5451			 * sending the crq init.
   5452			 */
   5453			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
   5454			if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
   5455				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
   5456				dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
   5457			}
   5458		}
   5459		break;
   5460	case IBMVFC_HOST_ACTION_LOGO:
   5461		vhost->job_step(vhost);
   5462		break;
   5463	case IBMVFC_HOST_ACTION_INIT:
   5464		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
   5465		if (vhost->delay_init) {
   5466			vhost->delay_init = 0;
   5467			spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5468			ssleep(15);
   5469			return;
   5470		} else
   5471			vhost->job_step(vhost);
   5472		break;
   5473	case IBMVFC_HOST_ACTION_QUERY:
   5474		list_for_each_entry(tgt, &vhost->targets, queue)
   5475			ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
   5476		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
   5477		break;
   5478	case IBMVFC_HOST_ACTION_QUERY_TGTS:
   5479		list_for_each_entry(tgt, &vhost->targets, queue) {
   5480			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
   5481				tgt->job_step(tgt);
   5482				break;
   5483			}
   5484		}
   5485
   5486		if (!ibmvfc_dev_init_to_do(vhost))
   5487			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
   5488		break;
   5489	case IBMVFC_HOST_ACTION_TGT_DEL:
   5490	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
   5491		list_for_each_entry(tgt, &vhost->targets, queue) {
   5492			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
   5493				tgt->job_step(tgt);
   5494				break;
   5495			}
   5496		}
   5497
   5498		if (ibmvfc_dev_logo_to_do(vhost)) {
   5499			spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5500			return;
   5501		}
   5502
   5503		list_for_each_entry(tgt, &vhost->targets, queue) {
   5504			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
   5505				tgt_dbg(tgt, "Deleting rport\n");
   5506				rport = tgt->rport;
   5507				tgt->rport = NULL;
   5508				list_del(&tgt->queue);
   5509				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
   5510				spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5511				if (rport)
   5512					fc_remote_port_delete(rport);
   5513				del_timer_sync(&tgt->timer);
   5514				kref_put(&tgt->kref, ibmvfc_release_tgt);
   5515				return;
   5516			} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
   5517				tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
   5518				rport = tgt->rport;
   5519				tgt->rport = NULL;
   5520				tgt->init_retries = 0;
   5521				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
   5522
   5523				/*
   5524				 * If fast fail is enabled, we wait for it to fire and then clean up
   5525				 * the old port, since we expect the fast fail timer to clean up the
   5526				 * outstanding I/O faster than waiting for normal command timeouts.
   5527				 * However, if fast fail is disabled, any I/O outstanding to the
   5528				 * rport LUNs will stay outstanding indefinitely, since the EH handlers
    5529				 * won't get invoked for I/Os timing out. If this is an NPIV failover
   5530				 * scenario, the better alternative is to use the move login.
   5531				 */
   5532				if (rport && rport->fast_io_fail_tmo == -1)
   5533					tgt->move_login = 1;
   5534				spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5535				if (rport)
   5536					fc_remote_port_delete(rport);
   5537				return;
   5538			}
   5539		}
   5540
   5541		if (vhost->state == IBMVFC_INITIALIZING) {
   5542			if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
   5543				if (vhost->reinit) {
   5544					vhost->reinit = 0;
   5545					scsi_block_requests(vhost->host);
   5546					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
   5547					spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5548				} else {
   5549					ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
   5550					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
   5551					wake_up(&vhost->init_wait_q);
   5552					schedule_work(&vhost->rport_add_work_q);
   5553					vhost->init_retries = 0;
   5554					spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5555					scsi_unblock_requests(vhost->host);
   5556				}
   5557
   5558				return;
   5559			} else {
   5560				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
   5561				vhost->job_step = ibmvfc_discover_targets;
   5562			}
   5563		} else {
   5564			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
   5565			spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5566			scsi_unblock_requests(vhost->host);
   5567			wake_up(&vhost->init_wait_q);
   5568			return;
   5569		}
   5570		break;
   5571	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
   5572		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
   5573		spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5574		ibmvfc_alloc_targets(vhost);
   5575		spin_lock_irqsave(vhost->host->host_lock, flags);
   5576		break;
   5577	case IBMVFC_HOST_ACTION_TGT_INIT:
   5578		list_for_each_entry(tgt, &vhost->targets, queue) {
   5579			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
   5580				tgt->job_step(tgt);
   5581				break;
   5582			}
   5583		}
   5584
   5585		if (!ibmvfc_dev_init_to_do(vhost))
   5586			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
   5587		break;
   5588	default:
   5589		break;
   5590	}
   5591
   5592	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   5593}
   5594
   5595/**
   5596 * ibmvfc_work - Do task level work
   5597 * @data:		ibmvfc host struct
   5598 *
   5599 * Returns:
   5600 *	zero
   5601 **/
   5602static int ibmvfc_work(void *data)
   5603{
   5604	struct ibmvfc_host *vhost = data;
   5605	int rc;
   5606
   5607	set_user_nice(current, MIN_NICE);
   5608
   5609	while (1) {
   5610		rc = wait_event_interruptible(vhost->work_wait_q,
   5611					      ibmvfc_work_to_do(vhost));
   5612
   5613		BUG_ON(rc);
   5614
   5615		if (kthread_should_stop())
   5616			break;
   5617
   5618		ibmvfc_do_work(vhost);
   5619	}
   5620
   5621	ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
   5622	return 0;
   5623}
   5624
   5625/**
   5626 * ibmvfc_alloc_queue - Allocate queue
   5627 * @vhost:	ibmvfc host struct
   5628 * @queue:	ibmvfc queue to allocate
   5629 * @fmt:	queue format to allocate
   5630 *
   5631 * Returns:
   5632 *	0 on success / non-zero on failure
   5633 **/
   5634static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
   5635			      struct ibmvfc_queue *queue,
   5636			      enum ibmvfc_msg_fmt fmt)
   5637{
   5638	struct device *dev = vhost->dev;
   5639	size_t fmt_size;
   5640	unsigned int pool_size = 0;
   5641
   5642	ENTER;
   5643	spin_lock_init(&queue->_lock);
   5644	queue->q_lock = &queue->_lock;
   5645
   5646	switch (fmt) {
   5647	case IBMVFC_CRQ_FMT:
   5648		fmt_size = sizeof(*queue->msgs.crq);
   5649		pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
   5650		break;
   5651	case IBMVFC_ASYNC_FMT:
   5652		fmt_size = sizeof(*queue->msgs.async);
   5653		break;
   5654	case IBMVFC_SUB_CRQ_FMT:
   5655		fmt_size = sizeof(*queue->msgs.scrq);
   5656		/* We need one extra event for Cancel Commands */
   5657		pool_size = max_requests + 1;
   5658		break;
   5659	default:
   5660		dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
   5661		return -EINVAL;
   5662	}
   5663
   5664	if (ibmvfc_init_event_pool(vhost, queue, pool_size)) {
   5665		dev_err(dev, "Couldn't initialize event pool.\n");
   5666		return -ENOMEM;
   5667	}
   5668
   5669	queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
   5670	if (!queue->msgs.handle)
   5671		return -ENOMEM;
   5672
   5673	queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
   5674					  DMA_BIDIRECTIONAL);
   5675
   5676	if (dma_mapping_error(dev, queue->msg_token)) {
   5677		free_page((unsigned long)queue->msgs.handle);
   5678		queue->msgs.handle = NULL;
   5679		return -ENOMEM;
   5680	}
   5681
   5682	queue->cur = 0;
   5683	queue->fmt = fmt;
   5684	queue->size = PAGE_SIZE / fmt_size;
   5685
   5686	queue->vhost = vhost;
   5687	return 0;
   5688}
   5689
   5690/**
   5691 * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
   5692 * @vhost:	ibmvfc host struct
   5693 *
   5694 * Allocates a page for messages, maps it for dma, and registers
   5695 * the crq with the hypervisor.
   5696 *
   5697 * Return value:
   5698 *	zero on success / other on failure
   5699 **/
   5700static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
   5701{
   5702	int rc, retrc = -ENOMEM;
   5703	struct device *dev = vhost->dev;
   5704	struct vio_dev *vdev = to_vio_dev(dev);
   5705	struct ibmvfc_queue *crq = &vhost->crq;
   5706
   5707	ENTER;
   5708	if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
   5709		return -ENOMEM;
   5710
   5711	retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
   5712					crq->msg_token, PAGE_SIZE);
   5713
   5714	if (rc == H_RESOURCE)
    5715		/* maybe kexecing and resource is busy; try a reset */
   5716		retrc = rc = ibmvfc_reset_crq(vhost);
   5717
   5718	if (rc == H_CLOSED)
   5719		dev_warn(dev, "Partner adapter not ready\n");
   5720	else if (rc) {
   5721		dev_warn(dev, "Error %d opening adapter\n", rc);
   5722		goto reg_crq_failed;
   5723	}
   5724
   5725	retrc = 0;
   5726
   5727	tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
   5728
   5729	if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
   5730		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
   5731		goto req_irq_failed;
   5732	}
   5733
   5734	if ((rc = vio_enable_interrupts(vdev))) {
   5735		dev_err(dev, "Error %d enabling interrupts\n", rc);
   5736		goto req_irq_failed;
   5737	}
   5738
   5739	LEAVE;
   5740	return retrc;
   5741
   5742req_irq_failed:
   5743	tasklet_kill(&vhost->tasklet);
   5744	do {
   5745		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
   5746	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
   5747reg_crq_failed:
   5748	ibmvfc_free_queue(vhost, crq);
   5749	return retrc;
   5750}
   5751
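        /**
         * ibmvfc_register_scsi_channel - Register a SCSI sub-CRQ with the hypervisor
         * @vhost:	ibmvfc host struct
         * @index:	index of the sub-CRQ to register
         *
         * Returns:
         *	0 on success / other on failure
         **/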
   5752static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
   5753				  int index)
   5754{
   5755	struct device *dev = vhost->dev;
   5756	struct vio_dev *vdev = to_vio_dev(dev);
   5757	struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
   5758	int rc = -ENOMEM;
   5759
   5760	ENTER;
   5761
   5762	rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
   5763			   &scrq->cookie, &scrq->hw_irq);
   5764
   5765	/* H_CLOSED indicates successful register, but no CRQ partner */
   5766	if (rc && rc != H_CLOSED) {
   5767		dev_warn(dev, "Error registering sub-crq: %d\n", rc);
   5768		if (rc == H_PARAMETER)
   5769			dev_warn_once(dev, "Firmware may not support MQ\n");
   5770		goto reg_failed;
   5771	}
   5772
   5773	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
   5774
   5775	if (!scrq->irq) {
   5776		rc = -EINVAL;
   5777		dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
   5778		goto irq_failed;
   5779	}
   5780
   5781	snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
   5782		 vdev->unit_address, index);
   5783	rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, scrq->name, scrq);
   5784
   5785	if (rc) {
   5786		dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
   5787		irq_dispose_mapping(scrq->irq);
   5788		goto irq_failed;
   5789	}
   5790
   5791	scrq->hwq_id = index;
   5792
   5793	LEAVE;
   5794	return 0;
   5795
   5796irq_failed:
   5797	do {
   5798		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
   5799	} while (rtas_busy_delay(rc));
   5800reg_failed:
   5801	LEAVE;
   5802	return rc;
   5803}
   5804
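        /**
         * ibmvfc_deregister_scsi_channel - Deregister a SCSI sub-CRQ from the hypervisor
         * @vhost:	ibmvfc host struct
         * @index:	index of the sub-CRQ to deregister
         *
         **/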
   5805static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
   5806{
   5807	struct device *dev = vhost->dev;
   5808	struct vio_dev *vdev = to_vio_dev(dev);
   5809	struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
   5810	long rc;
   5811
   5812	ENTER;
   5813
   5814	free_irq(scrq->irq, scrq);
   5815	irq_dispose_mapping(scrq->irq);
   5816	scrq->irq = 0;
   5817
   5818	do {
   5819		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
   5820					scrq->cookie);
   5821	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
   5822
   5823	if (rc)
   5824		dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
   5825
   5826	/* Clean out the queue */
   5827	memset(scrq->msgs.crq, 0, PAGE_SIZE);
   5828	scrq->cur = 0;
   5829
   5830	LEAVE;
   5831}
   5832
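        /**
         * ibmvfc_reg_sub_crqs - Register all allocated SCSI sub-CRQs
         * @vhost:	ibmvfc host struct
         *
         * If registering any channel fails, any channels already registered
         * are deregistered and channel enquiry is disabled.
         **/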
   5833static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost)
   5834{
   5835	int i, j;
   5836
   5837	ENTER;
   5838	if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
   5839		return;
   5840
   5841	for (i = 0; i < nr_scsi_hw_queues; i++) {
   5842		if (ibmvfc_register_scsi_channel(vhost, i)) {
   5843			for (j = i; j > 0; j--)
   5844				ibmvfc_deregister_scsi_channel(vhost, j - 1);
   5845			vhost->do_enquiry = 0;
   5846			return;
   5847		}
   5848	}
   5849
   5850	LEAVE;
   5851}
   5852
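        /**
         * ibmvfc_dereg_sub_crqs - Deregister all SCSI sub-CRQs
         * @vhost:	ibmvfc host struct
         *
         **/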
   5853static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost)
   5854{
   5855	int i;
   5856
   5857	ENTER;
   5858	if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
   5859		return;
   5860
   5861	for (i = 0; i < nr_scsi_hw_queues; i++)
   5862		ibmvfc_deregister_scsi_channel(vhost, i);
   5863
   5864	LEAVE;
   5865}
   5866
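        /**
         * ibmvfc_init_sub_crqs - Allocate and register SCSI sub-CRQs
         * @vhost:	ibmvfc host struct
         *
         * Allocates one sub-CRQ per hardware queue and registers them with
         * the hypervisor. If any allocation fails, all sub-CRQs are freed
         * and the host falls back to a single queue.
         **/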
   5867static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
   5868{
   5869	struct ibmvfc_queue *scrq;
   5870	int i, j;
   5871
   5872	ENTER;
   5873	if (!vhost->mq_enabled)
   5874		return;
   5875
   5876	vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues,
   5877					  sizeof(*vhost->scsi_scrqs.scrqs),
   5878					  GFP_KERNEL);
   5879	if (!vhost->scsi_scrqs.scrqs) {
   5880		vhost->do_enquiry = 0;
   5881		return;
   5882	}
   5883
   5884	for (i = 0; i < nr_scsi_hw_queues; i++) {
   5885		scrq = &vhost->scsi_scrqs.scrqs[i];
   5886		if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) {
   5887			for (j = i; j > 0; j--) {
   5888				scrq = &vhost->scsi_scrqs.scrqs[j - 1];
   5889				ibmvfc_free_queue(vhost, scrq);
   5890			}
   5891			kfree(vhost->scsi_scrqs.scrqs);
   5892			vhost->scsi_scrqs.scrqs = NULL;
   5893			vhost->scsi_scrqs.active_queues = 0;
   5894			vhost->do_enquiry = 0;
   5895			vhost->mq_enabled = 0;
   5896			return;
   5897		}
   5898	}
   5899
   5900	ibmvfc_reg_sub_crqs(vhost);
   5901
   5902	LEAVE;
   5903}
   5904
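        /**
         * ibmvfc_release_sub_crqs - Deregister and free all SCSI sub-CRQs
         * @vhost:	ibmvfc host struct
         *
         **/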
   5905static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
   5906{
   5907	struct ibmvfc_queue *scrq;
   5908	int i;
   5909
   5910	ENTER;
   5911	if (!vhost->scsi_scrqs.scrqs)
   5912		return;
   5913
   5914	ibmvfc_dereg_sub_crqs(vhost);
   5915
   5916	for (i = 0; i < nr_scsi_hw_queues; i++) {
   5917		scrq = &vhost->scsi_scrqs.scrqs[i];
   5918		ibmvfc_free_queue(vhost, scrq);
   5919	}
   5920
   5921	kfree(vhost->scsi_scrqs.scrqs);
   5922	vhost->scsi_scrqs.scrqs = NULL;
   5923	vhost->scsi_scrqs.active_queues = 0;
   5924	LEAVE;
   5925}
   5926
   5927/**
   5928 * ibmvfc_free_mem - Free memory for vhost
   5929 * @vhost:	ibmvfc host struct
   5930 *
   5931 * Return value:
   5932 * 	none
   5933 **/
   5934static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
   5935{
   5936	struct ibmvfc_queue *async_q = &vhost->async_crq;
   5937
   5938	ENTER;
   5939	mempool_destroy(vhost->tgt_pool);
   5940	kfree(vhost->trace);
   5941	dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
   5942			  vhost->disc_buf_dma);
   5943	dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
   5944			  vhost->login_buf, vhost->login_buf_dma);
   5945	dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
   5946			  vhost->channel_setup_buf, vhost->channel_setup_dma);
   5947	dma_pool_destroy(vhost->sg_pool);
   5948	ibmvfc_free_queue(vhost, async_q);
   5949	LEAVE;
   5950}
   5951
   5952/**
   5953 * ibmvfc_alloc_mem - Allocate memory for vhost
   5954 * @vhost:	ibmvfc host struct
   5955 *
   5956 * Return value:
   5957 * 	0 on success / non-zero on failure
   5958 **/
   5959static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
   5960{
   5961	struct ibmvfc_queue *async_q = &vhost->async_crq;
   5962	struct device *dev = vhost->dev;
   5963
   5964	ENTER;
   5965	if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
   5966		dev_err(dev, "Couldn't allocate/map async queue.\n");
   5967		goto nomem;
   5968	}
   5969
   5970	vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
   5971					 SG_ALL * sizeof(struct srp_direct_buf),
   5972					 sizeof(struct srp_direct_buf), 0);
   5973
   5974	if (!vhost->sg_pool) {
   5975		dev_err(dev, "Failed to allocate sg pool\n");
   5976		goto unmap_async_crq;
   5977	}
   5978
   5979	vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
   5980					      &vhost->login_buf_dma, GFP_KERNEL);
   5981
   5982	if (!vhost->login_buf) {
   5983		dev_err(dev, "Couldn't allocate NPIV login buffer\n");
   5984		goto free_sg_pool;
   5985	}
   5986
   5987	vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
   5988	vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
   5989					     &vhost->disc_buf_dma, GFP_KERNEL);
   5990
   5991	if (!vhost->disc_buf) {
   5992		dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
   5993		goto free_login_buffer;
   5994	}
   5995
   5996	vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
   5997			       sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
   5998	atomic_set(&vhost->trace_index, -1);
   5999
   6000	if (!vhost->trace)
   6001		goto free_disc_buffer;
   6002
   6003	vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
   6004						      sizeof(struct ibmvfc_target));
   6005
   6006	if (!vhost->tgt_pool) {
   6007		dev_err(dev, "Couldn't allocate target memory pool\n");
   6008		goto free_trace;
   6009	}
   6010
   6011	vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf),
   6012						      &vhost->channel_setup_dma,
   6013						      GFP_KERNEL);
   6014
   6015	if (!vhost->channel_setup_buf) {
   6016		dev_err(dev, "Couldn't allocate Channel Setup buffer\n");
   6017		goto free_tgt_pool;
   6018	}
   6019
   6020	LEAVE;
   6021	return 0;
   6022
   6023free_tgt_pool:
   6024	mempool_destroy(vhost->tgt_pool);
   6025free_trace:
   6026	kfree(vhost->trace);
   6027free_disc_buffer:
   6028	dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
   6029			  vhost->disc_buf_dma);
   6030free_login_buffer:
   6031	dma_free_coherent(dev, sizeof(*vhost->login_buf),
   6032			  vhost->login_buf, vhost->login_buf_dma);
   6033free_sg_pool:
   6034	dma_pool_destroy(vhost->sg_pool);
   6035unmap_async_crq:
   6036	ibmvfc_free_queue(vhost, async_q);
   6037nomem:
   6038	LEAVE;
   6039	return -ENOMEM;
   6040}
   6041
   6042/**
   6043 * ibmvfc_rport_add_thread - Worker thread for rport adds
   6044 * @work:	work struct
   6045 *
   6046 **/
   6047static void ibmvfc_rport_add_thread(struct work_struct *work)
   6048{
   6049	struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
   6050						 rport_add_work_q);
   6051	struct ibmvfc_target *tgt;
   6052	struct fc_rport *rport;
   6053	unsigned long flags;
   6054	int did_work;
   6055
   6056	ENTER;
   6057	spin_lock_irqsave(vhost->host->host_lock, flags);
   6058	do {
   6059		did_work = 0;
   6060		if (vhost->state != IBMVFC_ACTIVE)
   6061			break;
   6062
   6063		list_for_each_entry(tgt, &vhost->targets, queue) {
   6064			if (tgt->add_rport) {
   6065				did_work = 1;
   6066				tgt->add_rport = 0;
   6067				kref_get(&tgt->kref);
   6068				rport = tgt->rport;
   6069				if (!rport) {
   6070					spin_unlock_irqrestore(vhost->host->host_lock, flags);
   6071					ibmvfc_tgt_add_rport(tgt);
   6072				} else if (get_device(&rport->dev)) {
   6073					spin_unlock_irqrestore(vhost->host->host_lock, flags);
   6074					tgt_dbg(tgt, "Setting rport roles\n");
   6075					fc_remote_port_rolechg(rport, tgt->ids.roles);
   6076					put_device(&rport->dev);
   6077				} else {
   6078					spin_unlock_irqrestore(vhost->host->host_lock, flags);
   6079				}
   6080
   6081				kref_put(&tgt->kref, ibmvfc_release_tgt);
   6082				spin_lock_irqsave(vhost->host->host_lock, flags);
   6083				break;
   6084			}
   6085		}
    6086	} while (did_work);
   6087
   6088	if (vhost->state == IBMVFC_ACTIVE)
   6089		vhost->scan_complete = 1;
   6090	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   6091	LEAVE;
   6092}
   6093
   6094/**
   6095 * ibmvfc_probe - Adapter hot plug add entry point
   6096 * @vdev:	vio device struct
   6097 * @id:	vio device id struct
   6098 *
   6099 * Return value:
   6100 * 	0 on success / non-zero on failure
   6101 **/
   6102static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
   6103{
   6104	struct ibmvfc_host *vhost;
   6105	struct Scsi_Host *shost;
   6106	struct device *dev = &vdev->dev;
   6107	int rc = -ENOMEM;
   6108	unsigned int max_scsi_queues = IBMVFC_MAX_SCSI_QUEUES;
   6109
   6110	ENTER;
   6111	shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
   6112	if (!shost) {
   6113		dev_err(dev, "Couldn't allocate host data\n");
   6114		goto out;
   6115	}
   6116
   6117	shost->transportt = ibmvfc_transport_template;
   6118	shost->can_queue = max_requests;
   6119	shost->max_lun = max_lun;
   6120	shost->max_id = max_targets;
   6121	shost->max_sectors = IBMVFC_MAX_SECTORS;
   6122	shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
   6123	shost->unique_id = shost->host_no;
   6124	shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
   6125
   6126	vhost = shost_priv(shost);
   6127	INIT_LIST_HEAD(&vhost->targets);
   6128	INIT_LIST_HEAD(&vhost->purge);
   6129	sprintf(vhost->name, IBMVFC_NAME);
   6130	vhost->host = shost;
   6131	vhost->dev = dev;
   6132	vhost->partition_number = -1;
   6133	vhost->log_level = log_level;
   6134	vhost->task_set = 1;
   6135
   6136	vhost->mq_enabled = mq_enabled;
   6137	vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels);
   6138	vhost->using_channels = 0;
   6139	vhost->do_enquiry = 1;
   6140	vhost->scan_timeout = 0;
   6141
   6142	strcpy(vhost->partition_name, "UNKNOWN");
   6143	init_waitqueue_head(&vhost->work_wait_q);
   6144	init_waitqueue_head(&vhost->init_wait_q);
   6145	INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
   6146	mutex_init(&vhost->passthru_mutex);
   6147
   6148	if ((rc = ibmvfc_alloc_mem(vhost)))
   6149		goto free_scsi_host;
   6150
   6151	vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
   6152					 shost->host_no);
   6153
   6154	if (IS_ERR(vhost->work_thread)) {
   6155		dev_err(dev, "Couldn't create kernel thread: %ld\n",
   6156			PTR_ERR(vhost->work_thread));
   6157		rc = PTR_ERR(vhost->work_thread);
   6158		goto free_host_mem;
   6159	}
   6160
   6161	if ((rc = ibmvfc_init_crq(vhost))) {
   6162		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
   6163		goto kill_kthread;
   6164	}
   6165
   6166	if ((rc = scsi_add_host(shost, dev)))
   6167		goto release_crq;
   6168
   6169	fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
   6170
   6171	if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
   6172					   &ibmvfc_trace_attr))) {
   6173		dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
   6174		goto remove_shost;
   6175	}
   6176
   6177	ibmvfc_init_sub_crqs(vhost);
   6178
   6179	if (shost_to_fc_host(shost)->rqst_q)
   6180		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
   6181	dev_set_drvdata(dev, vhost);
   6182	spin_lock(&ibmvfc_driver_lock);
   6183	list_add_tail(&vhost->queue, &ibmvfc_head);
   6184	spin_unlock(&ibmvfc_driver_lock);
   6185
   6186	ibmvfc_send_crq_init(vhost);
   6187	scsi_scan_host(shost);
   6188	return 0;
   6189
   6190remove_shost:
   6191	scsi_remove_host(shost);
   6192release_crq:
   6193	ibmvfc_release_crq_queue(vhost);
   6194kill_kthread:
   6195	kthread_stop(vhost->work_thread);
   6196free_host_mem:
   6197	ibmvfc_free_mem(vhost);
   6198free_scsi_host:
   6199	scsi_host_put(shost);
   6200out:
   6201	LEAVE;
   6202	return rc;
   6203}
   6204
   6205/**
   6206 * ibmvfc_remove - Adapter hot plug remove entry point
   6207 * @vdev:	vio device struct
   6208 *
    6209 * Return value:
    6210 * 	none
   6211 **/
   6212static void ibmvfc_remove(struct vio_dev *vdev)
   6213{
   6214	struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
   6215	LIST_HEAD(purge);
   6216	unsigned long flags;
   6217
   6218	ENTER;
   6219	ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
   6220
   6221	spin_lock_irqsave(vhost->host->host_lock, flags);
   6222	ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
   6223	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   6224
   6225	ibmvfc_wait_while_resetting(vhost);
   6226	kthread_stop(vhost->work_thread);
   6227	fc_remove_host(vhost->host);
   6228	scsi_remove_host(vhost->host);
   6229
   6230	spin_lock_irqsave(vhost->host->host_lock, flags);
   6231	ibmvfc_purge_requests(vhost, DID_ERROR);
   6232	list_splice_init(&vhost->purge, &purge);
   6233	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   6234	ibmvfc_complete_purge(&purge);
   6235	ibmvfc_release_sub_crqs(vhost);
   6236	ibmvfc_release_crq_queue(vhost);
   6237
   6238	ibmvfc_free_mem(vhost);
   6239	spin_lock(&ibmvfc_driver_lock);
   6240	list_del(&vhost->queue);
   6241	spin_unlock(&ibmvfc_driver_lock);
   6242	scsi_host_put(vhost->host);
   6243	LEAVE;
   6244}
   6245
   6246/**
   6247 * ibmvfc_resume - Resume from suspend
   6248 * @dev:	device struct
   6249 *
   6250 * We may have lost an interrupt across suspend/resume, so kick the
   6251 * interrupt handler
   6252 *
   6253 */
   6254static int ibmvfc_resume(struct device *dev)
   6255{
   6256	unsigned long flags;
   6257	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
   6258	struct vio_dev *vdev = to_vio_dev(dev);
   6259
   6260	spin_lock_irqsave(vhost->host->host_lock, flags);
   6261	vio_disable_interrupts(vdev);
   6262	tasklet_schedule(&vhost->tasklet);
   6263	spin_unlock_irqrestore(vhost->host->host_lock, flags);
   6264	return 0;
   6265}
   6266
   6267/**
   6268 * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
   6269 * @vdev:	vio device struct
   6270 *
   6271 * Return value:
   6272 *	Number of bytes the driver will need to DMA map at the same time in
   6273 *	order to perform well.
   6274 */
   6275static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
   6276{
   6277	unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
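        	/*
        	 * Event pool IUs plus an estimated 512KB of mapped I/O for
        	 * each of cmd_per_lun commands.
        	 */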
   6278	return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
   6279}
   6280
   6281static const struct vio_device_id ibmvfc_device_table[] = {
   6282	{"fcp", "IBM,vfc-client"},
   6283	{ "", "" }
   6284};
   6285MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
   6286
   6287static const struct dev_pm_ops ibmvfc_pm_ops = {
   6288	.resume = ibmvfc_resume
   6289};
   6290
   6291static struct vio_driver ibmvfc_driver = {
   6292	.id_table = ibmvfc_device_table,
   6293	.probe = ibmvfc_probe,
   6294	.remove = ibmvfc_remove,
   6295	.get_desired_dma = ibmvfc_get_desired_dma,
   6296	.name = IBMVFC_NAME,
   6297	.pm = &ibmvfc_pm_ops,
   6298};
   6299
   6300static struct fc_function_template ibmvfc_transport_functions = {
   6301	.show_host_fabric_name = 1,
   6302	.show_host_node_name = 1,
   6303	.show_host_port_name = 1,
   6304	.show_host_supported_classes = 1,
   6305	.show_host_port_type = 1,
   6306	.show_host_port_id = 1,
   6307	.show_host_maxframe_size = 1,
   6308
   6309	.get_host_port_state = ibmvfc_get_host_port_state,
   6310	.show_host_port_state = 1,
   6311
   6312	.get_host_speed = ibmvfc_get_host_speed,
   6313	.show_host_speed = 1,
   6314
   6315	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
   6316	.terminate_rport_io = ibmvfc_terminate_rport_io,
   6317
   6318	.show_rport_maxframe_size = 1,
   6319	.show_rport_supported_classes = 1,
   6320
   6321	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
   6322	.show_rport_dev_loss_tmo = 1,
   6323
   6324	.get_starget_node_name = ibmvfc_get_starget_node_name,
   6325	.show_starget_node_name = 1,
   6326
   6327	.get_starget_port_name = ibmvfc_get_starget_port_name,
   6328	.show_starget_port_name = 1,
   6329
   6330	.get_starget_port_id = ibmvfc_get_starget_port_id,
   6331	.show_starget_port_id = 1,
   6332
   6333	.bsg_request = ibmvfc_bsg_request,
   6334	.bsg_timeout = ibmvfc_bsg_timeout,
   6335};
   6336
   6337/**
   6338 * ibmvfc_module_init - Initialize the ibmvfc module
   6339 *
   6340 * Return value:
   6341 * 	0 on success / other on failure
   6342 **/
   6343static int __init ibmvfc_module_init(void)
   6344{
   6345	int rc;
   6346
   6347	if (!firmware_has_feature(FW_FEATURE_VIO))
   6348		return -ENODEV;
   6349
   6350	printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
   6351	       IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
   6352
   6353	ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
   6354	if (!ibmvfc_transport_template)
   6355		return -ENOMEM;
   6356
   6357	rc = vio_register_driver(&ibmvfc_driver);
   6358	if (rc)
   6359		fc_release_transport(ibmvfc_transport_template);
   6360	return rc;
   6361}
   6362
   6363/**
   6364 * ibmvfc_module_exit - Teardown the ibmvfc module
   6365 *
   6366 * Return value:
   6367 * 	nothing
   6368 **/
   6369static void __exit ibmvfc_module_exit(void)
   6370{
   6371	vio_unregister_driver(&ibmvfc_driver);
   6372	fc_release_transport(ibmvfc_transport_template);
   6373}
   6374
   6375module_init(ibmvfc_module_init);
   6376module_exit(ibmvfc_module_exit);