cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sas_scsi_host.c (33120B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Serial Attached SCSI (SAS) class SCSI Host glue.
      4 *
      5 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
      6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
      7 */
      8
      9#include <linux/kthread.h>
     10#include <linux/firmware.h>
     11#include <linux/export.h>
     12#include <linux/ctype.h>
     13#include <linux/kernel.h>
     14
     15#include "sas_internal.h"
     16
     17#include <scsi/scsi_host.h>
     18#include <scsi/scsi_device.h>
     19#include <scsi/scsi_tcq.h>
     20#include <scsi/scsi.h>
     21#include <scsi/scsi_eh.h>
     22#include <scsi/scsi_transport.h>
     23#include <scsi/scsi_transport_sas.h>
     24#include <scsi/sas_ata.h>
     25#include "scsi_sas_internal.h"
     26#include "scsi_transport_api.h"
     27#include "scsi_priv.h"
     28
     29#include <linux/err.h>
     30#include <linux/blkdev.h>
     31#include <linux/freezer.h>
     32#include <linux/gfp.h>
     33#include <linux/scatterlist.h>
     34#include <linux/libata.h>
     35
     36/* record final status and free the task */
     37static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
     38{
     39	struct task_status_struct *ts = &task->task_status;
     40	enum scsi_host_status hs = DID_OK;
     41	enum exec_status stat = SAS_SAM_STAT_GOOD;
     42
     43	if (ts->resp == SAS_TASK_UNDELIVERED) {
     44		/* transport error */
     45		hs = DID_NO_CONNECT;
     46	} else { /* ts->resp == SAS_TASK_COMPLETE */
     47		/* task delivered, what happened afterwards? */
     48		switch (ts->stat) {
     49		case SAS_DEV_NO_RESPONSE:
     50		case SAS_INTERRUPTED:
     51		case SAS_PHY_DOWN:
     52		case SAS_NAK_R_ERR:
     53		case SAS_OPEN_TO:
     54			hs = DID_NO_CONNECT;
     55			break;
     56		case SAS_DATA_UNDERRUN:
     57			scsi_set_resid(sc, ts->residual);
     58			if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
     59				hs = DID_ERROR;
     60			break;
     61		case SAS_DATA_OVERRUN:
     62			hs = DID_ERROR;
     63			break;
     64		case SAS_QUEUE_FULL:
     65			hs = DID_SOFT_ERROR; /* retry */
     66			break;
     67		case SAS_DEVICE_UNKNOWN:
     68			hs = DID_BAD_TARGET;
     69			break;
     70		case SAS_OPEN_REJECT:
     71			if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
     72				hs = DID_SOFT_ERROR; /* retry */
     73			else
     74				hs = DID_ERROR;
     75			break;
     76		case SAS_PROTO_RESPONSE:
     77			pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n",
     78				  task->dev->port->ha->sas_ha_name);
     79			break;
     80		case SAS_ABORTED_TASK:
     81			hs = DID_ABORT;
     82			break;
     83		case SAS_SAM_STAT_CHECK_CONDITION:
     84			memcpy(sc->sense_buffer, ts->buf,
     85			       min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
     86			stat = SAS_SAM_STAT_CHECK_CONDITION;
     87			break;
     88		default:
     89			stat = ts->stat;
     90			break;
     91		}
     92	}
     93
     94	sc->result = (hs << 16) | stat;
     95	ASSIGN_SAS_TASK(sc, NULL);
     96	sas_free_task(task);
     97}
     98
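        /*
         * Completion callback installed on every sas_task built by
         * sas_create_task().  If the error handler has frozen the HA
         * (SAS_HA_FROZEN), the task is left for EH to reap; otherwise the
         * sas_task is detached from the scsi_cmnd, its status is folded
         * into cmd->result and the command is completed to the midlayer.
         */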
     99static void sas_scsi_task_done(struct sas_task *task)
    100{
    101	struct scsi_cmnd *sc = task->uldd_task;
    102	struct domain_device *dev = task->dev;
    103	struct sas_ha_struct *ha = dev->port->ha;
    104	unsigned long flags;
    105
    106	spin_lock_irqsave(&dev->done_lock, flags);
    107	if (test_bit(SAS_HA_FROZEN, &ha->state))
    108		task = NULL;
    109	else
    110		ASSIGN_SAS_TASK(sc, NULL);
    111	spin_unlock_irqrestore(&dev->done_lock, flags);
    112
    113	if (unlikely(!task)) {
    114		/* task will be completed by the error handler */
    115		pr_debug("task done but aborted\n");
    116		return;
    117	}
    118
    119	if (unlikely(!sc)) {
    120		pr_debug("task_done called with non existing SCSI cmnd!\n");
    121		sas_free_task(task);
    122		return;
    123	}
    124
    125	sas_end_task(sc, task);
    126	scsi_done(sc);
    127}
    128
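        /*
         * Build an SSP sas_task for a SCSI midlayer command: record the LUN,
         * point the task at the command's scatterlist and data direction, and
         * install sas_scsi_task_done() as the completion callback.
         */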
    129static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
    130					       struct domain_device *dev,
    131					       gfp_t gfp_flags)
    132{
    133	struct sas_task *task = sas_alloc_task(gfp_flags);
    134	struct scsi_lun lun;
    135
    136	if (!task)
    137		return NULL;
    138
    139	task->uldd_task = cmd;
    140	ASSIGN_SAS_TASK(cmd, task);
    141
    142	task->dev = dev;
    143	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
    144
    145	task->ssp_task.retry_count = 1;
    146	int_to_scsilun(cmd->device->lun, &lun);
    147	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
    148	task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
    149	task->ssp_task.cmd = cmd;
    150
    151	task->scatter = scsi_sglist(cmd);
    152	task->num_scatter = scsi_sg_count(cmd);
    153	task->total_xfer_len = scsi_bufflen(cmd);
    154	task->data_dir = cmd->sc_data_direction;
    155
    156	task->task_done = sas_scsi_task_done;
    157
    158	return task;
    159}
    160
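        /*
         * ->queuecommand hook for libsas hosts.  SATA commands are routed
         * through libata via ata_sas_queuecmd(); SSP commands get a sas_task
         * which is handed to the LLDD with lldd_execute_task().
         */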
    161int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
    162{
    163	struct sas_internal *i = to_sas_internal(host->transportt);
    164	struct domain_device *dev = cmd_to_domain_dev(cmd);
    165	struct sas_task *task;
    166	int res = 0;
    167
    168	/* If the device fell off, no sense in issuing commands */
    169	if (test_bit(SAS_DEV_GONE, &dev->state)) {
    170		cmd->result = DID_BAD_TARGET << 16;
    171		goto out_done;
    172	}
    173
    174	if (dev_is_sata(dev)) {
    175		spin_lock_irq(dev->sata_dev.ap->lock);
    176		res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
    177		spin_unlock_irq(dev->sata_dev.ap->lock);
    178		return res;
    179	}
    180
    181	task = sas_create_task(cmd, dev, GFP_ATOMIC);
    182	if (!task)
    183		return SCSI_MLQUEUE_HOST_BUSY;
    184
    185	res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
    186	if (res)
    187		goto out_free_task;
    188	return 0;
    189
    190out_free_task:
    191	pr_debug("lldd_execute_task returned: %d\n", res);
    192	ASSIGN_SAS_TASK(cmd, NULL);
    193	sas_free_task(task);
    194	if (res == -SAS_QUEUE_FULL)
    195		cmd->result = DID_SOFT_ERROR << 16; /* retry */
    196	else
    197		cmd->result = DID_ERROR << 16;
    198out_done:
    199	scsi_done(cmd);
    200	return 0;
    201}
    202EXPORT_SYMBOL_GPL(sas_queuecommand);
    203
    204static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
    205{
    206	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
    207	struct domain_device *dev = cmd_to_domain_dev(cmd);
    208	struct sas_task *task = TO_SAS_TASK(cmd);
    209
    210	/* At this point, we only get called following an actual abort
    211	 * of the task, so we should be guaranteed not to be racing with
    212	 * any completions from the LLD.  Task is freed after this.
    213	 */
    214	sas_end_task(cmd, task);
    215
    216	if (dev_is_sata(dev)) {
    217		/* defer commands to libata so that libata EH can
    218		 * handle ata qcs correctly
    219		 */
    220		list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
    221		return;
    222	}
    223
    224	/* now finish the command and move it on to the error
    225	 * handler done list, this also takes it off the
    226	 * error handler pending list.
    227	 */
    228	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
    229}
    230
    231static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
    232{
    233	struct scsi_cmnd *cmd, *n;
    234
    235	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
    236		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
    237		    cmd->device->lun == my_cmd->device->lun)
    238			sas_eh_finish_cmd(cmd);
    239	}
    240}
    241
    242static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
    243				     struct domain_device *dev)
    244{
    245	struct scsi_cmnd *cmd, *n;
    246
    247	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
    248		struct domain_device *x = cmd_to_domain_dev(cmd);
    249
    250		if (x == dev)
    251			sas_eh_finish_cmd(cmd);
    252	}
    253}
    254
    255static void sas_scsi_clear_queue_port(struct list_head *error_q,
    256				      struct asd_sas_port *port)
    257{
    258	struct scsi_cmnd *cmd, *n;
    259
    260	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
    261		struct domain_device *dev = cmd_to_domain_dev(cmd);
    262		struct asd_sas_port *x = dev->port;
    263
    264		if (x == port)
    265			sas_eh_finish_cmd(cmd);
    266	}
    267}
    268
    269enum task_disposition {
    270	TASK_IS_DONE,
    271	TASK_IS_ABORTED,
    272	TASK_IS_AT_LU,
    273	TASK_IS_NOT_AT_LU,
    274	TASK_ABORT_FAILED,
    275};
    276
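        /*
         * Ask the LLDD (up to five times) to abort a task and, if that is
         * inconclusive, to query it, so the error handler can classify the
         * task's disposition (done, aborted, still at the LU, abort failed).
         */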
    277static enum task_disposition sas_scsi_find_task(struct sas_task *task)
    278{
    279	unsigned long flags;
    280	int i, res;
    281	struct sas_internal *si =
    282		to_sas_internal(task->dev->port->ha->core.shost->transportt);
    283
    284	for (i = 0; i < 5; i++) {
    285		pr_notice("%s: aborting task 0x%p\n", __func__, task);
    286		res = si->dft->lldd_abort_task(task);
    287
    288		spin_lock_irqsave(&task->task_state_lock, flags);
    289		if (task->task_state_flags & SAS_TASK_STATE_DONE) {
    290			spin_unlock_irqrestore(&task->task_state_lock, flags);
    291			pr_debug("%s: task 0x%p is done\n", __func__, task);
    292			return TASK_IS_DONE;
    293		}
    294		spin_unlock_irqrestore(&task->task_state_lock, flags);
    295
    296		if (res == TMF_RESP_FUNC_COMPLETE) {
    297			pr_notice("%s: task 0x%p is aborted\n",
    298				  __func__, task);
    299			return TASK_IS_ABORTED;
    300		} else if (si->dft->lldd_query_task) {
    301			pr_notice("%s: querying task 0x%p\n", __func__, task);
    302			res = si->dft->lldd_query_task(task);
    303			switch (res) {
    304			case TMF_RESP_FUNC_SUCC:
    305				pr_notice("%s: task 0x%p at LU\n", __func__,
    306					  task);
    307				return TASK_IS_AT_LU;
    308			case TMF_RESP_FUNC_COMPLETE:
    309				pr_notice("%s: task 0x%p not at LU\n",
    310					  __func__, task);
    311				return TASK_IS_NOT_AT_LU;
    312			case TMF_RESP_FUNC_FAILED:
    313				pr_notice("%s: task 0x%p failed to abort\n",
    314					  __func__, task);
    315				return TASK_ABORT_FAILED;
    316			default:
    317				pr_notice("%s: task 0x%p result code %d not handled\n",
    318					  __func__, task, res);
    319			}
    320		}
    321	}
    322	return TASK_ABORT_FAILED;
    323}
    324
    325static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
    326{
    327	int res = TMF_RESP_FUNC_FAILED;
    328	struct scsi_lun lun;
    329	struct sas_internal *i =
    330		to_sas_internal(dev->port->ha->core.shost->transportt);
    331
    332	int_to_scsilun(cmd->device->lun, &lun);
    333
    334	pr_notice("eh: device %016llx LUN 0x%llx has the task\n",
    335		  SAS_ADDR(dev->sas_addr),
    336		  cmd->device->lun);
    337
    338	if (i->dft->lldd_abort_task_set)
    339		res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
    340
    341	if (res == TMF_RESP_FUNC_FAILED) {
    342		if (i->dft->lldd_clear_task_set)
    343			res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
    344	}
    345
    346	if (res == TMF_RESP_FUNC_FAILED) {
    347		if (i->dft->lldd_lu_reset)
    348			res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
    349	}
    350
    351	return res;
    352}
    353
    354static int sas_recover_I_T(struct domain_device *dev)
    355{
    356	int res = TMF_RESP_FUNC_FAILED;
    357	struct sas_internal *i =
    358		to_sas_internal(dev->port->ha->core.shost->transportt);
    359
    360	pr_notice("I_T nexus reset for dev %016llx\n",
    361		  SAS_ADDR(dev->sas_addr));
    362
    363	if (i->dft->lldd_I_T_nexus_reset)
    364		res = i->dft->lldd_I_T_nexus_reset(dev);
    365
    366	return res;
    367}
    368
    369/* take a reference on the last known good phy for this device */
    370struct sas_phy *sas_get_local_phy(struct domain_device *dev)
    371{
    372	struct sas_ha_struct *ha = dev->port->ha;
    373	struct sas_phy *phy;
    374	unsigned long flags;
    375
    376	/* a published domain device always has a valid phy, it may be
    377	 * stale, but it is never NULL
    378	 */
    379	BUG_ON(!dev->phy);
    380
    381	spin_lock_irqsave(&ha->phy_port_lock, flags);
    382	phy = dev->phy;
    383	get_device(&phy->dev);
    384	spin_unlock_irqrestore(&ha->phy_port_lock, flags);
    385
    386	return phy;
    387}
    388EXPORT_SYMBOL_GPL(sas_get_local_phy);
    389
    390static void sas_wait_eh(struct domain_device *dev)
    391{
    392	struct sas_ha_struct *ha = dev->port->ha;
    393	DEFINE_WAIT(wait);
    394
    395	if (dev_is_sata(dev)) {
    396		ata_port_wait_eh(dev->sata_dev.ap);
    397		return;
    398	}
    399 retry:
    400	spin_lock_irq(&ha->lock);
    401
    402	while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
    403		prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
    404		spin_unlock_irq(&ha->lock);
    405		schedule();
    406		spin_lock_irq(&ha->lock);
    407	}
    408	finish_wait(&ha->eh_wait_q, &wait);
    409
    410	spin_unlock_irq(&ha->lock);
    411
    412	/* make sure SCSI EH is complete */
    413	if (scsi_host_in_recovery(ha->core.shost)) {
    414		msleep(10);
    415		goto retry;
    416	}
    417}
    418
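        /*
         * Queue a LUN or I_T nexus reset to be carried out by the SCSI
         * error-handler thread (drained in sas_eh_handle_resets()); for SATA
         * devices the reset is scheduled through the libata EH instead.
         */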
    419static int sas_queue_reset(struct domain_device *dev, int reset_type,
    420			   u64 lun, int wait)
    421{
    422	struct sas_ha_struct *ha = dev->port->ha;
    423	int scheduled = 0, tries = 100;
    424
    425	/* ata: promote lun reset to bus reset */
    426	if (dev_is_sata(dev)) {
    427		sas_ata_schedule_reset(dev);
    428		if (wait)
    429			sas_ata_wait_eh(dev);
    430		return SUCCESS;
    431	}
    432
    433	while (!scheduled && tries--) {
    434		spin_lock_irq(&ha->lock);
    435		if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
    436		    !test_bit(reset_type, &dev->state)) {
    437			scheduled = 1;
    438			ha->eh_active++;
    439			list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
    440			set_bit(SAS_DEV_EH_PENDING, &dev->state);
    441			set_bit(reset_type, &dev->state);
    442			int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
    443			scsi_schedule_eh(ha->core.shost);
    444		}
    445		spin_unlock_irq(&ha->lock);
    446
    447		if (wait)
    448			sas_wait_eh(dev);
    449
    450		if (scheduled)
    451			return SUCCESS;
    452	}
    453
    454	pr_warn("%s reset of %s failed\n",
    455		reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
    456		dev_name(&dev->rphy->dev));
    457
    458	return FAILED;
    459}
    460
    461int sas_eh_abort_handler(struct scsi_cmnd *cmd)
    462{
    463	int res = TMF_RESP_FUNC_FAILED;
    464	struct sas_task *task = TO_SAS_TASK(cmd);
    465	struct Scsi_Host *host = cmd->device->host;
    466	struct domain_device *dev = cmd_to_domain_dev(cmd);
    467	struct sas_internal *i = to_sas_internal(host->transportt);
    468	unsigned long flags;
    469
    470	if (!i->dft->lldd_abort_task)
    471		return FAILED;
    472
    473	spin_lock_irqsave(host->host_lock, flags);
    474	/* We cannot do async aborts for SATA devices */
    475	if (dev_is_sata(dev) && !host->host_eh_scheduled) {
    476		spin_unlock_irqrestore(host->host_lock, flags);
    477		return FAILED;
    478	}
    479	spin_unlock_irqrestore(host->host_lock, flags);
    480
    481	if (task)
    482		res = i->dft->lldd_abort_task(task);
    483	else
    484		pr_notice("no task to abort\n");
    485	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
    486		return SUCCESS;
    487
    488	return FAILED;
    489}
    490EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
    491
    492/* Attempt to send a LUN reset message to a device */
    493int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
    494{
    495	int res;
    496	struct scsi_lun lun;
    497	struct Scsi_Host *host = cmd->device->host;
    498	struct domain_device *dev = cmd_to_domain_dev(cmd);
    499	struct sas_internal *i = to_sas_internal(host->transportt);
    500
    501	if (current != host->ehandler)
    502		return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);
    503
    504	int_to_scsilun(cmd->device->lun, &lun);
    505
    506	if (!i->dft->lldd_lu_reset)
    507		return FAILED;
    508
    509	res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
    510	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
    511		return SUCCESS;
    512
    513	return FAILED;
    514}
    515EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
    516
    517int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
    518{
    519	int res;
    520	struct Scsi_Host *host = cmd->device->host;
    521	struct domain_device *dev = cmd_to_domain_dev(cmd);
    522	struct sas_internal *i = to_sas_internal(host->transportt);
    523
    524	if (current != host->ehandler)
    525		return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);
    526
    527	if (!i->dft->lldd_I_T_nexus_reset)
    528		return FAILED;
    529
    530	res = i->dft->lldd_I_T_nexus_reset(dev);
    531	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
    532	    res == -ENODEV)
    533		return SUCCESS;
    534
    535	return FAILED;
    536}
    537EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
    538
    539/* Try to reset a device */
    540static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
    541{
    542	int res;
    543	struct Scsi_Host *shost = cmd->device->host;
    544
    545	if (!shost->hostt->eh_device_reset_handler)
    546		goto try_target_reset;
    547
    548	res = shost->hostt->eh_device_reset_handler(cmd);
    549	if (res == SUCCESS)
    550		return res;
    551
    552try_target_reset:
    553	if (shost->hostt->eh_target_reset_handler)
    554		return shost->hostt->eh_target_reset_handler(cmd);
    555
    556	return FAILED;
    557}
    558
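        /*
         * Core libsas error-handling loop.  Commands whose sas_task already
         * completed are moved straight to the done list; the rest are
         * escalated through task abort, LU recovery, I_T nexus reset,
         * clear-nexus-port and finally clear-nexus-ha until the work queue
         * is drained.
         */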
    559static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
    560{
    561	struct scsi_cmnd *cmd, *n;
    562	enum task_disposition res = TASK_IS_DONE;
    563	int tmf_resp, need_reset;
    564	struct sas_internal *i = to_sas_internal(shost->transportt);
    565	unsigned long flags;
    566	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
    567	LIST_HEAD(done);
    568
    569	/* clean out any commands that won the completion vs eh race */
    570	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
    571		struct domain_device *dev = cmd_to_domain_dev(cmd);
    572		struct sas_task *task;
    573
    574		spin_lock_irqsave(&dev->done_lock, flags);
    575		/* by this point the lldd has either observed
    576		 * SAS_HA_FROZEN and is leaving the task alone, or has
    577		 * won the race with eh and decided to complete it
    578		 */
    579		task = TO_SAS_TASK(cmd);
    580		spin_unlock_irqrestore(&dev->done_lock, flags);
    581
    582		if (!task)
    583			list_move_tail(&cmd->eh_entry, &done);
    584	}
    585
    586 Again:
    587	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
    588		struct sas_task *task = TO_SAS_TASK(cmd);
    589
    590		list_del_init(&cmd->eh_entry);
    591
    592		spin_lock_irqsave(&task->task_state_lock, flags);
    593		need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
    594		spin_unlock_irqrestore(&task->task_state_lock, flags);
    595
    596		if (need_reset) {
    597			pr_notice("%s: task 0x%p requests reset\n",
    598				  __func__, task);
    599			goto reset;
    600		}
    601
    602		pr_debug("trying to find task 0x%p\n", task);
    603		res = sas_scsi_find_task(task);
    604
    605		switch (res) {
    606		case TASK_IS_DONE:
    607			pr_notice("%s: task 0x%p is done\n", __func__,
    608				    task);
    609			sas_eh_finish_cmd(cmd);
    610			continue;
    611		case TASK_IS_ABORTED:
    612			pr_notice("%s: task 0x%p is aborted\n",
    613				  __func__, task);
    614			sas_eh_finish_cmd(cmd);
    615			continue;
    616		case TASK_IS_AT_LU:
    617			pr_info("task 0x%p is at LU: lu recover\n", task);
    618 reset:
    619			tmf_resp = sas_recover_lu(task->dev, cmd);
    620			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
    621				pr_notice("dev %016llx LU 0x%llx is recovered\n",
    622					  SAS_ADDR(task->dev),
    623					  cmd->device->lun);
    624				sas_eh_finish_cmd(cmd);
    625				sas_scsi_clear_queue_lu(work_q, cmd);
    626				goto Again;
    627			}
    628			fallthrough;
    629		case TASK_IS_NOT_AT_LU:
    630		case TASK_ABORT_FAILED:
    631			pr_notice("task 0x%p is not at LU: I_T recover\n",
    632				  task);
    633			tmf_resp = sas_recover_I_T(task->dev);
    634			if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
    635			    tmf_resp == -ENODEV) {
    636				struct domain_device *dev = task->dev;
    637				pr_notice("I_T %016llx recovered\n",
    638					  SAS_ADDR(task->dev->sas_addr));
    639				sas_eh_finish_cmd(cmd);
    640				sas_scsi_clear_queue_I_T(work_q, dev);
    641				goto Again;
    642			}
    643			/* Hammer time :-) */
    644			try_to_reset_cmd_device(cmd);
    645			if (i->dft->lldd_clear_nexus_port) {
    646				struct asd_sas_port *port = task->dev->port;
    647				pr_debug("clearing nexus for port:%d\n",
    648					  port->id);
    649				res = i->dft->lldd_clear_nexus_port(port);
    650				if (res == TMF_RESP_FUNC_COMPLETE) {
    651					pr_notice("clear nexus port:%d succeeded\n",
    652						  port->id);
    653					sas_eh_finish_cmd(cmd);
    654					sas_scsi_clear_queue_port(work_q,
    655								  port);
    656					goto Again;
    657				}
    658			}
    659			if (i->dft->lldd_clear_nexus_ha) {
    660				pr_debug("clear nexus ha\n");
    661				res = i->dft->lldd_clear_nexus_ha(ha);
    662				if (res == TMF_RESP_FUNC_COMPLETE) {
    663					pr_notice("clear nexus ha succeeded\n");
    664					sas_eh_finish_cmd(cmd);
    665					goto clear_q;
    666				}
    667			}
    668			/* If we are here -- this means that no amount
    669			 * of effort could recover from errors.  Quite
    670			 * possibly the HA just disappeared.
    671			 */
    672			pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n",
    673			       SAS_ADDR(task->dev->sas_addr),
    674			       cmd->device->lun);
    675
    676			sas_eh_finish_cmd(cmd);
    677			goto clear_q;
    678		}
    679	}
    680 out:
    681	list_splice_tail(&done, work_q);
    682	list_splice_tail_init(&ha->eh_ata_q, work_q);
    683	return;
    684
    685 clear_q:
    686	pr_debug("--- Exit %s -- clear_q\n", __func__);
    687	list_for_each_entry_safe(cmd, n, work_q, eh_entry)
    688		sas_eh_finish_cmd(cmd);
    689	goto out;
    690}
    691
    692static void sas_eh_handle_resets(struct Scsi_Host *shost)
    693{
    694	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
    695	struct sas_internal *i = to_sas_internal(shost->transportt);
    696
    697	/* handle directed resets to sas devices */
    698	spin_lock_irq(&ha->lock);
    699	while (!list_empty(&ha->eh_dev_q)) {
    700		struct domain_device *dev;
    701		struct ssp_device *ssp;
    702
    703		ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
    704		list_del_init(&ssp->eh_list_node);
    705		dev = container_of(ssp, typeof(*dev), ssp_dev);
    706		kref_get(&dev->kref);
    707		WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");
    708
    709		spin_unlock_irq(&ha->lock);
    710
    711		if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
    712			i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);
    713
    714		if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
    715			i->dft->lldd_I_T_nexus_reset(dev);
    716
    717		sas_put_device(dev);
    718		spin_lock_irq(&ha->lock);
    719		clear_bit(SAS_DEV_EH_PENDING, &dev->state);
    720		ha->eh_active--;
    721	}
    722	spin_unlock_irq(&ha->lock);
    723}
    724
    725
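        /*
         * Top-level error recovery for a libsas host, run from the SCSI
         * error-handler thread: freeze the HA while SAS errors are handled,
         * run any queued device resets and libata EH, then repeat if new EH
         * work was scheduled during the pass.
         */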
    726void sas_scsi_recover_host(struct Scsi_Host *shost)
    727{
    728	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
    729	LIST_HEAD(eh_work_q);
    730	int tries = 0;
    731	bool retry;
    732
    733retry:
    734	tries++;
    735	retry = true;
    736	spin_lock_irq(shost->host_lock);
    737	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
    738	spin_unlock_irq(shost->host_lock);
    739
    740	pr_notice("Enter %s busy: %d failed: %d\n",
    741		  __func__, scsi_host_busy(shost), shost->host_failed);
    742	/*
    743	 * Deal with commands that still have SAS tasks (i.e. they didn't
    744	 * complete via the normal sas_task completion mechanism),
    745	 * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
    746	 */
    747	set_bit(SAS_HA_FROZEN, &ha->state);
    748	sas_eh_handle_sas_errors(shost, &eh_work_q);
    749	clear_bit(SAS_HA_FROZEN, &ha->state);
    750	if (list_empty(&eh_work_q))
    751		goto out;
    752
    753	/*
     754	 * Now deal with SCSI commands that completed ok but have an error
    755	 * code (and hopefully sense data) attached.  This is roughly what
    756	 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
    757	 * command we see here has no sas_task and is thus unknown to the HA.
    758	 */
    759	sas_ata_eh(shost, &eh_work_q);
    760	if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
    761		scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
    762
    763out:
    764	sas_eh_handle_resets(shost);
    765
    766	/* now link into libata eh --- if we have any ata devices */
    767	sas_ata_strategy_handler(shost);
    768
    769	scsi_eh_flush_done_q(&ha->eh_done_q);
    770
    771	/* check if any new eh work was scheduled during the last run */
    772	spin_lock_irq(&ha->lock);
    773	if (ha->eh_active == 0) {
    774		shost->host_eh_scheduled = 0;
    775		retry = false;
    776	}
    777	spin_unlock_irq(&ha->lock);
    778
    779	if (retry)
    780		goto retry;
    781
    782	pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n",
    783		  __func__, scsi_host_busy(shost),
    784		  shost->host_failed, tries);
    785}
    786
    787int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
    788{
    789	struct domain_device *dev = sdev_to_domain_dev(sdev);
    790
    791	if (dev_is_sata(dev))
    792		return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
    793
    794	return -EINVAL;
    795}
    796EXPORT_SYMBOL_GPL(sas_ioctl);
    797
    798struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
    799{
    800	struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
    801	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
    802	struct domain_device *found_dev = NULL;
    803	int i;
    804	unsigned long flags;
    805
    806	spin_lock_irqsave(&ha->phy_port_lock, flags);
    807	for (i = 0; i < ha->num_phys; i++) {
    808		struct asd_sas_port *port = ha->sas_port[i];
    809		struct domain_device *dev;
    810
    811		spin_lock(&port->dev_list_lock);
    812		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
    813			if (rphy == dev->rphy) {
    814				found_dev = dev;
    815				spin_unlock(&port->dev_list_lock);
    816				goto found;
    817			}
    818		}
    819		spin_unlock(&port->dev_list_lock);
    820	}
    821 found:
    822	spin_unlock_irqrestore(&ha->phy_port_lock, flags);
    823
    824	return found_dev;
    825}
    826
    827int sas_target_alloc(struct scsi_target *starget)
    828{
    829	struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
    830	struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
    831
    832	if (!found_dev)
    833		return -ENODEV;
    834
    835	kref_get(&found_dev->kref);
    836	starget->hostdata = found_dev;
    837	return 0;
    838}
    839EXPORT_SYMBOL_GPL(sas_target_alloc);
    840
    841#define SAS_DEF_QD 256
    842
    843int sas_slave_configure(struct scsi_device *scsi_dev)
    844{
    845	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
    846
    847	BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
    848
    849	if (dev_is_sata(dev)) {
    850		ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
    851		return 0;
    852	}
    853
    854	sas_read_port_mode_page(scsi_dev);
    855
    856	if (scsi_dev->tagged_supported) {
    857		scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
    858	} else {
    859		pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n",
    860			  SAS_ADDR(dev->sas_addr), scsi_dev->lun);
    861		scsi_change_queue_depth(scsi_dev, 1);
    862	}
    863
    864	scsi_dev->allow_restart = 1;
    865
    866	return 0;
    867}
    868EXPORT_SYMBOL_GPL(sas_slave_configure);
    869
    870int sas_change_queue_depth(struct scsi_device *sdev, int depth)
    871{
    872	struct domain_device *dev = sdev_to_domain_dev(sdev);
    873
    874	if (dev_is_sata(dev))
    875		return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
    876
    877	if (!sdev->tagged_supported)
    878		depth = 1;
    879	return scsi_change_queue_depth(sdev, depth);
    880}
    881EXPORT_SYMBOL_GPL(sas_change_queue_depth);
    882
    883int sas_bios_param(struct scsi_device *scsi_dev,
    884			  struct block_device *bdev,
    885			  sector_t capacity, int *hsc)
    886{
    887	hsc[0] = 255;
    888	hsc[1] = 63;
    889	sector_div(capacity, 255*63);
    890	hsc[2] = capacity;
    891
    892	return 0;
    893}
    894EXPORT_SYMBOL_GPL(sas_bios_param);
    895
    896void sas_task_internal_done(struct sas_task *task)
    897{
    898	del_timer(&task->slow_task->timer);
    899	complete(&task->slow_task->completion);
    900}
    901
    902void sas_task_internal_timedout(struct timer_list *t)
    903{
    904	struct sas_task_slow *slow = from_timer(slow, t, timer);
    905	struct sas_task *task = slow->task;
    906	bool is_completed = true;
    907	unsigned long flags;
    908
    909	spin_lock_irqsave(&task->task_state_lock, flags);
    910	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
    911		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
    912		is_completed = false;
    913	}
    914	spin_unlock_irqrestore(&task->task_state_lock, flags);
    915
    916	if (!is_completed)
    917		complete(&task->slow_task->completion);
    918}
    919
    920#define TASK_TIMEOUT			(20 * HZ)
    921#define TASK_RETRY			3
    922
    923static int sas_execute_internal_abort(struct domain_device *device,
    924				      enum sas_internal_abort type, u16 tag,
    925				      unsigned int qid, void *data)
    926{
    927	struct sas_ha_struct *ha = device->port->ha;
    928	struct sas_internal *i = to_sas_internal(ha->core.shost->transportt);
    929	struct sas_task *task = NULL;
    930	int res, retry;
    931
    932	for (retry = 0; retry < TASK_RETRY; retry++) {
    933		task = sas_alloc_slow_task(GFP_KERNEL);
    934		if (!task)
    935			return -ENOMEM;
    936
    937		task->dev = device;
    938		task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT;
    939		task->task_done = sas_task_internal_done;
    940		task->slow_task->timer.function = sas_task_internal_timedout;
    941		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
    942		add_timer(&task->slow_task->timer);
    943
    944		task->abort_task.tag = tag;
    945		task->abort_task.type = type;
    946		task->abort_task.qid = qid;
    947
    948		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
    949		if (res) {
    950			del_timer_sync(&task->slow_task->timer);
    951			pr_err("Executing internal abort failed %016llx (%d)\n",
    952			       SAS_ADDR(device->sas_addr), res);
    953			break;
    954		}
    955
    956		wait_for_completion(&task->slow_task->completion);
    957		res = TMF_RESP_FUNC_FAILED;
    958
    959		/* Even if the internal abort timed out, return direct. */
    960		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
    961			bool quit = true;
    962
    963			if (i->dft->lldd_abort_timeout)
    964				quit = i->dft->lldd_abort_timeout(task, data);
    965			else
    966				pr_err("Internal abort: timeout %016llx\n",
    967				       SAS_ADDR(device->sas_addr));
    968			res = -EIO;
    969			if (quit)
    970				break;
    971		}
    972
    973		if (task->task_status.resp == SAS_TASK_COMPLETE &&
    974			task->task_status.stat == SAS_SAM_STAT_GOOD) {
    975			res = TMF_RESP_FUNC_COMPLETE;
    976			break;
    977		}
    978
    979		if (task->task_status.resp == SAS_TASK_COMPLETE &&
    980			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
    981			res = TMF_RESP_FUNC_SUCC;
    982			break;
    983		}
    984
    985		pr_err("Internal abort: task to dev %016llx response: 0x%x status 0x%x\n",
    986		       SAS_ADDR(device->sas_addr), task->task_status.resp,
    987		       task->task_status.stat);
    988		sas_free_task(task);
    989		task = NULL;
    990	}
    991	BUG_ON(retry == TASK_RETRY && task != NULL);
    992	sas_free_task(task);
    993	return res;
    994}
    995
    996int sas_execute_internal_abort_single(struct domain_device *device, u16 tag,
    997				      unsigned int qid, void *data)
    998{
    999	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_SINGLE,
   1000					  tag, qid, data);
   1001}
   1002EXPORT_SYMBOL_GPL(sas_execute_internal_abort_single);
   1003
   1004int sas_execute_internal_abort_dev(struct domain_device *device,
   1005				   unsigned int qid, void *data)
   1006{
   1007	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_DEV,
   1008					  SCSI_NO_TAG, qid, data);
   1009}
   1010EXPORT_SYMBOL_GPL(sas_execute_internal_abort_dev);
   1011
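        /*
         * Generic task-management execution: allocate a slow task, arm the
         * TASK_TIMEOUT timer, hand the TMF to the LLDD and translate the
         * resulting task status into a TMF_RESP_* code (or a negative errno,
         * or the underrun byte count), retrying up to TASK_RETRY times.
         */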
   1012int sas_execute_tmf(struct domain_device *device, void *parameter,
   1013		    int para_len, int force_phy_id,
   1014		    struct sas_tmf_task *tmf)
   1015{
   1016	struct sas_task *task;
   1017	struct sas_internal *i =
   1018		to_sas_internal(device->port->ha->core.shost->transportt);
   1019	int res, retry;
   1020
   1021	for (retry = 0; retry < TASK_RETRY; retry++) {
   1022		task = sas_alloc_slow_task(GFP_KERNEL);
   1023		if (!task)
   1024			return -ENOMEM;
   1025
   1026		task->dev = device;
   1027		task->task_proto = device->tproto;
   1028
   1029		if (dev_is_sata(device)) {
   1030			task->ata_task.device_control_reg_update = 1;
   1031			if (force_phy_id >= 0) {
   1032				task->ata_task.force_phy = true;
   1033				task->ata_task.force_phy_id = force_phy_id;
   1034			}
   1035			memcpy(&task->ata_task.fis, parameter, para_len);
   1036		} else {
   1037			memcpy(&task->ssp_task, parameter, para_len);
   1038		}
   1039
   1040		task->task_done = sas_task_internal_done;
   1041		task->tmf = tmf;
   1042
   1043		task->slow_task->timer.function = sas_task_internal_timedout;
   1044		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
   1045		add_timer(&task->slow_task->timer);
   1046
   1047		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
   1048		if (res) {
   1049			del_timer_sync(&task->slow_task->timer);
   1050			pr_err("executing TMF task failed %016llx (%d)\n",
   1051			       SAS_ADDR(device->sas_addr), res);
   1052			break;
   1053		}
   1054
   1055		wait_for_completion(&task->slow_task->completion);
   1056
   1057		if (i->dft->lldd_tmf_exec_complete)
   1058			i->dft->lldd_tmf_exec_complete(device);
   1059
   1060		res = TMF_RESP_FUNC_FAILED;
   1061
   1062		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
   1063			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
   1064				pr_err("TMF task timeout for %016llx and not done\n",
   1065				       SAS_ADDR(device->sas_addr));
   1066				if (i->dft->lldd_tmf_aborted)
   1067					i->dft->lldd_tmf_aborted(task);
   1068				break;
   1069			}
   1070			pr_warn("TMF task timeout for %016llx and done\n",
   1071				SAS_ADDR(device->sas_addr));
   1072		}
   1073
   1074		if (task->task_status.resp == SAS_TASK_COMPLETE &&
   1075		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
   1076			res = TMF_RESP_FUNC_COMPLETE;
   1077			break;
   1078		}
   1079
   1080		if (task->task_status.resp == SAS_TASK_COMPLETE &&
   1081		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
   1082			res = TMF_RESP_FUNC_SUCC;
   1083			break;
   1084		}
   1085
   1086		if (task->task_status.resp == SAS_TASK_COMPLETE &&
   1087		    task->task_status.stat == SAS_DATA_UNDERRUN) {
   1088			/* no error, but return the number of bytes of
   1089			 * underrun
   1090			 */
   1091			pr_warn("TMF task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
   1092				SAS_ADDR(device->sas_addr),
   1093				task->task_status.resp,
   1094				task->task_status.stat);
   1095			res = task->task_status.residual;
   1096			break;
   1097		}
   1098
   1099		if (task->task_status.resp == SAS_TASK_COMPLETE &&
   1100		    task->task_status.stat == SAS_DATA_OVERRUN) {
   1101			pr_warn("TMF task blocked task error %016llx\n",
   1102				SAS_ADDR(device->sas_addr));
   1103			res = -EMSGSIZE;
   1104			break;
   1105		}
   1106
   1107		if (task->task_status.resp == SAS_TASK_COMPLETE &&
   1108		    task->task_status.stat == SAS_OPEN_REJECT) {
   1109			pr_warn("TMF task open reject failed  %016llx\n",
   1110				SAS_ADDR(device->sas_addr));
   1111			res = -EIO;
   1112		} else {
   1113			pr_warn("TMF task to dev %016llx resp: 0x%x status 0x%x\n",
   1114				SAS_ADDR(device->sas_addr),
   1115				task->task_status.resp,
   1116				task->task_status.stat);
   1117		}
   1118		sas_free_task(task);
   1119		task = NULL;
   1120	}
   1121
   1122	if (retry == TASK_RETRY)
   1123		pr_warn("executing TMF for %016llx failed after %d attempts!\n",
   1124			SAS_ADDR(device->sas_addr), TASK_RETRY);
   1125	sas_free_task(task);
   1126
   1127	return res;
   1128}
   1129
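        /*
         * SSP wrapper around sas_execute_tmf(): reject non-SSP targets with
         * TMF_RESP_FUNC_ESUPP and address the TMF at the given LUN.
         */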
   1130static int sas_execute_ssp_tmf(struct domain_device *device, u8 *lun,
   1131			       struct sas_tmf_task *tmf)
   1132{
   1133	struct sas_ssp_task ssp_task;
   1134
   1135	if (!(device->tproto & SAS_PROTOCOL_SSP))
   1136		return TMF_RESP_FUNC_ESUPP;
   1137
   1138	memcpy(ssp_task.LUN, lun, 8);
   1139
   1140	return sas_execute_tmf(device, &ssp_task, sizeof(ssp_task), -1, tmf);
   1141}
   1142
   1143int sas_abort_task_set(struct domain_device *dev, u8 *lun)
   1144{
   1145	struct sas_tmf_task tmf_task = {
   1146		.tmf = TMF_ABORT_TASK_SET,
   1147	};
   1148
   1149	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
   1150}
   1151EXPORT_SYMBOL_GPL(sas_abort_task_set);
   1152
   1153int sas_clear_task_set(struct domain_device *dev, u8 *lun)
   1154{
   1155	struct sas_tmf_task tmf_task = {
   1156		.tmf = TMF_CLEAR_TASK_SET,
   1157	};
   1158
   1159	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
   1160}
   1161EXPORT_SYMBOL_GPL(sas_clear_task_set);
   1162
   1163int sas_lu_reset(struct domain_device *dev, u8 *lun)
   1164{
   1165	struct sas_tmf_task tmf_task = {
   1166		.tmf = TMF_LU_RESET,
   1167	};
   1168
   1169	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
   1170}
   1171EXPORT_SYMBOL_GPL(sas_lu_reset);
   1172
   1173int sas_query_task(struct sas_task *task, u16 tag)
   1174{
   1175	struct sas_tmf_task tmf_task = {
   1176		.tmf = TMF_QUERY_TASK,
   1177		.tag_of_task_to_be_managed = tag,
   1178	};
   1179	struct scsi_cmnd *cmnd = task->uldd_task;
   1180	struct domain_device *dev = task->dev;
   1181	struct scsi_lun lun;
   1182
   1183	int_to_scsilun(cmnd->device->lun, &lun);
   1184
   1185	return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
   1186}
   1187EXPORT_SYMBOL_GPL(sas_query_task);
   1188
   1189int sas_abort_task(struct sas_task *task, u16 tag)
   1190{
   1191	struct sas_tmf_task tmf_task = {
   1192		.tmf = TMF_ABORT_TASK,
   1193		.tag_of_task_to_be_managed = tag,
   1194	};
   1195	struct scsi_cmnd *cmnd = task->uldd_task;
   1196	struct domain_device *dev = task->dev;
   1197	struct scsi_lun lun;
   1198
   1199	int_to_scsilun(cmnd->device->lun, &lun);
   1200
   1201	return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
   1202}
   1203EXPORT_SYMBOL_GPL(sas_abort_task);
   1204
   1205/*
   1206 * Tell an upper layer that it needs to initiate an abort for a given task.
   1207 * This should only ever be called by an LLDD.
   1208 */
   1209void sas_task_abort(struct sas_task *task)
   1210{
   1211	struct scsi_cmnd *sc = task->uldd_task;
   1212
   1213	/* Escape for libsas internal commands */
   1214	if (!sc) {
   1215		struct sas_task_slow *slow = task->slow_task;
   1216
   1217		if (!slow)
   1218			return;
   1219		if (!del_timer(&slow->timer))
   1220			return;
   1221		slow->timer.function(&slow->timer);
   1222		return;
   1223	}
   1224
   1225	if (dev_is_sata(task->dev))
   1226		sas_ata_task_abort(task);
   1227	else
   1228		blk_abort_request(scsi_cmd_to_rq(sc));
   1229}
   1230EXPORT_SYMBOL_GPL(sas_task_abort);
   1231
   1232int sas_slave_alloc(struct scsi_device *sdev)
   1233{
   1234	if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
   1235		return -ENXIO;
   1236
   1237	return 0;
   1238}
   1239EXPORT_SYMBOL_GPL(sas_slave_alloc);
   1240
   1241void sas_target_destroy(struct scsi_target *starget)
   1242{
   1243	struct domain_device *found_dev = starget->hostdata;
   1244
   1245	if (!found_dev)
   1246		return;
   1247
   1248	starget->hostdata = NULL;
   1249	sas_put_device(found_dev);
   1250}
   1251EXPORT_SYMBOL_GPL(sas_target_destroy);
   1252
   1253#define SAS_STRING_ADDR_SIZE	16
   1254
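        /*
         * Read the "sas_addr" firmware blob for this host and hex-decode it
         * into @addr; the blob must be at least SAS_STRING_ADDR_SIZE
         * characters long.
         */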
   1255int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
   1256{
   1257	int res;
   1258	const struct firmware *fw;
   1259
   1260	res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
   1261	if (res)
   1262		return res;
   1263
   1264	if (fw->size < SAS_STRING_ADDR_SIZE) {
   1265		res = -ENODEV;
   1266		goto out;
   1267	}
   1268
   1269	res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
   1270	if (res)
   1271		goto out;
   1272
   1273out:
   1274	release_firmware(fw);
   1275	return res;
   1276}
   1277EXPORT_SYMBOL_GPL(sas_request_addr);
   1278