cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hisi_sas_main.c (66582B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);

struct hisi_sas_internal_abort_data {
	bool rst_ha_timeout; /* reset the HA for timeout */
};

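/*
 * Classify a host-to-device FIS by its ATA command (and, for SET MAX,
 * by the features field) into the SATA protocol type used when building
 * the command header: FPDMA (NCQ), PIO, DMA or non-data.
 */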
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

void hisi_sas_sata_done(struct sas_task *task,
			    struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
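/*
 * Each supported rate occupies two bits starting from 1.5 Gbps,
 * e.g. max = SAS_LINK_RATE_6_0_GBPS sets bits 0, 2 and 4, giving
 * a programmed mask of 0b010101.
 */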
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	__clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	if (hisi_hba->hw->slot_index_alloc ||
	    slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
		spin_lock(&hisi_hba->lock);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock(&hisi_hba->lock);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	__set_bit(slot_idx, bitmap);
}

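/*
 * IPTT allocation: commands carrying a scsi_cmnd reuse the block
 * layer's request tag directly; internal commands (TMFs, aborts) draw
 * a reserved tag (>= HISI_SAS_UNRESERVED_IPTT) from the bitmap under
 * hisi_hba->lock.
 */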
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct scsi_cmnd *scsi_cmnd)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;

	if (scsi_cmnd)
		return scsi_cmd_to_rq(scsi_cmnd)->tag;

	spin_lock(&hisi_hba->lock);
	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap,
				hisi_hba->slot_index_count,
				HISI_SAS_UNRESERVED_IPTT);
		if (index >= hisi_hba->slot_index_count) {
			spin_unlock(&hisi_hba->lock);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock(&hisi_hba->lock);

	return index;
}

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	int device_id = slot->device_id;
	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto)) {
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
			if (slot->n_elem_dif) {
				struct sas_ssp_task *ssp_task = &task->ssp_task;
				struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     task->data_dir);
			}
		}
	}

	spin_lock(&sas_dev->lock);
	list_del_init(&slot->entry);
	spin_unlock(&sas_dev->lock);

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot);
}

static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem,
			       int n_elem_req)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			if (n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
		}
	}
}

static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem,
			    int *n_elem_req)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						 1, DMA_TO_DEVICE);
			if (!*n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
			   *n_elem_req);
prep_out:
	return rc;
}

static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
				   struct sas_task *task, int n_elem_dif)
{
	struct device *dev = hisi_hba->dev;

	if (n_elem_dif) {
		struct sas_ssp_task *ssp_task = &task->ssp_task;
		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     task->data_dir);
	}
}

static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
				int *n_elem_dif, struct sas_task *task)
{
	struct device *dev = hisi_hba->dev;
	struct sas_ssp_task *ssp_task;
	struct scsi_cmnd *scsi_cmnd;
	int rc;

	if (task->num_scatter) {
		ssp_task = &task->ssp_task;
		scsi_cmnd = ssp_task->cmd;

		if (scsi_prot_sg_count(scsi_cmnd)) {
			*n_elem_dif = dma_map_sg(dev,
						 scsi_prot_sglist(scsi_cmnd),
						 scsi_prot_sg_count(scsi_cmnd),
						 task->data_dir);

			if (!*n_elem_dif)
				return -ENOMEM;

			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
					*n_elem_dif);
				rc = -EINVAL;
				goto err_out_dif_dma_unmap;
			}
		}
	}

	return 0;

err_out_dif_dma_unmap:
	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
	return rc;
}

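/*
 * Reserve the next delivery-queue slot under dq->lock, link the slot
 * into the device's list, build the command header for the protocol,
 * then publish slot->ready behind a write barrier before telling the
 * hardware to start delivery.
 */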
static
void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot,
			   struct hisi_sas_dq *dq,
			   struct hisi_sas_device *sas_dev)
{
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue;
	struct sas_task *task = slot->task;
	int wr_q_index;

	spin_lock(&dq->lock);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock(&dq->lock);
	spin_lock(&sas_dev->lock);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock(&sas_dev->lock);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];

	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_STP_ALL:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_INTERNAL_ABORT:
		hisi_sas_task_prep_abort(hisi_hba, slot);
		break;
	default:
		return;
	}

	/* Make slot memories observable before marking as ready */
	smp_wmb();
	WRITE_ONCE(slot->ready, 1);

	spin_lock(&dq->lock);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock(&dq->lock);
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	bool internal_abort = sas_is_internal_abort(task);
	struct scsi_cmnd *scmd = NULL;
	struct hisi_sas_dq *dq = NULL;
	struct hisi_sas_port *port;
	struct hisi_hba *hisi_hba;
	struct hisi_sas_slot *slot;
	struct device *dev;
	int rc;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV && !internal_abort)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
	case SAS_PROTOCOL_SMP:
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_STP_ALL:
		if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
			if (!gfpflags_allow_blocking(gfp_flags))
				return -EINVAL;

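			/*
			 * Empty down()/up() pair: block here until an
			 * in-flight controller reset releases the host
			 * semaphore, then carry on immediately.
			 */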
			down(&hisi_hba->sem);
			up(&hisi_hba->sem);
		}

		if (DEV_IS_GONE(sas_dev)) {
			if (sas_dev)
				dev_info(dev, "task prep: device %d not ready\n",
					 sas_dev->device_id);
			else
				dev_info(dev, "task prep: device %016llx not ready\n",
					 SAS_ADDR(device->sas_addr));

			return -ECOMM;
		}

		port = to_hisi_sas_port(sas_port);
		if (!port->port_attached) {
			dev_info(dev, "task prep: %s port%d not attach device\n",
				 dev_is_sata(device) ? "SATA/STP" : "SAS",
				 device->port->id);

			return -ECOMM;
		}

		if (task->uldd_task) {
			struct ata_queued_cmd *qc;

			if (dev_is_sata(device)) {
				qc = task->uldd_task;
				scmd = qc->scsicmd;
			} else {
				scmd = task->uldd_task;
			}
		}

		if (scmd) {
			unsigned int dq_index;
			u32 blk_tag;

			blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
			dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
			dq = &hisi_hba->dq[dq_index];
		} else {
			struct Scsi_Host *shost = hisi_hba->shost;
			struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
			int queue = qmap->mq_map[raw_smp_processor_id()];

			dq = &hisi_hba->dq[queue];
		}
		break;
	case SAS_PROTOCOL_INTERNAL_ABORT:
		if (!hisi_hba->hw->prep_abort)
			return TMF_RESP_FUNC_FAILED;

		if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
			return -EIO;

		hisi_hba = dev_to_hisi_hba(device);

		if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
			return -EINVAL;

		port = to_hisi_sas_port(sas_port);
		dq = &hisi_hba->dq[task->abort_task.qid];
		break;
	default:
		dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		return -EINVAL;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
			      &n_elem_req);
	if (rc < 0)
		goto prep_out;

	if (!sas_protocol_ata(task->task_proto)) {
		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
		if (rc < 0)
			goto err_out_dma_unmap;
	}

	if (!internal_abort && hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);

	if (rc < 0)
		goto err_out_dif_dma_unmap;

	slot = &hisi_hba->slot_info[rc];
	slot->n_elem = n_elem;
	slot->n_elem_dif = n_elem_dif;
	slot->task = task;
	slot->port = port;

	slot->tmf = task->tmf;
	slot->is_internal = !!task->tmf || internal_abort;

	/* protect task_prep and start_delivery sequence */
	hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev);

	return 0;

err_out_dif_dma_unmap:
	if (!sas_protocol_ata(task->task_proto))
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
				   n_elem_req);
prep_out:
	dev_err(dev, "task exec: failed[%d]!\n", rc);
	return rc;
}

static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
				 gfp_t gfp_flags)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	if (!phy->phy_attached)
		return;

	sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
}

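/*
 * Find a free device slot, scanning circularly from just past the last
 * allocated ID so that device IDs (and hence their default delivery
 * queues, dq = ID % queue_count) are spread across the queues.
 */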
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock(&hisi_hba->lock);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_INIT;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			spin_lock_init(&sas_dev->lock);
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

static void hisi_sas_tmf_aborted(struct sas_task *task)
{
	struct hisi_sas_slot *slot = task->lldd_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;

	if (slot) {
		struct hisi_sas_cq *cq =
			   &hisi_hba->cq[slot->dlvry_queue];
		/*
		 * sync irq to avoid free'ing task
		 * before using task in IO completion
		 */
		synchronize_irq(cq->irq_no);
		slot->task = NULL;
	}
}

#define HISI_SAS_DISK_RECOVER_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	int retry = HISI_SAS_DISK_RECOVER_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		while (retry-- > 0) {
			rc = sas_clear_task_set(device, lun.scsi_lun);
			if (rc == TMF_RESP_FUNC_COMPLETE) {
				hisi_sas_release_task(hisi_hba, device);
				break;
			}
		}
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		/*
		 * If an expander is swapped when a SATA disk is attached then
		 * we should issue a hard reset to clear previous affiliation
		 * of STP target port, see SPL (chapter 6.19.4).
		 *
		 * However we don't need to issue a hard reset here for these
		 * reasons:
		 * a. When probing the device, libsas/libata already issues a
		 * hard reset in sas_probe_sata() -> ata_sas_async_probe().
		 * Note that in hisi_sas_debug_I_T_nexus_reset() we take care
		 * to issue a hard reset by checking the dev status (== INIT).
		 * b. When resetting the controller, this is simply unnecessary.
		 */
		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}

int hisi_sas_slave_alloc(struct scsi_device *sdev)
{
	struct domain_device *ddev = sdev_to_domain_dev(sdev);
	struct hisi_sas_device *sas_dev = ddev->lldd_dev;
	int rc;

	rc = sas_slave_alloc(sdev);
	if (rc)
		return rc;

	rc = hisi_sas_init_device(ddev);
	if (rc)
		return rc;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		sas_dev->device_id, sas_dev->dev_type);

	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}

int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

static void hisi_sas_phyup_work_common(struct work_struct *work,
		enum hisi_sas_phy_event event)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[event]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	phy->wait_phyup_cnt = 0;
	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
	hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
}

static void hisi_sas_phyup_work(struct work_struct *work)
{
	hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static void hisi_sas_phyup_pm_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;

	hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
	pm_runtime_put_sync(dev);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
	[HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
				enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	int phy_no = phy->sas_phy.id;

	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}

#define HISI_SAS_WAIT_PHYUP_RETRIES	10

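/*
 * OOB detected but the phy is not yet attached: arm a timer so that a
 * link reset is triggered if phy-up does not follow within
 * HISI_SAS_WAIT_PHYUP_TIMEOUT, giving up after
 * HISI_SAS_WAIT_PHYUP_RETRIES attempts.
 */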
void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct device *dev = hisi_hba->dev;
	unsigned long flags;

	dev_dbg(dev, "phy%d OOB ready\n", phy_no);
	spin_lock_irqsave(&phy->lock, flags);
	if (phy->phy_attached) {
		spin_unlock_irqrestore(&phy->lock, flags);
		return;
	}

	if (!timer_pending(&phy->timer)) {
		if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
			phy->wait_phyup_cnt++;
			phy->timer.expires = jiffies +
					     HISI_SAS_WAIT_PHYUP_TIMEOUT;
			add_timer(&phy->timer);
			spin_unlock_irqrestore(&phy->lock, flags);
			return;
		}

		dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
			 phy_no, phy->wait_phyup_cnt);
		phy->wait_phyup_cnt = 0;
	}
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);

	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}

/* Wrapper to ensure we track hisi_sas_phy.enable properly */
void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *aphy = &phy->sas_phy;
	struct sas_phy *sphy = aphy->phy;
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	if (enable) {
		/* We may have been enabled already; if so, don't touch */
		if (!phy->enable)
			sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
	} else {
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	}
	phy->enable = enable;
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port;
	unsigned long flags;

	if (!sas_port)
		return;

	port = to_hisi_sas_port(sas_port);
	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
			struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

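/*
 * Issue an internal abort for the device on each completion queue,
 * skipping queues whose IRQ affinity mask contains no online CPU
 * (their completions could never be serviced).
 */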
static int
hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev,
				 bool rst_ha_timeout)
{
	struct hisi_sas_internal_abort_data data = { rst_ha_timeout };
	struct domain_device *device = sas_dev->sas_device;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	int i, rc;

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		const struct cpumask *mask = cq->irq_mask;

		if (mask && !cpumask_intersects(cpu_online_mask, mask))
			continue;
		rc = sas_execute_internal_abort_dev(device, i, &data);
		if (rc)
			return rc;
	}

	return 0;
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int ret = 0;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	down(&hisi_hba->sem);
	if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort_dev(sas_dev, true);

		hisi_sas_dereg_device(hisi_hba, device);

		ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);

	/* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */
	if (!ret)
		sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->sas_device = NULL;
	up(&hisi_hba->sem);
}

static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
			struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return -EINVAL;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);

	return 0;
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct hisi_sas_phy *phy = container_of(sas_phy,
			struct hisi_sas_phy, sas_phy);
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	DECLARE_COMPLETION_ONSTACK(completion);
	int phy_no = sas_phy->id;
	u8 sts = phy->phy_attached;
	int ret = 0;

	down(&hisi_hba->sem);
	phy->reset_completion = &completion;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		msleep(100);
		hisi_sas_phy_enable(hisi_hba, phy_no, 1);
		break;

	case PHY_FUNC_DISABLE:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		goto out;

	case PHY_FUNC_SET_LINK_RATE:
		ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;

	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			goto out;
		}
		fallthrough;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (sts && !wait_for_completion_timeout(&completion,
		HISI_SAS_WAIT_PHYUP_TIMEOUT)) {
		dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
			 phy_no, func);
		if (phy->in_reset)
			ret = -ETIMEDOUT;
	}

out:
	phy->reset_completion = NULL;

	up(&hisi_hba->sem);
	return ret;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
		bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

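/*
 * ATA software reset: send a register FIS with SRST set and then one
 * with SRST cleared on every link; on success, complete any tasks
 * still outstanding against the device.
 */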
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = sas_execute_ata_cmd(device, fis, -1);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = sas_execute_ata_cmd(device, fis, -1);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk %016llx de-reset failed\n",
					SAS_ADDR(device->sas_addr));
		}
	} else {
		dev_err(dev, "ata disk %016llx reset failed\n",
			SAS_ADDR(device->sas_addr));
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

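/*
 * After a controller reset the hardware may hand out new port IDs:
 * rebind each registered device's port by matching one of its port's
 * phys against the current phy-up state, refresh the linkrate of
 * directly attached devices, and reprogram the ITCT.
 */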
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		spin_lock(&sas_port->phy_list_lock);
		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}
		spin_unlock(&sas_port->phy_list_lock);

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

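/*
 * Compare the phy state against the pre-reset snapshot: for phys still
 * up behind an expander, trigger a BROADCAST event so libsas
 * rediscovers the topology; for phys that went down, report phy-down.
 */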
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = _sas_port != sas_port;

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (dev_is_expander(dev->dev_type))
					sas_notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD,
							GFP_KERNEL);
			}
		} else {
			hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL);
		}
	}
}

static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}

static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	struct ata_link *link;
	u8 fis[20] = {0};
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		if (!(sas_port->phy_mask & BIT(i)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = sas_execute_ata_cmd(device, fis, i);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					i, rc);
				break;
			}
		}
	}
}

static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !dev_is_expander(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}

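/*
 * Controller reset is bracketed by prepare/done: prepare snapshots the
 * phy state, blocks new requests, waits out in-flight commands and
 * sets REJECT_CMD; done re-initialises the phys, refreshes port IDs,
 * re-initialises the devices and rescans the topology against the
 * saved phy state.
 */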
void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
{
	if (!hisi_hba->hw->soft_reset)
		return -1;

	down(&hisi_hba->sem);
	if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
		up(&hisi_hba->sem);
		return -1;
	}

	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
		hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);

	return 0;
}

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
	dev_info(dev, "controller reset complete\n");

	return 0;
}

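/*
 * Abort strategy depends on protocol: SSP issues an ABORT TASK TMF
 * plus an internal abort of the single slot; SATA/STP aborts all IO on
 * the device and soft-resets the disk; SMP internally aborts just the
 * slot.
 */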
   1541static int hisi_sas_abort_task(struct sas_task *task)
   1542{
   1543	struct hisi_sas_internal_abort_data internal_abort_data = { false };
   1544	struct domain_device *device = task->dev;
   1545	struct hisi_sas_device *sas_dev = device->lldd_dev;
   1546	struct hisi_hba *hisi_hba;
   1547	struct device *dev;
   1548	int rc = TMF_RESP_FUNC_FAILED;
   1549	unsigned long flags;
   1550
   1551	if (!sas_dev)
   1552		return TMF_RESP_FUNC_FAILED;
   1553
   1554	hisi_hba = dev_to_hisi_hba(task->dev);
   1555	dev = hisi_hba->dev;
   1556
   1557	spin_lock_irqsave(&task->task_state_lock, flags);
   1558	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
   1559		struct hisi_sas_slot *slot = task->lldd_task;
   1560		struct hisi_sas_cq *cq;
   1561
   1562		if (slot) {
   1563			/*
   1564			 * sync irq to avoid free'ing task
   1565			 * before using task in IO completion
   1566			 */
   1567			cq = &hisi_hba->cq[slot->dlvry_queue];
   1568			synchronize_irq(cq->irq_no);
   1569		}
   1570		spin_unlock_irqrestore(&task->task_state_lock, flags);
   1571		rc = TMF_RESP_FUNC_COMPLETE;
   1572		goto out;
   1573	}
   1574	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
   1575	spin_unlock_irqrestore(&task->task_state_lock, flags);
   1576
   1577	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
   1578		struct hisi_sas_slot *slot = task->lldd_task;
   1579		u16 tag = slot->idx;
   1580		int rc2;
   1581
   1582		rc = sas_abort_task(task, tag);
   1583		rc2 = sas_execute_internal_abort_single(device, tag,
   1584				slot->dlvry_queue, &internal_abort_data);
   1585		if (rc2 < 0) {
   1586			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
   1587			return TMF_RESP_FUNC_FAILED;
   1588		}
   1589
   1590		/*
   1591		 * If the TMF finds that the IO is not in the device and also
   1592		 * the internal abort does not succeed, then it is safe to
   1593		 * free the slot.
   1594		 * Note: if the internal abort succeeds then the slot
   1595		 * will have already been completed
   1596		 */
   1597		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
   1598			if (task->lldd_task)
   1599				hisi_sas_do_release_task(hisi_hba, task, slot);
   1600		}
   1601	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
   1602		task->task_proto & SAS_PROTOCOL_STP) {
   1603		if (task->dev->dev_type == SAS_SATA_DEV) {
   1604			rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
   1605			if (rc < 0) {
   1606				dev_err(dev, "abort task: internal abort failed\n");
   1607				goto out;
   1608			}
   1609			hisi_sas_dereg_device(hisi_hba, device);
   1610			rc = hisi_sas_softreset_ata_disk(device);
   1611		}
   1612	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
   1613		/* SMP */
   1614		struct hisi_sas_slot *slot = task->lldd_task;
   1615		u32 tag = slot->idx;
   1616		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
   1617
   1618		rc = sas_execute_internal_abort_single(device,
   1619						       tag, slot->dlvry_queue,
   1620						       &internal_abort_data);
   1621		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
   1622					task->lldd_task) {
   1623			/*
   1624			 * sync irq to avoid free'ing task
   1625			 * before using task in IO completion
   1626			 */
   1627			synchronize_irq(cq->irq_no);
   1628			slot->task = NULL;
   1629		}
   1630	}
   1631
   1632out:
   1633	if (rc != TMF_RESP_FUNC_COMPLETE)
   1634		dev_notice(dev, "abort task: rc=%d\n", rc);
   1635	return rc;
   1636}
   1637
   1638static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
   1639{
   1640	struct hisi_sas_device *sas_dev = device->lldd_dev;
   1641	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
   1642	struct device *dev = hisi_hba->dev;
   1643	int rc;
   1644
   1645	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
   1646	if (rc < 0) {
   1647		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
   1648		return TMF_RESP_FUNC_FAILED;
   1649	}
   1650	hisi_sas_dereg_device(hisi_hba, device);
   1651
   1652	rc = sas_abort_task_set(device, lun);
   1653	if (rc == TMF_RESP_FUNC_COMPLETE)
   1654		hisi_sas_release_task(hisi_hba, device);
   1655
   1656	return rc;
   1657}
   1658
   1659static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
   1660{
   1661	struct sas_phy *local_phy = sas_get_local_phy(device);
   1662	struct hisi_sas_device *sas_dev = device->lldd_dev;
   1663	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
   1664	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
   1665	int rc, reset_type;
   1666
   1667	if (!local_phy->enabled) {
   1668		sas_put_local_phy(local_phy);
   1669		return -ENODEV;
   1670	}
   1671
   1672	if (scsi_is_sas_phy_local(local_phy)) {
   1673		struct asd_sas_phy *sas_phy =
   1674			sas_ha->sas_phy[local_phy->number];
   1675		struct hisi_sas_phy *phy =
   1676			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
   1677		unsigned long flags;
   1678
   1679		spin_lock_irqsave(&phy->lock, flags);
   1680		phy->in_reset = 1;
   1681		spin_unlock_irqrestore(&phy->lock, flags);
   1682	}
   1683
   1684	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
   1685		      !dev_is_sata(device)) ? true : false;
   1686
   1687	rc = sas_phy_reset(local_phy, reset_type);
   1688	sas_put_local_phy(local_phy);
   1689
   1690	if (scsi_is_sas_phy_local(local_phy)) {
   1691		struct asd_sas_phy *sas_phy =
   1692			sas_ha->sas_phy[local_phy->number];
   1693		struct hisi_sas_phy *phy =
   1694			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
   1695		unsigned long flags;
   1696
   1697		spin_lock_irqsave(&phy->lock, flags);
   1698		phy->in_reset = 0;
   1699		spin_unlock_irqrestore(&phy->lock, flags);
   1700
   1701		/* report PHY down if timed out */
   1702		if (rc == -ETIMEDOUT)
   1703			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
   1704		return rc;
   1705	}
   1706
   1707	if (rc)
   1708		return rc;
   1709
   1710	/* Remote phy */
   1711	if (dev_is_sata(device)) {
   1712		rc = sas_ata_wait_after_reset(device,
   1713					HISI_SAS_WAIT_PHYUP_TIMEOUT);
   1714	} else {
   1715		msleep(2000);
   1716	}
   1717
   1718	return rc;
   1719}
   1720
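       	/**
       	 * hisi_sas_I_T_nexus_reset() - libsas I_T nexus reset handler
       	 * @device: domain device to be reset
       	 *
       	 * Abort and deregister outstanding IO, then reset the nexus.  A SATA
       	 * disk additionally gets a softreset; if that fails, the local phy is
       	 * disabled and -ENODEV returned so the disk is treated as lost.  All
       	 * slots of the device are released on success or on -ENODEV.
       	 */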
   1721static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
   1722{
   1723	struct hisi_sas_device *sas_dev = device->lldd_dev;
   1724	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
   1725	struct device *dev = hisi_hba->dev;
   1726	int rc;
   1727
   1728	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
   1729	if (rc < 0) {
   1730		dev_err(dev, "I_T nexus reset: internal abort failed (%d)\n", rc);
   1731		return TMF_RESP_FUNC_FAILED;
   1732	}
   1733	hisi_sas_dereg_device(hisi_hba, device);
   1734
   1735	rc = hisi_sas_debug_I_T_nexus_reset(device);
   1736	if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
   1737		struct sas_phy *local_phy;
   1738
   1739		rc = hisi_sas_softreset_ata_disk(device);
   1740		switch (rc) {
   1741		case -ECOMM:
   1742			rc = -ENODEV;
   1743			break;
   1744		case TMF_RESP_FUNC_FAILED:
   1745		case -EMSGSIZE:
   1746		case -EIO:
   1747			local_phy = sas_get_local_phy(device);
   1748			rc = sas_phy_enable(local_phy, 0);
   1749			if (!rc) {
   1750				local_phy->enabled = 0;
   1751				dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset failure\n",
   1752					SAS_ADDR(device->sas_addr));
   1753				rc = -ENODEV;
   1754			}
   1755			sas_put_local_phy(local_phy);
   1756			break;
   1757		default:
   1758			break;
   1759		}
   1760	}
   1761
   1762	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
   1763		hisi_sas_release_task(hisi_hba, device);
   1764
   1765	return rc;
   1766}
   1767
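       	/**
       	 * hisi_sas_lu_reset() - libsas logical unit reset handler
       	 * @device: domain device the LU belongs to
       	 * @lun: LUN to be reset
       	 *
       	 * Clear outstanding internal IO first.  SATA devices get a hard phy
       	 * reset instead, as there is no LOGICAL UNIT RESET TMF outside of SSP.
       	 */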
   1768static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
   1769{
   1770	struct hisi_sas_device *sas_dev = device->lldd_dev;
   1771	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
   1772	struct device *dev = hisi_hba->dev;
   1773	int rc;
   1774
   1775	/* Clear internal IOs, then issue the LU reset */
   1776	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
   1777	if (rc < 0) {
   1778		dev_err(dev, "lu_reset: internal abort failed\n");
   1779		goto out;
   1780	}
   1781	hisi_sas_dereg_device(hisi_hba, device);
   1782
   1783	if (dev_is_sata(device)) {
   1784		struct sas_phy *phy;
   1785
   1786		phy = sas_get_local_phy(device);
   1787
   1788		rc = sas_phy_reset(phy, true);
   1789
   1790		if (rc == 0)
   1791			hisi_sas_release_task(hisi_hba, device);
   1792		sas_put_local_phy(phy);
   1793	} else {
   1794		rc = sas_lu_reset(device, lun);
   1795		if (rc == TMF_RESP_FUNC_COMPLETE)
   1796			hisi_sas_release_task(hisi_hba, device);
   1797	}
   1798out:
   1799	if (rc != TMF_RESP_FUNC_COMPLETE)
   1800		dev_err(dev, "lu_reset: for device[%d] rc=%d\n",
   1801			sas_dev->device_id, rc);
   1802	return rc;
   1803}
   1804
   1805static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie)
   1806{
   1807	struct domain_device *device = data;
   1808	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
   1809	int rc;
   1810
   1811	rc = hisi_sas_debug_I_T_nexus_reset(device);
   1812	if (rc != TMF_RESP_FUNC_COMPLETE)
   1813		dev_info(hisi_hba->dev, "I_T nexus reset failed for dev %016llx rc=%d\n",
   1814			 SAS_ADDR(device->sas_addr), rc);
   1815}
   1816
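       	/**
       	 * hisi_sas_clear_nexus_ha() - reset the controller and every I_T nexus
       	 * @sas_ha: SAS HA to be reset
       	 *
       	 * Run a synchronous controller reset, then reset the nexus of every
       	 * registered end device (expanders excluded) in parallel through an
       	 * async domain before releasing all outstanding slots.
       	 */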
   1817static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
   1818{
   1819	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
   1820	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
   1821	ASYNC_DOMAIN_EXCLUSIVE(async);
   1822	int i;
   1823
   1824	queue_work(hisi_hba->wq, &r.work);
   1825	wait_for_completion(r.completion);
   1826	if (!r.done)
   1827		return TMF_RESP_FUNC_FAILED;
   1828
   1829	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
   1830		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
   1831		struct domain_device *device = sas_dev->sas_device;
   1832
   1833		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
   1834		    dev_is_expander(device->dev_type))
   1835			continue;
   1836
   1837		async_schedule_domain(hisi_sas_async_I_T_nexus_reset,
   1838				      device, &async);
   1839	}
   1840
   1841	async_synchronize_full_domain(&async);
   1842	hisi_sas_release_tasks(hisi_hba);
   1843
   1844	return TMF_RESP_FUNC_COMPLETE;
   1845}
   1846
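       	/**
       	 * hisi_sas_query_task() - libsas QUERY TASK handler
       	 * @task: task to be queried
       	 *
       	 * Only SSP tasks carry a queryable tag; any unexpected TMF response
       	 * is mapped to TMF_RESP_FUNC_FAILED.
       	 */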
   1847static int hisi_sas_query_task(struct sas_task *task)
   1848{
   1849	int rc = TMF_RESP_FUNC_FAILED;
   1850
   1851	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
   1852		struct hisi_sas_slot *slot = task->lldd_task;
   1853		u32 tag = slot->idx;
   1854
   1855		rc = sas_query_task(task, tag);
   1856		switch (rc) {
   1857		/* The task is still in the LUN, so release it */
   1858		case TMF_RESP_FUNC_SUCC:
   1859		/* The task is not in the LUN or it failed; reset the phy */
   1860		case TMF_RESP_FUNC_FAILED:
   1861		case TMF_RESP_FUNC_COMPLETE:
   1862			break;
   1863		default:
   1864			rc = TMF_RESP_FUNC_FAILED;
   1865			break;
   1866		}
   1867	}
   1868	return rc;
   1869}
   1870
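       	/**
       	 * hisi_sas_internal_abort_timeout() - handle an internal abort timeout
       	 * @task: internal abort task that timed out
       	 * @data: hisi_sas_internal_abort_data passed when the abort was queued
       	 *
       	 * Trigger a debugfs dump if enabled.  If the abort really did not
       	 * complete, flag the HW as faulty, detach the slot from the task and,
       	 * when @data asks for it, queue a controller reset.
       	 *
       	 * Return: true if the abort did not complete, false if it completed
       	 * despite the timeout.
       	 */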
   1871static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
   1872					    void *data)
   1873{
   1874	struct domain_device *device = task->dev;
   1875	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
   1876	struct hisi_sas_internal_abort_data *timeout = data;
   1877
   1878	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
   1879		queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
   1880
   1881	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
   1882		pr_err("Internal abort: timeout %016llx\n",
   1883		       SAS_ADDR(device->sas_addr));
   1884	} else {
   1885		struct hisi_sas_slot *slot = task->lldd_task;
   1886
   1887		set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
   1888
   1889		if (slot) {
   1890			struct hisi_sas_cq *cq =
   1891				&hisi_hba->cq[slot->dlvry_queue];
   1892			/*
   1893			 * sync the irq so the task cannot be freed
   1894			 * while the IO completion path is still using it
   1895			 */
   1896			synchronize_irq(cq->irq_no);
   1897			slot->task = NULL;
   1898		}
   1899
   1900		if (timeout->rst_ha_timeout) {
   1901			pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n",
   1902			       SAS_ADDR(device->sas_addr));
   1903			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
   1904		} else {
   1905			pr_err("Internal abort: timeout and not done %016llx.\n",
   1906			       SAS_ADDR(device->sas_addr));
   1907		}
   1908
   1909		return true;
   1910	}
   1911
   1912	return false;
   1913}
   1914
   1915static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
   1916{
   1917	hisi_sas_port_notify_formed(sas_phy);
   1918}
   1919
   1920static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
   1921			u8 reg_index, u8 reg_count, u8 *write_data)
   1922{
   1923	struct hisi_hba *hisi_hba = sha->lldd_ha;
   1924
   1925	if (!hisi_hba->hw->write_gpio)
   1926		return -EOPNOTSUPP;
   1927
   1928	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
   1929				reg_index, reg_count, write_data);
   1930}
   1931
   1932static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
   1933{
   1934	struct asd_sas_phy *sas_phy = &phy->sas_phy;
   1935	struct sas_phy *sphy = sas_phy->phy;
   1936	unsigned long flags;
   1937
   1938	phy->phy_attached = 0;
   1939	phy->phy_type = 0;
   1940	phy->port = NULL;
   1941
   1942	spin_lock_irqsave(&phy->lock, flags);
   1943	if (phy->enable)
   1944		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
   1945	else
   1946		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
   1947	spin_unlock_irqrestore(&phy->lock, flags);
   1948}
   1949
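       	/**
       	 * hisi_sas_phy_down() - handle a phy down event
       	 * @hisi_hba: HBA the phy belongs to
       	 * @phy_no: index of the phy that went down
       	 * @rdy: non-zero if the phy is down but ready
       	 * @gfp_flags: allocation context for the libsas event
       	 *
       	 * A down-but-ready phy is reported to libsas as newly attached.
       	 * Otherwise loss of signal is reported and the port torn down, unless
       	 * the event is flutter from an ongoing reset, which is ignored.
       	 */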
   1950void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
   1951		       gfp_t gfp_flags)
   1952{
   1953	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
   1954	struct asd_sas_phy *sas_phy = &phy->sas_phy;
   1955	struct device *dev = hisi_hba->dev;
   1956
   1957	if (rdy) {
   1958		/* Phy down but ready */
   1959		hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags);
   1960		hisi_sas_port_notify_formed(sas_phy);
   1961	} else {
   1962		struct hisi_sas_port *port = phy->port;
   1963
   1964		if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
   1965		    phy->in_reset) {
   1966			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
   1967			return;
   1968		}
   1969		/* Phy down and not ready */
   1970		sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags);
   1971		sas_phy_disconnected(sas_phy);
   1972
   1973		if (port) {
   1974			if (phy->phy_type & PORT_TYPE_SAS) {
   1975				int port_id = port->id;
   1976
   1977				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
   1978								       port_id))
   1979					port->port_attached = 0;
   1980			} else if (phy->phy_type & PORT_TYPE_SATA)
   1981				port->port_attached = 0;
   1982		}
   1983		hisi_sas_phy_disconnected(phy);
   1984	}
   1985}
   1986EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
   1987
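       	/**
       	 * hisi_sas_sync_irqs() - wait for in-flight completion-queue interrupts
       	 * @hisi_hba: HBA whose CQ interrupt handlers are synchronised against
       	 */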
   1988void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
   1989{
   1990	int i;
   1991
   1992	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
   1993		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
   1994
   1995		synchronize_irq(cq->irq_no);
   1996	}
   1997}
   1998EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs);
   1999
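       	/**
       	 * hisi_sas_host_reset() - SCSI host reset handler
       	 * @shost: Scsi_Host to be reset
       	 * @reset_type: only SCSI_ADAPTER_RESET is supported
       	 *
       	 * The actual controller reset runs asynchronously from the rst_work
       	 * item; this returns as soon as the work is queued.
       	 */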
   2000int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
   2001{
   2002	struct hisi_hba *hisi_hba = shost_priv(shost);
   2003
   2004	if (reset_type != SCSI_ADAPTER_RESET)
   2005		return -EOPNOTSUPP;
   2006
   2007	queue_work(hisi_hba->wq, &hisi_hba->rst_work);
   2008
   2009	return 0;
   2010}
   2011EXPORT_SYMBOL_GPL(hisi_sas_host_reset);
   2012
   2013struct scsi_transport_template *hisi_sas_stt;
   2014EXPORT_SYMBOL_GPL(hisi_sas_stt);
   2015
   2016static struct sas_domain_function_template hisi_sas_transport_ops = {
   2017	.lldd_dev_found		= hisi_sas_dev_found,
   2018	.lldd_dev_gone		= hisi_sas_dev_gone,
   2019	.lldd_execute_task	= hisi_sas_queue_command,
   2020	.lldd_control_phy	= hisi_sas_control_phy,
   2021	.lldd_abort_task	= hisi_sas_abort_task,
   2022	.lldd_abort_task_set	= hisi_sas_abort_task_set,
   2023	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
   2024	.lldd_lu_reset		= hisi_sas_lu_reset,
   2025	.lldd_query_task	= hisi_sas_query_task,
   2026	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
   2027	.lldd_port_formed	= hisi_sas_port_formed,
   2028	.lldd_write_gpio	= hisi_sas_write_gpio,
   2029	.lldd_tmf_aborted	= hisi_sas_tmf_aborted,
   2030	.lldd_abort_timeout	= hisi_sas_internal_abort_timeout,
   2031};
   2032
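       	/**
       	 * hisi_sas_init_mem() - zero the DMA memories shared with the HW
       	 * @hisi_hba: HBA being (re-)initialised
       	 *
       	 * Clear the command and completion headers, IOST, breakpoint and
       	 * initial FIS tables and rewind the queue read/write pointers, so the
       	 * same memories can be reused across a controller reset.
       	 */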
   2033void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
   2034{
   2035	int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
   2036	struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;
   2037
   2038	for (i = 0; i < hisi_hba->queue_count; i++) {
   2039		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
   2040		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
   2041		struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];
   2042
   2043		s = sizeof(struct hisi_sas_cmd_hdr);
   2044		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
   2045			memset(&cmd_hdr[j], 0, s);
   2046
   2047		dq->wr_point = 0;
   2048
   2049		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
   2050		memset(hisi_hba->complete_hdr[i], 0, s);
   2051		cq->rd_point = 0;
   2052	}
   2053
   2054	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
   2055	memset(hisi_hba->initial_fis, 0, s);
   2056
   2057	s = max_command_entries * sizeof(struct hisi_sas_iost);
   2058	memset(hisi_hba->iost, 0, s);
   2059
   2060	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
   2061	memset(hisi_hba->breakpoint, 0, s);
   2062
   2063	s = sizeof(struct hisi_sas_sata_breakpoint);
   2064	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
   2065		memset(&sata_breakpoint[j], 0, s);
   2066}
   2067EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
   2068
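       	/**
       	 * hisi_sas_alloc() - allocate the per-HBA queue, table and slot memories
       	 * @hisi_hba: HBA to allocate for
       	 *
       	 * Slot buffers are carved out of blocks sized as the lcm of the
       	 * rounded-up command count and slot buffer size (at least PAGE_SIZE).
       	 * All DMA and bitmap memory is devm-managed; the workqueue created
       	 * here is destroyed in hisi_sas_free().
       	 *
       	 * Return: 0 on success, -ENOMEM on any allocation failure.
       	 */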
   2069int hisi_sas_alloc(struct hisi_hba *hisi_hba)
   2070{
   2071	struct device *dev = hisi_hba->dev;
   2072	int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
   2073	int max_command_entries_ru, sz_slot_buf_ru;
   2074	int blk_cnt, slots_per_blk;
   2075
   2076	sema_init(&hisi_hba->sem, 1);
   2077	spin_lock_init(&hisi_hba->lock);
   2078	for (i = 0; i < hisi_hba->n_phy; i++) {
   2079		hisi_sas_phy_init(hisi_hba, i);
   2080		hisi_hba->port[i].port_attached = 0;
   2081		hisi_hba->port[i].id = -1;
   2082	}
   2083
   2084	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
   2085		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
   2086		hisi_hba->devices[i].device_id = i;
   2087		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
   2088	}
   2089
   2090	for (i = 0; i < hisi_hba->queue_count; i++) {
   2091		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
   2092		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
   2093
   2094		/* Completion queue structure */
   2095		cq->id = i;
   2096		cq->hisi_hba = hisi_hba;
   2097
   2098		/* Delivery queue structure */
   2099		spin_lock_init(&dq->lock);
   2100		INIT_LIST_HEAD(&dq->list);
   2101		dq->id = i;
   2102		dq->hisi_hba = hisi_hba;
   2103
   2104		/* Delivery queue */
   2105		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
   2106		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
   2107						&hisi_hba->cmd_hdr_dma[i],
   2108						GFP_KERNEL);
   2109		if (!hisi_hba->cmd_hdr[i])
   2110			goto err_out;
   2111
   2112		/* Completion queue */
   2113		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
   2114		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
   2115						&hisi_hba->complete_hdr_dma[i],
   2116						GFP_KERNEL);
   2117		if (!hisi_hba->complete_hdr[i])
   2118			goto err_out;
   2119	}
   2120
   2121	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
   2122	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
   2123					     GFP_KERNEL);
   2124	if (!hisi_hba->itct)
   2125		goto err_out;
   2126
   2127	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
   2128					   sizeof(struct hisi_sas_slot),
   2129					   GFP_KERNEL);
   2130	if (!hisi_hba->slot_info)
   2131		goto err_out;
   2132
   2133	/* round both up to a multiple of 64 so their lcm (the block size) stays small */
   2134	max_command_entries_ru = roundup(max_command_entries, 64);
   2135	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
   2136		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
   2137	else
   2138		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
   2139	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
   2140	s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
   2141	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
   2142	slots_per_blk = s / sz_slot_buf_ru;
   2143
   2144	for (i = 0; i < blk_cnt; i++) {
   2145		int slot_index = i * slots_per_blk;
   2146		dma_addr_t buf_dma;
   2147		void *buf;
   2148
   2149		buf = dmam_alloc_coherent(dev, s, &buf_dma,
   2150					  GFP_KERNEL);
   2151		if (!buf)
   2152			goto err_out;
   2153
   2154		for (j = 0; j < slots_per_blk; j++, slot_index++) {
   2155			struct hisi_sas_slot *slot;
   2156
   2157			slot = &hisi_hba->slot_info[slot_index];
   2158			slot->buf = buf;
   2159			slot->buf_dma = buf_dma;
   2160			slot->idx = slot_index;
   2161
   2162			buf += sz_slot_buf_ru;
   2163			buf_dma += sz_slot_buf_ru;
   2164		}
   2165	}
   2166
   2167	s = max_command_entries * sizeof(struct hisi_sas_iost);
   2168	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
   2169					     GFP_KERNEL);
   2170	if (!hisi_hba->iost)
   2171		goto err_out;
   2172
   2173	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
   2174	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
   2175						   &hisi_hba->breakpoint_dma,
   2176						   GFP_KERNEL);
   2177	if (!hisi_hba->breakpoint)
   2178		goto err_out;
   2179
   2180	s = hisi_hba->slot_index_count = max_command_entries;
   2181	hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL);
   2182	if (!hisi_hba->slot_index_tags)
   2183		goto err_out;
   2184
   2185	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
   2186	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
   2187						    &hisi_hba->initial_fis_dma,
   2188						    GFP_KERNEL);
   2189	if (!hisi_hba->initial_fis)
   2190		goto err_out;
   2191
   2192	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
   2193	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
   2194					&hisi_hba->sata_breakpoint_dma,
   2195					GFP_KERNEL);
   2196	if (!hisi_hba->sata_breakpoint)
   2197		goto err_out;
   2198
   2199	hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;
   2200
   2201	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
   2202	if (!hisi_hba->wq) {
   2203		dev_err(dev, "sas_alloc: failed to create workqueue\n");
   2204		goto err_out;
   2205	}
   2206
   2207	return 0;
   2208err_out:
   2209	return -ENOMEM;
   2210}
   2211EXPORT_SYMBOL_GPL(hisi_sas_alloc);
   2212
   2213void hisi_sas_free(struct hisi_hba *hisi_hba)
   2214{
   2215	int i;
   2216
   2217	for (i = 0; i < hisi_hba->n_phy; i++) {
   2218		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
   2219
   2220		del_timer_sync(&phy->timer);
   2221	}
   2222
   2223	if (hisi_hba->wq)
   2224		destroy_workqueue(hisi_hba->wq);
   2225}
   2226EXPORT_SYMBOL_GPL(hisi_sas_free);
   2227
   2228void hisi_sas_rst_work_handler(struct work_struct *work)
   2229{
   2230	struct hisi_hba *hisi_hba =
   2231		container_of(work, struct hisi_hba, rst_work);
   2232
   2233	if (hisi_sas_controller_prereset(hisi_hba))
   2234		return;
   2235
   2236	hisi_sas_controller_reset(hisi_hba);
   2237}
   2238EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
   2239
   2240void hisi_sas_sync_rst_work_handler(struct work_struct *work)
   2241{
   2242	struct hisi_sas_rst *rst =
   2243		container_of(work, struct hisi_sas_rst, work);
   2244
   2245	if (hisi_sas_controller_prereset(rst->hisi_hba))
   2246		goto rst_complete;
   2247
   2248	if (!hisi_sas_controller_reset(rst->hisi_hba))
   2249		rst->done = true;
   2250rst_complete:
   2251	complete(rst->completion);
   2252}
   2253EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
   2254
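       	/**
       	 * hisi_sas_get_fw_info() - read controller config from DT/ACPI properties
       	 * @hisi_hba: HBA whose sas_addr, phy and queue counts are filled in
       	 *
       	 * The sas-addr, phy-count and queue-count properties are mandatory.
       	 * The syscon and ctrl-*-reg properties are only needed for DT-based
       	 * platform devices; the reference clock is optional throughout.
       	 *
       	 * Return: 0 on success, -ENOENT if a required property is missing.
       	 */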
   2255int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
   2256{
   2257	struct device *dev = hisi_hba->dev;
   2258	struct platform_device *pdev = hisi_hba->platform_dev;
   2259	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
   2260	struct clk *refclk;
   2261
   2262	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
   2263					  SAS_ADDR_SIZE)) {
   2264		dev_err(dev, "could not get property sas-addr\n");
   2265		return -ENOENT;
   2266	}
   2267
   2268	if (np) {
   2269		/*
   2270		 * These properties are only required for a platform device-based
   2271		 * controller with DT firmware.
   2272		 */
   2273		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
   2274					"hisilicon,sas-syscon");
   2275		if (IS_ERR(hisi_hba->ctrl)) {
   2276			dev_err(dev, "could not get syscon\n");
   2277			return -ENOENT;
   2278		}
   2279
   2280		if (device_property_read_u32(dev, "ctrl-reset-reg",
   2281					     &hisi_hba->ctrl_reset_reg)) {
   2282			dev_err(dev, "could not get property ctrl-reset-reg\n");
   2283			return -ENOENT;
   2284		}
   2285
   2286		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
   2287					     &hisi_hba->ctrl_reset_sts_reg)) {
   2288			dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
   2289			return -ENOENT;
   2290		}
   2291
   2292		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
   2293					     &hisi_hba->ctrl_clock_ena_reg)) {
   2294			dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
   2295			return -ENOENT;
   2296		}
   2297	}
   2298
   2299	refclk = devm_clk_get(dev, NULL);
   2300	if (IS_ERR(refclk))
   2301		dev_dbg(dev, "no ref clk property\n");
   2302	else
   2303		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
   2304
   2305	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
   2306		dev_err(dev, "could not get property phy-count\n");
   2307		return -ENOENT;
   2308	}
   2309
   2310	if (device_property_read_u32(dev, "queue-count",
   2311				     &hisi_hba->queue_count)) {
   2312		dev_err(dev, "could not get property queue-count\n");
   2313		return -ENOENT;
   2314	}
   2315
   2316	return 0;
   2317}
   2318EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
   2319
   2320static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
   2321					      const struct hisi_sas_hw *hw)
   2322{
   2323	struct resource *res;
   2324	struct Scsi_Host *shost;
   2325	struct hisi_hba *hisi_hba;
   2326	struct device *dev = &pdev->dev;
   2327	int error;
   2328
   2329	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
   2330	if (!shost) {
   2331		dev_err(dev, "scsi host alloc failed\n");
   2332		return NULL;
   2333	}
   2334	hisi_hba = shost_priv(shost);
   2335
   2336	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
   2337	hisi_hba->hw = hw;
   2338	hisi_hba->dev = dev;
   2339	hisi_hba->platform_dev = pdev;
   2340	hisi_hba->shost = shost;
   2341	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
   2342
   2343	timer_setup(&hisi_hba->timer, NULL, 0);
   2344
   2345	if (hisi_sas_get_fw_info(hisi_hba) < 0)
   2346		goto err_out;
   2347
   2348	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
   2349	if (error) {
   2350		dev_err(dev, "No usable DMA addressing method\n");
   2351		goto err_out;
   2352	}
   2353
   2354	hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
   2355	if (IS_ERR(hisi_hba->regs))
   2356		goto err_out;
   2357
   2358	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
   2359	if (res) {
   2360		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
   2361		if (IS_ERR(hisi_hba->sgpio_regs))
   2362			goto err_out;
   2363	}
   2364
   2365	if (hisi_sas_alloc(hisi_hba)) {
   2366		hisi_sas_free(hisi_hba);
   2367		goto err_out;
   2368	}
   2369
   2370	return shost;
   2371err_out:
   2372	scsi_host_put(shost);
   2373	dev_err(dev, "shost alloc failed\n");
   2374	return NULL;
   2375}
   2376
   2377static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
   2378{
   2379	if (hisi_hba->hw->interrupt_preinit)
   2380		return hisi_hba->hw->interrupt_preinit(hisi_hba);
   2381	return 0;
   2382}
   2383
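       	/**
       	 * hisi_sas_probe() - common probe path for platform-device based HBAs
       	 * @pdev: platform device to probe
       	 * @hw: hw-revision specific operations and settings
       	 *
       	 * Allocate the shost and HBA memories, wire the phy/port arrays into
       	 * the sas_ha_struct, register host and HA, and initialise the hardware
       	 * before scanning.  can_queue depends on whether the HW revision
       	 * reserves tags via a private slot_index_alloc method.
       	 */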
   2384int hisi_sas_probe(struct platform_device *pdev,
   2385		   const struct hisi_sas_hw *hw)
   2386{
   2387	struct Scsi_Host *shost;
   2388	struct hisi_hba *hisi_hba;
   2389	struct device *dev = &pdev->dev;
   2390	struct asd_sas_phy **arr_phy;
   2391	struct asd_sas_port **arr_port;
   2392	struct sas_ha_struct *sha;
   2393	int rc, phy_nr, port_nr, i;
   2394
   2395	shost = hisi_sas_shost_alloc(pdev, hw);
   2396	if (!shost)
   2397		return -ENOMEM;
   2398
   2399	sha = SHOST_TO_SAS_HA(shost);
   2400	hisi_hba = shost_priv(shost);
   2401	platform_set_drvdata(pdev, sha);
   2402
   2403	phy_nr = port_nr = hisi_hba->n_phy;
   2404
   2405	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
   2406	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
   2407	if (!arr_phy || !arr_port) {
   2408		rc = -ENOMEM;
   2409		goto err_out_ha;
   2410	}
   2411
   2412	sha->sas_phy = arr_phy;
   2413	sha->sas_port = arr_port;
   2414	sha->lldd_ha = hisi_hba;
   2415
   2416	shost->transportt = hisi_sas_stt;
   2417	shost->max_id = HISI_SAS_MAX_DEVICES;
   2418	shost->max_lun = ~0;
   2419	shost->max_channel = 1;
   2420	shost->max_cmd_len = 16;
   2421	if (hisi_hba->hw->slot_index_alloc) {
   2422		shost->can_queue = HISI_SAS_MAX_COMMANDS;
   2423		shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
   2424	} else {
   2425		shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
   2426		shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
   2427	}
   2428
   2429	sha->sas_ha_name = DRV_NAME;
   2430	sha->dev = hisi_hba->dev;
   2431	sha->lldd_module = THIS_MODULE;
   2432	sha->sas_addr = &hisi_hba->sas_addr[0];
   2433	sha->num_phys = hisi_hba->n_phy;
   2434	sha->core.shost = hisi_hba->shost;
   2435
   2436	for (i = 0; i < hisi_hba->n_phy; i++) {
   2437		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
   2438		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
   2439	}
   2440
   2441	rc = hisi_sas_interrupt_preinit(hisi_hba);
   2442	if (rc)
   2443		goto err_out_ha;
   2444
   2445	rc = scsi_add_host(shost, &pdev->dev);
   2446	if (rc)
   2447		goto err_out_ha;
   2448
   2449	rc = sas_register_ha(sha);
   2450	if (rc)
   2451		goto err_out_register_ha;
   2452
   2453	rc = hisi_hba->hw->hw_init(hisi_hba);
   2454	if (rc)
   2455		goto err_out_hw_init;
   2456
   2457	scsi_scan_host(shost);
   2458
   2459	return 0;
   2460
   2461err_out_hw_init:
   2462	sas_unregister_ha(sha);
   2463err_out_register_ha:
   2464	scsi_remove_host(shost);
   2465err_out_ha:
   2466	hisi_sas_free(hisi_hba);
   2467	scsi_host_put(shost);
   2468	return rc;
   2469}
   2470EXPORT_SYMBOL_GPL(hisi_sas_probe);
   2471
   2472int hisi_sas_remove(struct platform_device *pdev)
   2473{
   2474	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
   2475	struct hisi_hba *hisi_hba = sha->lldd_ha;
   2476	struct Scsi_Host *shost = sha->core.shost;
   2477
   2478	del_timer_sync(&hisi_hba->timer);
   2479
   2480	sas_unregister_ha(sha);
   2481	sas_remove_host(sha->core.shost);
   2482
   2483	hisi_sas_free(hisi_hba);
   2484	scsi_host_put(shost);
   2485	return 0;
   2486}
   2487EXPORT_SYMBOL_GPL(hisi_sas_remove);
   2488
   2489#if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE)
   2490#define DEBUGFS_ENABLE_DEFAULT  "enabled"
   2491bool hisi_sas_debugfs_enable = true;
   2492u32 hisi_sas_debugfs_dump_count = 50;
   2493#else
   2494#define DEBUGFS_ENABLE_DEFAULT "disabled"
   2495bool hisi_sas_debugfs_enable;
   2496u32 hisi_sas_debugfs_dump_count = 1;
   2497#endif
   2498
   2499EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
   2500module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
   2501	MODULE_PARM_DESC(debugfs_enable,
   2502		 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")");
   2503
   2504EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
   2505module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
   2506	MODULE_PARM_DESC(debugfs_dump_count, "Number of debugfs dumps to allow");
   2507
   2508struct dentry *hisi_sas_debugfs_dir;
   2509EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir);
   2510
   2511static __init int hisi_sas_init(void)
   2512{
   2513	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
   2514	if (!hisi_sas_stt)
   2515		return -ENOMEM;
   2516
   2517	if (hisi_sas_debugfs_enable) {
   2518		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
   2519		if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
   2520			pr_info("hisi_sas: Limiting debugfs dump count\n");
   2521			hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
   2522		}
   2523	}
   2524
   2525	return 0;
   2526}
   2527
   2528static __exit void hisi_sas_exit(void)
   2529{
   2530	sas_release_transport(hisi_sas_stt);
   2531
   2532	debugfs_remove(hisi_sas_debugfs_dir);
   2533}
   2534
   2535module_init(hisi_sas_init);
   2536module_exit(hisi_sas_exit);
   2537
   2538MODULE_LICENSE("GPL");
   2539MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
   2540MODULE_DESCRIPTION("HISILICON SAS controller driver");
   2541MODULE_ALIAS("platform:" DRV_NAME);