cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

snic_disc.c (13623B)


/*
 * Copyright 2014 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mempool.h>

#include <scsi/scsi_tcq.h>

#include "snic_disc.h"
#include "snic.h"
#include "snic_io.h"


/* snic target types */
static const char * const snic_tgt_type_str[] = {
	[SNIC_TGT_DAS] = "DAS",
	[SNIC_TGT_SAN] = "SAN",
};

static inline const char *
snic_tgt_type_to_str(int typ)
{
	return ((typ > SNIC_TGT_NONE && typ <= SNIC_TGT_SAN) ?
		 snic_tgt_type_str[typ] : "Unknown");
}

static const char * const snic_tgt_state_str[] = {
	[SNIC_TGT_STAT_INIT]	= "INIT",
	[SNIC_TGT_STAT_ONLINE]	= "ONLINE",
	[SNIC_TGT_STAT_OFFLINE]	= "OFFLINE",
	[SNIC_TGT_STAT_DEL]	= "DELETION IN PROGRESS",
};

const char *
snic_tgt_state_to_str(int state)
{
	return ((state >= SNIC_TGT_STAT_INIT && state <= SNIC_TGT_STAT_DEL) ?
		snic_tgt_state_str[state] : "UNKNOWN");
}

/*
 * Initiate report_tgt req desc
 */
static void
snic_report_tgt_init(struct snic_host_req *req, u32 hid, u8 *buf, u32 len,
		     dma_addr_t rsp_buf_pa, ulong ctx)
{
	struct snic_sg_desc *sgd = NULL;


	snic_io_hdr_enc(&req->hdr, SNIC_REQ_REPORT_TGTS, 0, SCSI_NO_TAG, hid,
			1, ctx);

	req->u.rpt_tgts.sg_cnt = cpu_to_le16(1);
	sgd = req_to_sgl(req);
	sgd[0].addr = cpu_to_le64(rsp_buf_pa);
	sgd[0].len = cpu_to_le32(len);
	sgd[0]._resvd = 0;
	req->u.rpt_tgts.sg_addr = cpu_to_le64((ulong)sgd);
}

/*
 * snic_queue_report_tgt_req: Queues report target request.
 */
static int
snic_queue_report_tgt_req(struct snic *snic)
{
	struct snic_req_info *rqi = NULL;
	u32 ntgts, buf_len = 0;
	u8 *buf = NULL;
	dma_addr_t pa = 0;
	int ret = 0;

	rqi = snic_req_init(snic, 1);
	if (!rqi) {
		ret = -ENOMEM;
		goto error;
	}

	if (snic->fwinfo.max_tgts)
		ntgts = min_t(u32, snic->fwinfo.max_tgts, snic->shost->max_id);
	else
		ntgts = snic->shost->max_id;

	/* Allocate Response Buffer */
	SNIC_BUG_ON(ntgts == 0);
	buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN;

	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		snic_req_free(snic, rqi);
		SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n");

		ret = -ENOMEM;
		goto error;
	}

	SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);

	pa = dma_map_single(&snic->pdev->dev, buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&snic->pdev->dev, pa)) {
		SNIC_HOST_ERR(snic->shost,
			      "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
			      buf);
		kfree(buf);
		snic_req_free(snic, rqi);
		ret = -EINVAL;

		goto error;
	}


	SNIC_BUG_ON(pa == 0);
	rqi->sge_va = (ulong) buf;

	snic_report_tgt_init(rqi->req,
			     snic->config.hid,
			     buf,
			     buf_len,
			     pa,
			     (ulong)rqi);

	snic_handle_untagged_req(snic, rqi);

	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
	if (ret) {
		dma_unmap_single(&snic->pdev->dev, pa, buf_len,
				 DMA_FROM_DEVICE);
		kfree(buf);
		rqi->sge_va = 0;
		snic_release_untagged_req(snic, rqi);
		SNIC_HOST_ERR(snic->shost, "Queuing Report Tgts Failed.\n");

		goto error;
	}

	SNIC_DISC_DBG(snic->shost, "Report Targets Issued.\n");

	return ret;

error:
	SNIC_HOST_ERR(snic->shost,
		      "Queuing Report Targets Failed, err = %d\n",
		      ret);
	return ret;
} /* end of snic_queue_report_tgt_req */

/* call into SML */
static void
snic_scsi_scan_tgt(struct work_struct *work)
{
	struct snic_tgt *tgt = container_of(work, struct snic_tgt, scan_work);
	struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
	unsigned long flags;

	SNIC_HOST_INFO(shost, "Scanning Target id 0x%x\n", tgt->id);
	scsi_scan_target(&tgt->dev,
			 tgt->channel,
			 tgt->scsi_tgt_id,
			 SCAN_WILD_CARD,
			 SCSI_SCAN_RESCAN);

	spin_lock_irqsave(shost->host_lock, flags);
	tgt->flags &= ~SNIC_TGT_SCAN_PENDING;
	spin_unlock_irqrestore(shost->host_lock, flags);
} /* end of snic_scsi_scan_tgt */

/*
 * snic_tgt_lookup : Looks up a target in the discovered-target list by its
 * firmware target id; returns NULL if no match is found.
 */
static struct snic_tgt *
snic_tgt_lookup(struct snic *snic, struct snic_tgt_id *tgtid)
{
	struct list_head *cur, *nxt;
	struct snic_tgt *tgt = NULL;

	list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
		tgt = list_entry(cur, struct snic_tgt, list);
		if (tgt->id == le32_to_cpu(tgtid->tgt_id))
			return tgt;
		tgt = NULL;
	}

	return tgt;
} /* end of snic_tgt_lookup */

/*
 * snic_tgt_dev_release : Called on dropping last ref for snic_tgt object
 */
void
snic_tgt_dev_release(struct device *dev)
{
	struct snic_tgt *tgt = dev_to_tgt(dev);

	SNIC_HOST_INFO(snic_tgt_to_shost(tgt),
		       "Target Device ID %d (%s) Permanently Deleted.\n",
		       tgt->id,
		       dev_name(dev));

	SNIC_BUG_ON(!list_empty(&tgt->list));
	kfree(tgt);
}

/*
 * snic_tgt_del : work function to delete snic_tgt
 */
static void
snic_tgt_del(struct work_struct *work)
{
	struct snic_tgt *tgt = container_of(work, struct snic_tgt, del_work);
	struct Scsi_Host *shost = snic_tgt_to_shost(tgt);

	if (tgt->flags & SNIC_TGT_SCAN_PENDING)
		scsi_flush_work(shost);

	/* Block IOs on child devices, stops new IOs */
	scsi_target_block(&tgt->dev);

	/* Cleanup IOs */
	snic_tgt_scsi_abort_io(tgt);

	/* Unblock IOs now, to flush if there are any. */
	scsi_target_unblock(&tgt->dev, SDEV_TRANSPORT_OFFLINE);

	/* Delete SCSI Target and sdevs */
	scsi_remove_target(&tgt->dev);  /* ?? */
	device_del(&tgt->dev);
	put_device(&tgt->dev);
} /* end of snic_tgt_del */

/* snic_tgt_create: checks for existence of snic_tgt; if it doesn't
 * exist, it creates one.
 */
static struct snic_tgt *
snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
{
	struct snic_tgt *tgt = NULL;
	unsigned long flags;
	int ret;

	tgt = snic_tgt_lookup(snic, tgtid);
	if (tgt) {
		/* update the information if required */
		return tgt;
	}

	tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
	if (!tgt) {
		SNIC_HOST_ERR(snic->shost, "Failure to allocate snic_tgt.\n");
		ret = -ENOMEM;

		return tgt;
	}

	INIT_LIST_HEAD(&tgt->list);
	tgt->id = le32_to_cpu(tgtid->tgt_id);
	tgt->channel = 0;

	SNIC_BUG_ON(le16_to_cpu(tgtid->tgt_type) > SNIC_TGT_SAN);
	tgt->tdata.typ = le16_to_cpu(tgtid->tgt_type);

	/*
	 * Plugging into SML Device Tree
	 */
	tgt->tdata.disc_id = 0;
	tgt->state = SNIC_TGT_STAT_INIT;
	device_initialize(&tgt->dev);
	tgt->dev.parent = get_device(&snic->shost->shost_gendev);
	tgt->dev.release = snic_tgt_dev_release;
	INIT_WORK(&tgt->scan_work, snic_scsi_scan_tgt);
	INIT_WORK(&tgt->del_work, snic_tgt_del);
	switch (tgt->tdata.typ) {
	case SNIC_TGT_DAS:
		dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;

	case SNIC_TGT_SAN:
		dev_set_name(&tgt->dev, "snic_san_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;

	default:
		SNIC_HOST_INFO(snic->shost, "Target type Unknown Detected.\n");
		dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;
	}

	spin_lock_irqsave(snic->shost->host_lock, flags);
	list_add_tail(&tgt->list, &snic->disc.tgt_list);
	tgt->scsi_tgt_id = snic->disc.nxt_tgt_id++;
	tgt->state = SNIC_TGT_STAT_ONLINE;
	spin_unlock_irqrestore(snic->shost->host_lock, flags);

	SNIC_HOST_INFO(snic->shost,
		       "Tgt %d, type = %s detected. Adding..\n",
		       tgt->id, snic_tgt_type_to_str(tgt->tdata.typ));

	ret = device_add(&tgt->dev);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "Snic Tgt: device_add, with err = %d\n",
			      ret);

		put_device(&snic->shost->shost_gendev);
		kfree(tgt);
		tgt = NULL;

		return tgt;
	}

	SNIC_HOST_INFO(snic->shost, "Scanning %s.\n", dev_name(&tgt->dev));

	scsi_queue_work(snic->shost, &tgt->scan_work);

	return tgt;
} /* end of snic_tgt_create */

/* Handler for discovery */
void
snic_handle_tgt_disc(struct work_struct *work)
{
	struct snic *snic = container_of(work, struct snic, tgt_work);
	struct snic_tgt_id *tgtid = NULL;
	struct snic_tgt *tgt = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		kfree(snic->disc.rtgt_info);

		return;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	mutex_lock(&snic->disc.mutex);
	/* Discover triggered during disc in progress */
	if (snic->disc.req_cnt) {
		snic->disc.state = SNIC_DISC_DONE;
		snic->disc.req_cnt = 0;
		mutex_unlock(&snic->disc.mutex);
		kfree(snic->disc.rtgt_info);
		snic->disc.rtgt_info = NULL;

		SNIC_HOST_INFO(snic->shost, "tgt_disc: Discovery restart.\n");
		/* Start Discovery Again */
		snic_disc_start(snic);

		return;
	}

	tgtid = (struct snic_tgt_id *)snic->disc.rtgt_info;

	SNIC_BUG_ON(snic->disc.rtgt_cnt == 0 || tgtid == NULL);

	for (i = 0; i < snic->disc.rtgt_cnt; i++) {
		tgt = snic_tgt_create(snic, &tgtid[i]);
		if (!tgt) {
			int buf_sz = snic->disc.rtgt_cnt * sizeof(*tgtid);

			SNIC_HOST_ERR(snic->shost, "Failed to create tgt.\n");
			snic_hex_dump("rpt_tgt_rsp", (char *)tgtid, buf_sz);
			break;
		}
	}

	snic->disc.rtgt_info = NULL;
	snic->disc.state = SNIC_DISC_DONE;
	mutex_unlock(&snic->disc.mutex);

	SNIC_HOST_INFO(snic->shost, "Discovery Completed.\n");

	kfree(tgtid);
} /* end of snic_handle_tgt_disc */


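/*
 * snic_report_tgt_cmpl_handler : Handles the firmware completion for the
 * REPORT_TARGETS request queued by snic_queue_report_tgt_req(); it records
 * the reported target ids and queues tgt_work for further processing.
 */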
int
snic_report_tgt_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{

	u8 typ, cmpl_stat;
	u32 cmnd_id, hid, tgt_cnt = 0;
	ulong ctx;
	struct snic_req_info *rqi = NULL;
	struct snic_tgt_id *tgtid;
	int i, ret = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &cmpl_stat, &cmnd_id, &hid, &ctx);
	rqi = (struct snic_req_info *) ctx;
	tgtid = (struct snic_tgt_id *) rqi->sge_va;

	tgt_cnt = le32_to_cpu(fwreq->u.rpt_tgts_cmpl.tgt_cnt);
	if (tgt_cnt == 0) {
		SNIC_HOST_ERR(snic->shost, "No Targets Found on this host.\n");
		ret = 1;

		goto end;
	}

	/* printing list of targets here */
	SNIC_HOST_INFO(snic->shost, "Target Count = %d\n", tgt_cnt);

	SNIC_BUG_ON(tgt_cnt > snic->fwinfo.max_tgts);

	for (i = 0; i < tgt_cnt; i++)
		SNIC_HOST_INFO(snic->shost,
			       "Tgt id = 0x%x\n",
			       le32_to_cpu(tgtid[i].tgt_id));

	/*
	 * Queue work for further processing,
	 * Response Buffer Memory is freed after creating targets
	 */
	snic->disc.rtgt_cnt = tgt_cnt;
	snic->disc.rtgt_info = (u8 *) tgtid;
	queue_work(snic_glob->event_q, &snic->tgt_work);
	ret = 0;

end:
	/* Unmap Response Buffer */
	snic_pci_unmap_rsp_buf(snic, rqi);
	if (ret)
		kfree(tgtid);

	rqi->sge_va = 0;
	snic_release_untagged_req(snic, rqi);

	return ret;
} /* end of snic_report_tgt_cmpl_handler */

/* Discovery init fn */
void
snic_disc_init(struct snic_disc *disc)
{
	INIT_LIST_HEAD(&disc->tgt_list);
	mutex_init(&disc->mutex);
	disc->disc_id = 0;
	disc->nxt_tgt_id = 0;
	disc->state = SNIC_DISC_INIT;
	disc->req_cnt = 0;
	disc->rtgt_cnt = 0;
	disc->rtgt_info = NULL;
	disc->cb = NULL;
} /* end of snic_disc_init */

/* Discovery, uninit fn */
void
snic_disc_term(struct snic *snic)
{
	struct snic_disc *disc = &snic->disc;

	mutex_lock(&disc->mutex);
	if (disc->req_cnt) {
		disc->req_cnt = 0;
		SNIC_SCSI_DBG(snic->shost, "Terminating Discovery.\n");
	}
	mutex_unlock(&disc->mutex);
}

/*
 * snic_disc_start: Discovery Start ...
 */
int
snic_disc_start(struct snic *snic)
{
	struct snic_disc *disc = &snic->disc;
	unsigned long flags;
	int ret = 0;

	SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n");

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		SNIC_ERR("snic driver removal in progress ...\n");
		ret = 0;

		return ret;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	mutex_lock(&disc->mutex);
	if (disc->state == SNIC_DISC_PENDING) {
		disc->req_cnt++;
		mutex_unlock(&disc->mutex);

		return ret;
	}
	disc->state = SNIC_DISC_PENDING;
	mutex_unlock(&disc->mutex);

	ret = snic_queue_report_tgt_req(snic);
	if (ret)
		SNIC_HOST_INFO(snic->shost, "Discovery Failed, err=%d.\n", ret);

	return ret;
} /* end of snic_disc_start */

/*
 * snic_handle_disc : Discovery work function
 */
void
snic_handle_disc(struct work_struct *work)
{
	struct snic *snic = container_of(work, struct snic, disc_work);
	int ret = 0;

	SNIC_HOST_INFO(snic->shost, "disc_work: Discovery\n");

	ret = snic_disc_start(snic);
	if (ret)
		SNIC_HOST_ERR(snic->shost,
			      "disc_work: Discovery Failed w/ err = %d\n",
			      ret);
} /* end of snic_handle_disc */

/*
 * snic_tgt_del_all : cleanup all snic targets
 * Called on unbinding the interface
 */
void
snic_tgt_del_all(struct snic *snic)
{
	struct snic_tgt *tgt = NULL;
	struct list_head *cur, *nxt;
	unsigned long flags;

	scsi_flush_work(snic->shost);

	mutex_lock(&snic->disc.mutex);
	spin_lock_irqsave(snic->shost->host_lock, flags);

	list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
		tgt = list_entry(cur, struct snic_tgt, list);
		tgt->state = SNIC_TGT_STAT_DEL;
		list_del_init(&tgt->list);
		SNIC_HOST_INFO(snic->shost, "Tgt %d q'ing for del\n", tgt->id);
		queue_work(snic_glob->event_q, &tgt->del_work);
		tgt = NULL;
	}
	spin_unlock_irqrestore(snic->shost->host_lock, flags);
	mutex_unlock(&snic->disc.mutex);

	flush_workqueue(snic_glob->event_q);
} /* end of snic_tgt_del_all */