cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

esas2r_ioctl.c (51793B)


/*
 *  linux/drivers/scsi/esas2r/esas2r_ioctl.c
 *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
 *
 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
 *  (mailto:linuxdrivers@attotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include "esas2r.h"

/*
 * Buffered ioctl handlers.  A buffered ioctl is one which requires that we
 * allocate a DMA-able memory area to communicate with the firmware.  In
 * order to prevent continually allocating and freeing consistent memory,
 * we will allocate a global buffer the first time we need it and re-use
 * it for subsequent ioctl calls that require it.
 */

u8 *esas2r_buffered_ioctl;
dma_addr_t esas2r_buffered_ioctl_addr;
u32 esas2r_buffered_ioctl_size;
struct pci_dev *esas2r_buffered_ioctl_pcid;

static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
				       struct esas2r_request *,
				       struct esas2r_sg_context *,
				       void *);
typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
					     struct esas2r_request *, void *);

struct esas2r_buffered_ioctl {
	struct esas2r_adapter *a;
	void *ioctl;
	u32 length;
	u32 control_code;
	u32 offset;
	BUFFERED_IOCTL_CALLBACK callback;
	void *context;
	BUFFERED_IOCTL_DONE_CALLBACK done_callback;
	void *done_context;
};
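
/*
 * Illustrative usage (a sketch; it mirrors handle_smp_ioctl() below):
 * callers zero a struct esas2r_buffered_ioctl on the stack, fill in the
 * adapter, the caller's buffer and its length, and a callback that builds
 * and starts the request, then pass it to handle_buffered_ioctl(), which
 * stages the data through the shared DMA buffer declared above.
 */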

static void complete_fm_api_req(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	a->fm_api_command_done = 1;
	wake_up_interruptible(&a->fm_api_waiter);
}

/* Callbacks for building scatter/gather lists for FM API requests */
static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.phys + offset;
	return a->firmware.orig_len - offset;
}

static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.header_buff_phys + offset;
	return sizeof(struct esas2r_flash_img) - offset;
}
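
/*
 * Note on the scatter/gather callback contract used throughout this file:
 * given the current offset into the virtual buffer, the callback writes
 * the corresponding DMA address to *addr and returns how many contiguous
 * bytes remain from that address, so the S/G list builder can consume the
 * buffer in chunks.
 */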

/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
	struct esas2r_request *rq;

	if (mutex_lock_interruptible(&a->fm_api_mutex)) {
		fi->status = FI_STAT_BUSY;
		return;
	}

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		fi->status = FI_STAT_BUSY;
		goto free_sem;
	}

	if (fi == &a->firmware.header) {
		a->firmware.header_buff =
			dma_alloc_coherent(&a->pcid->dev,
					   sizeof(struct esas2r_flash_img),
					   (dma_addr_t *)&a->firmware.header_buff_phys,
					   GFP_KERNEL);

		if (a->firmware.header_buff == NULL) {
			esas2r_debug("failed to allocate header buffer!");
			fi->status = FI_STAT_BUSY;
			goto free_req;
		}

		memcpy(a->firmware.header_buff, fi,
		       sizeof(struct esas2r_flash_img));
		a->save_offset = a->firmware.header_buff;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api_header;
	} else {
		a->save_offset = (u8 *)fi;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api;
	}

	rq->comp_cb = complete_fm_api_req;
	a->fm_api_command_done = 0;
	a->fm_api_sgc.cur_offset = a->save_offset;

	if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
			   &a->fm_api_sgc))
		goto all_done;

	/* Now wait around for it to complete. */
	while (!a->fm_api_command_done)
		wait_event_interruptible(a->fm_api_waiter,
					 a->fm_api_command_done);
all_done:
	if (fi == &a->firmware.header) {
		memcpy(fi, a->firmware.header_buff,
		       sizeof(struct esas2r_flash_img));

		dma_free_coherent(&a->pcid->dev,
				  sizeof(struct esas2r_flash_img),
				  a->firmware.header_buff,
				  (dma_addr_t)a->firmware.header_buff_phys);
	}
free_req:
	esas2r_free_request(a, (struct esas2r_request *)rq);
free_sem:
	mutex_unlock(&a->fm_api_mutex);
}

static void complete_nvr_req(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	a->nvram_command_done = 1;
	wake_up_interruptible(&a->nvram_waiter);
}

/* Callback for building scatter/gather lists for buffered ioctls */
static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
				       u64 *addr)
{
	int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;

	(*addr) = esas2r_buffered_ioctl_addr + offset;
	return esas2r_buffered_ioctl_size - offset;
}

static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
					struct esas2r_request *rq)
{
	a->buffered_ioctl_done = 1;
	wake_up_interruptible(&a->buffered_ioctl_waiter);
}

static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
{
	struct esas2r_adapter *a = bi->a;
	struct esas2r_request *rq;
	struct esas2r_sg_context sgc;
	u8 result = IOCTL_SUCCESS;

	if (down_interruptible(&buffered_ioctl_semaphore))
		return IOCTL_OUT_OF_RESOURCES;

	/* allocate a buffer or use the existing buffer. */
	if (esas2r_buffered_ioctl) {
		if (esas2r_buffered_ioctl_size < bi->length) {
			/* free the too-small buffer and get a new one */
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);

			goto allocate_buffer;
		}
	} else {
allocate_buffer:
		esas2r_buffered_ioctl_size = bi->length;
		esas2r_buffered_ioctl_pcid = a->pcid;
		esas2r_buffered_ioctl =
			dma_alloc_coherent(&a->pcid->dev,
					   (size_t)esas2r_buffered_ioctl_size,
					   &esas2r_buffered_ioctl_addr,
					   GFP_KERNEL);
	}

	if (!esas2r_buffered_ioctl) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate %d bytes of consistent memory "
			   "for a buffered ioctl!",
			   bi->length);

		esas2r_debug("buffered ioctl alloc failure");
		result = IOCTL_OUT_OF_RESOURCES;
		goto exit_cleanly;
	}

	memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate an internal request");

		result = IOCTL_OUT_OF_RESOURCES;
		esas2r_debug("buffered ioctl - no requests");
		goto exit_cleanly;
	}

	a->buffered_ioctl_done = 0;
	rq->comp_cb = complete_buffered_ioctl_req;
	sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
	sgc.length = esas2r_buffered_ioctl_size;

	if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
		/* completed immediately, no need to wait */
		a->buffered_ioctl_done = 0;
		goto free_andexit_cleanly;
	}

	/* now wait around for it to complete. */
	while (!a->buffered_ioctl_done)
		wait_event_interruptible(a->buffered_ioctl_waiter,
					 a->buffered_ioctl_done);

free_andexit_cleanly:
	if (result == IOCTL_SUCCESS && bi->done_callback)
		(*bi->done_callback)(a, rq, bi->done_context);

	esas2r_free_request(a, rq);

exit_cleanly:
	if (result == IOCTL_SUCCESS)
		memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);

	up(&buffered_ioctl_semaphore);
	return result;
}

/* SMP ioctl support */
static int smp_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc, void *context)
{
	struct atto_ioctl_smp *si =
		(struct atto_ioctl_smp *)esas2r_buffered_ioctl;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		si->status = ATTO_STS_OUT_OF_RSRC;
		return false;
	}

	esas2r_start_request(a, rq);
	return true;
}

static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = si;
	bi.length = sizeof(struct atto_ioctl_smp)
		    + le32_to_cpu(si->req_length)
		    + le32_to_cpu(si->rsp_length);
	bi.offset = 0;
	bi.callback = smp_ioctl_callback;
	return handle_buffered_ioctl(&bi);
}


/* CSMI ioctl support */
static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
	rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);

	/* Now call the original completion callback. */
	(*rq->aux_req_cb)(a, rq);
}

/* Tunnel a CSMI IOCTL to the back end driver for processing. */
static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
			      union atto_ioctl_csmi *ci,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      u32 ctrl_code,
			      u16 target_id)
{
	struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;

	if (test_bit(AF_DEGRADED_MODE, &a->flags))
		return false;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
	ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
	ioctl->csmi.target_id = cpu_to_le16(target_id);
	ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);

	/*
	 * Always usurp the completion callback since the interrupt callback
	 * mechanism may be used.
	 */
	rq->aux_req_cx = ci;
	rq->aux_req_cb = rq->comp_cb;
	rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;

	if (!esas2r_build_sg_list(a, rq, sgc))
		return false;

	esas2r_start_request(a, rq);
	return true;
}

static bool check_lun(struct scsi_lun lun)
{
	bool result;

	result = ((lun.scsi_lun[7] == 0) &&
		  (lun.scsi_lun[6] == 0) &&
		  (lun.scsi_lun[5] == 0) &&
		  (lun.scsi_lun[4] == 0) &&
		  (lun.scsi_lun[3] == 0) &&
		  (lun.scsi_lun[2] == 0) &&
		  /* byte 1 is intentionally skipped */
		  (lun.scsi_lun[0] == 0));

	return result;
}
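
/*
 * Why byte 1 is skipped: these handlers only support single-level LUNs
 * whose number is carried in byte 1 of the SAM-encoded address (for
 * example, LUN 5 arrives as { 0x00, 0x05, 0, 0, 0, 0, 0, 0 }); every
 * other byte must be zero for the address to be representable here.
 */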

static int csmi_ioctl_callback(struct esas2r_adapter *a,
			       struct esas2r_request *rq,
			       struct esas2r_sg_context *sgc, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;
	u8 path = 0;
	u8 tid = 0;
	u8 lun = 0;
	u32 sts = CSMI_STS_SUCCESS;
	struct esas2r_target *t;
	unsigned long flags;

	if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
		struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;

		path = gda->path_id;
		tid = gda->target_id;
		lun = gda->lun;
	} else if (ci->control_code == CSMI_CC_TASK_MGT) {
		struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;

		path = tm->path_id;
		tid = tm->target_id;
		lun = tm->lun;
	}

	if (path > 0) {
		rq->func_rsp.ioctl_rsp.csmi.csmi_status =
			cpu_to_le32(CSMI_STS_INV_PARAM);
		return false;
	}

	rq->target_id = tid;
	rq->vrq->scsi.flags |= cpu_to_le32(lun);

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;

		strcpy(gdi->description, esas2r_get_model_name(a));
		gdi->csmi_major_rev = CSMI_MAJOR_REV;
		gdi->csmi_minor_rev = CSMI_MINOR_REV;
		break;
	}

	case CSMI_CC_GET_CNTLR_CFG:
	{
		struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;

		gcc->base_io_addr = 0;
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
				      &gcc->base_memaddr_lo);
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
				      &gcc->base_memaddr_hi);
		gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
					  a->pcid->subsystem_vendor);
		gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
		gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
		gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
		gcc->pci_addr.bus_num = a->pcid->bus->number;
		gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
		gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);

		memset(gcc->serial_num, 0, sizeof(gcc->serial_num));

		gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
		gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
		gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
		gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
		gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
		gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
		gcc->bios_build_rev = LOWORD(a->flash_ver);

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
					   | CSMI_CNTLRF_SATA_HBA;
		else
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
					   | CSMI_CNTLRF_SATA_RAID;

		gcc->rrom_major_rev = 0;
		gcc->rrom_minor_rev = 0;
		gcc->rrom_build_rev = 0;
		gcc->rrom_release_rev = 0;
		gcc->rrom_biosmajor_rev = 0;
		gcc->rrom_biosminor_rev = 0;
		gcc->rrom_biosbuild_rev = 0;
		gcc->rrom_biosrelease_rev = 0;
		break;
	}

	case CSMI_CC_GET_CNTLR_STS:
	{
		struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gcs->status = CSMI_CNTLR_STS_FAILED;
		else
			gcs->status = CSMI_CNTLR_STS_GOOD;

		gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
		break;
	}

	case CSMI_CC_FW_DOWNLOAD:
	case CSMI_CC_GET_RAID_INFO:
	case CSMI_CC_GET_RAID_CFG:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;

	case CSMI_CC_SMP_PASSTHRU:
	case CSMI_CC_SSP_PASSTHRU:
	case CSMI_CC_STP_PASSTHRU:
	case CSMI_CC_GET_PHY_INFO:
	case CSMI_CC_SET_PHY_INFO:
	case CSMI_CC_GET_LINK_ERRORS:
	case CSMI_CC_GET_SATA_SIG:
	case CSMI_CC_GET_CONN_INFO:
	case CSMI_CC_PHY_CTRL:

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       ESAS2R_TARG_ID_INV)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		struct scsi_lun lun;

		memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));

		if (!check_lun(lun)) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		/* make sure the device is present */
		spin_lock_irqsave(&a->mem_lock, flags);
		t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
		spin_unlock_irqrestore(&a->mem_lock, flags);

		if (t == NULL) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		gsa->host_index = 0xFF;
		gsa->lun = gsa->sas_lun[1];
		rq->target_id = esas2r_targ_get_id(t, a);
		break;
	}

	case CSMI_CC_GET_DEV_ADDR:
	{
		struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || t->sas_addr == 0) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		/* fill in the result */
		*(u64 *)gda->sas_addr = t->sas_addr;
		memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
		gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
		break;
	}

	case CSMI_CC_TASK_MGT:

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || !(t->flags & TF_PASS_THRU)) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       t->phys_targ_id)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	default:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;
	}

	rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);

	return false;
}


static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
				     struct esas2r_request *rq, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi =
			&ioctl_csmi->drvr_info;

		strcpy(gdi->name, ESAS2R_VERSION_STR);

		gdi->major_rev = ESAS2R_MAJOR_REV;
		gdi->minor_rev = ESAS2R_MINOR_REV;
		gdi->build_rev = 0;
		gdi->release_rev = 0;
		break;
	}

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
		    CSMI_STS_SUCCESS) {
			gsa->target_id = rq->target_id;
			gsa->path_id = 0;
		}

		break;
	}
	}

	ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
}


static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = &ci->data;
	bi.length = sizeof(union atto_ioctl_csmi);
	bi.offset = 0;
	bi.callback = csmi_ioctl_callback;
	bi.context = ci;
	bi.done_callback = csmi_ioctl_done_callback;
	bi.done_context = ci;

	return handle_buffered_ioctl(&bi);
}

/* ATTO HBA ioctl support */

/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
			     struct atto_ioctl *hi,
			     struct esas2r_request *rq,
			     struct esas2r_sg_context *sgc)
{
	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		hi->status = ATTO_STS_OUT_OF_RSRC;

		return false;
	}

	esas2r_start_request(a, rq);

	return true;
}
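
/*
 * Convention for the buffered-ioctl callbacks in this file: returning
 * true means the request was handed to the hardware and the completion
 * callback will fire later, so handle_buffered_ioctl() must sleep on
 * buffered_ioctl_waiter; returning false means the result (or an error
 * status) was produced inline and no wait is needed.
 */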

static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
	struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
	u8 sts = ATTO_SPT_RS_FAILED;

	spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
	spt->sense_length = rq->sense_len;
	spt->residual_length =
		le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);

	switch (rq->req_stat) {
	case RS_SUCCESS:
	case RS_SCSI_ERROR:
		sts = ATTO_SPT_RS_SUCCESS;
		break;
	case RS_UNDERRUN:
		sts = ATTO_SPT_RS_UNDERRUN;
		break;
	case RS_OVERRUN:
		sts = ATTO_SPT_RS_OVERRUN;
		break;
	case RS_SEL:
	case RS_SEL2:
		sts = ATTO_SPT_RS_NO_DEVICE;
		break;
	case RS_NO_LUN:
		sts = ATTO_SPT_RS_NO_LUN;
		break;
	case RS_TIMEOUT:
		sts = ATTO_SPT_RS_TIMEOUT;
		break;
	case RS_DEGRADED:
		sts = ATTO_SPT_RS_DEGRADED;
		break;
	case RS_BUSY:
		sts = ATTO_SPT_RS_BUSY;
		break;
	case RS_ABORTED:
		sts = ATTO_SPT_RS_ABORTED;
		break;
	case RS_RESET:
		sts = ATTO_SPT_RS_BUS_RESET;
		break;
	}

	spt->req_status = sts;

	/* Update the target ID to the next one present. */
	spt->target_id =
		esas2r_targ_db_find_next_present(a, (u16)spt->target_id);

	/* Done, call the completion callback. */
	(*rq->aux_req_cb)(a, rq);
}

static int hba_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      void *context)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;

	hi->status = ATTO_STS_SUCCESS;

	switch (hi->function) {
	case ATTO_FUNC_GET_ADAP_INFO:
	{
		u8 *class_code = (u8 *)&a->pcid->class;

		struct atto_hba_get_adapter_info *gai =
			&hi->data.get_adap_info;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_INFO0;
			break;
		}

		memset(gai, 0, sizeof(*gai));

		gai->pci.vendor_id = a->pcid->vendor;
		gai->pci.device_id = a->pcid->device;
		gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
		gai->pci.ss_device_id = a->pcid->subsystem_device;
		gai->pci.class_code[0] = class_code[0];
		gai->pci.class_code[1] = class_code[1];
		gai->pci.class_code[2] = class_code[2];
		gai->pci.rev_id = a->pcid->revision;
		gai->pci.bus_num = a->pcid->bus->number;
		gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
		gai->pci.func_num = PCI_FUNC(a->pcid->devfn);

		if (pci_is_pcie(a->pcid)) {
			u16 stat;
			u32 caps;

			pcie_capability_read_word(a->pcid, PCI_EXP_LNKSTA,
						  &stat);
			pcie_capability_read_dword(a->pcid, PCI_EXP_LNKCAP,
						   &caps);

			gai->pci.link_speed_curr =
				(u8)(stat & PCI_EXP_LNKSTA_CLS);
			gai->pci.link_speed_max =
				(u8)(caps & PCI_EXP_LNKCAP_SLS);
			gai->pci.link_width_curr =
				(u8)((stat & PCI_EXP_LNKSTA_NLW)
				     >> PCI_EXP_LNKSTA_NLW_SHIFT);
			gai->pci.link_width_max =
				(u8)((caps & PCI_EXP_LNKCAP_MLW)
				     >> 4);
		}

		gai->pci.msi_vector_cnt = 1;

		if (a->pcid->msix_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
		else if (a->pcid->msi_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
		else
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;

		gai->adap_type = ATTO_GAI_AT_ESASRAID2;

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gai->adap_type = ATTO_GAI_AT_TLSASHBA;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gai->adap_flags |= ATTO_GAI_AF_DEGRADED;

		gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
				   ATTO_GAI_AF_DEVADDR_SUPP;

		if (a->pcid->subsystem_device == ATTO_ESAS_R60F
		    || a->pcid->subsystem_device == ATTO_ESAS_R608
		    || a->pcid->subsystem_device == ATTO_ESAS_R644
		    || a->pcid->subsystem_device == ATTO_TSSC_3808E)
			gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;

		gai->num_ports = ESAS2R_NUM_PHYS;
		gai->num_phys = ESAS2R_NUM_PHYS;

		strcpy(gai->firmware_rev, a->fw_rev);
		strcpy(gai->flash_rev, a->flash_rev);
		strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
		strcpy(gai->model_name, esas2r_get_model_name(a));

		gai->num_targets = ESAS2R_MAX_TARGETS;

		gai->num_busses = 1;
		gai->num_targsper_bus = gai->num_targets;
		gai->num_lunsper_targ = 256;

		if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
		    || a->pcid->subsystem_device == ATTO_ESAS_R60F)
			gai->num_connectors = 4;
		else
			gai->num_connectors = 2;

		gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;

		gai->num_targets_backend = a->num_targets_backend;

		gai->tunnel_flags = a->ioctl_tunnel
				    & (ATTO_GAI_TF_MEM_RW
				       | ATTO_GAI_TF_TRACE
				       | ATTO_GAI_TF_SCSI_PASS_THRU
				       | ATTO_GAI_TF_GET_DEV_ADDR
				       | ATTO_GAI_TF_PHY_CTRL
				       | ATTO_GAI_TF_CONN_CTRL
				       | ATTO_GAI_TF_GET_DEV_INFO);
		break;
	}

	case ATTO_FUNC_GET_ADAP_ADDR:
	{
		struct atto_hba_get_adapter_address *gaa =
			&hi->data.get_adap_addr;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_ADDR0;
		} else if (gaa->addr_type == ATTO_GAA_AT_PORT
			   || gaa->addr_type == ATTO_GAA_AT_NODE) {
			if (gaa->addr_type == ATTO_GAA_AT_PORT
			    && gaa->port_id >= ESAS2R_NUM_PHYS) {
				hi->status = ATTO_STS_NOT_APPL;
			} else {
				memcpy((u64 *)gaa->address,
				       &a->nvram->sas_addr[0], sizeof(u64));
				gaa->addr_len = sizeof(u64);
			}
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		break;
	}

	case ATTO_FUNC_MEM_RW:
	{
		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;

		break;
	}

	case ATTO_FUNC_TRACE:
	{
		struct atto_hba_trace *trc = &hi->data.trace;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_TRACE1) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_TRACE1;
			break;
		}

		if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
		    && hi->version >= ATTO_VER_TRACE1) {
			if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
				u32 len = hi->data_length;
				u32 offset = trc->current_offset;
				u32 total_len = ESAS2R_FWCOREDUMP_SZ;

				/* Size is zero if a core dump isn't present */
				if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
					total_len = 0;

				if (len > total_len)
					len = total_len;

				if (offset >= total_len
				    || offset + len > total_len
				    || len == 0) {
					hi->status = ATTO_STS_INV_PARAM;
					break;
				}

				memcpy(trc + 1,
				       a->fw_coredump_buff + offset,
				       len);

				hi->data_length = len;
			} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
				memset(a->fw_coredump_buff, 0,
				       ESAS2R_FWCOREDUMP_SZ);

				clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
			} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
				hi->status = ATTO_STS_UNSUPPORTED;
				break;
			}

			/* Always return all the info we can. */
			trc->trace_mask = 0;
			trc->current_offset = 0;
			trc->total_length = ESAS2R_FWCOREDUMP_SZ;

			/* Return zero length buffer if core dump not present */
			if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
				trc->total_length = 0;
		} else {
			hi->status = ATTO_STS_UNSUPPORTED;
		}

		break;
	}

	case ATTO_FUNC_SCSI_PASS_THRU:
	{
		struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
		struct scsi_lun lun;

		memcpy(&lun, spt->lun, sizeof(struct scsi_lun));

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_SCSI_PASS_THRU0;
			break;
		}

		if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		esas2r_sgc_init(sgc, a, rq, NULL);

		sgc->length = hi->data_length;
		sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
				   + sizeof(struct atto_hba_scsi_pass_thru);

		/* Finish request initialization */
		rq->target_id = (u16)spt->target_id;
		rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
		memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
		rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
		rq->sense_len = spt->sense_length;
		rq->sense_buf = (u8 *)spt->sense_data;
		/* NOTE: we ignore spt->timeout */

		/*
		 * always usurp the completion callback since the interrupt
		 * callback mechanism may be used.
		 */
		rq->aux_req_cx = hi;
		rq->aux_req_cb = rq->comp_cb;
		rq->comp_cb = scsi_passthru_comp_cb;

		if (spt->flags & ATTO_SPTF_DATA_IN) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
		} else if (spt->flags & ATTO_SPTF_DATA_OUT) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
		} else {
			if (sgc->length) {
				hi->status = ATTO_STS_INV_PARAM;
				break;
			}
		}

		if (spt->flags & ATTO_SPTF_ORDERED_Q)
			rq->vrq->scsi.flags |=
				cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
		else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);

		if (!esas2r_build_sg_list(a, rq, sgc)) {
			hi->status = ATTO_STS_OUT_OF_RSRC;
			break;
		}

		esas2r_start_request(a, rq);

		return true;
	}

	case ATTO_FUNC_GET_DEV_ADDR:
	{
		struct atto_hba_get_device_address *gda =
			&hi->data.get_dev_addr;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_ADDR0;
			break;
		}

		if (gda->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gda->target_id;

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
		} else if (gda->addr_type == ATTO_GDA_AT_PORT) {
			if (t->sas_addr == 0) {
				hi->status = ATTO_STS_UNSUPPORTED;
			} else {
				*(u64 *)gda->address = t->sas_addr;

				gda->addr_len = sizeof(u64);
			}
		} else if (gda->addr_type == ATTO_GDA_AT_NODE) {
			hi->status = ATTO_STS_NOT_APPL;
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		/* update the target ID to the next one present. */
		gda->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gda->target_id);
		break;
	}

	case ATTO_FUNC_PHY_CTRL:
	case ATTO_FUNC_CONN_CTRL:
	{
		if (hba_ioctl_tunnel(a, hi, rq, sgc))
			return true;

		break;
	}

	case ATTO_FUNC_ADAP_CTRL:
	{
		struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_ADAP_CTRL0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_ADAP_CTRL0;
			break;
		}

		if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
			esas2r_reset_adapter(a);
		} else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_SCHED;
		else if (test_bit(AF_CHPRST_PENDING, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
		else if (test_bit(AF_DISC_PENDING, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_DISC;
		else if (test_bit(AF_DISABLED, &a->flags))
			ac->adap_state = ATTO_AC_AS_DISABLED;
		else if (test_bit(AF_DEGRADED_MODE, &a->flags))
			ac->adap_state = ATTO_AC_AS_DEGRADED;
		else
			ac->adap_state = ATTO_AC_AS_OK;

		break;
	}

	case ATTO_FUNC_GET_DEV_INFO:
	{
		struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_INFO0;
			break;
		}

		if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gdi->target_id;

		/* update the target ID to the next one present. */
		gdi->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gdi->target_id);

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;
		break;
	}

	default:

		hi->status = ATTO_STS_INV_FUNC;
		break;
	}

	return false;
}

static void hba_ioctl_done_callback(struct esas2r_adapter *a,
				    struct esas2r_request *rq, void *context)
{
	struct atto_ioctl *ioctl_hba =
		(struct atto_ioctl *)esas2r_buffered_ioctl;

	esas2r_debug("hba_ioctl_done_callback %d", a->index);

	if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
		struct atto_hba_get_adapter_info *gai =
			&ioctl_hba->data.get_adap_info;

		esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");

		gai->drvr_rev_major = ESAS2R_MAJOR_REV;
		gai->drvr_rev_minor = ESAS2R_MINOR_REV;

		strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
		strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);

		gai->num_busses = 1;
		gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
		gai->num_lunsper_targ = 1;
	}
}

u8 handle_hba_ioctl(struct esas2r_adapter *a,
		    struct atto_ioctl *ioctl_hba)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = ioctl_hba;
	bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
	bi.callback = hba_ioctl_callback;
	bi.context = NULL;
	bi.done_callback = hba_ioctl_done_callback;
	bi.done_context = NULL;
	bi.offset = 0;

	return handle_buffered_ioctl(&bi);
}

int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
			struct esas2r_sas_nvram *data)
{
	int result = 0;

	a->nvram_command_done = 0;
	rq->comp_cb = complete_nvr_req;

	if (esas2r_nvram_write(a, rq, data)) {
		/* now wait around for it to complete. */
		while (!a->nvram_command_done)
			wait_event_interruptible(a->nvram_waiter,
						 a->nvram_command_done);

		/* done, check the status. */
		if (rq->req_stat == RS_SUCCESS)
			result = 1;
	}
	return result;
}


/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg)
{
	struct atto_express_ioctl *ioctl = NULL;
	struct esas2r_adapter *a;
	struct esas2r_request *rq;
	u16 code;
	int err;

	esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);

	if ((arg == NULL)
	    || (cmd < EXPRESS_IOCTL_MIN)
	    || (cmd > EXPRESS_IOCTL_MAX))
		return -ENOTSUPP;

	ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl));
	if (IS_ERR(ioctl)) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler memdup_user failed for cmd %u, address %p",
			   cmd, arg);
		return PTR_ERR(ioctl);
	}

	/* verify the signature */

	if (memcmp(ioctl->header.signature,
		   EXPRESS_IOCTL_SIGNATURE,
		   EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
		esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
		kfree(ioctl);

		return -ENOTSUPP;
	}

	/* assume success */

	ioctl->header.return_code = IOCTL_SUCCESS;
	err = 0;

	/*
	 * handle EXPRESS_IOCTL_GET_CHANNELS
	 * without paying attention to channel
	 */

	if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
		int i = 0, k = 0;

		ioctl->data.chanlist.num_channels = 0;

		while (i < MAX_ADAPTERS) {
			if (esas2r_adapters[i]) {
				ioctl->data.chanlist.num_channels++;
				ioctl->data.chanlist.channel[k] = i;
				k++;
			}
			i++;
		}

		goto ioctl_done;
	}

	/* get the channel */

	if (ioctl->header.channel == 0xFF) {
		a = (struct esas2r_adapter *)hostdata;
	} else {
		if (ioctl->header.channel >= MAX_ADAPTERS ||
		    esas2r_adapters[ioctl->header.channel] == NULL) {
			ioctl->header.return_code = IOCTL_BAD_CHANNEL;
			esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
			kfree(ioctl);

			return -ENOTSUPP;
		}
		a = esas2r_adapters[ioctl->header.channel];
	}

	switch (cmd) {
	case EXPRESS_IOCTL_RW_FIRMWARE:

		if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
			err = esas2r_write_fw(a,
					      (char *)ioctl->data.fwrw.image,
					      0,
					      sizeof(struct atto_express_ioctl));

			if (err >= 0) {
				err = esas2r_read_fw(a,
						     (char *)ioctl->data.fwrw.image,
						     0,
						     sizeof(struct atto_express_ioctl));
			}
		} else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
			err = esas2r_write_fs(a,
					      (char *)ioctl->data.fwrw.image,
					      0,
					      sizeof(struct atto_express_ioctl));

			if (err >= 0) {
				err = esas2r_read_fs(a,
						     (char *)ioctl->data.fwrw.image,
						     0,
						     sizeof(struct atto_express_ioctl));
			}
		} else {
			ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
		}

		break;

	case EXPRESS_IOCTL_READ_PARAMS:

		memcpy(ioctl->data.prw.data_buffer, a->nvram,
		       sizeof(struct esas2r_sas_nvram));
		ioctl->data.prw.code = 1;
		break;

	case EXPRESS_IOCTL_WRITE_PARAMS:

		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			kfree(ioctl);
			esas2r_log(ESAS2R_LOG_WARN,
				   "could not allocate an internal request");
			return -ENOMEM;
		}

		code = esas2r_write_params(a, rq,
					   (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
		ioctl->data.prw.code = code;

		esas2r_free_request(a, rq);

		break;

	case EXPRESS_IOCTL_DEFAULT_PARAMS:

		esas2r_nvram_get_defaults(a,
					  (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
		ioctl->data.prw.code = 1;
		break;

	case EXPRESS_IOCTL_CHAN_INFO:

		ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
		ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
		ioctl->data.chaninfo.IRQ = a->pcid->irq;
		ioctl->data.chaninfo.device_id = a->pcid->device;
		ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
		ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
		ioctl->data.chaninfo.revision_id = a->pcid->revision;
		ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
		ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
		ioctl->data.chaninfo.core_rev = 0;
		ioctl->data.chaninfo.host_no = a->host->host_no;
		ioctl->data.chaninfo.hbaapi_rev = 0;
		break;

	case EXPRESS_IOCTL_SMP:
		ioctl->header.return_code =
			handle_smp_ioctl(a, &ioctl->data.ioctl_smp);
		break;

	case EXPRESS_CSMI:
		ioctl->header.return_code =
			handle_csmi_ioctl(a, &ioctl->data.csmi);
		break;

	case EXPRESS_IOCTL_HBA:
		ioctl->header.return_code =
			handle_hba_ioctl(a, &ioctl->data.ioctl_hba);
		break;

	case EXPRESS_IOCTL_VDA:
		err = esas2r_write_vda(a,
				       (char *)&ioctl->data.ioctl_vda,
				       0,
				       sizeof(struct atto_ioctl_vda) +
				       ioctl->data.ioctl_vda.data_length);

		if (err >= 0) {
			err = esas2r_read_vda(a,
					      (char *)&ioctl->data.ioctl_vda,
					      0,
					      sizeof(struct atto_ioctl_vda) +
					      ioctl->data.ioctl_vda.data_length);
		}

		break;

	case EXPRESS_IOCTL_GET_MOD_INFO:

		ioctl->data.modinfo.adapter = a;
		ioctl->data.modinfo.pci_dev = a->pcid;
		ioctl->data.modinfo.scsi_host = a->host;
		ioctl->data.modinfo.host_no = a->host->host_no;

		break;

	default:
		esas2r_debug("esas2r_ioctl invalid cmd %u!", cmd);
		ioctl->header.return_code = IOCTL_ERR_INVCMD;
	}

ioctl_done:

	if (err < 0) {
		esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %u", err,
			   cmd);

		switch (err) {
		case -ENOMEM:
		case -EBUSY:
			ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
			break;

		case -ENOSYS:
		case -EINVAL:
			ioctl->header.return_code = IOCTL_INVALID_PARAM;
			break;

		default:
			ioctl->header.return_code = IOCTL_GENERAL_ERROR;
			break;
		}
	}

	/* Always copy the buffer back, if only to pick up the status */
	err = copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
	if (err != 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler copy_to_user didn't copy everything (err %d, cmd %u)",
			   err, cmd);
		kfree(ioctl);

		return -EFAULT;
	}

	kfree(ioctl);

	return 0;
}
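
/*
 * Illustrative call path (a sketch; field names are taken from the code
 * above): a management tool fills an atto_express_ioctl, sets
 * header.signature to EXPRESS_IOCTL_SIGNATURE and header.channel (0xFF
 * meaning "the adapter the ioctl was issued on"), then issues an
 * EXPRESS_IOCTL_* cmd on a SCSI host handle.  The driver copies the
 * buffer in with memdup_user(), dispatches on cmd, and always copies the
 * buffer back so the caller can inspect header.return_code.
 */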

int esas2r_ioctl(struct scsi_device *sd, unsigned int cmd, void __user *arg)
{
	return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
}

static void free_fw_buffers(struct esas2r_adapter *a)
{
	if (a->firmware.data) {
		dma_free_coherent(&a->pcid->dev,
				  (size_t)a->firmware.orig_len,
				  a->firmware.data,
				  (dma_addr_t)a->firmware.phys);

		a->firmware.data = NULL;
	}
}

static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
{
	free_fw_buffers(a);

	a->firmware.orig_len = length;

	a->firmware.data = dma_alloc_coherent(&a->pcid->dev,
					      (size_t)length,
					      (dma_addr_t *)&a->firmware.phys,
					      GFP_KERNEL);

	if (!a->firmware.data) {
		esas2r_debug("buffer alloc failed!");
		return 0;
	}

	return 1;
}

/* Handle a call to read firmware. */
int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
{
	esas2r_trace_enter();
	/* if the cached header is a status, simply copy it over and return. */
	if (a->firmware.state == FW_STATUS_ST) {
		int size = min_t(int, count, sizeof(a->firmware.header));
		esas2r_trace_exit();
		memcpy(buf, &a->firmware.header, size);
		esas2r_debug("esas2r_read_fw: STATUS size %d", size);
		return size;
	}

	/*
	 * if the cached header is a command, do it if at
	 * offset 0, otherwise copy the pieces.
	 */

	if (a->firmware.state == FW_COMMAND_ST) {
		u32 length = a->firmware.header.length;
		esas2r_trace_exit();

		esas2r_debug("esas2r_read_fw: COMMAND length %d off %d",
			     length,
			     off);

		if (off == 0) {
			if (a->firmware.header.action == FI_ACT_UP) {
				if (!allocate_fw_buffers(a, length))
					return -ENOMEM;

				/* copy header over */
				memcpy(a->firmware.data,
				       &a->firmware.header,
				       sizeof(a->firmware.header));

				do_fm_api(a,
					  (struct esas2r_flash_img *)a->firmware.data);
			} else if (a->firmware.header.action == FI_ACT_UPSZ) {
				int size =
					min((int)count,
					    (int)sizeof(a->firmware.header));
				do_fm_api(a, &a->firmware.header);
				memcpy(buf, &a->firmware.header, size);
				esas2r_debug("FI_ACT_UPSZ size %d", size);
				return size;
			} else {
				esas2r_debug("invalid action %d",
					     a->firmware.header.action);
				return -ENOSYS;
			}
		}

		if (count + off > length)
			count = length - off;

		if (count < 0)
			return 0;

		if (!a->firmware.data) {
			esas2r_debug(
				"read: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		esas2r_debug("esas2r_read_fw: off %d count %d length %d", off,
			     count,
			     length);

		memcpy(buf, &a->firmware.data[off], count);

		/* when done, release the buffer */
		if (length <= off + count) {
			esas2r_debug("esas2r_read_fw: freeing buffer!");

			free_fw_buffers(a);
		}

		return count;
	}

	esas2r_trace_exit();
	esas2r_debug("esas2r_read_fw: invalid firmware state %d",
		     a->firmware.state);

	return -EINVAL;
}
   1654
   1655/* Handle a call to write firmware. */
   1656int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
   1657		    int count)
   1658{
   1659	u32 length;
   1660
   1661	if (off == 0) {
   1662		struct esas2r_flash_img *header =
   1663			(struct esas2r_flash_img *)buf;
   1664
   1665		/* assume version 0 flash image */
   1666
   1667		int min_size = sizeof(struct esas2r_flash_img_v0);
   1668
   1669		a->firmware.state = FW_INVALID_ST;
   1670
   1671		/* validate the version field first */
   1672
   1673		if (count < 4
   1674		    ||  header->fi_version > FI_VERSION_1) {
   1675			esas2r_debug(
   1676				"esas2r_write_fw: short header or invalid version");
   1677			return -EINVAL;
   1678		}
   1679
   1680		/* See if its a version 1 flash image */
   1681
   1682		if (header->fi_version == FI_VERSION_1)
   1683			min_size = sizeof(struct esas2r_flash_img);
   1684
   1685		/* If this is the start, the header must be full and valid. */
   1686		if (count < min_size) {
   1687			esas2r_debug("esas2r_write_fw: short header, aborting");
   1688			return -EINVAL;
   1689		}
   1690
   1691		/* Make sure the size is reasonable. */
   1692		length = header->length;
   1693
   1694		if (length > 1024 * 1024) {
   1695			esas2r_debug(
   1696				"esas2r_write_fw: hosed, length %d  fi_version %d",
   1697				length, header->fi_version);
   1698			return -EINVAL;
   1699		}
   1700
   1701		/*
   1702		 * If this is a write command, allocate memory because
   1703		 * we have to cache everything. otherwise, just cache
   1704		 * the header, because the read op will do the command.
   1705		 */
   1706
   1707		if (header->action == FI_ACT_DOWN) {
   1708			if (!allocate_fw_buffers(a, length))
   1709				return -ENOMEM;
   1710
   1711			/*
   1712			 * Store the command, so there is context on subsequent
   1713			 * calls.
   1714			 */
   1715			memcpy(&a->firmware.header,
   1716			       buf,
   1717			       sizeof(*header));
   1718		} else if (header->action == FI_ACT_UP
   1719			   ||  header->action == FI_ACT_UPSZ) {
   1720			/* Save the command, result will be picked up on read */
   1721			memcpy(&a->firmware.header,
   1722			       buf,
   1723			       sizeof(*header));
   1724
   1725			a->firmware.state = FW_COMMAND_ST;
   1726
   1727			esas2r_debug(
   1728				"esas2r_write_fw: COMMAND, count %d, action %d ",
   1729				count, header->action);
   1730
   1731			/*
   1732			 * Pretend we took the whole buffer,
   1733			 * so we don't get bothered again.
   1734			 */
   1735
   1736			return count;
   1737		} else {
   1738			esas2r_debug("esas2r_write_fw: invalid action %d ",
   1739				     a->firmware.header.action);
   1740			return -ENOSYS;
   1741		}
   1742	} else {
   1743		length = a->firmware.header.length;
   1744	}
   1745
   1746	/*
   1747	 * We only get here on a download command, regardless of offset.
   1748	 * the chunks written by the system need to be cached, and when
   1749	 * the final one arrives, issue the fmapi command.
   1750	 */
   1751
   1752	if (off + count > length)
   1753		count = length - off;
   1754
   1755	if (count > 0) {
   1756		esas2r_debug("esas2r_write_fw: off %d count %d length %d", off,
   1757			     count,
   1758			     length);
   1759
   1760		/*
   1761		 * On a full upload, the system tries sending the whole buffer.
   1762		 * there's nothing to do with it, so just drop it here, before
   1763		 * trying to copy over into unallocated memory!
   1764		 */
   1765		if (a->firmware.header.action == FI_ACT_UP)
   1766			return count;
   1767
   1768		if (!a->firmware.data) {
   1769			esas2r_debug(
   1770				"write: nonzero offset but no buffer available!");
   1771			return -ENOMEM;
   1772		}
   1773
   1774		memcpy(&a->firmware.data[off], buf, count);
   1775
   1776		if (length == off + count) {
   1777			do_fm_api(a,
   1778				  (struct esas2r_flash_img *)a->firmware.data);
   1779
   1780			/*
   1781			 * Now copy the header result to be picked up by the
   1782			 * next read
   1783			 */
   1784			memcpy(&a->firmware.header,
   1785			       a->firmware.data,
   1786			       sizeof(a->firmware.header));
   1787
   1788			a->firmware.state = FW_STATUS_ST;
   1789
   1790			esas2r_debug("write completed");
   1791
    1792		/*
    1793		 * Since the system has the data buffered, the only way
    1794		 * these buffers can leak is if a root user writes a program
    1795		 * that claims a longer image than it actually sends, so
    1796		 * this final chunk (and this free) never happens.
    1797		 */
   1798			free_fw_buffers(a);
   1799		}
   1800	}
   1801
   1802	return count;
   1803}
   1804
   1805/* Callback for the completion of a VDA request. */
   1806static void vda_complete_req(struct esas2r_adapter *a,
   1807			     struct esas2r_request *rq)
   1808{
   1809	a->vda_command_done = 1;
   1810	wake_up_interruptible(&a->vda_waiter);
   1811}
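
/*
 * vda_command_done and vda_waiter form a simple completion handshake:
 * the issuing path clears the flag, starts the request, and sleeps in
 * wait_event_interruptible() until this callback sets the flag and
 * wakes the waiter.
 */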
   1812
   1813/* Scatter/gather callback for VDA requests */
   1814static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
   1815{
   1816	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
   1817	int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;
   1818
   1819	(*addr) = a->ppvda_buffer + offset;
   1820	return VDA_MAX_BUFFER_SIZE - offset;
   1821}
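
/*
 * The contract for these scatter/gather callbacks: store the bus address
 * corresponding to sgc->cur_offset in *addr and return how many contiguous
 * bytes are available from there.  For example, with cur_offset 64 bytes
 * into vda_buffer, *addr becomes ppvda_buffer + 64 and the callback
 * returns VDA_MAX_BUFFER_SIZE - 64.
 */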
   1822
   1823/* Handle a call to read a VDA command. */
   1824int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
   1825{
   1826	if (!a->vda_buffer)
   1827		return -ENOMEM;
   1828
   1829	if (off == 0) {
   1830		struct esas2r_request *rq;
   1831		struct atto_ioctl_vda *vi =
   1832			(struct atto_ioctl_vda *)a->vda_buffer;
   1833		struct esas2r_sg_context sgc;
   1834		bool wait_for_completion;
   1835
    1836		/*
    1837		 * Presumably, someone has already written the command to the
    1838		 * vda_buffer and is now reading back the response, so at this
    1839		 * point we actually issue the request to the chip and reply.
    1840		 */
   1841
   1842		/* allocate a request */
   1843		rq = esas2r_alloc_request(a);
   1844		if (rq == NULL) {
   1845			esas2r_debug("esas2r_read_vda: out of requests");
   1846			return -EBUSY;
   1847		}
   1848
   1849		rq->comp_cb = vda_complete_req;
   1850
   1851		sgc.first_req = rq;
   1852		sgc.adapter = a;
   1853		sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
   1854		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;
   1855
   1856		a->vda_command_done = 0;
   1857
   1858		wait_for_completion =
   1859			esas2r_process_vda_ioctl(a, vi, rq, &sgc);
   1860
   1861		if (wait_for_completion) {
   1862			/* now wait around for it to complete. */
   1863
   1864			while (!a->vda_command_done)
   1865				wait_event_interruptible(a->vda_waiter,
   1866							 a->vda_command_done);
   1867		}
   1868
    1869		esas2r_free_request(a, rq);
   1870	}
   1871
   1872	if (off > VDA_MAX_BUFFER_SIZE)
   1873		return 0;
   1874
   1875	if (count + off > VDA_MAX_BUFFER_SIZE)
   1876		count = VDA_MAX_BUFFER_SIZE - off;
   1877
   1878	if (count < 0)
   1879		return 0;
   1880
   1881	memcpy(buf, a->vda_buffer + off, count);
   1882
   1883	return count;
   1884}
   1885
   1886/* Handle a call to write a VDA command. */
   1887int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
   1888		     int count)
   1889{
    1890	/*
    1891	 * Allocate memory for it, if not already done.  Once allocated,
    1892	 * we will keep it around until the driver is unloaded.
    1893	 */
   1894
   1895	if (!a->vda_buffer) {
   1896		dma_addr_t dma_addr;
   1897		a->vda_buffer = dma_alloc_coherent(&a->pcid->dev,
   1898						   (size_t)
   1899						   VDA_MAX_BUFFER_SIZE,
   1900						   &dma_addr,
   1901						   GFP_KERNEL);
   1902
   1903		a->ppvda_buffer = dma_addr;
   1904	}
   1905
   1906	if (!a->vda_buffer)
   1907		return -ENOMEM;
   1908
   1909	if (off > VDA_MAX_BUFFER_SIZE)
   1910		return 0;
   1911
   1912	if (count + off > VDA_MAX_BUFFER_SIZE)
   1913		count = VDA_MAX_BUFFER_SIZE - off;
   1914
   1915	if (count < 1)
   1916		return 0;
   1917
   1918	memcpy(a->vda_buffer + off, buf, count);
   1919
   1920	return count;
   1921}
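
/*
 * Illustrative only: assuming the read/write pair above is exposed
 * through a sysfs-style binary attribute, a hypothetical caller would
 * submit a VDA command like this (error handling omitted):
 *
 *	write(fd, &vda_cmd, sizeof(vda_cmd));  // stage the command
 *	read(fd, &vda_rsp, sizeof(vda_rsp));   // off 0 issues it and
 *	                                       // copies back the reply
 */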
   1922
    1923	/* Callback for the completion of an FS_API request. */
   1924static void fs_api_complete_req(struct esas2r_adapter *a,
   1925				struct esas2r_request *rq)
   1926{
   1927	a->fs_api_command_done = 1;
   1928
   1929	wake_up_interruptible(&a->fs_api_waiter);
   1930}
   1931
    1932	/* Scatter/gather callback for FS_API requests */
   1933static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
   1934{
   1935	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
   1936	struct esas2r_ioctl_fs *fs =
   1937		(struct esas2r_ioctl_fs *)a->fs_api_buffer;
   1938	u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;
   1939
   1940	(*addr) = a->ppfs_api_buffer + offset;
   1941
   1942	return a->fs_api_buffer_size - offset;
   1943}
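
/*
 * Same contract as get_physaddr_vda() above, but the window here is the
 * dynamically sized fs_api_buffer, so the remaining-byte count is
 * computed against fs_api_buffer_size rather than a fixed maximum.
 */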
   1944
   1945/* Handle a call to read firmware via FS_API. */
   1946int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
   1947{
   1948	if (!a->fs_api_buffer)
   1949		return -ENOMEM;
   1950
   1951	if (off == 0) {
   1952		struct esas2r_request *rq;
   1953		struct esas2r_sg_context sgc;
   1954		struct esas2r_ioctl_fs *fs =
   1955			(struct esas2r_ioctl_fs *)a->fs_api_buffer;
   1956
   1957		/* If another flash request is already in progress, return. */
   1958		if (mutex_lock_interruptible(&a->fs_api_mutex)) {
   1959busy:
   1960			fs->status = ATTO_STS_OUT_OF_RSRC;
   1961			return -EBUSY;
   1962		}
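		/*
		 * Note: the busy label above is also reached by a goto from
		 * the allocation-failure path below, which jumps back into
		 * this block to report ATTO_STS_OUT_OF_RSRC and -EBUSY.
		 */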
   1963
    1964		/*
    1965		 * Presumably, someone has already written the command to the
    1966		 * fs_api_buffer and is now reading back the response, so at
    1967		 * this point we actually issue the request to the chip and
    1968		 * reply.  Allocate a request.
    1969		 */
   1970
   1971		rq = esas2r_alloc_request(a);
   1972		if (rq == NULL) {
   1973			esas2r_debug("esas2r_read_fs: out of requests");
   1974			mutex_unlock(&a->fs_api_mutex);
   1975			goto busy;
   1976		}
   1977
   1978		rq->comp_cb = fs_api_complete_req;
   1979
    1980		/* Set up the SGCONTEXT to build the s/g table */
   1981
   1982		sgc.cur_offset = fs->data;
   1983		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api;
   1984
   1985		a->fs_api_command_done = 0;
   1986
   1987		if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
   1988			if (fs->status == ATTO_STS_OUT_OF_RSRC)
   1989				count = -EBUSY;
   1990
   1991			goto dont_wait;
   1992		}
   1993
   1994		/* Now wait around for it to complete. */
   1995
    1996		while (!a->fs_api_command_done)
    1997			wait_event_interruptible(a->fs_api_waiter,
    1998						 a->fs_api_command_done);
   2000dont_wait:
   2001		/* Free the request and keep going */
   2002		mutex_unlock(&a->fs_api_mutex);
    2003		esas2r_free_request(a, rq);
   2004
   2005		/* Pick up possible error code from above */
   2006		if (count < 0)
   2007			return count;
   2008	}
   2009
   2010	if (off > a->fs_api_buffer_size)
   2011		return 0;
   2012
   2013	if (count + off > a->fs_api_buffer_size)
   2014		count = a->fs_api_buffer_size - off;
   2015
   2016	if (count < 0)
   2017		return 0;
   2018
   2019	memcpy(buf, a->fs_api_buffer + off, count);
   2020
   2021	return count;
   2022}
   2023
   2024/* Handle a call to write firmware via FS_API. */
   2025int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
   2026		    int count)
   2027{
   2028	if (off == 0) {
   2029		struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf;
   2030		u32 length = fs->command.length + offsetof(
   2031			struct esas2r_ioctl_fs,
   2032			data);
   2033
    2034		/*
    2035		 * Special case: for BEGINW commands the length field does
    2036		 * not yet describe real data, so just get enough for the header.
    2037		 */
   2038
   2039		if (fs->command.command == ESAS2R_FS_CMD_BEGINW)
   2040			length = offsetof(struct esas2r_ioctl_fs, data);
   2041
   2042		/*
   2043		 * Beginning a command.  We assume we'll get at least
   2044		 * enough in the first write so we can look at the
   2045		 * header and see how much we need to alloc.
   2046		 */
   2047
   2048		if (count < offsetof(struct esas2r_ioctl_fs, data))
   2049			return -EINVAL;
   2050
   2051		/* Allocate a buffer or use the existing buffer. */
   2052		if (a->fs_api_buffer) {
   2053			if (a->fs_api_buffer_size < length) {
   2054				/* Free too-small buffer and get a new one */
   2055				dma_free_coherent(&a->pcid->dev,
   2056						  (size_t)a->fs_api_buffer_size,
   2057						  a->fs_api_buffer,
   2058						  (dma_addr_t)a->ppfs_api_buffer);
   2059
   2060				goto re_allocate_buffer;
   2061			}
   2062		} else {
   2063re_allocate_buffer:
   2064			a->fs_api_buffer_size = length;
   2065
   2066			a->fs_api_buffer = dma_alloc_coherent(&a->pcid->dev,
   2067							      (size_t)a->fs_api_buffer_size,
   2068							      (dma_addr_t *)&a->ppfs_api_buffer,
   2069							      GFP_KERNEL);
   2070		}
   2071	}
   2072
   2073	if (!a->fs_api_buffer)
   2074		return -ENOMEM;
   2075
   2076	if (off > a->fs_api_buffer_size)
   2077		return 0;
   2078
   2079	if (count + off > a->fs_api_buffer_size)
   2080		count = a->fs_api_buffer_size - off;
   2081
   2082	if (count < 1)
   2083		return 0;
   2084
   2085	memcpy(a->fs_api_buffer + off, buf, count);
   2086
   2087	return count;
   2088}
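
/*
 * Illustrative only: a hypothetical FS_API transaction through the two
 * handlers above, again assuming a sysfs-style binary attribute (error
 * handling omitted):
 *
 *	write(fd, &fs_cmd, hdr_len + data_len); // off 0 sizes the DMA
 *	                                        // buffer and caches data
 *	read(fd, &fs_rsp, rsp_len);             // off 0 issues the request
 *	                                        // and returns the result
 */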