cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

myrs.c (91149B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * This driver supports the newer, SCSI-based firmware interface only.
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver, which has
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include "myrs.h"

static struct raid_template *myrs_raid_template;

static struct myrs_devstate_name_entry {
	enum myrs_devstate state;
	char *name;
} myrs_devstate_name_list[] = {
	{ MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
	{ MYRS_DEVICE_ONLINE, "Online" },
	{ MYRS_DEVICE_REBUILD, "Rebuild" },
	{ MYRS_DEVICE_MISSING, "Missing" },
	{ MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
	{ MYRS_DEVICE_OFFLINE, "Offline" },
	{ MYRS_DEVICE_CRITICAL, "Critical" },
	{ MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
	{ MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
	{ MYRS_DEVICE_STANDBY, "Standby" },
	{ MYRS_DEVICE_INVALID_STATE, "Invalid" },
};

static char *myrs_devstate_name(enum myrs_devstate state)
{
	struct myrs_devstate_name_entry *entry = myrs_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return NULL;
}

static struct myrs_raid_level_name_entry {
	enum myrs_raid_level level;
	char *name;
} myrs_raid_level_name_list[] = {
	{ MYRS_RAID_LEVEL0, "RAID0" },
	{ MYRS_RAID_LEVEL1, "RAID1" },
	{ MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
	{ MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
	{ MYRS_RAID_LEVEL6, "RAID6" },
	{ MYRS_RAID_JBOD, "JBOD" },
	{ MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
	{ MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
	{ MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
	{ MYRS_RAID_SPAN, "Mylex SPAN" },
	{ MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
	{ MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
	{ MYRS_RAID_PHYSICAL, "Physical device" },
};

static char *myrs_raid_level_name(enum myrs_raid_level level)
{
	struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}

/*
 * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
 */
static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
{
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrs_cmd_mbox));
	cmd_blk->status = 0;
}

/*
 * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
 */
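/*
 * The command mailboxes form a ring in coherent memory: write_cmd_mbox()
 * copies the command into the next free slot, and the controller clears
 * the first word of a slot once it has fetched the command from it.
 * If either of the two most recently submitted slots has already been
 * consumed, the controller may have gone idle in between, so the
 * get_cmd_mbox() doorbell hook is invoked to signal that a new command
 * is pending (the exact doorbell semantics are hardware-specific, as in
 * the original DAC960 driver).  The ring wraps by resetting
 * next_cmd_mbox to first_cmd_mbox once it passes last_cmd_mbox.
 */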
static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
{
	void __iomem *base = cs->io_base;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;

	cs->write_cmd_mbox(next_mbox, mbox);

	if (cs->prev_cmd_mbox1->words[0] == 0 ||
	    cs->prev_cmd_mbox2->words[0] == 0)
		cs->get_cmd_mbox(base);

	cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
	cs->prev_cmd_mbox1 = next_mbox;

	if (++next_mbox > cs->last_cmd_mbox)
		next_mbox = cs->first_cmd_mbox;

	cs->next_cmd_mbox = next_mbox;
}

/*
 * myrs_exec_cmd - executes V2 Command and waits for completion.
 */
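/*
 * Submission is serialized against the interrupt path via queue_lock,
 * while the completion itself is signalled from the interrupt handler
 * once the matching status mailbox entry has been processed.  Since
 * this sleeps in wait_for_completion(), it must not be called from
 * atomic context.
 */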
static void myrs_exec_cmd(struct myrs_hba *cs,
		struct myrs_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(complete);
	unsigned long flags;

	cmd_blk->complete = &complete;
	spin_lock_irqsave(&cs->queue_lock, flags);
	myrs_qcmd(cs, cmd_blk);
	spin_unlock_irqrestore(&cs->queue_lock, flags);

	wait_for_completion(&complete);
}

/*
 * myrs_report_progress - prints progress message
 */
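/*
 * Both counters are scaled down by 128 (">> 7") before the percentage
 * is computed, so the multiplication by 100 cannot overflow the 32-bit
 * int arithmetic even for very large logical drives.
 */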
static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
		unsigned char *msg, unsigned long blocks,
		unsigned long size)
{
	shost_printk(KERN_INFO, cs->host,
		     "Logical Drive %d: %s in Progress: %d%% completed\n",
		     ldev_num, msg,
		     (100 * (int)(blocks >> 7)) / (int)(size >> 7));
}

/*
 * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
 */
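/*
 * The result is read into cs->ctlr_info under dcmd_mutex, which
 * serializes all users of the shared direct command block.  The
 * logical drive counters are snapshotted beforehand so that changes
 * since the previous poll can be reported, and any active background
 * operation (initialization, consistency check, rebuild, expansion)
 * flags the controller for a rescan via cs->needs_update.
 */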
static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ctlr_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;
	unsigned short ldev_present, ldev_critical, ldev_offline;

	ldev_present = cs->ctlr_info->ldev_present;
	ldev_critical = cs->ctlr_info->ldev_critical;
	ldev_offline = cs->ctlr_info->ldev_offline;

	ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
					sizeof(struct myrs_ctlr_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ctlr_info.id = MYRS_DCMD_TAG;
	mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ctlr_info.control.dma_ctrl_to_host = true;
	mbox->ctlr_info.control.no_autosense = true;
	mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
	mbox->ctlr_info.ctlr_num = 0;
	mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
	sgl = &mbox->ctlr_info.dma_addr;
	sgl->sge[0].sge_addr = ctlr_info_addr;
	sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
	dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
			 sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		if (cs->ctlr_info->bg_init_active +
		    cs->ctlr_info->ldev_init_active +
		    cs->ctlr_info->pdev_init_active +
		    cs->ctlr_info->cc_active +
		    cs->ctlr_info->rbld_active +
		    cs->ctlr_info->exp_active != 0)
			cs->needs_update = true;
		if (cs->ctlr_info->ldev_present != ldev_present ||
		    cs->ctlr_info->ldev_critical != ldev_critical ||
		    cs->ctlr_info->ldev_offline != ldev_offline)
			shost_printk(KERN_INFO, cs->host,
				     "Logical drive count changes (%d/%d/%d)\n",
				     cs->ctlr_info->ldev_critical,
				     cs->ctlr_info->ldev_offline,
				     cs->ctlr_info->ldev_present);
	}

	return status;
}

/*
 * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
 */
static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
		unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ldev_info_addr;
	struct myrs_ldev_info ldev_info_orig;
	union myrs_sgl *sgl;
	unsigned char status;

	memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
	ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
					sizeof(struct myrs_ldev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ldev_info.id = MYRS_DCMD_TAG;
	mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ldev_info.control.dma_ctrl_to_host = true;
	mbox->ldev_info.control.no_autosense = true;
	mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
	mbox->ldev_info.ldev.ldev_num = ldev_num;
	mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
	sgl = &mbox->ldev_info.dma_addr;
	sgl->sge[0].sge_addr = ldev_info_addr;
	sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
			 sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		unsigned short ldev_num = ldev_info->ldev_num;
		struct myrs_ldev_info *new = ldev_info;
		struct myrs_ldev_info *old = &ldev_info_orig;
		unsigned long ldev_size = new->cfg_devsize;

		if (new->dev_state != old->dev_state) {
			const char *name;

			name = myrs_devstate_name(new->dev_state);
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d is now %s\n",
				     ldev_num, name ? name : "Invalid");
		}
		if ((new->soft_errs != old->soft_errs) ||
		    (new->cmds_failed != old->cmds_failed) ||
		    (new->deferred_write_errs != old->deferred_write_errs))
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
				     ldev_num, new->soft_errs,
				     new->cmds_failed,
				     new->deferred_write_errs);
		if (new->bg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Background Initialization",
					     new->bg_init_lba, ldev_size);
		else if (new->fg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Foreground Initialization",
					     new->fg_init_lba, ldev_size);
		else if (new->migration_active)
			myrs_report_progress(cs, ldev_num,
					     "Data Migration",
					     new->migration_lba, ldev_size);
		else if (new->patrol_active)
			myrs_report_progress(cs, ldev_num,
					     "Patrol Operation",
					     new->patrol_lba, ldev_size);
		if (old->bg_init_active && !new->bg_init_active)
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d: Background Initialization %s\n",
				     ldev_num,
				     (new->ldev_control.ldev_init_done ?
				      "Completed" : "Failed"));
	}
	return status;
}

/*
 * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
 */
static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
		unsigned char channel, unsigned char target, unsigned char lun,
		struct myrs_pdev_info *pdev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t pdev_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;

	pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
					sizeof(struct myrs_pdev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->pdev_info.id = MYRS_DCMD_TAG;
	mbox->pdev_info.control.dma_ctrl_to_host = true;
	mbox->pdev_info.control.no_autosense = true;
	mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
	mbox->pdev_info.pdev.lun = lun;
	mbox->pdev_info.pdev.target = target;
	mbox->pdev_info.pdev.channel = channel;
	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
	sgl = &mbox->pdev_info.dma_addr;
	sgl->sge[0].sge_addr = pdev_info_addr;
	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
		channel, target, lun);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
			 sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
	return status;
}

/*
 * myrs_dev_op - executes a "Device Operation" Command
 */
static unsigned char myrs_dev_op(struct myrs_hba *cs,
		enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned char status;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
	mbox->dev_op.id = MYRS_DCMD_TAG;
	mbox->dev_op.control.dma_ctrl_to_host = true;
	mbox->dev_op.control.no_autosense = true;
	mbox->dev_op.ioctl_opcode = opcode;
	mbox->dev_op.opdev = opdev;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	return status;
}

/*
 * myrs_translate_pdev - translates a Physical Device Channel and
 * TargetID into a Logical Device.
 */
static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
		unsigned char channel, unsigned char target, unsigned char lun,
		struct myrs_devmap *devmap)
{
	struct pci_dev *pdev = cs->pdev;
	dma_addr_t devmap_addr;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	memset(devmap, 0x0, sizeof(struct myrs_devmap));
	devmap_addr = dma_map_single(&pdev->dev, devmap,
				     sizeof(struct myrs_devmap),
				     DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, devmap_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	mbox = &cmd_blk->mbox;
	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->pdev_info.control.dma_ctrl_to_host = true;
	mbox->pdev_info.control.no_autosense = true;
	mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
	mbox->pdev_info.pdev.target = target;
	mbox->pdev_info.pdev.channel = channel;
	mbox->pdev_info.pdev.lun = lun;
	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
	sgl = &mbox->pdev_info.dma_addr;
	sgl->sge[0].sge_addr = devmap_addr;
	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;

	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&pdev->dev, devmap_addr,
			 sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
	return status;
}

/*
 * myrs_get_event - executes a Get Event Command
 */
static unsigned char myrs_get_event(struct myrs_hba *cs,
		unsigned int event_num, struct myrs_event *event_buf)
{
	struct pci_dev *pdev = cs->pdev;
	dma_addr_t event_addr;
	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	event_addr = dma_map_single(&pdev->dev, event_buf,
				    sizeof(struct myrs_event), DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, event_addr))
		return MYRS_STATUS_FAILED;

	mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
	mbox->get_event.dma_size = sizeof(struct myrs_event);
	mbox->get_event.evnum_upper = event_num >> 16;
	mbox->get_event.ctlr_num = 0;
	mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
	mbox->get_event.evnum_lower = event_num & 0xFFFF;
	sgl = &mbox->get_event.dma_addr;
	sgl->sge[0].sge_addr = event_addr;
	sgl->sge[0].sge_count = mbox->get_event.dma_size;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	dma_unmap_single(&pdev->dev, event_addr,
			 sizeof(struct myrs_event), DMA_FROM_DEVICE);

	return status;
}

/*
 * myrs_get_fwstatus - executes a Get Health Status Command
 */
static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
{
	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	myrs_reset_cmd(cmd_blk);
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_MCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->common.dma_size = sizeof(struct myrs_fwstat);
	mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
	sgl = &mbox->common.dma_addr;
	sgl->sge[0].sge_addr = cs->fwstat_addr;
	sgl->sge[0].sge_count = mbox->common.dma_size;
	dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;

	return status;
}

/*
 * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
 */
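/*
 * The handshake has two stages: first the command and status mailbox
 * arrays and the firmware health buffer are allocated from coherent
 * DMA memory, then their bus addresses are packed into a single
 * MYRS_IOCTL_SET_MEM_MBOX command.  That command is handed to the
 * hardware-specific enable_mbox_fn() through the legacy register
 * interface; once it succeeds, all further commands are submitted
 * through the memory mailbox ring set up here.
 */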
static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
		enable_mbox_t enable_mbox_fn)
{
	void __iomem *base = cs->io_base;
	struct pci_dev *pdev = cs->pdev;
	union myrs_cmd_mbox *cmd_mbox;
	struct myrs_stat_mbox *stat_mbox;
	union myrs_cmd_mbox *mbox;
	dma_addr_t mbox_addr;
	unsigned char status = MYRS_STATUS_FAILED;

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
			dev_err(&pdev->dev, "DMA mask out of range\n");
			return false;
		}

	/* Temporary dma mapping, used only in the scope of this function */
	mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
				  &mbox_addr, GFP_KERNEL);
	/*
	 * dma_alloc_coherent() signals failure by returning NULL;
	 * dma_mapping_error() is only valid for streaming mappings,
	 * so test the returned pointer here and below.
	 */
	if (!mbox)
		return false;

	/* These are the base addresses for the command memory mailbox array */
	cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
	cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
				      &cs->cmd_mbox_addr, GFP_KERNEL);
	if (!cmd_mbox) {
		dev_err(&pdev->dev, "Failed to allocate command mailbox\n");
		goto out_free;
	}
	cs->first_cmd_mbox = cmd_mbox;
	cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
	cs->last_cmd_mbox = cmd_mbox;
	cs->next_cmd_mbox = cs->first_cmd_mbox;
	cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
	cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;

	/* These are the base addresses for the status memory mailbox array */
	cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
	stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
				       &cs->stat_mbox_addr, GFP_KERNEL);
	if (!stat_mbox) {
		dev_err(&pdev->dev, "Failed to allocate status mailbox\n");
		goto out_free;
	}

	cs->first_stat_mbox = stat_mbox;
	stat_mbox += MYRS_MAX_STAT_MBOX - 1;
	cs->last_stat_mbox = stat_mbox;
	cs->next_stat_mbox = cs->first_stat_mbox;

	cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
					    sizeof(struct myrs_fwstat),
					    &cs->fwstat_addr, GFP_KERNEL);
	if (!cs->fwstat_buf) {
		dev_err(&pdev->dev,
			"Failed to allocate firmware health buffer\n");
		goto out_free;
	}
	cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info), GFP_KERNEL);
	if (!cs->ctlr_info)
		goto out_free;

	cs->event_buf = kzalloc(sizeof(struct myrs_event), GFP_KERNEL);
	if (!cs->event_buf)
		goto out_free;

	/* Enable the Memory Mailbox Interface. */
	memset(mbox, 0, sizeof(union myrs_cmd_mbox));
	mbox->set_mbox.id = 1;
	mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
	mbox->set_mbox.control.no_autosense = true;
	mbox->set_mbox.first_cmd_mbox_size_kb =
		(MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
	mbox->set_mbox.first_stat_mbox_size_kb =
		(MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
	mbox->set_mbox.second_cmd_mbox_size_kb = 0;
	mbox->set_mbox.second_stat_mbox_size_kb = 0;
	mbox->set_mbox.sense_len = 0;
	mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
	mbox->set_mbox.fwstat_buf_size_kb = 1;
	mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
	mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
	mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
	status = enable_mbox_fn(base, mbox_addr);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
			  mbox, mbox_addr);
	if (status != MYRS_STATUS_SUCCESS)
		dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
			status);
	return (status == MYRS_STATUS_SUCCESS);
}

/*
 * myrs_get_config - reads the Configuration Information
 */
static int myrs_get_config(struct myrs_hba *cs)
{
	struct myrs_ctlr_info *info = cs->ctlr_info;
	struct Scsi_Host *shost = cs->host;
	unsigned char status;
	unsigned char model[20];
	unsigned char fw_version[12];
	int i, model_len;

	/* Get data into dma-able area, then copy into permanent location */
	mutex_lock(&cs->cinfo_mutex);
	status = myrs_get_ctlr_info(cs);
	mutex_unlock(&cs->cinfo_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_ERR, shost,
			     "Failed to get controller information\n");
		return -ENODEV;
	}

	/* Initialize the Controller Model Name and Full Model Name fields. */
	model_len = sizeof(info->ctlr_name);
	if (model_len > sizeof(model)-1)
		model_len = sizeof(model)-1;
	memcpy(model, info->ctlr_name, model_len);
	model_len--;
	while (model[model_len] == ' ' || model[model_len] == '\0')
		model_len--;
	model[++model_len] = '\0';
	strcpy(cs->model_name, "DAC960 ");
	strcat(cs->model_name, model);
	/* Initialize the Controller Firmware Version field. */
	sprintf(fw_version, "%d.%02d-%02d",
		info->fw_major_version, info->fw_minor_version,
		info->fw_turn_number);
	if (info->fw_major_version == 6 &&
	    info->fw_minor_version == 0 &&
	    info->fw_turn_number < 1) {
		shost_printk(KERN_WARNING, shost,
			"FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
			"STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
			"PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
			fw_version);
		return -ENODEV;
	}
	/* Initialize the Controller Channels and Targets. */
	shost->max_channel = info->physchan_present + info->virtchan_present;
	shost->max_id = info->max_targets[0];
	for (i = 1; i < 16; i++) {
		if (!info->max_targets[i])
			continue;
		if (shost->max_id < info->max_targets[i])
			shost->max_id = info->max_targets[i];
	}

	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most three less than
	 * the Controller Queue Depth; tag '1' is reserved for
	 * direct commands, and tag '2' for monitoring commands.
	 */
	shost->can_queue = info->max_tcq - 3;
	if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
		shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
	shost->max_sectors = info->max_transfer_size;
	shost->sg_tablesize = info->max_sge;
	if (shost->sg_tablesize > MYRS_SG_LIMIT)
		shost->sg_tablesize = MYRS_SG_LIMIT;

	shost_printk(KERN_INFO, shost,
		"Configuring %s PCI RAID Controller\n", model);
	shost_printk(KERN_INFO, shost,
		"  Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
		fw_version, info->physchan_present, info->mem_size_mb);

	shost_printk(KERN_INFO, shost,
		     "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		     shost->can_queue, shost->max_sectors);

	shost_printk(KERN_INFO, shost,
		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
	for (i = 0; i < info->physchan_max; i++) {
		if (!info->max_targets[i])
			continue;
		shost_printk(KERN_INFO, shost,
			     "  Device Channel %d: max %d devices\n",
			     i, info->max_targets[i]);
	}
	shost_printk(KERN_INFO, shost,
		     "  Physical: %d/%d channels, %d disks, %d devices\n",
		     info->physchan_present, info->physchan_max,
		     info->pdisk_present, info->pdev_present);

	shost_printk(KERN_INFO, shost,
		     "  Logical: %d/%d channels, %d disks\n",
		     info->virtchan_present, info->virtchan_max,
		     info->ldev_present);
	return 0;
}

/*
 * myrs_log_event - prints a Controller Event message
 */
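/*
 * Each entry in the table below is prefixed with a one-character type
 * code that myrs_log_event() uses to pick the log format: 'P' physical
 * device, 'L'/'M' logical drive (both trigger a rescan), 'S' sense
 * data, 'E' enclosure (the text is a printf format taking the unit
 * number), and 'C' controller.  The message text starts at offset 2.
 */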
static struct {
	int ev_code;
	unsigned char *ev_msg;
} myrs_ev_list[] = {
	/* Physical Device Events (0x0000 - 0x007F) */
	{ 0x0001, "P Online" },
	{ 0x0002, "P Standby" },
	{ 0x0005, "P Automatic Rebuild Started" },
	{ 0x0006, "P Manual Rebuild Started" },
	{ 0x0007, "P Rebuild Completed" },
	{ 0x0008, "P Rebuild Cancelled" },
	{ 0x0009, "P Rebuild Failed for Unknown Reasons" },
	{ 0x000A, "P Rebuild Failed due to New Physical Device" },
	{ 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
	{ 0x000C, "S Offline" },
	{ 0x000D, "P Found" },
	{ 0x000E, "P Removed" },
	{ 0x000F, "P Unconfigured" },
	{ 0x0010, "P Expand Capacity Started" },
	{ 0x0011, "P Expand Capacity Completed" },
	{ 0x0012, "P Expand Capacity Failed" },
	{ 0x0013, "P Command Timed Out" },
	{ 0x0014, "P Command Aborted" },
	{ 0x0015, "P Command Retried" },
	{ 0x0016, "P Parity Error" },
	{ 0x0017, "P Soft Error" },
	{ 0x0018, "P Miscellaneous Error" },
	{ 0x0019, "P Reset" },
	{ 0x001A, "P Active Spare Found" },
	{ 0x001B, "P Warm Spare Found" },
	{ 0x001C, "S Sense Data Received" },
	{ 0x001D, "P Initialization Started" },
	{ 0x001E, "P Initialization Completed" },
	{ 0x001F, "P Initialization Failed" },
	{ 0x0020, "P Initialization Cancelled" },
	{ 0x0021, "P Failed because Write Recovery Failed" },
	{ 0x0022, "P Failed because SCSI Bus Reset Failed" },
	{ 0x0023, "P Failed because of Double Check Condition" },
	{ 0x0024, "P Failed because Device Cannot Be Accessed" },
	{ 0x0025, "P Failed because of Gross Error on SCSI Processor" },
	{ 0x0026, "P Failed because of Bad Tag from Device" },
	{ 0x0027, "P Failed because of Command Timeout" },
	{ 0x0028, "P Failed because of System Reset" },
	{ 0x0029, "P Failed because of Busy Status or Parity Error" },
	{ 0x002A, "P Failed because Host Set Device to Failed State" },
	{ 0x002B, "P Failed because of Selection Timeout" },
	{ 0x002C, "P Failed because of SCSI Bus Phase Error" },
	{ 0x002D, "P Failed because Device Returned Unknown Status" },
	{ 0x002E, "P Failed because Device Not Ready" },
	{ 0x002F, "P Failed because Device Not Found at Startup" },
	{ 0x0030, "P Failed because COD Write Operation Failed" },
	{ 0x0031, "P Failed because BDT Write Operation Failed" },
	{ 0x0039, "P Missing at Startup" },
	{ 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
	{ 0x003C, "P Temporarily Offline Device Automatically Made Online" },
	{ 0x003D, "P Standby Rebuild Started" },
	/* Logical Device Events (0x0080 - 0x00FF) */
	{ 0x0080, "M Consistency Check Started" },
	{ 0x0081, "M Consistency Check Completed" },
	{ 0x0082, "M Consistency Check Cancelled" },
	{ 0x0083, "M Consistency Check Completed With Errors" },
	{ 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
	{ 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
	{ 0x0086, "L Offline" },
	{ 0x0087, "L Critical" },
	{ 0x0088, "L Online" },
	{ 0x0089, "M Automatic Rebuild Started" },
	{ 0x008A, "M Manual Rebuild Started" },
	{ 0x008B, "M Rebuild Completed" },
	{ 0x008C, "M Rebuild Cancelled" },
	{ 0x008D, "M Rebuild Failed for Unknown Reasons" },
	{ 0x008E, "M Rebuild Failed due to New Physical Device" },
	{ 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
	{ 0x0090, "M Initialization Started" },
	{ 0x0091, "M Initialization Completed" },
	{ 0x0092, "M Initialization Cancelled" },
	{ 0x0093, "M Initialization Failed" },
	{ 0x0094, "L Found" },
	{ 0x0095, "L Deleted" },
	{ 0x0096, "M Expand Capacity Started" },
	{ 0x0097, "M Expand Capacity Completed" },
	{ 0x0098, "M Expand Capacity Failed" },
	{ 0x0099, "L Bad Block Found" },
	{ 0x009A, "L Size Changed" },
	{ 0x009B, "L Type Changed" },
	{ 0x009C, "L Bad Data Block Found" },
	{ 0x009E, "L Read of Data Block in BDT" },
	{ 0x009F, "L Write Back Data for Disk Block Lost" },
	{ 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
	{ 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
	{ 0x00A2, "L Standby Rebuild Started" },
	/* Fault Management Events (0x0100 - 0x017F) */
	{ 0x0140, "E Fan %d Failed" },
	{ 0x0141, "E Fan %d OK" },
	{ 0x0142, "E Fan %d Not Present" },
	{ 0x0143, "E Power Supply %d Failed" },
	{ 0x0144, "E Power Supply %d OK" },
	{ 0x0145, "E Power Supply %d Not Present" },
	{ 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
	{ 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
	{ 0x0148, "E Temperature Sensor %d Temperature Normal" },
	{ 0x0149, "E Temperature Sensor %d Not Present" },
	{ 0x014A, "E Enclosure Management Unit %d Access Critical" },
	{ 0x014B, "E Enclosure Management Unit %d Access OK" },
	{ 0x014C, "E Enclosure Management Unit %d Access Offline" },
	/* Controller Events (0x0180 - 0x01FF) */
	{ 0x0181, "C Cache Write Back Error" },
	{ 0x0188, "C Battery Backup Unit Found" },
	{ 0x0189, "C Battery Backup Unit Charge Level Low" },
	{ 0x018A, "C Battery Backup Unit Charge Level OK" },
	{ 0x0193, "C Installation Aborted" },
	{ 0x0195, "C Battery Backup Unit Physically Removed" },
	{ 0x0196, "C Memory Error During Warm Boot" },
	{ 0x019E, "C Memory Soft ECC Error Corrected" },
	{ 0x019F, "C Memory Hard ECC Error Corrected" },
	{ 0x01A2, "C Battery Backup Unit Failed" },
	{ 0x01AB, "C Mirror Race Recovery Failed" },
	{ 0x01AC, "C Mirror Race on Critical Drive" },
	/* Controller Internal Processor Events */
	{ 0x0380, "C Internal Controller Hung" },
	{ 0x0381, "C Internal Controller Firmware Breakpoint" },
	{ 0x0390, "C Internal Controller i960 Processor Specific Error" },
	{ 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
	{ 0, "" }
};

static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
{
	unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
	int ev_idx = 0, ev_code;
	unsigned char ev_type, *ev_msg;
	struct Scsi_Host *shost = cs->host;
	struct scsi_device *sdev;
	struct scsi_sense_hdr sshdr = {0};
	unsigned char sense_info[4];
	unsigned char cmd_specific[4];

	if (ev->ev_code == 0x1C) {
		if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
			memset(&sshdr, 0x0, sizeof(sshdr));
			memset(sense_info, 0x0, sizeof(sense_info));
			memset(cmd_specific, 0x0, sizeof(cmd_specific));
		} else {
			memcpy(sense_info, &ev->sense_data[3], 4);
			memcpy(cmd_specific, &ev->sense_data[7], 4);
		}
	}
	if (sshdr.sense_key == VENDOR_SPECIFIC &&
	    (sshdr.asc == 0x80 || sshdr.asc == 0x81))
		ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
	while (true) {
		ev_code = myrs_ev_list[ev_idx].ev_code;
		if (ev_code == ev->ev_code || ev_code == 0)
			break;
		ev_idx++;
	}
	ev_type = myrs_ev_list[ev_idx].ev_msg[0];
	ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
	if (ev_code == 0) {
		shost_printk(KERN_WARNING, shost,
			     "Unknown Controller Event Code %04X\n",
			     ev->ev_code);
		return;
	}
	switch (ev_type) {
	case 'P':
		sdev = scsi_device_lookup(shost, ev->channel,
					  ev->target, 0);
		/*
		 * scsi_device_lookup() may return NULL and, on success,
		 * takes a reference that must be dropped again.
		 */
		if (!sdev) {
			shost_printk(KERN_INFO, shost,
				     "event %d: Physical Device %d:%d %s\n",
				     ev->ev_seq, ev->channel, ev->target,
				     ev_msg);
			break;
		}
		sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
			    ev->ev_seq, ev_msg);
		if (sdev->hostdata &&
		    sdev->channel < cs->ctlr_info->physchan_present) {
			struct myrs_pdev_info *pdev_info = sdev->hostdata;

			switch (ev->ev_code) {
			case 0x0001:
			case 0x0007:
				pdev_info->dev_state = MYRS_DEVICE_ONLINE;
				break;
			case 0x0002:
				pdev_info->dev_state = MYRS_DEVICE_STANDBY;
				break;
			case 0x000C:
				pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
				break;
			case 0x000E:
				pdev_info->dev_state = MYRS_DEVICE_MISSING;
				break;
			case 0x000F:
				pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
				break;
			}
		}
		scsi_device_put(sdev);
		break;
	case 'L':
		shost_printk(KERN_INFO, shost,
			     "event %d: Logical Drive %d %s\n",
			     ev->ev_seq, ev->lun, ev_msg);
		cs->needs_update = true;
		break;
	case 'M':
		shost_printk(KERN_INFO, shost,
			     "event %d: Logical Drive %d %s\n",
			     ev->ev_seq, ev->lun, ev_msg);
		cs->needs_update = true;
		break;
	case 'S':
		if (sshdr.sense_key == NO_SENSE ||
		    (sshdr.sense_key == NOT_READY &&
		     sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
					    sshdr.ascq == 0x02)))
			break;
		shost_printk(KERN_INFO, shost,
			     "event %d: Physical Device %d:%d %s\n",
			     ev->ev_seq, ev->channel, ev->target, ev_msg);
		shost_printk(KERN_INFO, shost,
			     "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
			     ev->channel, ev->target,
			     sshdr.sense_key, sshdr.asc, sshdr.ascq);
		shost_printk(KERN_INFO, shost,
			     "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
			     ev->channel, ev->target,
			     sense_info[0], sense_info[1],
			     sense_info[2], sense_info[3],
			     cmd_specific[0], cmd_specific[1],
			     cmd_specific[2], cmd_specific[3]);
		break;
	case 'E':
		if (cs->disable_enc_msg)
			break;
		sprintf(msg_buf, ev_msg, ev->lun);
		shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
			     ev->ev_seq, ev->target, msg_buf);
		break;
	case 'C':
		shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
			     ev->ev_seq, ev_msg);
		break;
	default:
		shost_printk(KERN_INFO, shost,
			     "event %d: Unknown Event Code %04X\n",
			     ev->ev_seq, ev->ev_code);
		break;
	}
}

/*
 * SCSI sysfs interface functions
 */
static ssize_t raid_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	int ret;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel >= cs->ctlr_info->physchan_present) {
		struct myrs_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		name = myrs_devstate_name(ldev_info->dev_state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       ldev_info->dev_state);
	} else {
		struct myrs_pdev_info *pdev_info;
		const char *name;

		pdev_info = sdev->hostdata;
		name = myrs_devstate_name(pdev_info->dev_state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       pdev_info->dev_state);
	}
	return ret;
}

static ssize_t raid_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	enum myrs_devstate new_state;
	unsigned short ldev_num;
	unsigned char status;

	if (!strncmp(buf, "offline", 7) ||
	    !strncmp(buf, "kill", 4))
		new_state = MYRS_DEVICE_OFFLINE;
	else if (!strncmp(buf, "online", 6))
		new_state = MYRS_DEVICE_ONLINE;
	else if (!strncmp(buf, "standby", 7))
		new_state = MYRS_DEVICE_STANDBY;
	else
		return -EINVAL;

	if (sdev->channel < cs->ctlr_info->physchan_present) {
		struct myrs_pdev_info *pdev_info = sdev->hostdata;
		struct myrs_devmap *pdev_devmap =
			(struct myrs_devmap *)&pdev_info->rsvd13;

		if (pdev_info->dev_state == new_state) {
			sdev_printk(KERN_INFO, sdev,
				    "Device already in %s\n",
				    myrs_devstate_name(new_state));
			return count;
		}
		status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
					     sdev->lun, pdev_devmap);
		if (status != MYRS_STATUS_SUCCESS)
			return -ENXIO;
		ldev_num = pdev_devmap->ldev_num;
	} else {
		struct myrs_ldev_info *ldev_info = sdev->hostdata;

		if (ldev_info->dev_state == new_state) {
			sdev_printk(KERN_INFO, sdev,
				    "Device already in %s\n",
				    myrs_devstate_name(new_state));
			return count;
		}
		ldev_num = ldev_info->ldev_num;
	}
	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
	mbox->set_devstate.state = new_state;
	mbox->set_devstate.ldev.ldev_num = ldev_num;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status == MYRS_STATUS_SUCCESS) {
		if (sdev->channel < cs->ctlr_info->physchan_present) {
			struct myrs_pdev_info *pdev_info = sdev->hostdata;

			pdev_info->dev_state = new_state;
		} else {
			struct myrs_ldev_info *ldev_info = sdev->hostdata;

			ldev_info->dev_state = new_state;
		}
		sdev_printk(KERN_INFO, sdev,
			    "Set device state to %s\n",
			    myrs_devstate_name(new_state));
		return count;
	}
	sdev_printk(KERN_INFO, sdev,
		    "Failed to set device state to %s, status 0x%02x\n",
		    myrs_devstate_name(new_state), status);
	return -EINVAL;
}
static DEVICE_ATTR_RW(raid_state);

static ssize_t raid_level_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	const char *name = NULL;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel >= cs->ctlr_info->physchan_present) {
		struct myrs_ldev_info *ldev_info;

		ldev_info = sdev->hostdata;
		name = myrs_raid_level_name(ldev_info->raid_level);
		if (!name)
			return snprintf(buf, 32, "Invalid (%02X)\n",
					ldev_info->raid_level);
	} else
		name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);

	return snprintf(buf, 32, "%s\n", name);
}
static DEVICE_ATTR_RO(raid_level);

static ssize_t rebuild_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	unsigned short ldev_num;
	unsigned char status;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return snprintf(buf, 64, "physical device - not rebuilding\n");

	ldev_info = sdev->hostdata;
	ldev_num = ldev_info->ldev_num;
	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}
	if (ldev_info->rbld_active) {
		return snprintf(buf, 64, "rebuilding block %zu of %zu\n",
				(size_t)ldev_info->rbld_lba,
				(size_t)ldev_info->cfg_devsize);
	} else
		return snprintf(buf, 32, "not rebuilding\n");
}

static ssize_t rebuild_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	unsigned short ldev_num;
	unsigned char status;
	int rebuild, ret;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return -EINVAL;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;

	ret = kstrtoint(buf, 0, &rebuild);
	if (ret)
		return ret;

	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}

	if (rebuild && ldev_info->rbld_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Initiated; already in progress\n");
		return -EALREADY;
	}
	if (!rebuild && !ldev_info->rbld_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Cancelled; no rebuild in progress\n");
		return count;
	}

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	if (rebuild) {
		mbox->ldev_info.ldev.ldev_num = ldev_num;
		mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
	} else {
		mbox->ldev_info.ldev.ldev_num = ldev_num;
		mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
	}
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not %s, status 0x%02x\n",
			    rebuild ? "Initiated" : "Cancelled", status);
		ret = -EIO;
	} else {
		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
			    rebuild ? "Initiated" : "Cancelled");
		ret = count;
	}

	return ret;
}
static DEVICE_ATTR_RW(rebuild);

static ssize_t consistency_check_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	unsigned short ldev_num;
	unsigned char status;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return snprintf(buf, 64, "physical device - not checking\n");

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;
	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}
	if (ldev_info->cc_active)
		return snprintf(buf, 64, "checking block %zu of %zu\n",
				(size_t)ldev_info->cc_lba,
				(size_t)ldev_info->cfg_devsize);
	else
		return snprintf(buf, 32, "not checking\n");
}

static ssize_t consistency_check_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	unsigned short ldev_num;
	unsigned char status;
	int check, ret;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return -EINVAL;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;

	ret = kstrtoint(buf, 0, &check);
	if (ret)
		return ret;

	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}
	if (check && ldev_info->cc_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not Initiated; "
			    "already in progress\n");
		return -EALREADY;
	}
	if (!check && !ldev_info->cc_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not Cancelled; "
			    "check not in progress\n");
		return count;
	}

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	if (check) {
		mbox->cc.ldev.ldev_num = ldev_num;
		mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
		mbox->cc.restore_consistency = true;
		mbox->cc.initialized_area_only = false;
	} else {
		mbox->cc.ldev.ldev_num = ldev_num;
		mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
	}
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not %s, status 0x%02x\n",
			    check ? "Initiated" : "Cancelled", status);
		ret = -EIO;
	} else {
		sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
			    check ? "Initiated" : "Cancelled");
		ret = count;
	}

	return ret;
}
static DEVICE_ATTR_RW(consistency_check);

static struct attribute *myrs_sdev_attrs[] = {
	&dev_attr_consistency_check.attr,
	&dev_attr_rebuild.attr,
	&dev_attr_raid_state.attr,
	&dev_attr_raid_level.attr,
	NULL,
};

ATTRIBUTE_GROUPS(myrs_sdev);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	char serial[17];

	memcpy(serial, cs->ctlr_info->serial_number, 16);
	serial[16] = '\0';
	return snprintf(buf, sizeof(serial) + 1, "%s\n", serial);
}
static DEVICE_ATTR_RO(serial);

static ssize_t ctlr_num_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 20, "%d\n", cs->host->host_no);
}
static DEVICE_ATTR_RO(ctlr_num);

static struct myrs_cpu_type_tbl {
	enum myrs_cpu_type type;
	char *name;
} myrs_cpu_type_names[] = {
	{ MYRS_CPUTYPE_i960CA, "i960CA" },
	{ MYRS_CPUTYPE_i960RD, "i960RD" },
	{ MYRS_CPUTYPE_i960RN, "i960RN" },
	{ MYRS_CPUTYPE_i960RP, "i960RP" },
	{ MYRS_CPUTYPE_NorthBay, "NorthBay" },
	{ MYRS_CPUTYPE_StrongArm, "StrongARM" },
	{ MYRS_CPUTYPE_i960RM, "i960RM" },
};

static ssize_t processor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	struct myrs_cpu_type_tbl *tbl;
	const char *first_processor = NULL;
	const char *second_processor = NULL;
	struct myrs_ctlr_info *info = cs->ctlr_info;
	ssize_t ret;
	int i;

	if (info->cpu[0].cpu_count) {
		tbl = myrs_cpu_type_names;
		for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
			if (tbl[i].type == info->cpu[0].cpu_type) {
				first_processor = tbl[i].name;
				break;
			}
		}
	}
	if (info->cpu[1].cpu_count) {
		tbl = myrs_cpu_type_names;
		for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
			if (tbl[i].type == info->cpu[1].cpu_type) {
				second_processor = tbl[i].name;
				break;
			}
		}
	}
	if (first_processor && second_processor)
		ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
			       "2: %s (%s, %d cpus)\n",
			       info->cpu[0].cpu_name,
			       first_processor, info->cpu[0].cpu_count,
			       info->cpu[1].cpu_name,
			       second_processor, info->cpu[1].cpu_count);
	else if (first_processor && !second_processor)
		ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
			       info->cpu[0].cpu_name,
			       first_processor, info->cpu[0].cpu_count);
	else if (!first_processor && second_processor)
		ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
			       info->cpu[1].cpu_name,
			       second_processor, info->cpu[1].cpu_count);
	else
		ret = snprintf(buf, 64, "1: absent\n2: absent\n");

	return ret;
}
static DEVICE_ATTR_RO(processor);

static ssize_t model_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 28, "%s\n", cs->model_name);
}
static DEVICE_ATTR_RO(model);

static ssize_t ctlr_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
}
static DEVICE_ATTR_RO(ctlr_type);

static ssize_t cache_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
}
static DEVICE_ATTR_RO(cache_size);

static ssize_t firmware_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 16, "%d.%02d-%02d\n",
			cs->ctlr_info->fw_major_version,
			cs->ctlr_info->fw_minor_version,
			cs->ctlr_info->fw_turn_number);
}
static DEVICE_ATTR_RO(firmware);

   1428static ssize_t discovery_store(struct device *dev,
   1429		struct device_attribute *attr, const char *buf, size_t count)
   1430{
   1431	struct Scsi_Host *shost = class_to_shost(dev);
   1432	struct myrs_hba *cs = shost_priv(shost);
   1433	struct myrs_cmdblk *cmd_blk;
   1434	union myrs_cmd_mbox *mbox;
   1435	unsigned char status;
   1436
   1437	mutex_lock(&cs->dcmd_mutex);
   1438	cmd_blk = &cs->dcmd_blk;
   1439	myrs_reset_cmd(cmd_blk);
   1440	mbox = &cmd_blk->mbox;
   1441	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
   1442	mbox->common.id = MYRS_DCMD_TAG;
   1443	mbox->common.control.dma_ctrl_to_host = true;
   1444	mbox->common.control.no_autosense = true;
   1445	mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
   1446	myrs_exec_cmd(cs, cmd_blk);
   1447	status = cmd_blk->status;
   1448	mutex_unlock(&cs->dcmd_mutex);
   1449	if (status != MYRS_STATUS_SUCCESS) {
   1450		shost_printk(KERN_INFO, shost,
   1451			     "Discovery Not Initiated, status %02X\n",
   1452			     status);
   1453		return -EINVAL;
   1454	}
   1455	shost_printk(KERN_INFO, shost, "Discovery Initiated\n");
   1456	cs->next_evseq = 0;
   1457	cs->needs_update = true;
   1458	queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
   1459	flush_delayed_work(&cs->monitor_work);
   1460	shost_printk(KERN_INFO, shost, "Discovery Completed\n");
   1461
   1462	return count;
   1463}
   1464static DEVICE_ATTR_WO(discovery);
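
/*
 * Illustrative only: writing any value to this attribute issues
 * MYRS_IOCTL_START_DISCOVERY and then forces one synchronous monitor
 * run, e.g. (assuming host0):
 *
 *   $ echo 1 > /sys/class/scsi_host/host0/discovery
 */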
   1465
   1466static ssize_t flush_cache_store(struct device *dev,
   1467		struct device_attribute *attr, const char *buf, size_t count)
   1468{
   1469	struct Scsi_Host *shost = class_to_shost(dev);
   1470	struct myrs_hba *cs = shost_priv(shost);
   1471	unsigned char status;
   1472
   1473	status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
   1474			     MYRS_RAID_CONTROLLER);
   1475	if (status == MYRS_STATUS_SUCCESS) {
   1476		shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
   1477		return count;
   1478	}
   1479	shost_printk(KERN_INFO, shost,
   1480		     "Cache Flush failed, status 0x%02x\n", status);
   1481	return -EIO;
   1482}
   1483static DEVICE_ATTR_WO(flush_cache);
   1484
   1485static ssize_t disable_enclosure_messages_show(struct device *dev,
   1486		struct device_attribute *attr, char *buf)
   1487{
   1488	struct Scsi_Host *shost = class_to_shost(dev);
   1489	struct myrs_hba *cs = shost_priv(shost);
   1490
   1491	return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
   1492}
   1493
   1494static ssize_t disable_enclosure_messages_store(struct device *dev,
   1495		struct device_attribute *attr, const char *buf, size_t count)
   1496{
   1497	struct scsi_device *sdev = to_scsi_device(dev);
   1498	struct myrs_hba *cs = shost_priv(sdev->host);
   1499	int value, ret;
   1500
   1501	ret = kstrtoint(buf, 0, &value);
   1502	if (ret)
   1503		return ret;
   1504
    1505	if (value < 0 || value > 2)
    1506		return -EINVAL;
   1507
   1508	cs->disable_enc_msg = value;
   1509	return count;
   1510}
   1511static DEVICE_ATTR_RW(disable_enclosure_messages);
   1512
   1513static struct attribute *myrs_shost_attrs[] = {
   1514	&dev_attr_serial.attr,
   1515	&dev_attr_ctlr_num.attr,
   1516	&dev_attr_processor.attr,
   1517	&dev_attr_model.attr,
   1518	&dev_attr_ctlr_type.attr,
   1519	&dev_attr_cache_size.attr,
   1520	&dev_attr_firmware.attr,
   1521	&dev_attr_discovery.attr,
   1522	&dev_attr_flush_cache.attr,
   1523	&dev_attr_disable_enclosure_messages.attr,
   1524	NULL,
   1525};
   1526
   1527ATTRIBUTE_GROUPS(myrs_shost);
   1528
   1529/*
   1530 * SCSI midlayer interface
   1531 */
   1532static int myrs_host_reset(struct scsi_cmnd *scmd)
   1533{
   1534	struct Scsi_Host *shost = scmd->device->host;
   1535	struct myrs_hba *cs = shost_priv(shost);
   1536
   1537	cs->reset(cs->io_base);
   1538	return SUCCESS;
   1539}
   1540
   1541static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
   1542		struct myrs_ldev_info *ldev_info)
   1543{
   1544	unsigned char modes[32], *mode_pg;
   1545	bool dbd;
   1546	size_t mode_len;
   1547
   1548	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
   1549	if (dbd) {
   1550		mode_len = 24;
   1551		mode_pg = &modes[4];
   1552	} else {
   1553		mode_len = 32;
   1554		mode_pg = &modes[12];
   1555	}
   1556	memset(modes, 0, sizeof(modes));
   1557	modes[0] = mode_len - 1;
    1558	modes[2] = 0x10; /* DPOFUA: advertise FUA support */
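	/* 0x80 is the WP bit: the logical device is read-only */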
   1559	if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
   1560		modes[2] |= 0x80;
   1561	if (!dbd) {
   1562		unsigned char *block_desc = &modes[4];
   1563
   1564		modes[3] = 8;
   1565		put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
   1566		put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
   1567	}
   1568	mode_pg[0] = 0x08;
   1569	mode_pg[1] = 0x12;
   1570	if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
   1571		mode_pg[2] |= 0x01;
   1572	if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
   1573	    ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
   1574		mode_pg[2] |= 0x04;
   1575	if (ldev_info->cacheline_size) {
   1576		mode_pg[2] |= 0x08;
   1577		put_unaligned_be16(1 << ldev_info->cacheline_size,
   1578				   &mode_pg[14]);
   1579	}
   1580
   1581	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
   1582}
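
/*
 * Buffer layout sketch for the MODE SENSE(6) reply built above: a 4-byte
 * mode parameter header, an optional 8-byte block descriptor (number of
 * blocks at bytes 0-3, block length at bytes 5-7) when DBD is clear, and
 * the 20-byte caching mode page (0x08) carrying the RCD/WCE bits and, if
 * set, the cache segment size at bytes 14-15 of the page. Note that the
 * 32-bit store at &block_desc[5] spills one byte past the block-length
 * field; that byte is mode_pg[0], which is overwritten right afterwards.
 */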
   1583
   1584static int myrs_queuecommand(struct Scsi_Host *shost,
   1585		struct scsi_cmnd *scmd)
   1586{
   1587	struct request *rq = scsi_cmd_to_rq(scmd);
   1588	struct myrs_hba *cs = shost_priv(shost);
   1589	struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
   1590	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
   1591	struct scsi_device *sdev = scmd->device;
   1592	union myrs_sgl *hw_sge;
   1593	dma_addr_t sense_addr;
   1594	struct scatterlist *sgl;
   1595	unsigned long flags, timeout;
   1596	int nsge;
   1597
   1598	if (!scmd->device->hostdata) {
   1599		scmd->result = (DID_NO_CONNECT << 16);
   1600		scsi_done(scmd);
   1601		return 0;
   1602	}
   1603
   1604	switch (scmd->cmnd[0]) {
   1605	case REPORT_LUNS:
   1606		scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0x0);
   1607		scsi_done(scmd);
   1608		return 0;
   1609	case MODE_SENSE:
   1610		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
   1611			struct myrs_ldev_info *ldev_info = sdev->hostdata;
   1612
   1613			if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
   1614			    (scmd->cmnd[2] & 0x3F) != 0x08) {
   1615				/* Illegal request, invalid field in CDB */
   1616				scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
   1617			} else {
   1618				myrs_mode_sense(cs, scmd, ldev_info);
   1619				scmd->result = (DID_OK << 16);
   1620			}
   1621			scsi_done(scmd);
   1622			return 0;
   1623		}
   1624		break;
   1625	}
   1626
   1627	myrs_reset_cmd(cmd_blk);
   1628	cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
   1629					&sense_addr);
   1630	if (!cmd_blk->sense)
   1631		return SCSI_MLQUEUE_HOST_BUSY;
   1632	cmd_blk->sense_addr = sense_addr;
   1633
   1634	timeout = rq->timeout;
   1635	if (scmd->cmd_len <= 10) {
   1636		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
   1637			struct myrs_ldev_info *ldev_info = sdev->hostdata;
   1638
   1639			mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
   1640			mbox->SCSI_10.pdev.lun = ldev_info->lun;
   1641			mbox->SCSI_10.pdev.target = ldev_info->target;
   1642			mbox->SCSI_10.pdev.channel = ldev_info->channel;
   1643			mbox->SCSI_10.pdev.ctlr = 0;
   1644		} else {
   1645			mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
   1646			mbox->SCSI_10.pdev.lun = sdev->lun;
   1647			mbox->SCSI_10.pdev.target = sdev->id;
   1648			mbox->SCSI_10.pdev.channel = sdev->channel;
   1649		}
   1650		mbox->SCSI_10.id = rq->tag + 3;
   1651		mbox->SCSI_10.control.dma_ctrl_to_host =
   1652			(scmd->sc_data_direction == DMA_FROM_DEVICE);
   1653		if (rq->cmd_flags & REQ_FUA)
   1654			mbox->SCSI_10.control.fua = true;
   1655		mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
   1656		mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
   1657		mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
   1658		mbox->SCSI_10.cdb_len = scmd->cmd_len;
   1659		if (timeout > 60) {
   1660			mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
   1661			mbox->SCSI_10.tmo.tmo_val = timeout / 60;
   1662		} else {
   1663			mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
   1664			mbox->SCSI_10.tmo.tmo_val = timeout;
   1665		}
   1666		memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
   1667		hw_sge = &mbox->SCSI_10.dma_addr;
   1668		cmd_blk->dcdb = NULL;
   1669	} else {
   1670		dma_addr_t dcdb_dma;
   1671
   1672		cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
   1673					       &dcdb_dma);
   1674		if (!cmd_blk->dcdb) {
   1675			dma_pool_free(cs->sense_pool, cmd_blk->sense,
   1676				      cmd_blk->sense_addr);
   1677			cmd_blk->sense = NULL;
   1678			cmd_blk->sense_addr = 0;
   1679			return SCSI_MLQUEUE_HOST_BUSY;
   1680		}
   1681		cmd_blk->dcdb_dma = dcdb_dma;
   1682		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
   1683			struct myrs_ldev_info *ldev_info = sdev->hostdata;
   1684
   1685			mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
   1686			mbox->SCSI_255.pdev.lun = ldev_info->lun;
   1687			mbox->SCSI_255.pdev.target = ldev_info->target;
   1688			mbox->SCSI_255.pdev.channel = ldev_info->channel;
   1689			mbox->SCSI_255.pdev.ctlr = 0;
   1690		} else {
   1691			mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
   1692			mbox->SCSI_255.pdev.lun = sdev->lun;
   1693			mbox->SCSI_255.pdev.target = sdev->id;
   1694			mbox->SCSI_255.pdev.channel = sdev->channel;
   1695		}
   1696		mbox->SCSI_255.id = rq->tag + 3;
   1697		mbox->SCSI_255.control.dma_ctrl_to_host =
   1698			(scmd->sc_data_direction == DMA_FROM_DEVICE);
   1699		if (rq->cmd_flags & REQ_FUA)
   1700			mbox->SCSI_255.control.fua = true;
   1701		mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
   1702		mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
   1703		mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
   1704		mbox->SCSI_255.cdb_len = scmd->cmd_len;
   1705		mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
   1706		if (timeout > 60) {
   1707			mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
   1708			mbox->SCSI_255.tmo.tmo_val = timeout / 60;
   1709		} else {
   1710			mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
   1711			mbox->SCSI_255.tmo.tmo_val = timeout;
   1712		}
   1713		memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
   1714		hw_sge = &mbox->SCSI_255.dma_addr;
   1715	}
   1716	if (scmd->sc_data_direction == DMA_NONE)
   1717		goto submit;
   1718	nsge = scsi_dma_map(scmd);
   1719	if (nsge == 1) {
   1720		sgl = scsi_sglist(scmd);
   1721		hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
   1722		hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
   1723	} else {
   1724		struct myrs_sge *hw_sgl;
   1725		dma_addr_t hw_sgl_addr;
   1726		int i;
   1727
   1728		if (nsge > 2) {
   1729			hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
   1730						&hw_sgl_addr);
   1731			if (WARN_ON(!hw_sgl)) {
   1732				if (cmd_blk->dcdb) {
   1733					dma_pool_free(cs->dcdb_pool,
   1734						      cmd_blk->dcdb,
   1735						      cmd_blk->dcdb_dma);
   1736					cmd_blk->dcdb = NULL;
   1737					cmd_blk->dcdb_dma = 0;
   1738				}
   1739				dma_pool_free(cs->sense_pool,
   1740					      cmd_blk->sense,
   1741					      cmd_blk->sense_addr);
   1742				cmd_blk->sense = NULL;
   1743				cmd_blk->sense_addr = 0;
   1744				return SCSI_MLQUEUE_HOST_BUSY;
   1745			}
   1746			cmd_blk->sgl = hw_sgl;
   1747			cmd_blk->sgl_addr = hw_sgl_addr;
   1748			if (scmd->cmd_len <= 10)
   1749				mbox->SCSI_10.control.add_sge_mem = true;
   1750			else
   1751				mbox->SCSI_255.control.add_sge_mem = true;
   1752			hw_sge->ext.sge0_len = nsge;
   1753			hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
    1754		} else {
    1755			hw_sgl = hw_sge->sge;
		}
   1756
   1757		scsi_for_each_sg(scmd, sgl, nsge, i) {
   1758			if (WARN_ON(!hw_sgl)) {
   1759				scsi_dma_unmap(scmd);
   1760				scmd->result = (DID_ERROR << 16);
   1761				scsi_done(scmd);
   1762				return 0;
   1763			}
   1764			hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
   1765			hw_sgl->sge_count = (u64)sg_dma_len(sgl);
   1766			hw_sgl++;
   1767		}
   1768	}
   1769submit:
   1770	spin_lock_irqsave(&cs->queue_lock, flags);
   1771	myrs_qcmd(cs, cmd_blk);
   1772	spin_unlock_irqrestore(&cs->queue_lock, flags);
   1773
   1774	return 0;
   1775}
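
/*
 * Tagging note, derived from the code above and the interrupt handlers
 * below: command ids below 3 are reserved for the driver's internal
 * DCMD/MCMD command blocks (MYRS_DCMD_TAG, MYRS_MCMD_TAG), so SCSI
 * commands are tagged rq->tag + 3 and the completion path maps an id
 * back to a tag with scsi_host_find_tag(shost, id - 3).
 */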
   1776
   1777static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
   1778		struct scsi_device *sdev)
   1779{
   1780	unsigned short ldev_num;
   1781	unsigned int chan_offset =
   1782		sdev->channel - cs->ctlr_info->physchan_present;
   1783
   1784	ldev_num = sdev->id + chan_offset * sdev->host->max_id;
   1785
   1786	return ldev_num;
   1787}
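
/*
 * Worked example with hypothetical numbers: if physchan_present == 2 and
 * shost->max_id == 16, a device at channel 3, target id 5 maps to
 * logical device number 5 + (3 - 2) * 16 = 21.
 */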
   1788
   1789static int myrs_slave_alloc(struct scsi_device *sdev)
   1790{
   1791	struct myrs_hba *cs = shost_priv(sdev->host);
   1792	unsigned char status;
   1793
   1794	if (sdev->channel > sdev->host->max_channel)
   1795		return 0;
   1796
   1797	if (sdev->channel >= cs->ctlr_info->physchan_present) {
   1798		struct myrs_ldev_info *ldev_info;
   1799		unsigned short ldev_num;
   1800
   1801		if (sdev->lun > 0)
   1802			return -ENXIO;
   1803
   1804		ldev_num = myrs_translate_ldev(cs, sdev);
   1805
   1806		ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
   1807		if (!ldev_info)
   1808			return -ENOMEM;
   1809
   1810		status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
   1811		if (status != MYRS_STATUS_SUCCESS) {
   1812			sdev->hostdata = NULL;
   1813			kfree(ldev_info);
   1814		} else {
   1815			enum raid_level level;
   1816
   1817			dev_dbg(&sdev->sdev_gendev,
   1818				"Logical device mapping %d:%d:%d -> %d\n",
   1819				ldev_info->channel, ldev_info->target,
   1820				ldev_info->lun, ldev_info->ldev_num);
   1821
   1822			sdev->hostdata = ldev_info;
   1823			switch (ldev_info->raid_level) {
   1824			case MYRS_RAID_LEVEL0:
   1825				level = RAID_LEVEL_LINEAR;
   1826				break;
   1827			case MYRS_RAID_LEVEL1:
   1828				level = RAID_LEVEL_1;
   1829				break;
   1830			case MYRS_RAID_LEVEL3:
   1831			case MYRS_RAID_LEVEL3F:
   1832			case MYRS_RAID_LEVEL3L:
   1833				level = RAID_LEVEL_3;
   1834				break;
   1835			case MYRS_RAID_LEVEL5:
   1836			case MYRS_RAID_LEVEL5L:
   1837				level = RAID_LEVEL_5;
   1838				break;
   1839			case MYRS_RAID_LEVEL6:
   1840				level = RAID_LEVEL_6;
   1841				break;
   1842			case MYRS_RAID_LEVELE:
   1843			case MYRS_RAID_NEWSPAN:
   1844			case MYRS_RAID_SPAN:
   1845				level = RAID_LEVEL_LINEAR;
   1846				break;
   1847			case MYRS_RAID_JBOD:
   1848				level = RAID_LEVEL_JBOD;
   1849				break;
   1850			default:
   1851				level = RAID_LEVEL_UNKNOWN;
   1852				break;
   1853			}
   1854			raid_set_level(myrs_raid_template,
   1855				       &sdev->sdev_gendev, level);
   1856			if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
   1857				const char *name;
   1858
   1859				name = myrs_devstate_name(ldev_info->dev_state);
   1860				sdev_printk(KERN_DEBUG, sdev,
   1861					    "logical device in state %s\n",
   1862					    name ? name : "Invalid");
   1863			}
   1864		}
   1865	} else {
   1866		struct myrs_pdev_info *pdev_info;
   1867
   1868		pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
   1869		if (!pdev_info)
   1870			return -ENOMEM;
   1871
   1872		status = myrs_get_pdev_info(cs, sdev->channel,
   1873					    sdev->id, sdev->lun,
   1874					    pdev_info);
   1875		if (status != MYRS_STATUS_SUCCESS) {
   1876			sdev->hostdata = NULL;
   1877			kfree(pdev_info);
   1878			return -ENXIO;
   1879		}
   1880		sdev->hostdata = pdev_info;
   1881	}
   1882	return 0;
   1883}
   1884
   1885static int myrs_slave_configure(struct scsi_device *sdev)
   1886{
   1887	struct myrs_hba *cs = shost_priv(sdev->host);
   1888	struct myrs_ldev_info *ldev_info;
   1889
   1890	if (sdev->channel > sdev->host->max_channel)
   1891		return -ENXIO;
   1892
   1893	if (sdev->channel < cs->ctlr_info->physchan_present) {
   1894		/* Skip HBA device */
   1895		if (sdev->type == TYPE_RAID)
   1896			return -ENXIO;
   1897		sdev->no_uld_attach = 1;
   1898		return 0;
   1899	}
   1900	if (sdev->lun != 0)
   1901		return -ENXIO;
   1902
   1903	ldev_info = sdev->hostdata;
   1904	if (!ldev_info)
   1905		return -ENXIO;
   1906	if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
   1907	    ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
   1908		sdev->wce_default_on = 1;
   1909	sdev->tagged_supported = 1;
   1910	return 0;
   1911}
   1912
   1913static void myrs_slave_destroy(struct scsi_device *sdev)
   1914{
   1915	kfree(sdev->hostdata);
   1916}
   1917
   1918static struct scsi_host_template myrs_template = {
   1919	.module			= THIS_MODULE,
   1920	.name			= "DAC960",
   1921	.proc_name		= "myrs",
   1922	.queuecommand		= myrs_queuecommand,
   1923	.eh_host_reset_handler	= myrs_host_reset,
   1924	.slave_alloc		= myrs_slave_alloc,
   1925	.slave_configure	= myrs_slave_configure,
   1926	.slave_destroy		= myrs_slave_destroy,
   1927	.cmd_size		= sizeof(struct myrs_cmdblk),
   1928	.shost_groups		= myrs_shost_groups,
   1929	.sdev_groups		= myrs_sdev_groups,
   1930	.this_id		= -1,
   1931};
   1932
   1933static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
   1934		const struct pci_device_id *entry)
   1935{
   1936	struct Scsi_Host *shost;
   1937	struct myrs_hba *cs;
   1938
   1939	shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
   1940	if (!shost)
   1941		return NULL;
   1942
   1943	shost->max_cmd_len = 16;
   1944	shost->max_lun = 256;
   1945	cs = shost_priv(shost);
   1946	mutex_init(&cs->dcmd_mutex);
   1947	mutex_init(&cs->cinfo_mutex);
   1948	cs->host = shost;
   1949
   1950	return cs;
   1951}
   1952
   1953/*
   1954 * RAID template functions
   1955 */
   1956
   1957/**
   1958 * myrs_is_raid - return boolean indicating device is raid volume
   1959 * @dev: the device struct object
   1960 */
   1961static int
   1962myrs_is_raid(struct device *dev)
   1963{
   1964	struct scsi_device *sdev = to_scsi_device(dev);
   1965	struct myrs_hba *cs = shost_priv(sdev->host);
   1966
   1967	return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
   1968}
   1969
   1970/**
   1971 * myrs_get_resync - get raid volume resync percent complete
   1972 * @dev: the device struct object
   1973 */
   1974static void
   1975myrs_get_resync(struct device *dev)
   1976{
   1977	struct scsi_device *sdev = to_scsi_device(dev);
   1978	struct myrs_hba *cs = shost_priv(sdev->host);
   1979	struct myrs_ldev_info *ldev_info = sdev->hostdata;
   1980	u64 percent_complete = 0;
   1981
   1982	if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
   1983		return;
   1984	if (ldev_info->rbld_active) {
   1985		unsigned short ldev_num = ldev_info->ldev_num;
   1986
   1987		myrs_get_ldev_info(cs, ldev_num, ldev_info);
   1988		percent_complete = ldev_info->rbld_lba * 100;
   1989		do_div(percent_complete, ldev_info->cfg_devsize);
   1990	}
   1991	raid_set_resync(myrs_raid_template, dev, percent_complete);
   1992}
   1993
   1994/**
   1995 * myrs_get_state - get raid volume status
   1996 * @dev: the device struct object
   1997 */
   1998static void
   1999myrs_get_state(struct device *dev)
   2000{
   2001	struct scsi_device *sdev = to_scsi_device(dev);
   2002	struct myrs_hba *cs = shost_priv(sdev->host);
   2003	struct myrs_ldev_info *ldev_info = sdev->hostdata;
   2004	enum raid_state state = RAID_STATE_UNKNOWN;
   2005
   2006	if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
   2007		state = RAID_STATE_UNKNOWN;
   2008	else {
   2009		switch (ldev_info->dev_state) {
   2010		case MYRS_DEVICE_ONLINE:
   2011			state = RAID_STATE_ACTIVE;
   2012			break;
   2013		case MYRS_DEVICE_SUSPECTED_CRITICAL:
   2014		case MYRS_DEVICE_CRITICAL:
   2015			state = RAID_STATE_DEGRADED;
   2016			break;
   2017		case MYRS_DEVICE_REBUILD:
   2018			state = RAID_STATE_RESYNCING;
   2019			break;
   2020		case MYRS_DEVICE_UNCONFIGURED:
   2021		case MYRS_DEVICE_INVALID_STATE:
   2022			state = RAID_STATE_UNKNOWN;
   2023			break;
   2024		default:
   2025			state = RAID_STATE_OFFLINE;
   2026		}
   2027	}
   2028	raid_set_state(myrs_raid_template, dev, state);
   2029}
   2030
   2031static struct raid_function_template myrs_raid_functions = {
   2032	.cookie		= &myrs_template,
   2033	.is_raid	= myrs_is_raid,
   2034	.get_resync	= myrs_get_resync,
   2035	.get_state	= myrs_get_state,
   2036};
   2037
   2038/*
   2039 * PCI interface functions
   2040 */
   2041static void myrs_flush_cache(struct myrs_hba *cs)
   2042{
   2043	myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
   2044}
   2045
   2046static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
   2047		struct scsi_cmnd *scmd)
   2048{
   2049	unsigned char status;
   2050
   2051	if (!cmd_blk)
   2052		return;
   2053
   2054	scsi_dma_unmap(scmd);
   2055	status = cmd_blk->status;
   2056	if (cmd_blk->sense) {
   2057		if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
   2058			unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;
   2059
   2060			if (sense_len > cmd_blk->sense_len)
   2061				sense_len = cmd_blk->sense_len;
   2062			memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
   2063		}
   2064		dma_pool_free(cs->sense_pool, cmd_blk->sense,
   2065			      cmd_blk->sense_addr);
   2066		cmd_blk->sense = NULL;
   2067		cmd_blk->sense_addr = 0;
   2068	}
   2069	if (cmd_blk->dcdb) {
   2070		dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
   2071			      cmd_blk->dcdb_dma);
   2072		cmd_blk->dcdb = NULL;
   2073		cmd_blk->dcdb_dma = 0;
   2074	}
   2075	if (cmd_blk->sgl) {
   2076		dma_pool_free(cs->sg_pool, cmd_blk->sgl,
   2077			      cmd_blk->sgl_addr);
   2078		cmd_blk->sgl = NULL;
   2079		cmd_blk->sgl_addr = 0;
   2080	}
   2081	if (cmd_blk->residual)
   2082		scsi_set_resid(scmd, cmd_blk->residual);
   2083	if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
   2084	    status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
   2085		scmd->result = (DID_BAD_TARGET << 16);
   2086	else
   2087		scmd->result = (DID_OK << 16) | status;
   2088	scsi_done(scmd);
   2089}
   2090
   2091static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
   2092{
   2093	if (!cmd_blk)
   2094		return;
   2095
   2096	if (cmd_blk->complete) {
   2097		complete(cmd_blk->complete);
   2098		cmd_blk->complete = NULL;
   2099	}
   2100}
   2101
   2102static void myrs_monitor(struct work_struct *work)
   2103{
   2104	struct myrs_hba *cs = container_of(work, struct myrs_hba,
   2105					   monitor_work.work);
   2106	struct Scsi_Host *shost = cs->host;
   2107	struct myrs_ctlr_info *info = cs->ctlr_info;
   2108	unsigned int epoch = cs->fwstat_buf->epoch;
   2109	unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
   2110	unsigned char status;
   2111
   2112	dev_dbg(&shost->shost_gendev, "monitor tick\n");
   2113
   2114	status = myrs_get_fwstatus(cs);
   2115
   2116	if (cs->needs_update) {
   2117		cs->needs_update = false;
   2118		mutex_lock(&cs->cinfo_mutex);
   2119		status = myrs_get_ctlr_info(cs);
   2120		mutex_unlock(&cs->cinfo_mutex);
   2121	}
   2122	if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
   2123		status = myrs_get_event(cs, cs->next_evseq,
   2124					cs->event_buf);
   2125		if (status == MYRS_STATUS_SUCCESS) {
   2126			myrs_log_event(cs, cs->event_buf);
   2127			cs->next_evseq++;
   2128			interval = 1;
   2129		}
   2130	}
   2131
   2132	if (time_after(jiffies, cs->secondary_monitor_time
   2133		       + MYRS_SECONDARY_MONITOR_INTERVAL))
   2134		cs->secondary_monitor_time = jiffies;
   2135
   2136	if (info->bg_init_active +
   2137	    info->ldev_init_active +
   2138	    info->pdev_init_active +
   2139	    info->cc_active +
   2140	    info->rbld_active +
   2141	    info->exp_active != 0) {
   2142		struct scsi_device *sdev;
   2143
   2144		shost_for_each_device(sdev, shost) {
   2145			struct myrs_ldev_info *ldev_info;
   2146			int ldev_num;
   2147
   2148			if (sdev->channel < info->physchan_present)
   2149				continue;
   2150			ldev_info = sdev->hostdata;
   2151			if (!ldev_info)
   2152				continue;
   2153			ldev_num = ldev_info->ldev_num;
   2154			myrs_get_ldev_info(cs, ldev_num, ldev_info);
   2155		}
   2156		cs->needs_update = true;
   2157	}
   2158	if (epoch == cs->epoch &&
   2159	    cs->fwstat_buf->next_evseq == cs->next_evseq &&
    2160	    (!cs->needs_update ||
   2161	     time_before(jiffies, cs->primary_monitor_time
   2162			 + MYRS_PRIMARY_MONITOR_INTERVAL))) {
   2163		interval = MYRS_SECONDARY_MONITOR_INTERVAL;
   2164	}
   2165
   2166	if (interval > 1)
   2167		cs->primary_monitor_time = jiffies;
   2168	queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
   2169}
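
/*
 * Polling cadence, as implemented above: the work requeues itself after
 * a single jiffy while controller events are still being drained, drops
 * to MYRS_SECONDARY_MONITOR_INTERVAL once the epoch and event sequence
 * are unchanged, and otherwise runs at MYRS_PRIMARY_MONITOR_INTERVAL.
 */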
   2170
   2171static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
   2172{
   2173	struct Scsi_Host *shost = cs->host;
   2174	size_t elem_size, elem_align;
   2175
   2176	elem_align = sizeof(struct myrs_sge);
   2177	elem_size = shost->sg_tablesize * elem_align;
   2178	cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
   2179				      elem_size, elem_align, 0);
   2180	if (cs->sg_pool == NULL) {
   2181		shost_printk(KERN_ERR, shost,
   2182			     "Failed to allocate SG pool\n");
   2183		return false;
   2184	}
   2185
   2186	cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
   2187					 MYRS_SENSE_SIZE, sizeof(int), 0);
   2188	if (cs->sense_pool == NULL) {
   2189		dma_pool_destroy(cs->sg_pool);
   2190		cs->sg_pool = NULL;
   2191		shost_printk(KERN_ERR, shost,
   2192			     "Failed to allocate sense data pool\n");
   2193		return false;
   2194	}
   2195
   2196	cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
   2197					MYRS_DCDB_SIZE,
   2198					sizeof(unsigned char), 0);
   2199	if (!cs->dcdb_pool) {
   2200		dma_pool_destroy(cs->sg_pool);
   2201		cs->sg_pool = NULL;
   2202		dma_pool_destroy(cs->sense_pool);
   2203		cs->sense_pool = NULL;
   2204		shost_printk(KERN_ERR, shost,
   2205			     "Failed to allocate DCDB pool\n");
   2206		return false;
   2207	}
   2208
   2209	snprintf(cs->work_q_name, sizeof(cs->work_q_name),
   2210		 "myrs_wq_%d", shost->host_no);
   2211	cs->work_q = create_singlethread_workqueue(cs->work_q_name);
   2212	if (!cs->work_q) {
   2213		dma_pool_destroy(cs->dcdb_pool);
   2214		cs->dcdb_pool = NULL;
   2215		dma_pool_destroy(cs->sg_pool);
   2216		cs->sg_pool = NULL;
   2217		dma_pool_destroy(cs->sense_pool);
   2218		cs->sense_pool = NULL;
   2219		shost_printk(KERN_ERR, shost,
   2220			     "Failed to create workqueue\n");
   2221		return false;
   2222	}
   2223
    2224	/* Initialize the monitoring work and queue its first run. */
   2225	INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
   2226	queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
   2227
   2228	return true;
   2229}
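
/*
 * Sizing note: each element of the "myrs_sg" pool holds one complete
 * hardware SG list, i.e. shost->sg_tablesize entries of struct myrs_sge,
 * aligned to the size of a single entry.
 */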
   2230
   2231static void myrs_destroy_mempools(struct myrs_hba *cs)
   2232{
   2233	cancel_delayed_work_sync(&cs->monitor_work);
   2234	destroy_workqueue(cs->work_q);
   2235
   2236	dma_pool_destroy(cs->sg_pool);
   2237	dma_pool_destroy(cs->dcdb_pool);
   2238	dma_pool_destroy(cs->sense_pool);
   2239}
   2240
   2241static void myrs_unmap(struct myrs_hba *cs)
   2242{
   2243	kfree(cs->event_buf);
   2244	kfree(cs->ctlr_info);
   2245	if (cs->fwstat_buf) {
   2246		dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
   2247				  cs->fwstat_buf, cs->fwstat_addr);
   2248		cs->fwstat_buf = NULL;
   2249	}
   2250	if (cs->first_stat_mbox) {
   2251		dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
   2252				  cs->first_stat_mbox, cs->stat_mbox_addr);
   2253		cs->first_stat_mbox = NULL;
   2254	}
   2255	if (cs->first_cmd_mbox) {
   2256		dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
   2257				  cs->first_cmd_mbox, cs->cmd_mbox_addr);
   2258		cs->first_cmd_mbox = NULL;
   2259	}
   2260}
   2261
   2262static void myrs_cleanup(struct myrs_hba *cs)
   2263{
   2264	struct pci_dev *pdev = cs->pdev;
   2265
   2266	/* Free the memory mailbox, status, and related structures */
   2267	myrs_unmap(cs);
   2268
   2269	if (cs->mmio_base) {
   2270		if (cs->disable_intr)
   2271			cs->disable_intr(cs);
   2272		iounmap(cs->mmio_base);
   2273		cs->mmio_base = NULL;
   2274	}
   2275	if (cs->irq)
   2276		free_irq(cs->irq, cs);
   2277	if (cs->io_addr)
   2278		release_region(cs->io_addr, 0x80);
   2279	pci_set_drvdata(pdev, NULL);
   2280	pci_disable_device(pdev);
   2281	scsi_host_put(cs->host);
   2282}
   2283
   2284static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
   2285		const struct pci_device_id *entry)
   2286{
   2287	struct myrs_privdata *privdata =
   2288		(struct myrs_privdata *)entry->driver_data;
   2289	irq_handler_t irq_handler = privdata->irq_handler;
   2290	unsigned int mmio_size = privdata->mmio_size;
   2291	struct myrs_hba *cs = NULL;
   2292
   2293	cs = myrs_alloc_host(pdev, entry);
   2294	if (!cs) {
   2295		dev_err(&pdev->dev, "Unable to allocate Controller\n");
   2296		return NULL;
   2297	}
   2298	cs->pdev = pdev;
   2299
   2300	if (pci_enable_device(pdev))
   2301		goto Failure;
   2302
   2303	cs->pci_addr = pci_resource_start(pdev, 0);
   2304
   2305	pci_set_drvdata(pdev, cs);
   2306	spin_lock_init(&cs->queue_lock);
   2307	/* Map the Controller Register Window. */
   2308	if (mmio_size < PAGE_SIZE)
   2309		mmio_size = PAGE_SIZE;
   2310	cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size);
   2311	if (cs->mmio_base == NULL) {
   2312		dev_err(&pdev->dev,
   2313			"Unable to map Controller Register Window\n");
   2314		goto Failure;
   2315	}
   2316
   2317	cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
   2318	if (privdata->hw_init(pdev, cs, cs->io_base))
   2319		goto Failure;
   2320
   2321	/* Acquire shared access to the IRQ Channel. */
   2322	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
   2323		dev_err(&pdev->dev,
   2324			"Unable to acquire IRQ Channel %d\n", pdev->irq);
   2325		goto Failure;
   2326	}
   2327	cs->irq = pdev->irq;
   2328	return cs;
   2329
   2330Failure:
   2331	dev_err(&pdev->dev,
   2332		"Failed to initialize Controller\n");
   2333	myrs_cleanup(cs);
   2334	return NULL;
   2335}
   2336
   2337/*
   2338 * myrs_err_status reports Controller BIOS Messages passed through
   2339 * the Error Status Register when the driver performs the BIOS handshaking.
   2340 * It returns true for fatal errors and false otherwise.
   2341 */
   2342
   2343static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
   2344		unsigned char parm0, unsigned char parm1)
   2345{
   2346	struct pci_dev *pdev = cs->pdev;
   2347
   2348	switch (status) {
   2349	case 0x00:
   2350		dev_info(&pdev->dev,
   2351			 "Physical Device %d:%d Not Responding\n",
   2352			 parm1, parm0);
   2353		break;
   2354	case 0x08:
   2355		dev_notice(&pdev->dev, "Spinning Up Drives\n");
   2356		break;
   2357	case 0x30:
   2358		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
   2359		break;
   2360	case 0x60:
   2361		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
   2362		break;
   2363	case 0x70:
   2364		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
   2365		break;
   2366	case 0x90:
   2367		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
   2368			   parm1, parm0);
   2369		break;
   2370	case 0xA0:
   2371		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
   2372		break;
   2373	case 0xB0:
   2374		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
   2375		break;
   2376	case 0xD0:
   2377		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
   2378		break;
   2379	case 0xF0:
   2380		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
   2381		return true;
   2382	default:
   2383		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
   2384			status);
   2385		return true;
   2386	}
   2387	return false;
   2388}
   2389
   2390/*
   2391 * Hardware-specific functions
   2392 */
   2393
   2394/*
   2395 * DAC960 GEM Series Controllers.
   2396 */
   2397
   2398static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
   2399{
   2400	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
   2401
   2402	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
   2403}
   2404
   2405static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
   2406{
   2407	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);
   2408
   2409	writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
   2410}
   2411
   2412static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
   2413{
   2414	__le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);
   2415
   2416	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
   2417}
   2418
   2419static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
   2420{
   2421	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
   2422
   2423	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
   2424}
   2425
   2426static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
   2427{
   2428	__le32 val;
   2429
   2430	val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
   2431	return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
   2432}
   2433
   2434static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
   2435{
   2436	__le32 val;
   2437
   2438	val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
   2439	return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
   2440}
   2441
   2442static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
   2443{
   2444	__le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);
   2445
   2446	writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
   2447}
   2448
   2449static inline void DAC960_GEM_ack_intr(void __iomem *base)
   2450{
   2451	__le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
   2452				  DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);
   2453
   2454	writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
   2455}
   2456
   2457static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
   2458{
   2459	__le32 val;
   2460
   2461	val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
   2462	return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
   2463}
   2464
   2465static inline void DAC960_GEM_enable_intr(void __iomem *base)
   2466{
   2467	__le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
   2468				  DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
   2469	writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
   2470}
   2471
   2472static inline void DAC960_GEM_disable_intr(void __iomem *base)
   2473{
   2474	__le32 val = 0;
   2475
   2476	writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
   2477}
   2478
   2479static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
   2480		union myrs_cmd_mbox *mbox)
   2481{
   2482	memcpy(&mem_mbox->words[1], &mbox->words[1],
   2483	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
   2484	/* Barrier to avoid reordering */
   2485	wmb();
   2486	mem_mbox->words[0] = mbox->words[0];
   2487	/* Barrier to force PCI access */
   2488	mb();
   2489}
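
/*
 * Ordering note for the mailbox write above (the BA and LP variants are
 * identical): the command body in words[1..] is written first, wmb()
 * keeps it ordered before the store to words[0] (opcode and id), which
 * is what marks the slot valid to the controller, and the final mb()
 * pushes everything out before the doorbell is written.
 */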
   2490
   2491static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
   2492		dma_addr_t cmd_mbox_addr)
   2493{
   2494	dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
   2495}
   2496
   2497static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
   2498{
   2499	return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
   2500}
   2501
   2502static inline bool
   2503DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
   2504		unsigned char *param0, unsigned char *param1)
   2505{
   2506	__le32 val;
   2507
   2508	val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
   2509	if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
   2510		return false;
   2511	*error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24);
   2512	*param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
   2513	*param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
   2514	writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
   2515	return true;
   2516}
   2517
   2518static inline unsigned char
   2519DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
   2520{
   2521	unsigned char status;
   2522
   2523	while (DAC960_GEM_hw_mbox_is_full(base))
   2524		udelay(1);
   2525	DAC960_GEM_write_hw_mbox(base, mbox_addr);
   2526	DAC960_GEM_hw_mbox_new_cmd(base);
   2527	while (!DAC960_GEM_hw_mbox_status_available(base))
   2528		udelay(1);
   2529	status = DAC960_GEM_read_cmd_status(base);
   2530	DAC960_GEM_ack_hw_mbox_intr(base);
   2531	DAC960_GEM_ack_hw_mbox_status(base);
   2532
   2533	return status;
   2534}
   2535
   2536static int DAC960_GEM_hw_init(struct pci_dev *pdev,
   2537		struct myrs_hba *cs, void __iomem *base)
   2538{
   2539	int timeout = 0;
   2540	unsigned char status, parm0, parm1;
   2541
   2542	DAC960_GEM_disable_intr(base);
   2543	DAC960_GEM_ack_hw_mbox_status(base);
   2544	udelay(1000);
   2545	while (DAC960_GEM_init_in_progress(base) &&
   2546	       timeout < MYRS_MAILBOX_TIMEOUT) {
   2547		if (DAC960_GEM_read_error_status(base, &status,
   2548						 &parm0, &parm1) &&
   2549		    myrs_err_status(cs, status, parm0, parm1))
   2550			return -EIO;
   2551		udelay(10);
   2552		timeout++;
   2553	}
   2554	if (timeout == MYRS_MAILBOX_TIMEOUT) {
   2555		dev_err(&pdev->dev,
   2556			"Timeout waiting for Controller Initialisation\n");
   2557		return -ETIMEDOUT;
   2558	}
   2559	if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
   2560		dev_err(&pdev->dev,
   2561			"Unable to Enable Memory Mailbox Interface\n");
   2562		DAC960_GEM_reset_ctrl(base);
   2563		return -EAGAIN;
   2564	}
   2565	DAC960_GEM_enable_intr(base);
   2566	cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
   2567	cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
   2568	cs->disable_intr = DAC960_GEM_disable_intr;
   2569	cs->reset = DAC960_GEM_reset_ctrl;
   2570	return 0;
   2571}
   2572
   2573static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
   2574{
   2575	struct myrs_hba *cs = arg;
   2576	void __iomem *base = cs->io_base;
   2577	struct myrs_stat_mbox *next_stat_mbox;
   2578	unsigned long flags;
   2579
   2580	spin_lock_irqsave(&cs->queue_lock, flags);
   2581	DAC960_GEM_ack_intr(base);
   2582	next_stat_mbox = cs->next_stat_mbox;
   2583	while (next_stat_mbox->id > 0) {
   2584		unsigned short id = next_stat_mbox->id;
   2585		struct scsi_cmnd *scmd = NULL;
   2586		struct myrs_cmdblk *cmd_blk = NULL;
   2587
   2588		if (id == MYRS_DCMD_TAG)
   2589			cmd_blk = &cs->dcmd_blk;
   2590		else if (id == MYRS_MCMD_TAG)
   2591			cmd_blk = &cs->mcmd_blk;
   2592		else {
   2593			scmd = scsi_host_find_tag(cs->host, id - 3);
   2594			if (scmd)
   2595				cmd_blk = scsi_cmd_priv(scmd);
   2596		}
   2597		if (cmd_blk) {
   2598			cmd_blk->status = next_stat_mbox->status;
   2599			cmd_blk->sense_len = next_stat_mbox->sense_len;
   2600			cmd_blk->residual = next_stat_mbox->residual;
    2601		} else {
    2602			dev_err(&cs->pdev->dev,
    2603				"Unhandled command completion %d\n", id);
		}
   2604
   2605		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
   2606		if (++next_stat_mbox > cs->last_stat_mbox)
   2607			next_stat_mbox = cs->first_stat_mbox;
   2608
   2609		if (cmd_blk) {
   2610			if (id < 3)
   2611				myrs_handle_cmdblk(cs, cmd_blk);
   2612			else
   2613				myrs_handle_scsi(cs, cmd_blk, scmd);
   2614		}
   2615	}
   2616	cs->next_stat_mbox = next_stat_mbox;
   2617	spin_unlock_irqrestore(&cs->queue_lock, flags);
   2618	return IRQ_HANDLED;
   2619}
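
/*
 * Completion ring semantics, shared by all three interrupt handlers: a
 * status mailbox with id == 0 is an empty slot, so the loop consumes
 * entries until it hits one, zeroing each slot as it goes and wrapping
 * from last_stat_mbox back to first_stat_mbox.
 */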
   2620
   2621static struct myrs_privdata DAC960_GEM_privdata = {
   2622	.hw_init =		DAC960_GEM_hw_init,
   2623	.irq_handler =		DAC960_GEM_intr_handler,
   2624	.mmio_size =		DAC960_GEM_mmio_size,
   2625};
   2626
   2627/*
   2628 * DAC960 BA Series Controllers.
   2629 */
   2630
   2631static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
   2632{
   2633	writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
   2634}
   2635
   2636static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
   2637{
   2638	writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
   2639}
   2640
   2641static inline void DAC960_BA_reset_ctrl(void __iomem *base)
   2642{
   2643	writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
   2644}
   2645
   2646static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
   2647{
   2648	writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
   2649}
   2650
   2651static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
   2652{
   2653	u8 val;
   2654
   2655	val = readb(base + DAC960_BA_IDB_OFFSET);
   2656	return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
   2657}
   2658
   2659static inline bool DAC960_BA_init_in_progress(void __iomem *base)
   2660{
   2661	u8 val;
   2662
   2663	val = readb(base + DAC960_BA_IDB_OFFSET);
   2664	return !(val & DAC960_BA_IDB_INIT_DONE);
   2665}
   2666
   2667static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
   2668{
   2669	writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
   2670}
   2671
   2672static inline void DAC960_BA_ack_intr(void __iomem *base)
   2673{
   2674	writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
   2675	       base + DAC960_BA_ODB_OFFSET);
   2676}
   2677
   2678static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
   2679{
   2680	u8 val;
   2681
   2682	val = readb(base + DAC960_BA_ODB_OFFSET);
   2683	return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
   2684}
   2685
   2686static inline void DAC960_BA_enable_intr(void __iomem *base)
   2687{
   2688	writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
   2689}
   2690
   2691static inline void DAC960_BA_disable_intr(void __iomem *base)
   2692{
   2693	writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
   2694}
   2695
   2696static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
   2697		union myrs_cmd_mbox *mbox)
   2698{
   2699	memcpy(&mem_mbox->words[1], &mbox->words[1],
   2700	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
   2701	/* Barrier to avoid reordering */
   2702	wmb();
   2703	mem_mbox->words[0] = mbox->words[0];
   2704	/* Barrier to force PCI access */
   2705	mb();
   2706}
   2707
   2708
   2709static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
   2710		dma_addr_t cmd_mbox_addr)
   2711{
   2712	dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
   2713}
   2714
   2715static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
   2716{
   2717	return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
   2718}
   2719
   2720static inline bool
   2721DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
   2722		unsigned char *param0, unsigned char *param1)
   2723{
   2724	u8 val;
   2725
   2726	val = readb(base + DAC960_BA_ERRSTS_OFFSET);
   2727	if (!(val & DAC960_BA_ERRSTS_PENDING))
   2728		return false;
   2729	val &= ~DAC960_BA_ERRSTS_PENDING;
   2730	*error = val;
   2731	*param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
   2732	*param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
   2733	writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
   2734	return true;
   2735}
   2736
   2737static inline unsigned char
   2738DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
   2739{
   2740	unsigned char status;
   2741
   2742	while (DAC960_BA_hw_mbox_is_full(base))
   2743		udelay(1);
   2744	DAC960_BA_write_hw_mbox(base, mbox_addr);
   2745	DAC960_BA_hw_mbox_new_cmd(base);
   2746	while (!DAC960_BA_hw_mbox_status_available(base))
   2747		udelay(1);
   2748	status = DAC960_BA_read_cmd_status(base);
   2749	DAC960_BA_ack_hw_mbox_intr(base);
   2750	DAC960_BA_ack_hw_mbox_status(base);
   2751
   2752	return status;
   2753}
   2754
   2755static int DAC960_BA_hw_init(struct pci_dev *pdev,
   2756		struct myrs_hba *cs, void __iomem *base)
   2757{
   2758	int timeout = 0;
   2759	unsigned char status, parm0, parm1;
   2760
   2761	DAC960_BA_disable_intr(base);
   2762	DAC960_BA_ack_hw_mbox_status(base);
   2763	udelay(1000);
   2764	while (DAC960_BA_init_in_progress(base) &&
   2765	       timeout < MYRS_MAILBOX_TIMEOUT) {
   2766		if (DAC960_BA_read_error_status(base, &status,
   2767					      &parm0, &parm1) &&
   2768		    myrs_err_status(cs, status, parm0, parm1))
   2769			return -EIO;
   2770		udelay(10);
   2771		timeout++;
   2772	}
   2773	if (timeout == MYRS_MAILBOX_TIMEOUT) {
   2774		dev_err(&pdev->dev,
   2775			"Timeout waiting for Controller Initialisation\n");
   2776		return -ETIMEDOUT;
   2777	}
   2778	if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
   2779		dev_err(&pdev->dev,
   2780			"Unable to Enable Memory Mailbox Interface\n");
   2781		DAC960_BA_reset_ctrl(base);
   2782		return -EAGAIN;
   2783	}
   2784	DAC960_BA_enable_intr(base);
   2785	cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
   2786	cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
   2787	cs->disable_intr = DAC960_BA_disable_intr;
   2788	cs->reset = DAC960_BA_reset_ctrl;
   2789	return 0;
   2790}
   2791
   2792static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
   2793{
   2794	struct myrs_hba *cs = arg;
   2795	void __iomem *base = cs->io_base;
   2796	struct myrs_stat_mbox *next_stat_mbox;
   2797	unsigned long flags;
   2798
   2799	spin_lock_irqsave(&cs->queue_lock, flags);
   2800	DAC960_BA_ack_intr(base);
   2801	next_stat_mbox = cs->next_stat_mbox;
   2802	while (next_stat_mbox->id > 0) {
   2803		unsigned short id = next_stat_mbox->id;
   2804		struct scsi_cmnd *scmd = NULL;
   2805		struct myrs_cmdblk *cmd_blk = NULL;
   2806
   2807		if (id == MYRS_DCMD_TAG)
   2808			cmd_blk = &cs->dcmd_blk;
   2809		else if (id == MYRS_MCMD_TAG)
   2810			cmd_blk = &cs->mcmd_blk;
   2811		else {
   2812			scmd = scsi_host_find_tag(cs->host, id - 3);
   2813			if (scmd)
   2814				cmd_blk = scsi_cmd_priv(scmd);
   2815		}
   2816		if (cmd_blk) {
   2817			cmd_blk->status = next_stat_mbox->status;
   2818			cmd_blk->sense_len = next_stat_mbox->sense_len;
   2819			cmd_blk->residual = next_stat_mbox->residual;
    2820		} else {
    2821			dev_err(&cs->pdev->dev,
    2822				"Unhandled command completion %d\n", id);
		}
   2823
   2824		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
   2825		if (++next_stat_mbox > cs->last_stat_mbox)
   2826			next_stat_mbox = cs->first_stat_mbox;
   2827
   2828		if (cmd_blk) {
   2829			if (id < 3)
   2830				myrs_handle_cmdblk(cs, cmd_blk);
   2831			else
   2832				myrs_handle_scsi(cs, cmd_blk, scmd);
   2833		}
   2834	}
   2835	cs->next_stat_mbox = next_stat_mbox;
   2836	spin_unlock_irqrestore(&cs->queue_lock, flags);
   2837	return IRQ_HANDLED;
   2838}
   2839
   2840static struct myrs_privdata DAC960_BA_privdata = {
   2841	.hw_init =		DAC960_BA_hw_init,
   2842	.irq_handler =		DAC960_BA_intr_handler,
   2843	.mmio_size =		DAC960_BA_mmio_size,
   2844};
   2845
   2846/*
   2847 * DAC960 LP Series Controllers.
   2848 */
   2849
   2850static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
   2851{
   2852	writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
   2853}
   2854
   2855static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
   2856{
   2857	writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
   2858}
   2859
   2860static inline void DAC960_LP_reset_ctrl(void __iomem *base)
   2861{
   2862	writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
   2863}
   2864
   2865static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
   2866{
   2867	writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
   2868}
   2869
   2870static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
   2871{
   2872	u8 val;
   2873
   2874	val = readb(base + DAC960_LP_IDB_OFFSET);
   2875	return val & DAC960_LP_IDB_HWMBOX_FULL;
   2876}
   2877
   2878static inline bool DAC960_LP_init_in_progress(void __iomem *base)
   2879{
   2880	u8 val;
   2881
   2882	val = readb(base + DAC960_LP_IDB_OFFSET);
   2883	return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
   2884}
   2885
   2886static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
   2887{
   2888	writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
   2889}
   2890
   2891static inline void DAC960_LP_ack_intr(void __iomem *base)
   2892{
   2893	writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
   2894	       base + DAC960_LP_ODB_OFFSET);
   2895}
   2896
   2897static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
   2898{
   2899	u8 val;
   2900
   2901	val = readb(base + DAC960_LP_ODB_OFFSET);
   2902	return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
   2903}
   2904
   2905static inline void DAC960_LP_enable_intr(void __iomem *base)
   2906{
   2907	writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
   2908}
   2909
   2910static inline void DAC960_LP_disable_intr(void __iomem *base)
   2911{
   2912	writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
   2913}
   2914
   2915static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
   2916		union myrs_cmd_mbox *mbox)
   2917{
   2918	memcpy(&mem_mbox->words[1], &mbox->words[1],
   2919	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
   2920	/* Barrier to avoid reordering */
   2921	wmb();
   2922	mem_mbox->words[0] = mbox->words[0];
   2923	/* Barrier to force PCI access */
   2924	mb();
   2925}
   2926
   2927static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
   2928		dma_addr_t cmd_mbox_addr)
   2929{
   2930	dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
   2931}
   2932
   2933static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
   2934{
   2935	return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
   2936}
   2937
   2938static inline bool
   2939DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
   2940		unsigned char *param0, unsigned char *param1)
   2941{
   2942	u8 val;
   2943
   2944	val = readb(base + DAC960_LP_ERRSTS_OFFSET);
   2945	if (!(val & DAC960_LP_ERRSTS_PENDING))
   2946		return false;
   2947	val &= ~DAC960_LP_ERRSTS_PENDING;
   2948	*error = val;
   2949	*param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
   2950	*param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
   2951	writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
   2952	return true;
   2953}
   2954
   2955static inline unsigned char
   2956DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
   2957{
   2958	unsigned char status;
   2959
   2960	while (DAC960_LP_hw_mbox_is_full(base))
   2961		udelay(1);
   2962	DAC960_LP_write_hw_mbox(base, mbox_addr);
   2963	DAC960_LP_hw_mbox_new_cmd(base);
   2964	while (!DAC960_LP_hw_mbox_status_available(base))
   2965		udelay(1);
   2966	status = DAC960_LP_read_cmd_status(base);
   2967	DAC960_LP_ack_hw_mbox_intr(base);
   2968	DAC960_LP_ack_hw_mbox_status(base);
   2969
   2970	return status;
   2971}
   2972
   2973static int DAC960_LP_hw_init(struct pci_dev *pdev,
   2974		struct myrs_hba *cs, void __iomem *base)
   2975{
   2976	int timeout = 0;
   2977	unsigned char status, parm0, parm1;
   2978
   2979	DAC960_LP_disable_intr(base);
   2980	DAC960_LP_ack_hw_mbox_status(base);
   2981	udelay(1000);
   2982	while (DAC960_LP_init_in_progress(base) &&
   2983	       timeout < MYRS_MAILBOX_TIMEOUT) {
   2984		if (DAC960_LP_read_error_status(base, &status,
   2985					      &parm0, &parm1) &&
   2986		    myrs_err_status(cs, status, parm0, parm1))
   2987			return -EIO;
   2988		udelay(10);
   2989		timeout++;
   2990	}
   2991	if (timeout == MYRS_MAILBOX_TIMEOUT) {
   2992		dev_err(&pdev->dev,
   2993			"Timeout waiting for Controller Initialisation\n");
   2994		return -ETIMEDOUT;
   2995	}
   2996	if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
   2997		dev_err(&pdev->dev,
   2998			"Unable to Enable Memory Mailbox Interface\n");
   2999		DAC960_LP_reset_ctrl(base);
   3000		return -ENODEV;
   3001	}
   3002	DAC960_LP_enable_intr(base);
   3003	cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
   3004	cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
   3005	cs->disable_intr = DAC960_LP_disable_intr;
   3006	cs->reset = DAC960_LP_reset_ctrl;
   3007
   3008	return 0;
   3009}
   3010
   3011static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
   3012{
   3013	struct myrs_hba *cs = arg;
   3014	void __iomem *base = cs->io_base;
   3015	struct myrs_stat_mbox *next_stat_mbox;
   3016	unsigned long flags;
   3017
   3018	spin_lock_irqsave(&cs->queue_lock, flags);
   3019	DAC960_LP_ack_intr(base);
   3020	next_stat_mbox = cs->next_stat_mbox;
   3021	while (next_stat_mbox->id > 0) {
   3022		unsigned short id = next_stat_mbox->id;
   3023		struct scsi_cmnd *scmd = NULL;
   3024		struct myrs_cmdblk *cmd_blk = NULL;
   3025
   3026		if (id == MYRS_DCMD_TAG)
   3027			cmd_blk = &cs->dcmd_blk;
   3028		else if (id == MYRS_MCMD_TAG)
   3029			cmd_blk = &cs->mcmd_blk;
   3030		else {
   3031			scmd = scsi_host_find_tag(cs->host, id - 3);
   3032			if (scmd)
   3033				cmd_blk = scsi_cmd_priv(scmd);
   3034		}
   3035		if (cmd_blk) {
   3036			cmd_blk->status = next_stat_mbox->status;
   3037			cmd_blk->sense_len = next_stat_mbox->sense_len;
   3038			cmd_blk->residual = next_stat_mbox->residual;
    3039		} else {
    3040			dev_err(&cs->pdev->dev,
    3041				"Unhandled command completion %d\n", id);
		}
   3042
   3043		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
   3044		if (++next_stat_mbox > cs->last_stat_mbox)
   3045			next_stat_mbox = cs->first_stat_mbox;
   3046
   3047		if (cmd_blk) {
   3048			if (id < 3)
   3049				myrs_handle_cmdblk(cs, cmd_blk);
   3050			else
   3051				myrs_handle_scsi(cs, cmd_blk, scmd);
   3052		}
   3053	}
   3054	cs->next_stat_mbox = next_stat_mbox;
   3055	spin_unlock_irqrestore(&cs->queue_lock, flags);
   3056	return IRQ_HANDLED;
   3057}
   3058
   3059static struct myrs_privdata DAC960_LP_privdata = {
   3060	.hw_init =		DAC960_LP_hw_init,
   3061	.irq_handler =		DAC960_LP_intr_handler,
   3062	.mmio_size =		DAC960_LP_mmio_size,
   3063};
   3064
   3065/*
   3066 * Module functions
   3067 */
   3068static int
   3069myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
   3070{
   3071	struct myrs_hba *cs;
   3072	int ret;
   3073
   3074	cs = myrs_detect(dev, entry);
   3075	if (!cs)
   3076		return -ENODEV;
   3077
   3078	ret = myrs_get_config(cs);
   3079	if (ret < 0) {
   3080		myrs_cleanup(cs);
   3081		return ret;
   3082	}
   3083
   3084	if (!myrs_create_mempools(dev, cs)) {
   3085		ret = -ENOMEM;
   3086		goto failed;
   3087	}
   3088
   3089	ret = scsi_add_host(cs->host, &dev->dev);
   3090	if (ret) {
   3091		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
   3092		myrs_destroy_mempools(cs);
   3093		goto failed;
   3094	}
   3095	scsi_scan_host(cs->host);
   3096	return 0;
   3097failed:
   3098	myrs_cleanup(cs);
   3099	return ret;
   3100}
   3101
   3102
   3103static void myrs_remove(struct pci_dev *pdev)
   3104{
   3105	struct myrs_hba *cs = pci_get_drvdata(pdev);
   3106
   3107	if (cs == NULL)
   3108		return;
   3109
    3110	shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...\n");
   3111	myrs_flush_cache(cs);
   3112	myrs_destroy_mempools(cs);
   3113	myrs_cleanup(cs);
   3114}
   3115
   3116
   3117static const struct pci_device_id myrs_id_table[] = {
   3118	{
   3119		PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
   3120			       PCI_DEVICE_ID_MYLEX_DAC960_GEM,
   3121			       PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
   3122		.driver_data	= (unsigned long) &DAC960_GEM_privdata,
   3123	},
   3124	{
   3125		PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
   3126	},
   3127	{
   3128		PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
   3129	},
   3130	{0, },
   3131};
   3132
   3133MODULE_DEVICE_TABLE(pci, myrs_id_table);
   3134
   3135static struct pci_driver myrs_pci_driver = {
   3136	.name		= "myrs",
   3137	.id_table	= myrs_id_table,
   3138	.probe		= myrs_probe,
   3139	.remove		= myrs_remove,
   3140};
   3141
   3142static int __init myrs_init_module(void)
   3143{
   3144	int ret;
   3145
   3146	myrs_raid_template = raid_class_attach(&myrs_raid_functions);
   3147	if (!myrs_raid_template)
   3148		return -ENODEV;
   3149
   3150	ret = pci_register_driver(&myrs_pci_driver);
   3151	if (ret)
   3152		raid_class_release(myrs_raid_template);
   3153
   3154	return ret;
   3155}
   3156
   3157static void __exit myrs_cleanup_module(void)
   3158{
   3159	pci_unregister_driver(&myrs_pci_driver);
   3160	raid_class_release(myrs_raid_template);
   3161}
   3162
   3163module_init(myrs_init_module);
   3164module_exit(myrs_cleanup_module);
   3165
   3166MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
   3167MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
   3168MODULE_LICENSE("GPL");