cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

src.c (34518B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 *	Adaptec AAC series RAID controller driver
      4 *	(c) Copyright 2001 Red Hat Inc.
      5 *
      6 * based on the old aacraid driver that is..
      7 * Adaptec aacraid device driver for Linux.
      8 *
      9 * Copyright (c) 2000-2010 Adaptec, Inc.
     10 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
     11 *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
     12 *
     13 * Module Name:
     14 *  src.c
     15 *
     16 * Abstract: Hardware Device Interface for PMC SRC based controllers
     17 */
     18
     19#include <linux/kernel.h>
     20#include <linux/init.h>
     21#include <linux/types.h>
     22#include <linux/pci.h>
     23#include <linux/spinlock.h>
     24#include <linux/slab.h>
     25#include <linux/blkdev.h>
     26#include <linux/delay.h>
     27#include <linux/completion.h>
     28#include <linux/time.h>
     29#include <linux/interrupt.h>
     30#include <scsi/scsi_host.h>
     31
     32#include "aacraid.h"
     33
     34static int aac_src_get_sync_status(struct aac_dev *dev);
     35
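/*
 *	aac_src_intr_message	-	per-vector interrupt handler
 *	@irq: interrupt number
 *	@dev_id: aac_msix_ctx of the vector that fired
 *
 *	Decodes the outbound doorbell (INTx) or MSI doorbell state, then
 *	dispatches synchronous command completions, AIF events and the
 *	host RRQ entries owned by this vector.
 */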
     36static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
     37{
     38	struct aac_msix_ctx *ctx;
     39	struct aac_dev *dev;
     40	unsigned long bellbits, bellbits_shifted;
     41	int vector_no;
     42	int isFastResponse, mode;
     43	u32 index, handle;
     44
     45	ctx = (struct aac_msix_ctx *)dev_id;
     46	dev = ctx->dev;
     47	vector_no = ctx->vector_no;
     48
     49	if (dev->msi_enabled) {
     50		mode = AAC_INT_MODE_MSI;
     51		if (vector_no == 0) {
     52			bellbits = src_readl(dev, MUnit.ODR_MSI);
     53			if (bellbits & 0x40000)
     54				mode |= AAC_INT_MODE_AIF;
     55			if (bellbits & 0x1000)
     56				mode |= AAC_INT_MODE_SYNC;
     57		}
     58	} else {
     59		mode = AAC_INT_MODE_INTX;
     60		bellbits = src_readl(dev, MUnit.ODR_R);
     61		if (bellbits & PmDoorBellResponseSent) {
     62			bellbits = PmDoorBellResponseSent;
     63			src_writel(dev, MUnit.ODR_C, bellbits);
     64			src_readl(dev, MUnit.ODR_C);
     65		} else {
     66			bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
     67			src_writel(dev, MUnit.ODR_C, bellbits);
     68			src_readl(dev, MUnit.ODR_C);
     69
     70			if (bellbits_shifted & DoorBellAifPending)
     71				mode |= AAC_INT_MODE_AIF;
     72			else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
     73				mode |= AAC_INT_MODE_SYNC;
     74		}
     75	}
     76
     77	if (mode & AAC_INT_MODE_SYNC) {
     78		unsigned long sflags;
     79		struct list_head *entry;
     80		int send_it = 0;
     81		extern int aac_sync_mode;
     82
     83		if (!aac_sync_mode && !dev->msi_enabled) {
     84			src_writel(dev, MUnit.ODR_C, bellbits);
     85			src_readl(dev, MUnit.ODR_C);
     86		}
     87
     88		if (dev->sync_fib) {
     89			if (dev->sync_fib->callback)
     90				dev->sync_fib->callback(dev->sync_fib->callback_data,
     91					dev->sync_fib);
     92			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
     93			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
     94				dev->management_fib_count--;
     95				complete(&dev->sync_fib->event_wait);
     96			}
     97			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
     98						sflags);
     99			spin_lock_irqsave(&dev->sync_lock, sflags);
    100			if (!list_empty(&dev->sync_fib_list)) {
    101				entry = dev->sync_fib_list.next;
    102				dev->sync_fib = list_entry(entry,
    103							   struct fib,
    104							   fiblink);
    105				list_del(entry);
    106				send_it = 1;
    107			} else {
    108				dev->sync_fib = NULL;
    109			}
    110			spin_unlock_irqrestore(&dev->sync_lock, sflags);
    111			if (send_it) {
    112				aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
    113					(u32)dev->sync_fib->hw_fib_pa,
    114					0, 0, 0, 0, 0,
    115					NULL, NULL, NULL, NULL, NULL);
    116			}
    117		}
    118		if (!dev->msi_enabled)
    119			mode = 0;
    120
    121	}
    122
    123	if (mode & AAC_INT_MODE_AIF) {
    124		/* handle AIF */
    125		if (dev->sa_firmware) {
    126			u32 events = src_readl(dev, MUnit.SCR0);
    127
    128			aac_intr_normal(dev, events, 1, 0, NULL);
    129			writel(events, &dev->IndexRegs->Mailbox[0]);
    130			src_writel(dev, MUnit.IDR, 1 << 23);
    131		} else {
    132			if (dev->aif_thread && dev->fsa_dev)
    133				aac_intr_normal(dev, 0, 2, 0, NULL);
    134		}
    135		if (dev->msi_enabled)
    136			aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
    137		mode = 0;
    138	}
    139
    140	if (mode) {
    141		index = dev->host_rrq_idx[vector_no];
    142
    143		for (;;) {
    144			isFastResponse = 0;
    145			/* remove toggle bit (31) */
    146			handle = le32_to_cpu((dev->host_rrq[index])
    147				& 0x7fffffff);
    148			/* check fast response bits (30, 1) */
    149			if (handle & 0x40000000)
    150				isFastResponse = 1;
    151			handle &= 0x0000ffff;
    152			if (handle == 0)
    153				break;
    154			handle >>= 2;
    155			if (dev->msi_enabled && dev->max_msix > 1)
    156				atomic_dec(&dev->rrq_outstanding[vector_no]);
    157			aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
    158			dev->host_rrq[index++] = 0;
    159			if (index == (vector_no + 1) * dev->vector_cap)
    160				index = vector_no * dev->vector_cap;
    161			dev->host_rrq_idx[vector_no] = index;
    162		}
    163		mode = 0;
    164	}
    165
    166	return IRQ_HANDLED;
    167}
    168
    169/**
    170 *	aac_src_disable_interrupt	-	Disable interrupts
    171 *	@dev: Adapter
    172 */
    173
    174static void aac_src_disable_interrupt(struct aac_dev *dev)
    175{
    176	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
    177}
    178
    179/**
    180 *	aac_src_enable_interrupt_message	-	Enable interrupts
    181 *	@dev: Adapter
    182 */
    183
    184static void aac_src_enable_interrupt_message(struct aac_dev *dev)
    185{
    186	aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
    187}
    188
    189/**
    190 *	src_sync_cmd	-	send a command and wait
    191 *	@dev: Adapter
    192 *	@command: Command to execute
    193 *	@p1: first parameter
    194 *	@p2: second parameter
    195 *	@p3: third parameter
     196 *	@p4: fourth parameter
    197 *	@p5: fifth parameter
    198 *	@p6: sixth parameter
    199 *	@status: adapter status
    200 *	@r1: first return value
     201 *	@r2: second return value
     202 *	@r3: third return value
     203 *	@r4: fourth return value
    204 *
    205 *	This routine will send a synchronous command to the adapter and wait
    206 *	for its	completion.
    207 */
    208
    209static int src_sync_cmd(struct aac_dev *dev, u32 command,
    210	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
    211	u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
    212{
    213	unsigned long start;
    214	unsigned long delay;
    215	int ok;
    216
    217	/*
    218	 *	Write the command into Mailbox 0
    219	 */
    220	writel(command, &dev->IndexRegs->Mailbox[0]);
    221	/*
    222	 *	Write the parameters into Mailboxes 1 - 6
    223	 */
    224	writel(p1, &dev->IndexRegs->Mailbox[1]);
    225	writel(p2, &dev->IndexRegs->Mailbox[2]);
    226	writel(p3, &dev->IndexRegs->Mailbox[3]);
    227	writel(p4, &dev->IndexRegs->Mailbox[4]);
    228
    229	/*
    230	 *	Clear the synch command doorbell to start on a clean slate.
    231	 */
    232	if (!dev->msi_enabled)
    233		src_writel(dev,
    234			   MUnit.ODR_C,
    235			   OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
    236
    237	/*
    238	 *	Disable doorbell interrupts
    239	 */
    240	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
    241
    242	/*
    243	 *	Force the completion of the mask register write before issuing
    244	 *	the interrupt.
    245	 */
    246	src_readl(dev, MUnit.OIMR);
    247
    248	/*
    249	 *	Signal that there is a new synch command
    250	 */
    251	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);
    252
    253	if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) &&
    254		!dev->in_soft_reset) {
    255		ok = 0;
    256		start = jiffies;
    257
    258		if (command == IOP_RESET_ALWAYS) {
    259			/* Wait up to 10 sec */
    260			delay = 10*HZ;
    261		} else {
    262			/* Wait up to 5 minutes */
    263			delay = 300*HZ;
    264		}
    265		while (time_before(jiffies, start+delay)) {
    266			udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
    267			/*
    268			 *	Mon960 will set doorbell0 bit when it has completed the command.
    269			 */
    270			if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
    271				/*
    272				 *	Clear the doorbell.
    273				 */
    274				if (dev->msi_enabled)
    275					aac_src_access_devreg(dev,
    276						AAC_CLEAR_SYNC_BIT);
    277				else
    278					src_writel(dev,
    279						MUnit.ODR_C,
    280						OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
    281				ok = 1;
    282				break;
    283			}
    284			/*
    285			 *	Yield the processor in case we are slow
    286			 */
    287			msleep(1);
    288		}
    289		if (unlikely(ok != 1)) {
    290			/*
    291			 *	Restore interrupt mask even though we timed out
    292			 */
    293			aac_adapter_enable_int(dev);
    294			return -ETIMEDOUT;
    295		}
    296		/*
    297		 *	Pull the synch status from Mailbox 0.
    298		 */
    299		if (status)
    300			*status = readl(&dev->IndexRegs->Mailbox[0]);
    301		if (r1)
    302			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
    303		if (r2)
    304			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
    305		if (r3)
    306			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
    307		if (r4)
    308			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
    309		if (command == GET_COMM_PREFERRED_SETTINGS)
    310			dev->max_msix =
    311				readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
    312		/*
    313		 *	Clear the synch command doorbell.
    314		 */
    315		if (!dev->msi_enabled)
    316			src_writel(dev,
    317				MUnit.ODR_C,
    318				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
    319	}
    320
    321	/*
    322	 *	Restore interrupt mask
    323	 */
    324	aac_adapter_enable_int(dev);
    325	return 0;
    326}
    327
    328/**
    329 *	aac_src_interrupt_adapter	-	interrupt adapter
    330 *	@dev: Adapter
    331 *
    332 *	Send an interrupt to the i960 and breakpoint it.
    333 */
    334
    335static void aac_src_interrupt_adapter(struct aac_dev *dev)
    336{
    337	src_sync_cmd(dev, BREAKPOINT_REQUEST,
    338		0, 0, 0, 0, 0, 0,
    339		NULL, NULL, NULL, NULL, NULL);
    340}
    341
    342/**
    343 *	aac_src_notify_adapter		-	send an event to the adapter
    344 *	@dev: Adapter
    345 *	@event: Event to send
    346 *
    347 *	Notify the i960 that something it probably cares about has
    348 *	happened.
    349 */
    350
    351static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
    352{
    353	switch (event) {
    354
    355	case AdapNormCmdQue:
    356		src_writel(dev, MUnit.ODR_C,
    357			INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
    358		break;
    359	case HostNormRespNotFull:
    360		src_writel(dev, MUnit.ODR_C,
    361			INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
    362		break;
    363	case AdapNormRespQue:
    364		src_writel(dev, MUnit.ODR_C,
    365			INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
    366		break;
    367	case HostNormCmdNotFull:
    368		src_writel(dev, MUnit.ODR_C,
    369			INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
    370		break;
    371	case FastIo:
    372		src_writel(dev, MUnit.ODR_C,
    373			INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
    374		break;
    375	case AdapPrintfDone:
    376		src_writel(dev, MUnit.ODR_C,
    377			INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
    378		break;
    379	default:
    380		BUG();
    381		break;
    382	}
    383}
    384
    385/**
    386 *	aac_src_start_adapter		-	activate adapter
    387 *	@dev:	Adapter
    388 *
    389 *	Start up processing on an i960 based AAC adapter
    390 */
    391
    392static void aac_src_start_adapter(struct aac_dev *dev)
    393{
    394	union aac_init *init;
    395	int i;
    396
    397	 /* reset host_rrq_idx first */
    398	for (i = 0; i < dev->max_msix; i++) {
    399		dev->host_rrq_idx[i] = i * dev->vector_cap;
    400		atomic_set(&dev->rrq_outstanding[i], 0);
    401	}
    402	atomic_set(&dev->msix_counter, 0);
    403	dev->fibs_pushed_no = 0;
    404
    405	init = dev->init;
    406	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
    407		init->r8.host_elapsed_seconds =
    408			cpu_to_le32(ktime_get_real_seconds());
    409		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
    410			lower_32_bits(dev->init_pa),
    411			upper_32_bits(dev->init_pa),
    412			sizeof(struct _r8) +
    413			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
    414			0, 0, 0, NULL, NULL, NULL, NULL, NULL);
    415	} else {
    416		init->r7.host_elapsed_seconds =
    417			cpu_to_le32(ktime_get_real_seconds());
    418		// We can only use a 32 bit address here
    419		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
    420			(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
    421			NULL, NULL, NULL, NULL, NULL);
    422	}
    423
    424}
    425
    426/**
    427 *	aac_src_check_health
    428 *	@dev: device to check if healthy
    429 *
    430 *	Will attempt to determine if the specified adapter is alive and
    431 *	capable of handling requests, returning 0 if alive.
    432 */
    433static int aac_src_check_health(struct aac_dev *dev)
    434{
    435	u32 status = src_readl(dev, MUnit.OMR);
    436
    437	/*
    438	 *	Check to see if the board panic'd.
    439	 */
    440	if (unlikely(status & KERNEL_PANIC))
    441		goto err_blink;
    442
    443	/*
    444	 *	Check to see if the board failed any self tests.
    445	 */
    446	if (unlikely(status & SELF_TEST_FAILED))
    447		goto err_out;
    448
    449	/*
     450	 *	Check to see if the monitor panic'd.
    451	 */
    452	if (unlikely(status & MONITOR_PANIC))
    453		goto err_out;
    454
    455	/*
    456	 *	Wait for the adapter to be up and running.
    457	 */
    458	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
    459		return -3;
    460	/*
    461	 *	Everything is OK
    462	 */
    463	return 0;
    464
    465err_out:
    466	return -1;
    467
    468err_blink:
    469	return (status >> 16) & 0xFF;
    470}
    471
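/* Pick the next reply vector round-robin across all MSI-X vectors. */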
    472static inline u32 aac_get_vector(struct aac_dev *dev)
    473{
    474	return atomic_inc_return(&dev->msix_counter)%dev->max_msix;
    475}
    476
    477/**
    478 *	aac_src_deliver_message
    479 *	@fib: fib to issue
    480 *
    481 *	Will send a fib, returning 0 if successful.
    482 */
    483static int aac_src_deliver_message(struct fib *fib)
    484{
    485	struct aac_dev *dev = fib->dev;
    486	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
    487	u32 fibsize;
    488	dma_addr_t address;
    489	struct aac_fib_xporthdr *pFibX;
    490	int native_hba;
    491#if !defined(writeq)
    492	unsigned long flags;
    493#endif
    494
    495	u16 vector_no;
    496
    497	atomic_inc(&q->numpending);
    498
    499	native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0;
    500
    501
    502	if (dev->msi_enabled && dev->max_msix > 1 &&
    503		(native_hba || fib->hw_fib_va->header.Command != AifRequest)) {
    504
    505		if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
    506			&& dev->sa_firmware)
    507			vector_no = aac_get_vector(dev);
    508		else
    509			vector_no = fib->vector_no;
    510
    511		if (native_hba) {
    512			if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
    513				struct aac_hba_tm_req *tm_req;
    514
    515				tm_req = (struct aac_hba_tm_req *)
    516						fib->hw_fib_va;
    517				if (tm_req->iu_type ==
    518					HBA_IU_TYPE_SCSI_TM_REQ) {
    519					((struct aac_hba_tm_req *)
    520						fib->hw_fib_va)->reply_qid
    521							= vector_no;
    522					((struct aac_hba_tm_req *)
    523						fib->hw_fib_va)->request_id
    524							+= (vector_no << 16);
    525				} else {
    526					((struct aac_hba_reset_req *)
    527						fib->hw_fib_va)->reply_qid
    528							= vector_no;
    529					((struct aac_hba_reset_req *)
    530						fib->hw_fib_va)->request_id
    531							+= (vector_no << 16);
    532				}
    533			} else {
    534				((struct aac_hba_cmd_req *)
    535					fib->hw_fib_va)->reply_qid
    536						= vector_no;
    537				((struct aac_hba_cmd_req *)
    538					fib->hw_fib_va)->request_id
    539						+= (vector_no << 16);
    540			}
    541		} else {
    542			fib->hw_fib_va->header.Handle += (vector_no << 16);
    543		}
    544	} else {
    545		vector_no = 0;
    546	}
    547
    548	atomic_inc(&dev->rrq_outstanding[vector_no]);
    549
    550	if (native_hba) {
    551		address = fib->hw_fib_pa;
    552		fibsize = (fib->hbacmd_size + 127) / 128 - 1;
    553		if (fibsize > 31)
    554			fibsize = 31;
    555		address |= fibsize;
    556#if defined(writeq)
    557		src_writeq(dev, MUnit.IQN_L, (u64)address);
    558#else
    559		spin_lock_irqsave(&fib->dev->iq_lock, flags);
    560		src_writel(dev, MUnit.IQN_H,
    561			upper_32_bits(address) & 0xffffffff);
    562		src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
    563		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
    564#endif
    565	} else {
    566		if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
    567			dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
     568			/* Compute the fibsize field: size in 128-byte units, minus one */
    569			fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size)
    570				+ 127) / 128 - 1;
    571			/* New FIB header, 32-bit */
    572			address = fib->hw_fib_pa;
    573			fib->hw_fib_va->header.StructType = FIB_MAGIC2;
    574			fib->hw_fib_va->header.SenderFibAddress =
    575				cpu_to_le32((u32)address);
    576			fib->hw_fib_va->header.u.TimeStamp = 0;
    577			WARN_ON(upper_32_bits(address) != 0L);
    578		} else {
     579			/* Compute the fibsize field: size in 128-byte units, minus one */
    580			fibsize = (sizeof(struct aac_fib_xporthdr) +
    581				le16_to_cpu(fib->hw_fib_va->header.Size)
    582				+ 127) / 128 - 1;
    583			/* Fill XPORT header */
    584			pFibX = (struct aac_fib_xporthdr *)
    585				((unsigned char *)fib->hw_fib_va -
    586				sizeof(struct aac_fib_xporthdr));
    587			pFibX->Handle = fib->hw_fib_va->header.Handle;
    588			pFibX->HostAddress =
    589				cpu_to_le64((u64)fib->hw_fib_pa);
    590			pFibX->Size = cpu_to_le32(
    591				le16_to_cpu(fib->hw_fib_va->header.Size));
    592			address = fib->hw_fib_pa -
    593				(u64)sizeof(struct aac_fib_xporthdr);
    594		}
    595		if (fibsize > 31)
    596			fibsize = 31;
    597		address |= fibsize;
    598
    599#if defined(writeq)
    600		src_writeq(dev, MUnit.IQ_L, (u64)address);
    601#else
    602		spin_lock_irqsave(&fib->dev->iq_lock, flags);
    603		src_writel(dev, MUnit.IQ_H,
    604			upper_32_bits(address) & 0xffffffff);
    605		src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
    606		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
    607#endif
    608	}
    609	return 0;
    610}
    611
    612/**
    613 *	aac_src_ioremap
    614 *	@dev: device ioremap
    615 *	@size: mapping resize request
    616 *
    617 */
    618static int aac_src_ioremap(struct aac_dev *dev, u32 size)
    619{
    620	if (!size) {
    621		iounmap(dev->regs.src.bar1);
    622		dev->regs.src.bar1 = NULL;
    623		iounmap(dev->regs.src.bar0);
    624		dev->base = dev->regs.src.bar0 = NULL;
    625		return 0;
    626	}
    627	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
    628		AAC_MIN_SRC_BAR1_SIZE);
    629	dev->base = NULL;
    630	if (dev->regs.src.bar1 == NULL)
    631		return -1;
    632	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
    633	if (dev->base == NULL) {
    634		iounmap(dev->regs.src.bar1);
    635		dev->regs.src.bar1 = NULL;
    636		return -1;
    637	}
    638	dev->IndexRegs = &((struct src_registers __iomem *)
    639		dev->base)->u.tupelo.IndexRegs;
    640	return 0;
    641}
    642
    643/**
    644 *  aac_srcv_ioremap
     645 *	@dev: device to map
     646 *	@size: requested mapping size (0 unmaps the registers)
    647 *
    648 */
    649static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
    650{
    651	if (!size) {
    652		iounmap(dev->regs.src.bar0);
    653		dev->base = dev->regs.src.bar0 = NULL;
    654		return 0;
    655	}
    656
    657	dev->regs.src.bar1 =
    658	ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
    659	dev->base = NULL;
    660	if (dev->regs.src.bar1 == NULL)
    661		return -1;
    662	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
    663	if (dev->base == NULL) {
    664		iounmap(dev->regs.src.bar1);
    665		dev->regs.src.bar1 = NULL;
    666		return -1;
    667	}
    668	dev->IndexRegs = &((struct src_registers __iomem *)
    669		dev->base)->u.denali.IndexRegs;
    670	return 0;
    671}
    672
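/*
 *	aac_set_intx_mode	-	fall back to legacy INTx interrupts
 *	@dev: Adapter
 *
 *	If MSI is enabled, tells the firmware to switch back to INTx and
 *	waits for the change to take effect.
 */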
    673void aac_set_intx_mode(struct aac_dev *dev)
    674{
    675	if (dev->msi_enabled) {
    676		aac_src_access_devreg(dev, AAC_ENABLE_INTX);
    677		dev->msi_enabled = 0;
    678		msleep(5000); /* Delay 5 seconds */
    679	}
    680}
    681
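/*
 *	Clear the outbound message register, preserving only the MSI-X
 *	mode bit (and clearing even that if the register reads back as
 *	invalid or reports a firmware kernel panic).
 */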
    682static void aac_clear_omr(struct aac_dev *dev)
    683{
    684	u32 omr_value = 0;
    685
    686	omr_value = src_readl(dev, MUnit.OMR);
    687
    688	/*
    689	 * Check for PCI Errors or Kernel Panic
    690	 */
    691	if ((omr_value == INVALID_OMR) || (omr_value & KERNEL_PANIC))
    692		omr_value = 0;
    693
    694	/*
    695	 * Preserve MSIX Value if any
    696	 */
    697	src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX);
    698	src_readl(dev, MUnit.OMR);
    699}
    700
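/*
 *	Ask the firmware to dump its FIBs ahead of an IOP reset, provided
 *	aac_fib_dump is set and the firmware advertises
 *	AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP.
 */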
    701static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
    702{
    703	__le32 supported_options3;
    704
    705	if (!aac_fib_dump)
    706		return;
    707
    708	supported_options3  = dev->supplement_adapter_info.supported_options3;
    709	if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP))
    710		return;
    711
    712	aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
    713			0, 0, 0,  0, 0, 0, NULL, NULL, NULL, NULL, NULL);
    714}
    715
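/*
 *	Poll the outbound message register until the firmware reports
 *	KERNEL_UP_AND_RUNNING.  The timeout restarts while the firmware
 *	is still booting and expires after SOFT_RESET_TIME seconds.
 */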
    716static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
    717{
    718	bool ctrl_up = true;
    719	unsigned long status, start;
    720	bool is_up = false;
    721
    722	start = jiffies;
    723	do {
    724		schedule();
    725		status = src_readl(dev, MUnit.OMR);
    726
    727		if (status == 0xffffffff)
    728			status = 0;
    729
    730		if (status & KERNEL_BOOTING) {
    731			start = jiffies;
    732			continue;
    733		}
    734
    735		if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
    736			ctrl_up = false;
    737			break;
    738		}
    739
    740		is_up = status & KERNEL_UP_AND_RUNNING;
    741
    742	} while (!is_up);
    743
    744	return ctrl_up;
    745}
    746
    747static void aac_src_drop_io(struct aac_dev *dev)
    748{
    749	if (!dev->soft_reset_support)
    750		return;
    751
    752	aac_adapter_sync_cmd(dev, DROP_IO,
    753			0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
    754}
    755
    756static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
    757{
    758	aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
    759						NULL, NULL, NULL, NULL);
    760	aac_src_drop_io(dev);
    761}
    762
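/*
 *	Full IOP reset sequence: dump firmware FIBs if supported, notify
 *	the firmware, drop back to INTx, clear the OMR and then hit the
 *	reset bit in the IDR.
 */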
    763static void aac_send_iop_reset(struct aac_dev *dev)
    764{
    765	aac_dump_fw_fib_iop_reset(dev);
    766
    767	aac_notify_fw_of_iop_reset(dev);
    768
    769	aac_set_intx_mode(dev);
    770
    771	aac_clear_omr(dev);
    772
    773	src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
    774
    775	msleep(5000);
    776}
    777
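/*
 *	Trigger a hardware soft reset by setting bit 0 of the register at
 *	IBW_SWR_OFFSET, then give the firmware time to come back up.
 */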
    778static void aac_send_hardware_soft_reset(struct aac_dev *dev)
    779{
    780	u_int32_t val;
    781
    782	aac_clear_omr(dev);
    783	val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
    784	val |= 0x01;
    785	writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
    786	msleep_interruptible(20000);
    787}
    788
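/*
 *	aac_src_restart_adapter	-	reset the controller
 *	@dev: Adapter
 *	@bled: BlinkLED status (negative means only check for a panic)
 *	@reset_type: HW_IOP_RESET and/or HW_SOFT_RESET
 *
 *	Tries an IOP reset first and, on sa_firmware controllers, falls
 *	back to a hardware soft reset if that does not bring the
 *	controller back up.
 */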
    789static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
    790{
    791	bool is_ctrl_up;
    792	int ret = 0;
    793
    794	if (bled < 0)
    795		goto invalid_out;
    796
    797	if (bled)
    798		dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);
    799
    800	/*
     801	 * When there is a BlinkLED, IOP_RESET has no effect
    802	 */
    803	if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
    804		reset_type &= ~HW_IOP_RESET;
    805
    806	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
    807
    808	dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);
    809
    810	if (reset_type & HW_IOP_RESET) {
    811		dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
    812		aac_send_iop_reset(dev);
    813
    814		/*
     815		 * Wait until the controller reports up and running (or times out)
    816		 */
    817		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
    818		if (!is_ctrl_up)
    819			dev_err(&dev->pdev->dev, "IOP reset failed\n");
    820		else {
    821			dev_info(&dev->pdev->dev, "IOP reset succeeded\n");
    822			goto set_startup;
    823		}
    824	}
    825
    826	if (!dev->sa_firmware) {
    827		dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
    828		ret = -ENODEV;
    829		goto out;
    830	}
    831
    832	if (reset_type & HW_SOFT_RESET) {
    833		dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
    834		aac_send_hardware_soft_reset(dev);
    835		dev->msi_enabled = 0;
    836
    837		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
    838		if (!is_ctrl_up) {
    839			dev_err(&dev->pdev->dev, "SOFT reset failed\n");
    840			ret = -ENODEV;
    841			goto out;
    842		} else
    843			dev_info(&dev->pdev->dev, "SOFT reset succeeded\n");
    844	}
    845
    846set_startup:
    847	if (startup_timeout < 300)
    848		startup_timeout = 300;
    849
    850out:
    851	return ret;
    852
    853invalid_out:
    854	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
    855		ret = -ENODEV;
     856	goto out;
    857}
    858
    859/**
    860 *	aac_src_select_comm	-	Select communications method
    861 *	@dev: Adapter
    862 *	@comm: communications method
    863 */
    864static int aac_src_select_comm(struct aac_dev *dev, int comm)
    865{
    866	switch (comm) {
    867	case AAC_COMM_MESSAGE:
    868		dev->a_ops.adapter_intr = aac_src_intr_message;
    869		dev->a_ops.adapter_deliver = aac_src_deliver_message;
    870		break;
    871	default:
    872		return 1;
    873	}
    874	return 0;
    875}
    876
    877/**
     878 *  aac_src_init	-	initialize a Cardinal Frey Bar card
    879 *  @dev: device to configure
    880 *
    881 */
    882
    883int aac_src_init(struct aac_dev *dev)
    884{
    885	unsigned long start;
    886	unsigned long status;
    887	int restart = 0;
    888	int instance = dev->id;
    889	const char *name = dev->name;
    890
    891	dev->a_ops.adapter_ioremap = aac_src_ioremap;
    892	dev->a_ops.adapter_comm = aac_src_select_comm;
    893
    894	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
    895	if (aac_adapter_ioremap(dev, dev->base_size)) {
    896		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
    897		goto error_iounmap;
    898	}
    899
    900	/* Failure to reset here is an option ... */
    901	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
    902	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
    903
    904	if (dev->init_reset) {
    905		dev->init_reset = false;
    906		if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
    907			++restart;
    908	}
    909
    910	/*
    911	 *	Check to see if the board panic'd while booting.
    912	 */
    913	status = src_readl(dev, MUnit.OMR);
    914	if (status & KERNEL_PANIC) {
    915		if (aac_src_restart_adapter(dev,
    916			aac_src_check_health(dev), IOP_HWSOFT_RESET))
    917			goto error_iounmap;
    918		++restart;
    919	}
    920	/*
    921	 *	Check to see if the board failed any self tests.
    922	 */
    923	status = src_readl(dev, MUnit.OMR);
    924	if (status & SELF_TEST_FAILED) {
    925		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
    926			dev->name, instance);
    927		goto error_iounmap;
    928	}
    929	/*
    930	 *	Check to see if the monitor panic'd while booting.
    931	 */
    932	if (status & MONITOR_PANIC) {
    933		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
    934			dev->name, instance);
    935		goto error_iounmap;
    936	}
    937	start = jiffies;
    938	/*
    939	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
    940	 */
    941	while (!((status = src_readl(dev, MUnit.OMR)) &
    942		KERNEL_UP_AND_RUNNING)) {
    943		if ((restart &&
    944		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
    945		  time_after(jiffies, start+HZ*startup_timeout)) {
    946			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
    947					dev->name, instance, status);
    948			goto error_iounmap;
    949		}
    950		if (!restart &&
    951		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
    952		  time_after(jiffies, start + HZ *
    953		  ((startup_timeout > 60)
    954		    ? (startup_timeout - 60)
    955		    : (startup_timeout / 2))))) {
    956			if (likely(!aac_src_restart_adapter(dev,
    957				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
    958				start = jiffies;
    959			++restart;
    960		}
    961		msleep(1);
    962	}
    963	if (restart && aac_commit)
    964		aac_commit = 1;
    965	/*
    966	 *	Fill in the common function dispatch table.
    967	 */
    968	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
    969	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
    970	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
    971	dev->a_ops.adapter_notify = aac_src_notify_adapter;
    972	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
    973	dev->a_ops.adapter_check_health = aac_src_check_health;
    974	dev->a_ops.adapter_restart = aac_src_restart_adapter;
    975	dev->a_ops.adapter_start = aac_src_start_adapter;
    976
    977	/*
     978	 *	First clear out all interrupts.  Then enable the ones that we
    979	 *	can handle.
    980	 */
    981	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
    982	aac_adapter_disable_int(dev);
    983	src_writel(dev, MUnit.ODR_C, 0xffffffff);
    984	aac_adapter_enable_int(dev);
    985
    986	if (aac_init_adapter(dev) == NULL)
    987		goto error_iounmap;
    988	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
    989		goto error_iounmap;
    990
    991	dev->msi = !pci_enable_msi(dev->pdev);
    992
    993	dev->aac_msix[0].vector_no = 0;
    994	dev->aac_msix[0].dev = dev;
    995
    996	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
    997			IRQF_SHARED, "aacraid", &(dev->aac_msix[0]))  < 0) {
    998
    999		if (dev->msi)
   1000			pci_disable_msi(dev->pdev);
   1001
   1002		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
   1003			name, instance);
   1004		goto error_iounmap;
   1005	}
   1006	dev->dbg_base = pci_resource_start(dev->pdev, 2);
   1007	dev->dbg_base_mapped = dev->regs.src.bar1;
   1008	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
   1009	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
   1010
   1011	aac_adapter_enable_int(dev);
   1012
   1013	if (!dev->sync_mode) {
   1014		/*
   1015		 * Tell the adapter that all is configured, and it can
   1016		 * start accepting requests
   1017		 */
   1018		aac_src_start_adapter(dev);
   1019	}
   1020	return 0;
   1021
   1022error_iounmap:
   1023
   1024	return -1;
   1025}
   1026
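/*
 *	aac_src_wait_sync	-	wait for a sync command to complete
 *	@dev: Adapter
 *	@status: array of five words filled from Mailbox 0-4 on success
 *
 *	Returns 0 if the firmware raised the sync doorbell within
 *	5 seconds, nonzero on timeout.
 */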
   1027static int aac_src_wait_sync(struct aac_dev *dev, int *status)
   1028{
   1029	unsigned long start = jiffies;
   1030	unsigned long usecs = 0;
   1031	int delay = 5 * HZ;
   1032	int rc = 1;
   1033
   1034	while (time_before(jiffies, start+delay)) {
   1035		/*
   1036		 * Delay 5 microseconds to let Mon960 get info.
   1037		 */
   1038		udelay(5);
   1039
   1040		/*
   1041		 * Mon960 will set doorbell0 bit when it has completed the
   1042		 * command.
   1043		 */
   1044		if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
   1045			/*
    1046			 * Clear the doorbell.
   1047			 */
   1048			if (dev->msi_enabled)
   1049				aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT);
   1050			else
   1051				src_writel(dev, MUnit.ODR_C,
   1052					OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
   1053			rc = 0;
   1054
   1055			break;
   1056		}
   1057
   1058		/*
   1059		 * Yield the processor in case we are slow
   1060		 */
   1061		usecs = 1 * USEC_PER_MSEC;
   1062		usleep_range(usecs, usecs + 50);
   1063	}
   1064	/*
   1065	 * Pull the synch status from Mailbox 0.
   1066	 */
   1067	if (status && !rc) {
   1068		status[0] = readl(&dev->IndexRegs->Mailbox[0]);
   1069		status[1] = readl(&dev->IndexRegs->Mailbox[1]);
   1070		status[2] = readl(&dev->IndexRegs->Mailbox[2]);
   1071		status[3] = readl(&dev->IndexRegs->Mailbox[3]);
   1072		status[4] = readl(&dev->IndexRegs->Mailbox[4]);
   1073	}
   1074
   1075	return rc;
   1076}
   1077
   1078/**
   1079 *  aac_src_soft_reset	-	perform soft reset to speed up
   1080 *  access
   1081 *
   1082 *  Assumptions: That the controller is in a state where we can
   1083 *  bring it back to life with an init struct. We can only use
   1084 *  fast sync commands, as the timeout is 5 seconds.
   1085 *
   1086 *  @dev: device to configure
   1087 *
   1088 */
   1089
   1090static int aac_src_soft_reset(struct aac_dev *dev)
   1091{
   1092	u32 status_omr = src_readl(dev, MUnit.OMR);
   1093	u32 status[5];
   1094	int rc = 1;
   1095	int state = 0;
   1096	char *state_str[7] = {
   1097		"GET_ADAPTER_PROPERTIES Failed",
   1098		"GET_ADAPTER_PROPERTIES timeout",
   1099		"SOFT_RESET not supported",
   1100		"DROP_IO Failed",
   1101		"DROP_IO timeout",
   1102		"Check Health failed"
   1103	};
   1104
   1105	if (status_omr == INVALID_OMR)
   1106		return 1;       // pcie hosed
   1107
   1108	if (!(status_omr & KERNEL_UP_AND_RUNNING))
   1109		return 1;       // not up and running
   1110
   1111	/*
    1112	 * We go into soft reset mode so that we can handle the responses
   1113	 */
   1114	dev->in_soft_reset = 1;
   1115	dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX;
   1116
   1117	/* Get adapter properties */
   1118	rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0,
   1119		0, 0, 0, status+0, status+1, status+2, status+3, status+4);
   1120	if (rc)
   1121		goto out;
   1122
   1123	state++;
   1124	if (aac_src_wait_sync(dev, status)) {
   1125		rc = 1;
   1126		goto out;
   1127	}
   1128
   1129	state++;
   1130	if (!(status[1] & le32_to_cpu(AAC_OPT_EXTENDED) &&
   1131		(status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)))) {
   1132		rc = 2;
   1133		goto out;
   1134	}
   1135
   1136	if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
   1137		(status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
   1138		dev->sa_firmware = 1;
   1139
   1140	state++;
   1141	rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0,
   1142		 status+0, status+1, status+2, status+3, status+4);
   1143
   1144	if (rc)
   1145		goto out;
   1146
   1147	state++;
   1148	if (aac_src_wait_sync(dev, status)) {
   1149		rc = 3;
   1150		goto out;
   1151	}
   1152
   1153	if (status[1])
   1154		dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n",
   1155			__func__, status[1]);
   1156
   1157	state++;
   1158	rc = aac_src_check_health(dev);
   1159
   1160out:
   1161	dev->in_soft_reset = 0;
   1162	dev->msi_enabled = 0;
   1163	if (rc)
   1164		dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__,
   1165			state_str[state], rc);
   1166
   1167	return rc;
   1168}
   1169/**
   1170 *  aac_srcv_init	-	initialize an SRCv card
   1171 *  @dev: device to configure
   1172 *
   1173 */
   1174
   1175int aac_srcv_init(struct aac_dev *dev)
   1176{
   1177	unsigned long start;
   1178	unsigned long status;
   1179	int restart = 0;
   1180	int instance = dev->id;
   1181	const char *name = dev->name;
   1182
   1183	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
   1184	dev->a_ops.adapter_comm = aac_src_select_comm;
   1185
   1186	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
   1187	if (aac_adapter_ioremap(dev, dev->base_size)) {
   1188		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
   1189		goto error_iounmap;
   1190	}
   1191
   1192	/* Failure to reset here is an option ... */
   1193	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
   1194	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
   1195
   1196	if (dev->init_reset) {
   1197		dev->init_reset = false;
   1198		if (aac_src_soft_reset(dev)) {
   1199			aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET);
   1200			++restart;
   1201		}
   1202	}
   1203
   1204	/*
   1205	 *	Check to see if flash update is running.
   1206	 *	Wait for the adapter to be up and running. Wait up to 5 minutes
   1207	 */
   1208	status = src_readl(dev, MUnit.OMR);
   1209	if (status & FLASH_UPD_PENDING) {
   1210		start = jiffies;
   1211		do {
   1212			status = src_readl(dev, MUnit.OMR);
   1213			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
   1214				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
   1215					dev->name, instance);
   1216				goto error_iounmap;
   1217			}
   1218		} while (!(status & FLASH_UPD_SUCCESS) &&
   1219			 !(status & FLASH_UPD_FAILED));
   1220		/* Delay 10 seconds.
   1221		 * Because right now FW is doing a soft reset,
   1222		 * do not read scratch pad register at this time
   1223		 */
   1224		ssleep(10);
   1225	}
   1226	/*
   1227	 *	Check to see if the board panic'd while booting.
   1228	 */
   1229	status = src_readl(dev, MUnit.OMR);
   1230	if (status & KERNEL_PANIC) {
   1231		if (aac_src_restart_adapter(dev,
   1232			aac_src_check_health(dev), IOP_HWSOFT_RESET))
   1233			goto error_iounmap;
   1234		++restart;
   1235	}
   1236	/*
   1237	 *	Check to see if the board failed any self tests.
   1238	 */
   1239	status = src_readl(dev, MUnit.OMR);
   1240	if (status & SELF_TEST_FAILED) {
   1241		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
   1242		goto error_iounmap;
   1243	}
   1244	/*
   1245	 *	Check to see if the monitor panic'd while booting.
   1246	 */
   1247	if (status & MONITOR_PANIC) {
   1248		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
   1249		goto error_iounmap;
   1250	}
   1251
   1252	start = jiffies;
   1253	/*
   1254	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
   1255	 */
   1256	do {
   1257		status = src_readl(dev, MUnit.OMR);
   1258		if (status == INVALID_OMR)
   1259			status = 0;
   1260
   1261		if ((restart &&
   1262		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
   1263		  time_after(jiffies, start+HZ*startup_timeout)) {
   1264			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
   1265					dev->name, instance, status);
   1266			goto error_iounmap;
   1267		}
   1268		if (!restart &&
   1269		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
   1270		  time_after(jiffies, start + HZ *
   1271		  ((startup_timeout > 60)
   1272		    ? (startup_timeout - 60)
   1273		    : (startup_timeout / 2))))) {
   1274			if (likely(!aac_src_restart_adapter(dev,
   1275				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
   1276				start = jiffies;
   1277			++restart;
   1278		}
   1279		msleep(1);
   1280	} while (!(status & KERNEL_UP_AND_RUNNING));
   1281
   1282	if (restart && aac_commit)
   1283		aac_commit = 1;
   1284	/*
   1285	 *	Fill in the common function dispatch table.
   1286	 */
   1287	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
   1288	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
   1289	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
   1290	dev->a_ops.adapter_notify = aac_src_notify_adapter;
   1291	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
   1292	dev->a_ops.adapter_check_health = aac_src_check_health;
   1293	dev->a_ops.adapter_restart = aac_src_restart_adapter;
   1294	dev->a_ops.adapter_start = aac_src_start_adapter;
   1295
   1296	/*
   1297	 *	First clear out all interrupts.  Then enable the one's that we
   1298	 *	can handle.
   1299	 */
   1300	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
   1301	aac_adapter_disable_int(dev);
   1302	src_writel(dev, MUnit.ODR_C, 0xffffffff);
   1303	aac_adapter_enable_int(dev);
   1304
   1305	if (aac_init_adapter(dev) == NULL)
   1306		goto error_iounmap;
   1307	if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
   1308		(dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
   1309		goto error_iounmap;
   1310	if (dev->msi_enabled)
   1311		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
   1312
   1313	if (aac_acquire_irq(dev))
   1314		goto error_iounmap;
   1315
   1316	dev->dbg_base = pci_resource_start(dev->pdev, 2);
   1317	dev->dbg_base_mapped = dev->regs.src.bar1;
   1318	dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
   1319	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
   1320
   1321	aac_adapter_enable_int(dev);
   1322
   1323	if (!dev->sync_mode) {
   1324		/*
   1325		 * Tell the adapter that all is configured, and it can
   1326		 * start accepting requests
   1327		 */
   1328		aac_src_start_adapter(dev);
   1329	}
   1330	return 0;
   1331
   1332error_iounmap:
   1333
   1334	return -1;
   1335}
   1336
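/*
 *	aac_src_access_devreg	-	interrupt mode register helper
 *	@dev: Adapter
 *	@mode: AAC_ENABLE_INTERRUPT, AAC_ENABLE_MSIX, AAC_CLEAR_AIF_BIT, ...
 *
 *	Central place for the doorbell and mask register writes that
 *	switch between INTx and MSI-X and acknowledge AIF/sync doorbells.
 */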
   1337void aac_src_access_devreg(struct aac_dev *dev, int mode)
   1338{
   1339	u_int32_t val;
   1340
   1341	switch (mode) {
   1342	case AAC_ENABLE_INTERRUPT:
   1343		src_writel(dev,
   1344			   MUnit.OIMR,
   1345			   dev->OIMR = (dev->msi_enabled ?
   1346					AAC_INT_ENABLE_TYPE1_MSIX :
   1347					AAC_INT_ENABLE_TYPE1_INTX));
   1348		break;
   1349
   1350	case AAC_DISABLE_INTERRUPT:
   1351		src_writel(dev,
   1352			   MUnit.OIMR,
   1353			   dev->OIMR = AAC_INT_DISABLE_ALL);
   1354		break;
   1355
   1356	case AAC_ENABLE_MSIX:
   1357		/* set bit 6 */
   1358		val = src_readl(dev, MUnit.IDR);
   1359		val |= 0x40;
   1360		src_writel(dev,  MUnit.IDR, val);
   1361		src_readl(dev, MUnit.IDR);
   1362		/* unmask int. */
   1363		val = PMC_ALL_INTERRUPT_BITS;
   1364		src_writel(dev, MUnit.IOAR, val);
   1365		val = src_readl(dev, MUnit.OIMR);
   1366		src_writel(dev,
   1367			   MUnit.OIMR,
   1368			   val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
   1369		break;
   1370
   1371	case AAC_DISABLE_MSIX:
   1372		/* reset bit 6 */
   1373		val = src_readl(dev, MUnit.IDR);
   1374		val &= ~0x40;
   1375		src_writel(dev, MUnit.IDR, val);
   1376		src_readl(dev, MUnit.IDR);
   1377		break;
   1378
   1379	case AAC_CLEAR_AIF_BIT:
   1380		/* set bit 5 */
   1381		val = src_readl(dev, MUnit.IDR);
   1382		val |= 0x20;
   1383		src_writel(dev, MUnit.IDR, val);
   1384		src_readl(dev, MUnit.IDR);
   1385		break;
   1386
   1387	case AAC_CLEAR_SYNC_BIT:
   1388		/* set bit 4 */
   1389		val = src_readl(dev, MUnit.IDR);
   1390		val |= 0x10;
   1391		src_writel(dev, MUnit.IDR, val);
   1392		src_readl(dev, MUnit.IDR);
   1393		break;
   1394
   1395	case AAC_ENABLE_INTX:
   1396		/* set bit 7 */
   1397		val = src_readl(dev, MUnit.IDR);
   1398		val |= 0x80;
   1399		src_writel(dev, MUnit.IDR, val);
   1400		src_readl(dev, MUnit.IDR);
   1401		/* unmask int. */
   1402		val = PMC_ALL_INTERRUPT_BITS;
   1403		src_writel(dev, MUnit.IOAR, val);
   1404		src_readl(dev, MUnit.IOAR);
   1405		val = src_readl(dev, MUnit.OIMR);
   1406		src_writel(dev, MUnit.OIMR,
   1407				val & (~(PMC_GLOBAL_INT_BIT2)));
   1408		break;
   1409
   1410	default:
   1411		break;
   1412	}
   1413}
   1414
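/*
 *	Read the sync command doorbell.  In INTx mode the legacy ODR is
 *	used, but if only the MSI-X doorbell is set the controller has
 *	switched modes on its own, so follow it.
 */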
   1415static int aac_src_get_sync_status(struct aac_dev *dev)
   1416{
   1417	int msix_val = 0;
   1418	int legacy_val = 0;
   1419
   1420	msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0;
   1421
   1422	if (!dev->msi_enabled) {
   1423		/*
    1424		 * If the legacy int status indicates the cmd is not complete,
    1425		 * sample the MSIx register to see if it indicates cmd complete;
    1426		 * if yes, set the controller in MSIx mode and consider the cmd
    1427		 * completed.
   1428		 */
   1429		legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
   1430		if (!(legacy_val & 1) && msix_val)
   1431			dev->msi_enabled = 1;
   1432		return legacy_val;
   1433	}
   1434
   1435	return msix_val;
   1436}