cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mpt3sas_base.c (254471B)


      1/*
      2 * This is the Fusion MPT base driver providing common API layer interface
      3 * for access to MPT (Message Passing Technology) firmware.
      4 *
      5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
      6 * Copyright (C) 2012-2014  LSI Corporation
      7 * Copyright (C) 2013-2014 Avago Technologies
      8 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
      9 *
     10 * This program is free software; you can redistribute it and/or
     11 * modify it under the terms of the GNU General Public License
     12 * as published by the Free Software Foundation; either version 2
     13 * of the License, or (at your option) any later version.
     14 *
     15 * This program is distributed in the hope that it will be useful,
     16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
     17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     18 * GNU General Public License for more details.
     19 *
     20 * NO WARRANTY
     21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
     22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
     23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
     24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
     25 * solely responsible for determining the appropriateness of using and
     26 * distributing the Program and assumes all risks associated with its
     27 * exercise of rights under this Agreement, including but not limited to
     28 * the risks and costs of program errors, damage to or loss of data,
     29 * programs or equipment, and unavailability or interruption of operations.
     30
     31 * DISCLAIMER OF LIABILITY
     32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
     33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
     35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
     36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
     37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
     38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
     39
     40 * You should have received a copy of the GNU General Public License
     41 * along with this program; if not, write to the Free Software
     42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
     43 * USA.
     44 */
     45
     46#include <linux/kernel.h>
     47#include <linux/module.h>
     48#include <linux/errno.h>
     49#include <linux/init.h>
     50#include <linux/slab.h>
     51#include <linux/types.h>
     52#include <linux/pci.h>
     53#include <linux/kdev_t.h>
     54#include <linux/blkdev.h>
     55#include <linux/delay.h>
     56#include <linux/interrupt.h>
     57#include <linux/dma-mapping.h>
     58#include <linux/io.h>
     59#include <linux/time.h>
     60#include <linux/ktime.h>
     61#include <linux/kthread.h>
     62#include <asm/page.h>        /* To get host page size per arch */
     63#include <linux/aer.h>
     64
     65
     66#include "mpt3sas_base.h"
     67
     68static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
     69
     70
     71#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
     72
     73 /* maximum controller queue depth */
     74#define MAX_HBA_QUEUE_DEPTH	30000
     75#define MAX_CHAIN_DEPTH		100000
     76static int max_queue_depth = -1;
     77module_param(max_queue_depth, int, 0444);
     78MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
     79
     80static int max_sgl_entries = -1;
     81module_param(max_sgl_entries, int, 0444);
     82MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
     83
     84static int msix_disable = -1;
     85module_param(msix_disable, int, 0444);
     86MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
     87
     88static int smp_affinity_enable = 1;
     89module_param(smp_affinity_enable, int, 0444);
     90MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
     91
     92static int max_msix_vectors = -1;
     93module_param(max_msix_vectors, int, 0444);
     94MODULE_PARM_DESC(max_msix_vectors,
     95	" max msix vectors");
     96
     97static int irqpoll_weight = -1;
     98module_param(irqpoll_weight, int, 0444);
     99MODULE_PARM_DESC(irqpoll_weight,
    100	"irq poll weight (default= one fourth of HBA queue depth)");
    101
    102static int mpt3sas_fwfault_debug;
    103MODULE_PARM_DESC(mpt3sas_fwfault_debug,
    104	" enable detection of firmware fault and halt firmware - (default=0)");
    105
    106static int perf_mode = -1;
    107module_param(perf_mode, int, 0444);
    108MODULE_PARM_DESC(perf_mode,
    109	"Performance mode (only for Aero/Sea Generation), options:\n\t\t"
    110	"0 - balanced: high iops mode is enabled &\n\t\t"
    111	"interrupt coalescing is enabled only on high iops queues,\n\t\t"
    112	"1 - iops: high iops mode is disabled &\n\t\t"
    113	"interrupt coalescing is enabled on all queues,\n\t\t"
    114	"2 - latency: high iops mode is disabled &\n\t\t"
    115	"interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
    116	"\t\tdefault - default perf_mode is 'balanced'"
    117	);
    118
    119static int poll_queues;
    120module_param(poll_queues, int, 0444);
    121MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
    122	"This parameter is effective only if host_tagset_enable=1;\n\t\t"
    123	"when poll_queues is enabled,\n\t\t"
    124	"perf_mode is set to latency mode.\n\t\t"
    125	);
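/*
 * Editorial note (not part of the original source): these options are set at
 * module load time, e.g. "modprobe mpt3sas perf_mode=2 poll_queues=4". As the
 * description above states, poll_queues only takes effect together with
 * host_tagset_enable=1 and forces perf_mode to latency mode.
 */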
    126
    127enum mpt3sas_perf_mode {
    128	MPT_PERF_MODE_DEFAULT	= -1,
    129	MPT_PERF_MODE_BALANCED	= 0,
    130	MPT_PERF_MODE_IOPS	= 1,
    131	MPT_PERF_MODE_LATENCY	= 2,
    132};
    133
    134static int
    135_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
    136		u32 ioc_state, int timeout);
    137static int
    138_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
    139static void
    140_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
    141
    142/**
    143 * mpt3sas_base_check_cmd_timeout - Function
    144 *		to check timeout and command termination due
    145 *		to Host reset.
    146 *
    147 * @ioc:	per adapter object.
    148 * @status:	Status of issued command.
    149 * @mpi_request: mf request pointer.
    150 * @sz:		size of buffer.
    151 *
    152 * Return: 1 if a host reset should be issued, 0 otherwise.
    153 */
    154u8
    155mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
    156		u8 status, void *mpi_request, int sz)
    157{
    158	u8 issue_reset = 0;
    159
    160	if (!(status & MPT3_CMD_RESET))
    161		issue_reset = 1;
    162
    163	ioc_err(ioc, "Command %s\n",
    164		issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
    165	_debug_dump_mf(mpi_request, sz);
    166
    167	return issue_reset;
    168}
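/*
 * Editorial note: callers in this file reach the function above through the
 * mpt3sas_check_cmd_timeout() wrapper (see _base_sync_drv_fw_timestamp()
 * below), which stores the return value in a local issue_reset flag that
 * decides whether a host reset is performed.
 */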
    169
    170/**
    171 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
    172 * @val: value to set, as a string.
    173 * @kp: kernel parameter description.
    174 *
    175 * Return: 0 on success, negative error code otherwise.
    176 */
    177static int
    178_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
    179{
    180	int ret = param_set_int(val, kp);
    181	struct MPT3SAS_ADAPTER *ioc;
    182
    183	if (ret)
    184		return ret;
    185
    186	/* global ioc spinlock to protect controller list on list operations */
    187	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
    188	spin_lock(&gioc_lock);
    189	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
    190		ioc->fwfault_debug = mpt3sas_fwfault_debug;
    191	spin_unlock(&gioc_lock);
    192	return 0;
    193}
    194module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
    195	param_get_int, &mpt3sas_fwfault_debug, 0644);
    196
    197/**
    198 * _base_readl_aero - retry readl for max three times.
    199 * @addr: MPT Fusion system interface register address
    200 *
    201 * Retry the readl() for max three times if it gets zero value
    202 * while reading the system interface register.
    203 */
    204static inline u32
    205_base_readl_aero(const volatile void __iomem *addr)
    206{
    207	u32 i = 0, ret_val;
    208
    209	do {
    210		ret_val = readl(addr);
    211		i++;
    212	} while (ret_val == 0 && i < 3);
    213
    214	return ret_val;
    215}
    216
    217static inline u32
    218_base_readl(const volatile void __iomem *addr)
    219{
    220	return readl(addr);
    221}
    222
    223/**
    224 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
    225 *				  in BAR0 space.
    226 *
    227 * @ioc: per adapter object
    228 * @reply: reply message frame(lower 32bit addr)
    229 * @index: System request message index.
    230 */
    231static void
    232_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
    233		u32 index)
    234{
    235	/*
    236	 * 256 is the offset within the sys register.
    237	 * MPI frames start at offset 256. Max MPI frames supported is 32.
    238	 * 32 * 128 = 4K. From here, the clone of the reply free pool for the mcpu starts.
    239	 */
    240	u16 cmd_credit = ioc->facts.RequestCredit + 1;
    241	void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
    242			MPI_FRAME_START_OFFSET +
    243			(cmd_credit * ioc->request_sz) + (index * sizeof(u32));
    244
    245	writel(reply, reply_free_iomem);
    246}
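/*
 * Worked example (editorial, based on the BAR0 layout map in
 * _clone_sg_entries()): with maxCredit 32 (cmd_credit == 32) and 128 byte
 * request frames, the cloned reply free pool starts at 256 + 32 * 128 = 4352
 * bytes into BAR0, i.e. the "4352 - 4864" region of the layout map, and each
 * reply free entry is a 4 byte slot at index * 4 within that region.
 */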
    247
    248/**
    249 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
    250 *				to system/BAR0 region.
    251 *
    252 * @dst_iomem: Pointer to the destination location in BAR0 space.
    253 * @src: Pointer to the Source data.
    254 * @size: Size of data to be copied.
    255 */
    256static void
    257_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
    258{
    259	int i;
    260	u32 *src_virt_mem = (u32 *)src;
    261
    262	for (i = 0; i < size/4; i++)
    263		writel((u32)src_virt_mem[i],
    264				(void __iomem *)dst_iomem + (i * 4));
    265}
    266
    267/**
    268 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
    269 *
    270 * @dst_iomem: Pointer to the destination location in BAR0 space.
    271 * @src: Pointer to the Source data.
    272 * @size: Size of data to be copied.
    273 */
    274static void
    275_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
    276{
    277	int i;
    278	u32 *src_virt_mem = (u32 *)(src);
    279
    280	for (i = 0; i < size/4; i++)
    281		writel((u32)src_virt_mem[i],
    282			(void __iomem *)dst_iomem + (i * 4));
    283}
    284
    285/**
    286 * _base_get_chain - Calculates and Returns virtual chain address
    287 *			 for the provided smid in BAR0 space.
    288 *
    289 * @ioc: per adapter object
    290 * @smid: system request message index
    291 * @sge_chain_count: Scatter gather chain count.
    292 *
    293 * Return: the chain address.
    294 */
    295static inline void __iomem*
    296_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
    297		u8 sge_chain_count)
    298{
    299	void __iomem *base_chain, *chain_virt;
    300	u16 cmd_credit = ioc->facts.RequestCredit + 1;
    301
    302	base_chain  = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
    303		(cmd_credit * ioc->request_sz) +
    304		REPLY_FREE_POOL_SIZE;
    305	chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
    306			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
    307	return chain_virt;
    308}
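/*
 * Worked example (editorial, same assumptions as above plus a 512 byte
 * REPLY_FREE_POOL_SIZE): base_chain lands at 256 + 32 * 128 + 512 = 4864
 * bytes into BAR0, the start of the "4864 - 17152" SGE chain region of the
 * layout map; each smid then owns MaxChainDepth consecutive chain frames of
 * request_sz bytes within that region.
 */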
    309
    310/**
    311 * _base_get_chain_phys - Calculates and Returns physical address
    312 *			in BAR0 for scatter gather chains, for
    313 *			the provided smid.
    314 *
    315 * @ioc: per adapter object
    316 * @smid: system request message index
    317 * @sge_chain_count: Scatter gather chain count.
    318 *
    319 * Return: Physical chain address.
    320 */
    321static inline phys_addr_t
    322_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
    323		u8 sge_chain_count)
    324{
    325	phys_addr_t base_chain_phys, chain_phys;
    326	u16 cmd_credit = ioc->facts.RequestCredit + 1;
    327
    328	base_chain_phys  = ioc->chip_phys + MPI_FRAME_START_OFFSET +
    329		(cmd_credit * ioc->request_sz) +
    330		REPLY_FREE_POOL_SIZE;
    331	chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
    332			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
    333	return chain_phys;
    334}
    335
    336/**
    337 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
    338 *			buffer address for the provided smid.
    339 *			(Each smid can have 64K, starting from 17024)
    340 *
    341 * @ioc: per adapter object
    342 * @smid: system request message index
    343 *
    344 * Return: Pointer to buffer location in BAR0.
    345 */
    346
    347static void __iomem *
    348_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
    349{
    350	u16 cmd_credit = ioc->facts.RequestCredit + 1;
    351	// Added extra 1 to reach end of chain.
    352	void __iomem *chain_end = _base_get_chain(ioc,
    353			cmd_credit + 1,
    354			ioc->facts.MaxChainDepth);
    355	return chain_end + (smid * 64 * 1024);
    356}
    357
    358/**
    359 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
    360 *		Host buffer Physical address for the provided smid.
    361 *		(Each smid can have 64K, starting from 17024)
    362 *
    363 * @ioc: per adapter object
    364 * @smid: system request message index
    365 *
    366 * Return: Pointer to buffer location in BAR0.
    367 */
    368static phys_addr_t
    369_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
    370{
    371	u16 cmd_credit = ioc->facts.RequestCredit + 1;
    372	phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
    373			cmd_credit + 1,
    374			ioc->facts.MaxChainDepth);
    375	return chain_end_phys + (smid * 64 * 1024);
    376}
    377
    378/**
    379 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain
    380 *			lookup list and Provides chain_buffer
    381 *			address for the matching dma address.
    382 *			(Each smid can have 64K starts from 17024)
    383 *
    384 * @ioc: per adapter object
    385 * @chain_buffer_dma: Chain buffer dma address.
    386 *
    387 * Return: Pointer to chain buffer. Or Null on Failure.
    388 */
    389static void *
    390_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
    391		dma_addr_t chain_buffer_dma)
    392{
    393	u16 index, j;
    394	struct chain_tracker *ct;
    395
    396	for (index = 0; index < ioc->scsiio_depth; index++) {
    397		for (j = 0; j < ioc->chains_needed_per_io; j++) {
    398			ct = &ioc->chain_lookup[index].chains_per_smid[j];
    399			if (ct && ct->chain_buffer_dma == chain_buffer_dma)
    400				return ct->chain_buffer;
    401		}
    402	}
    403	ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
    404	return NULL;
    405}
    406
    407/**
    408 * _clone_sg_entries -	MPI EP's scsiio and config requests
    409 *			are handled here. Base function for
    410 *			double buffering, before submitting
    411 *			the requests.
    412 *
    413 * @ioc: per adapter object.
    414 * @mpi_request: mf request pointer.
    415 * @smid: system request message index.
    416 */
    417static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
    418		void *mpi_request, u16 smid)
    419{
    420	Mpi2SGESimple32_t *sgel, *sgel_next;
    421	u32  sgl_flags, sge_chain_count = 0;
    422	bool is_write = false;
    423	u16 i = 0;
    424	void __iomem *buffer_iomem;
    425	phys_addr_t buffer_iomem_phys;
    426	void __iomem *buff_ptr;
    427	phys_addr_t buff_ptr_phys;
    428	void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
    429	void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
    430	phys_addr_t dst_addr_phys;
    431	MPI2RequestHeader_t *request_hdr;
    432	struct scsi_cmnd *scmd;
    433	struct scatterlist *sg_scmd = NULL;
    434	int is_scsiio_req = 0;
    435
    436	request_hdr = (MPI2RequestHeader_t *) mpi_request;
    437
    438	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
    439		Mpi25SCSIIORequest_t *scsiio_request =
    440			(Mpi25SCSIIORequest_t *)mpi_request;
    441		sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
    442		is_scsiio_req = 1;
    443	} else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
    444		Mpi2ConfigRequest_t  *config_req =
    445			(Mpi2ConfigRequest_t *)mpi_request;
    446		sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
    447	} else
    448		return;
    449
    450	/* From the smid we can get the scsi_cmd; once we have sg_scmd,
    451	 * we just need sg_virt and sg_next to get the virtual
    452	 * address associated with sgel->Address.
    453	 */
    454
    455	if (is_scsiio_req) {
    456		/* Get scsi_cmd using smid */
    457		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
    458		if (scmd == NULL) {
    459			ioc_err(ioc, "scmd is NULL\n");
    460			return;
    461		}
    462
    463		/* Get sg_scmd from scmd provided */
    464		sg_scmd = scsi_sglist(scmd);
    465	}
    466
    467	/*
    468	 * 0 - 255	System register
    469	 * 256 - 4352	MPI Frame. (This is based on maxCredit 32)
    470	 * 4352 - 4864	Reply_free pool (512 bytes are reserved
    471	 *		considering maxCredit 32. Replies need extra
    472	 *		room; for the mCPU case, four times
    473	 *		maxCredit is kept).
    474	 * 4864 - 17152	SGE chain element. (32cmd * 3 chain of
    475	 *		128 byte size = 12288)
    476	 * 17152 - x	Host buffer mapped with smid.
    477	 *		(Each smid can have 64K Max IO.)
    478	 * BAR0+Last 1K MSIX Addr and Data
    479	 * Total size in use 2113664 bytes of 4MB BAR0
    480	 */
    481
    482	buffer_iomem = _base_get_buffer_bar0(ioc, smid);
    483	buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);
    484
    485	buff_ptr = buffer_iomem;
    486	buff_ptr_phys = buffer_iomem_phys;
    487	WARN_ON(buff_ptr_phys > U32_MAX);
    488
    489	if (le32_to_cpu(sgel->FlagsLength) &
    490			(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
    491		is_write = true;
    492
    493	for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
    494
    495		sgl_flags =
    496		    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);
    497
    498		switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
    499		case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
    500			/*
    501			 * Helper function which on passing
    502			 * chain_buffer_dma returns chain_buffer. Get
    503			 * the virtual address for sgel->Address
    504			 */
    505			sgel_next =
    506				_base_get_chain_buffer_dma_to_chain_buffer(ioc,
    507						le32_to_cpu(sgel->Address));
    508			if (sgel_next == NULL)
    509				return;
    510			/*
    511			 * This is copying a 128 byte chain
    512			 * frame (not a host buffer)
    513			 */
    514			dst_chain_addr[sge_chain_count] =
    515				_base_get_chain(ioc,
    516					smid, sge_chain_count);
    517			src_chain_addr[sge_chain_count] =
    518						(void *) sgel_next;
    519			dst_addr_phys = _base_get_chain_phys(ioc,
    520						smid, sge_chain_count);
    521			WARN_ON(dst_addr_phys > U32_MAX);
    522			sgel->Address =
    523				cpu_to_le32(lower_32_bits(dst_addr_phys));
    524			sgel = sgel_next;
    525			sge_chain_count++;
    526			break;
    527		case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
    528			if (is_write) {
    529				if (is_scsiio_req) {
    530					_base_clone_to_sys_mem(buff_ptr,
    531					    sg_virt(sg_scmd),
    532					    (le32_to_cpu(sgel->FlagsLength) &
    533					    0x00ffffff));
    534					/*
    535					 * FIXME: this relies on a zero
    536					 * PCI mem_offset.
    537					 */
    538					sgel->Address =
    539					    cpu_to_le32((u32)buff_ptr_phys);
    540				} else {
    541					_base_clone_to_sys_mem(buff_ptr,
    542					    ioc->config_vaddr,
    543					    (le32_to_cpu(sgel->FlagsLength) &
    544					    0x00ffffff));
    545					sgel->Address =
    546					    cpu_to_le32((u32)buff_ptr_phys);
    547				}
    548			}
    549			buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
    550			    0x00ffffff);
    551			buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
    552			    0x00ffffff);
    553			if ((le32_to_cpu(sgel->FlagsLength) &
    554			    (MPI2_SGE_FLAGS_END_OF_BUFFER
    555					<< MPI2_SGE_FLAGS_SHIFT)))
    556				goto eob_clone_chain;
    557			else {
    558				/*
    559				 * Every single element in MPT will have an
    560				 * associated sg_next. Better to sanity-check that
    561				 * sg_next is not NULL, though it would be a bug
    562				 * if it were.
    563				 */
    564				if (is_scsiio_req) {
    565					sg_scmd = sg_next(sg_scmd);
    566					if (sg_scmd)
    567						sgel++;
    568					else
    569						goto eob_clone_chain;
    570				}
    571			}
    572			break;
    573		}
    574	}
    575
    576eob_clone_chain:
    577	for (i = 0; i < sge_chain_count; i++) {
    578		if (is_scsiio_req)
    579			_base_clone_to_sys_mem(dst_chain_addr[i],
    580				src_chain_addr[i], ioc->request_sz);
    581	}
    582}
    583
    584/**
    585 *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
    586 * @arg: input argument, used to derive ioc
    587 *
    588 * Return:
    589 * 0 if controller is removed from pci subsystem.
    590 * -1 otherwise.
    591 */
    592static int mpt3sas_remove_dead_ioc_func(void *arg)
    593{
    594	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
    595	struct pci_dev *pdev;
    596
    597	if (!ioc)
    598		return -1;
    599
    600	pdev = ioc->pdev;
    601	if (!pdev)
    602		return -1;
    603	pci_stop_and_remove_bus_device_locked(pdev);
    604	return 0;
    605}
    606
    607/**
    608 * _base_sync_drv_fw_timestamp - Sync Driver-FW TimeStamp.
    609 * @ioc: Per Adapter Object
    610 *
    611 * Return: nothing.
    612 */
    613static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
    614{
    615	Mpi26IoUnitControlRequest_t *mpi_request;
    616	Mpi26IoUnitControlReply_t *mpi_reply;
    617	u16 smid;
    618	ktime_t current_time;
    619	u64 TimeStamp = 0;
    620	u8 issue_reset = 0;
    621
    622	mutex_lock(&ioc->scsih_cmds.mutex);
    623	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
    624		ioc_err(ioc, "scsih_cmd in use %s\n", __func__);
    625		goto out;
    626	}
    627	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
    628	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
    629	if (!smid) {
    630		ioc_err(ioc, "Failed obtaining a smid %s\n", __func__);
    631		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
    632		goto out;
    633	}
    634	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
    635	ioc->scsih_cmds.smid = smid;
    636	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
    637	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
    638	mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER;
    639	mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
    640	current_time = ktime_get_real();
    641	TimeStamp = ktime_to_ms(current_time);
    642	mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32);
    643	mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
    644	init_completion(&ioc->scsih_cmds.done);
    645	ioc->put_smid_default(ioc, smid);
    646	dinitprintk(ioc, ioc_info(ioc,
    647	    "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
    648	    TimeStamp));
    649	wait_for_completion_timeout(&ioc->scsih_cmds.done,
    650		MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ);
    651	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
    652		mpt3sas_check_cmd_timeout(ioc,
    653		    ioc->scsih_cmds.status, mpi_request,
    654		    sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset);
    655		goto issue_host_reset;
    656	}
    657	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
    658		mpi_reply = ioc->scsih_cmds.reply;
    659		dinitprintk(ioc, ioc_info(ioc,
    660		    "Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
    661		    le16_to_cpu(mpi_reply->IOCStatus),
    662		    le32_to_cpu(mpi_reply->IOCLogInfo)));
    663	}
    664issue_host_reset:
    665	if (issue_reset)
    666		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
    667	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
    668out:
    669	mutex_unlock(&ioc->scsih_cmds.mutex);
    670}
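/*
 * Editorial note: the timestamp sent above is the wall clock time in
 * milliseconds; the code places its upper 32 bits in Reserved7 and its lower
 * 32 bits in IOCParameterValue of the MPI26_CTRL_OP_SET_IOC_PARAMETER
 * request.
 */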
    671
    672/**
    673 * _base_fault_reset_work - workq handling ioc fault conditions
    674 * @work: input argument, used to derive ioc
    675 *
    676 * Context: sleep.
    677 */
    678static void
    679_base_fault_reset_work(struct work_struct *work)
    680{
    681	struct MPT3SAS_ADAPTER *ioc =
    682	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
    683	unsigned long	 flags;
    684	u32 doorbell;
    685	int rc;
    686	struct task_struct *p;
    687
    688
    689	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
    690	if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
    691			ioc->pci_error_recovery)
    692		goto rearm_timer;
    693	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
    694
    695	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
    696	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
    697		ioc_err(ioc, "SAS host is non-operational !!!!\n");
    698
    699		/* It may be possible that EEH recovery can resolve some of the
    700		 * pci bus failure issues rather than removing the dead ioc
    701		 * function outright because the controller is in a
    702		 * non-operational state. So priority is given to EEH recovery
    703		 * here. If it does not resolve the issue, the mpt3sas driver
    704		 * will consider this controller non-operational and remove the
    705		 * dead ioc function.
    706		 */
    707		if (ioc->non_operational_loop++ < 5) {
    708			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
    709							 flags);
    710			goto rearm_timer;
    711		}
    712
    713		/*
    714		 * Call _scsih_flush_pending_cmds callback so that we flush all
    715		 * pending commands back to OS. This call is required to avoid
    716		 * deadlock at block layer. Dead IOC will fail to do diag reset,
    717		 * and this call is safe since dead ioc will never return any
    718		 * command back from HW.
    719		 */
    720		mpt3sas_base_pause_mq_polling(ioc);
    721		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
    722		/*
    723		 * Set remove_host flag early since kernel thread will
    724		 * take some time to execute.
    725		 */
    726		ioc->remove_host = 1;
    727		/*Remove the Dead Host */
    728		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
    729		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
    730		if (IS_ERR(p))
    731			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
    732				__func__);
    733		else
    734			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
    735				__func__);
    736		return; /* don't rearm timer */
    737	}
    738
    739	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
    740		u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
    741		    ioc->manu_pg11.CoreDumpTOSec :
    742		    MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
    743
    744		timeout /= (FAULT_POLLING_INTERVAL/1000);
    745
    746		if (ioc->ioc_coredump_loop == 0) {
    747			mpt3sas_print_coredump_info(ioc,
    748			    doorbell & MPI2_DOORBELL_DATA_MASK);
    749			/* do not accept any IOs and disable the interrupts */
    750			spin_lock_irqsave(
    751			    &ioc->ioc_reset_in_progress_lock, flags);
    752			ioc->shost_recovery = 1;
    753			spin_unlock_irqrestore(
    754			    &ioc->ioc_reset_in_progress_lock, flags);
    755			mpt3sas_base_mask_interrupts(ioc);
    756			mpt3sas_base_pause_mq_polling(ioc);
    757			_base_clear_outstanding_commands(ioc);
    758		}
    759
    760		ioc_info(ioc, "%s: CoreDump loop %d.",
    761		    __func__, ioc->ioc_coredump_loop);
    762
    763		/* Wait until CoreDump completes or times out */
    764		if (ioc->ioc_coredump_loop++ < timeout) {
    765			spin_lock_irqsave(
    766			    &ioc->ioc_reset_in_progress_lock, flags);
    767			goto rearm_timer;
    768		}
    769	}
    770
    771	if (ioc->ioc_coredump_loop) {
    772		if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
    773			ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
    774			    __func__, ioc->ioc_coredump_loop);
    775		else
    776			ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
    777			    __func__, ioc->ioc_coredump_loop);
    778		ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
    779	}
    780	ioc->non_operational_loop = 0;
    781	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
    782		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
    783		ioc_warn(ioc, "%s: hard reset: %s\n",
    784			 __func__, rc == 0 ? "success" : "failed");
    785		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
    786		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
    787			mpt3sas_print_fault_code(ioc, doorbell &
    788			    MPI2_DOORBELL_DATA_MASK);
    789		} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
    790		    MPI2_IOC_STATE_COREDUMP)
    791			mpt3sas_print_coredump_info(ioc, doorbell &
    792			    MPI2_DOORBELL_DATA_MASK);
    793		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
    794		    MPI2_IOC_STATE_OPERATIONAL)
    795			return; /* don't rearm timer */
    796	}
    797	ioc->ioc_coredump_loop = 0;
    798	if (ioc->time_sync_interval &&
    799	    ++ioc->timestamp_update_count >= ioc->time_sync_interval) {
    800		ioc->timestamp_update_count = 0;
    801		_base_sync_drv_fw_timestamp(ioc);
    802	}
    803	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
    804 rearm_timer:
    805	if (ioc->fault_reset_work_q)
    806		queue_delayed_work(ioc->fault_reset_work_q,
    807		    &ioc->fault_reset_work,
    808		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
    809	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
    810}
    811
    812/**
    813 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
    814 * @ioc: per adapter object
    815 *
    816 * Context: sleep.
    817 */
    818void
    819mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
    820{
    821	unsigned long	 flags;
    822
    823	if (ioc->fault_reset_work_q)
    824		return;
    825
    826	ioc->timestamp_update_count = 0;
    827	/* initialize fault polling */
    828
    829	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
    830	snprintf(ioc->fault_reset_work_q_name,
    831	    sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
    832	    ioc->driver_name, ioc->id);
    833	ioc->fault_reset_work_q =
    834		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
    835	if (!ioc->fault_reset_work_q) {
    836		ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
    837		return;
    838	}
    839	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
    840	if (ioc->fault_reset_work_q)
    841		queue_delayed_work(ioc->fault_reset_work_q,
    842		    &ioc->fault_reset_work,
    843		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
    844	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
    845}
    846
    847/**
    848 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
    849 * @ioc: per adapter object
    850 *
    851 * Context: sleep.
    852 */
    853void
    854mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
    855{
    856	unsigned long flags;
    857	struct workqueue_struct *wq;
    858
    859	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
    860	wq = ioc->fault_reset_work_q;
    861	ioc->fault_reset_work_q = NULL;
    862	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
    863	if (wq) {
    864		if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
    865			flush_workqueue(wq);
    866		destroy_workqueue(wq);
    867	}
    868}
    869
    870/**
    871 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
    872 * @ioc: per adapter object
    873 * @fault_code: fault code
    874 */
    875void
    876mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
    877{
    878	ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
    879}
    880
    881/**
    882 * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
    883 * @ioc: per adapter object
    884 * @fault_code: fault code
    885 *
    886 * Return: nothing.
    887 */
    888void
    889mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
    890{
    891	ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
    892}
    893
    894/**
    895 * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
    896 * completes or times out
    897 * @ioc: per adapter object
    898 * @caller: caller function name
    899 *
    900 * Return: 0 for success, non-zero for failure.
    901 */
    902int
    903mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
    904		const char *caller)
    905{
    906	u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
    907			ioc->manu_pg11.CoreDumpTOSec :
    908			MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
    909
    910	int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
    911					timeout);
    912
    913	if (ioc_state)
    914		ioc_err(ioc,
    915		    "%s: CoreDump timed out. (ioc_state=0x%x)\n",
    916		    caller, ioc_state);
    917	else
    918		ioc_info(ioc,
    919		    "%s: CoreDump completed. (ioc_state=0x%x)\n",
    920		    caller, ioc_state);
    921
    922	return ioc_state;
    923}
    924
    925/**
    926 * mpt3sas_halt_firmware - halts mpt controller firmware
    927 * @ioc: per adapter object
    928 *
    929 * For debugging timeout related issues.  Writing 0xC0FFEE00
    930 * to the doorbell register will halt controller firmware. The
    931 * purpose is to stop both driver and firmware so the end user
    932 * can obtain a ring buffer from the controller UART.
    933 */
    934void
    935mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
    936{
    937	u32 doorbell;
    938
    939	if (!ioc->fwfault_debug)
    940		return;
    941
    942	dump_stack();
    943
    944	doorbell = ioc->base_readl(&ioc->chip->Doorbell);
    945	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
    946		mpt3sas_print_fault_code(ioc, doorbell &
    947		    MPI2_DOORBELL_DATA_MASK);
    948	} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
    949	    MPI2_IOC_STATE_COREDUMP) {
    950		mpt3sas_print_coredump_info(ioc, doorbell &
    951		    MPI2_DOORBELL_DATA_MASK);
    952	} else {
    953		writel(0xC0FFEE00, &ioc->chip->Doorbell);
    954		ioc_err(ioc, "Firmware is halted due to command timeout\n");
    955	}
    956
    957	if (ioc->fwfault_debug == 2)
    958		for (;;)
    959			;
    960	else
    961		panic("panic in %s\n", __func__);
    962}
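/*
 * Editorial note: per the code above, fwfault_debug == 0 leaves the firmware
 * untouched, fwfault_debug == 2 spins forever after halting the firmware (so
 * the UART ring buffer can be captured), and any other non-zero value panics
 * the kernel.
 */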
    963
    964/**
    965 * _base_sas_ioc_info - verbose translation of the ioc status
    966 * @ioc: per adapter object
    967 * @mpi_reply: reply mf payload returned from firmware
    968 * @request_hdr: request mf
    969 */
    970static void
    971_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
    972	MPI2RequestHeader_t *request_hdr)
    973{
    974	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
    975	    MPI2_IOCSTATUS_MASK;
    976	char *desc = NULL;
    977	u16 frame_sz;
    978	char *func_str = NULL;
    979
    980	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
    981	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
    982	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
    983	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
    984		return;
    985
    986	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
    987		return;
    988	/*
    989	 * Older firmware versions don't support driver trigger pages,
    990	 * so skip displaying the 'config invalid type'
    991	 * error message.
    992	 */
    993	if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
    994		Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr;
    995
    996		if ((rqst->ExtPageType ==
    997		    MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER) &&
    998		    !(ioc->logging_level & MPT_DEBUG_CONFIG)) {
    999			return;
   1000		}
   1001	}
   1002
   1003	switch (ioc_status) {
   1004
   1005/****************************************************************************
   1006*  Common IOCStatus values for all replies
   1007****************************************************************************/
   1008
   1009	case MPI2_IOCSTATUS_INVALID_FUNCTION:
   1010		desc = "invalid function";
   1011		break;
   1012	case MPI2_IOCSTATUS_BUSY:
   1013		desc = "busy";
   1014		break;
   1015	case MPI2_IOCSTATUS_INVALID_SGL:
   1016		desc = "invalid sgl";
   1017		break;
   1018	case MPI2_IOCSTATUS_INTERNAL_ERROR:
   1019		desc = "internal error";
   1020		break;
   1021	case MPI2_IOCSTATUS_INVALID_VPID:
   1022		desc = "invalid vpid";
   1023		break;
   1024	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
   1025		desc = "insufficient resources";
   1026		break;
   1027	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
   1028		desc = "insufficient power";
   1029		break;
   1030	case MPI2_IOCSTATUS_INVALID_FIELD:
   1031		desc = "invalid field";
   1032		break;
   1033	case MPI2_IOCSTATUS_INVALID_STATE:
   1034		desc = "invalid state";
   1035		break;
   1036	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
   1037		desc = "op state not supported";
   1038		break;
   1039
   1040/****************************************************************************
   1041*  Config IOCStatus values
   1042****************************************************************************/
   1043
   1044	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
   1045		desc = "config invalid action";
   1046		break;
   1047	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
   1048		desc = "config invalid type";
   1049		break;
   1050	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
   1051		desc = "config invalid page";
   1052		break;
   1053	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
   1054		desc = "config invalid data";
   1055		break;
   1056	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
   1057		desc = "config no defaults";
   1058		break;
   1059	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
   1060		desc = "config cant commit";
   1061		break;
   1062
   1063/****************************************************************************
   1064*  SCSI IO Reply
   1065****************************************************************************/
   1066
   1067	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
   1068	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
   1069	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
   1070	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
   1071	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
   1072	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
   1073	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
   1074	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
   1075	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
   1076	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
   1077	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
   1078	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
   1079		break;
   1080
   1081/****************************************************************************
   1082*  For use by SCSI Initiator and SCSI Target end-to-end data protection
   1083****************************************************************************/
   1084
   1085	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
   1086		desc = "eedp guard error";
   1087		break;
   1088	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
   1089		desc = "eedp ref tag error";
   1090		break;
   1091	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
   1092		desc = "eedp app tag error";
   1093		break;
   1094
   1095/****************************************************************************
   1096*  SCSI Target values
   1097****************************************************************************/
   1098
   1099	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
   1100		desc = "target invalid io index";
   1101		break;
   1102	case MPI2_IOCSTATUS_TARGET_ABORTED:
   1103		desc = "target aborted";
   1104		break;
   1105	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
   1106		desc = "target no conn retryable";
   1107		break;
   1108	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
   1109		desc = "target no connection";
   1110		break;
   1111	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
   1112		desc = "target xfer count mismatch";
   1113		break;
   1114	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
   1115		desc = "target data offset error";
   1116		break;
   1117	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
   1118		desc = "target too much write data";
   1119		break;
   1120	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
   1121		desc = "target iu too short";
   1122		break;
   1123	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
   1124		desc = "target ack nak timeout";
   1125		break;
   1126	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
   1127		desc = "target nak received";
   1128		break;
   1129
   1130/****************************************************************************
   1131*  Serial Attached SCSI values
   1132****************************************************************************/
   1133
   1134	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
   1135		desc = "smp request failed";
   1136		break;
   1137	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
   1138		desc = "smp data overrun";
   1139		break;
   1140
   1141/****************************************************************************
   1142*  Diagnostic Buffer Post / Diagnostic Release values
   1143****************************************************************************/
   1144
   1145	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
   1146		desc = "diagnostic released";
   1147		break;
   1148	default:
   1149		break;
   1150	}
   1151
   1152	if (!desc)
   1153		return;
   1154
   1155	switch (request_hdr->Function) {
   1156	case MPI2_FUNCTION_CONFIG:
   1157		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
   1158		func_str = "config_page";
   1159		break;
   1160	case MPI2_FUNCTION_SCSI_TASK_MGMT:
   1161		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
   1162		func_str = "task_mgmt";
   1163		break;
   1164	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
   1165		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
   1166		func_str = "sas_iounit_ctl";
   1167		break;
   1168	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
   1169		frame_sz = sizeof(Mpi2SepRequest_t);
   1170		func_str = "enclosure";
   1171		break;
   1172	case MPI2_FUNCTION_IOC_INIT:
   1173		frame_sz = sizeof(Mpi2IOCInitRequest_t);
   1174		func_str = "ioc_init";
   1175		break;
   1176	case MPI2_FUNCTION_PORT_ENABLE:
   1177		frame_sz = sizeof(Mpi2PortEnableRequest_t);
   1178		func_str = "port_enable";
   1179		break;
   1180	case MPI2_FUNCTION_SMP_PASSTHROUGH:
   1181		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
   1182		func_str = "smp_passthru";
   1183		break;
   1184	case MPI2_FUNCTION_NVME_ENCAPSULATED:
   1185		frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
   1186		    ioc->sge_size;
   1187		func_str = "nvme_encapsulated";
   1188		break;
   1189	default:
   1190		frame_sz = 32;
   1191		func_str = "unknown";
   1192		break;
   1193	}
   1194
   1195	ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
   1196		 desc, ioc_status, request_hdr, func_str);
   1197
   1198	_debug_dump_mf(request_hdr, frame_sz/4);
   1199}
   1200
   1201/**
   1202 * _base_display_event_data - verbose translation of firmware async events
   1203 * @ioc: per adapter object
   1204 * @mpi_reply: reply mf payload returned from firmware
   1205 */
   1206static void
   1207_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
   1208	Mpi2EventNotificationReply_t *mpi_reply)
   1209{
   1210	char *desc = NULL;
   1211	u16 event;
   1212
   1213	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
   1214		return;
   1215
   1216	event = le16_to_cpu(mpi_reply->Event);
   1217
   1218	switch (event) {
   1219	case MPI2_EVENT_LOG_DATA:
   1220		desc = "Log Data";
   1221		break;
   1222	case MPI2_EVENT_STATE_CHANGE:
   1223		desc = "Status Change";
   1224		break;
   1225	case MPI2_EVENT_HARD_RESET_RECEIVED:
   1226		desc = "Hard Reset Received";
   1227		break;
   1228	case MPI2_EVENT_EVENT_CHANGE:
   1229		desc = "Event Change";
   1230		break;
   1231	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
   1232		desc = "Device Status Change";
   1233		break;
   1234	case MPI2_EVENT_IR_OPERATION_STATUS:
   1235		if (!ioc->hide_ir_msg)
   1236			desc = "IR Operation Status";
   1237		break;
   1238	case MPI2_EVENT_SAS_DISCOVERY:
   1239	{
   1240		Mpi2EventDataSasDiscovery_t *event_data =
   1241		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
   1242		ioc_info(ioc, "Discovery: (%s)",
   1243			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
   1244			 "start" : "stop");
   1245		if (event_data->DiscoveryStatus)
   1246			pr_cont(" discovery_status(0x%08x)",
   1247			    le32_to_cpu(event_data->DiscoveryStatus));
   1248		pr_cont("\n");
   1249		return;
   1250	}
   1251	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
   1252		desc = "SAS Broadcast Primitive";
   1253		break;
   1254	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
   1255		desc = "SAS Init Device Status Change";
   1256		break;
   1257	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
   1258		desc = "SAS Init Table Overflow";
   1259		break;
   1260	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
   1261		desc = "SAS Topology Change List";
   1262		break;
   1263	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
   1264		desc = "SAS Enclosure Device Status Change";
   1265		break;
   1266	case MPI2_EVENT_IR_VOLUME:
   1267		if (!ioc->hide_ir_msg)
   1268			desc = "IR Volume";
   1269		break;
   1270	case MPI2_EVENT_IR_PHYSICAL_DISK:
   1271		if (!ioc->hide_ir_msg)
   1272			desc = "IR Physical Disk";
   1273		break;
   1274	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
   1275		if (!ioc->hide_ir_msg)
   1276			desc = "IR Configuration Change List";
   1277		break;
   1278	case MPI2_EVENT_LOG_ENTRY_ADDED:
   1279		if (!ioc->hide_ir_msg)
   1280			desc = "Log Entry Added";
   1281		break;
   1282	case MPI2_EVENT_TEMP_THRESHOLD:
   1283		desc = "Temperature Threshold";
   1284		break;
   1285	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
   1286		desc = "Cable Event";
   1287		break;
   1288	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
   1289		desc = "SAS Device Discovery Error";
   1290		break;
   1291	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
   1292		desc = "PCIE Device Status Change";
   1293		break;
   1294	case MPI2_EVENT_PCIE_ENUMERATION:
   1295	{
   1296		Mpi26EventDataPCIeEnumeration_t *event_data =
   1297			(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
   1298		ioc_info(ioc, "PCIE Enumeration: (%s)",
   1299			 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
   1300			 "start" : "stop");
   1301		if (event_data->EnumerationStatus)
   1302			pr_cont("enumeration_status(0x%08x)",
   1303				le32_to_cpu(event_data->EnumerationStatus));
   1304		pr_cont("\n");
   1305		return;
   1306	}
   1307	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
   1308		desc = "PCIE Topology Change List";
   1309		break;
   1310	}
   1311
   1312	if (!desc)
   1313		return;
   1314
   1315	ioc_info(ioc, "%s\n", desc);
   1316}
   1317
   1318/**
   1319 * _base_sas_log_info - verbose translation of firmware log info
   1320 * @ioc: per adapter object
   1321 * @log_info: log info
   1322 */
   1323static void
   1324_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
   1325{
   1326	union loginfo_type {
   1327		u32	loginfo;
   1328		struct {
   1329			u32	subcode:16;
   1330			u32	code:8;
   1331			u32	originator:4;
   1332			u32	bus_type:4;
   1333		} dw;
   1334	};
   1335	union loginfo_type sas_loginfo;
   1336	char *originator_str = NULL;
   1337
   1338	sas_loginfo.loginfo = log_info;
   1339	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
   1340		return;
   1341
   1342	/* eat the nexus loss loginfo */
   1343	if (log_info == 0x31170000)
   1344		return;
   1345
   1346	/* eat the loginfos associated with task aborts */
   1347	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
   1348	    0x31140000 || log_info == 0x31130000))
   1349		return;
   1350
   1351	switch (sas_loginfo.dw.originator) {
   1352	case 0:
   1353		originator_str = "IOP";
   1354		break;
   1355	case 1:
   1356		originator_str = "PL";
   1357		break;
   1358	case 2:
   1359		if (!ioc->hide_ir_msg)
   1360			originator_str = "IR";
   1361		else
   1362			originator_str = "WarpDrive";
   1363		break;
   1364	}
   1365
   1366	ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
   1367		 log_info,
   1368		 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
   1369}
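/*
 * Worked example (editorial): the nexus loss loginfo 0x31170000 filtered
 * above decodes, per the loginfo_type bitfields, to bus_type 3 (SAS),
 * originator 1 (PL), code 0x17 and subcode 0x0000.
 */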
   1370
   1371/**
   1372 * _base_display_reply_info - handle reply descriptors depending on IOC Status
   1373 * @ioc: per adapter object
   1374 * @smid: system request message index
   1375 * @msix_index: MSIX table index supplied by the OS
   1376 * @reply: reply message frame (lower 32bit addr)
   1377 */
   1378static void
   1379_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
   1380	u32 reply)
   1381{
   1382	MPI2DefaultReply_t *mpi_reply;
   1383	u16 ioc_status;
   1384	u32 loginfo = 0;
   1385
   1386	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
   1387	if (unlikely(!mpi_reply)) {
   1388		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
   1389			__FILE__, __LINE__, __func__);
   1390		return;
   1391	}
   1392	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
   1393
   1394	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
   1395	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
   1396		_base_sas_ioc_info(ioc , mpi_reply,
   1397		   mpt3sas_base_get_msg_frame(ioc, smid));
   1398	}
   1399
   1400	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
   1401		loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
   1402		_base_sas_log_info(ioc, loginfo);
   1403	}
   1404
   1405	if (ioc_status || loginfo) {
   1406		ioc_status &= MPI2_IOCSTATUS_MASK;
   1407		mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
   1408	}
   1409}
   1410
   1411/**
   1412 * mpt3sas_base_done - base internal command completion routine
   1413 * @ioc: per adapter object
   1414 * @smid: system request message index
   1415 * @msix_index: MSIX table index supplied by the OS
   1416 * @reply: reply message frame(lower 32bit addr)
   1417 *
   1418 * Return:
   1419 * 1 meaning mf should be freed from _base_interrupt
   1420 * 0 means the mf is freed from this function.
   1421 */
   1422u8
   1423mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
   1424	u32 reply)
   1425{
   1426	MPI2DefaultReply_t *mpi_reply;
   1427
   1428	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
   1429	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
   1430		return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
   1431
   1432	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
   1433		return 1;
   1434
   1435	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
   1436	if (mpi_reply) {
   1437		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
   1438		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
   1439	}
   1440	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
   1441
   1442	complete(&ioc->base_cmds.done);
   1443	return 1;
   1444}
   1445
   1446/**
   1447 * _base_async_event - main callback handler for firmware async events
   1448 * @ioc: per adapter object
   1449 * @msix_index: MSIX table index supplied by the OS
   1450 * @reply: reply message frame(lower 32bit addr)
   1451 *
   1452 * Return:
   1453 * 1 meaning mf should be freed from _base_interrupt
   1454 * 0 means the mf is freed from this function.
   1455 */
   1456static u8
   1457_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
   1458{
   1459	Mpi2EventNotificationReply_t *mpi_reply;
   1460	Mpi2EventAckRequest_t *ack_request;
   1461	u16 smid;
   1462	struct _event_ack_list *delayed_event_ack;
   1463
   1464	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
   1465	if (!mpi_reply)
   1466		return 1;
   1467	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
   1468		return 1;
   1469
   1470	_base_display_event_data(ioc, mpi_reply);
   1471
   1472	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
   1473		goto out;
   1474	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
   1475	if (!smid) {
   1476		delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
   1477					GFP_ATOMIC);
   1478		if (!delayed_event_ack)
   1479			goto out;
   1480		INIT_LIST_HEAD(&delayed_event_ack->list);
   1481		delayed_event_ack->Event = mpi_reply->Event;
   1482		delayed_event_ack->EventContext = mpi_reply->EventContext;
   1483		list_add_tail(&delayed_event_ack->list,
   1484				&ioc->delayed_event_ack_list);
   1485		dewtprintk(ioc,
   1486			   ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
   1487				    le16_to_cpu(mpi_reply->Event)));
   1488		goto out;
   1489	}
   1490
   1491	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
   1492	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
   1493	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
   1494	ack_request->Event = mpi_reply->Event;
   1495	ack_request->EventContext = mpi_reply->EventContext;
   1496	ack_request->VF_ID = 0;  /* TODO */
   1497	ack_request->VP_ID = 0;
   1498	ioc->put_smid_default(ioc, smid);
   1499
   1500 out:
   1501
   1502	/* scsih callback handler */
   1503	mpt3sas_scsih_event_callback(ioc, msix_index, reply);
   1504
   1505	/* ctl callback handler */
   1506	mpt3sas_ctl_event_callback(ioc, msix_index, reply);
   1507
   1508	return 1;
   1509}
   1510
   1511static struct scsiio_tracker *
   1512_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
   1513{
   1514	struct scsi_cmnd *cmd;
   1515
   1516	if (WARN_ON(!smid) ||
   1517	    WARN_ON(smid >= ioc->hi_priority_smid))
   1518		return NULL;
   1519
   1520	cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
   1521	if (cmd)
   1522		return scsi_cmd_priv(cmd);
   1523
   1524	return NULL;
   1525}
   1526
   1527/**
   1528 * _base_get_cb_idx - obtain the callback index
   1529 * @ioc: per adapter object
   1530 * @smid: system request message index
   1531 *
   1532 * Return: callback index.
   1533 */
   1534static u8
   1535_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
   1536{
   1537	int i;
   1538	u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
   1539	u8 cb_idx = 0xFF;
   1540
   1541	if (smid < ioc->hi_priority_smid) {
   1542		struct scsiio_tracker *st;
   1543
   1544		if (smid < ctl_smid) {
   1545			st = _get_st_from_smid(ioc, smid);
   1546			if (st)
   1547				cb_idx = st->cb_idx;
   1548		} else if (smid == ctl_smid)
   1549			cb_idx = ioc->ctl_cb_idx;
   1550	} else if (smid < ioc->internal_smid) {
   1551		i = smid - ioc->hi_priority_smid;
   1552		cb_idx = ioc->hpr_lookup[i].cb_idx;
   1553	} else if (smid <= ioc->hba_queue_depth) {
   1554		i = smid - ioc->internal_smid;
   1555		cb_idx = ioc->internal_lookup[i].cb_idx;
   1556	}
   1557	return cb_idx;
   1558}
   1559
   1560/**
   1561 * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues
   1562 *				when driver is flushing out the IOs.
   1563 * @ioc: per adapter object
   1564 *
   1565 * Pause polling on the mq poll (io uring) queues when driver is flushing
   1566 * out the IOs. Otherwise we may see the race condition of completing the same
   1567 * IO from two paths.
   1568 *
   1569 * Returns nothing.
   1570 */
   1571void
   1572mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
   1573{
   1574	int iopoll_q_count =
   1575	    ioc->reply_queue_count - ioc->iopoll_q_start_index;
   1576	int qid;
   1577
   1578	for (qid = 0; qid < iopoll_q_count; qid++)
   1579		atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1);
   1580
   1581	/*
   1582	 * wait for current poll to complete.
   1583	 */
   1584	for (qid = 0; qid < iopoll_q_count; qid++) {
   1585		while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
   1586			cpu_relax();
   1587			udelay(500);
   1588		}
   1589	}
   1590}
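/*
 * Typical pairing (illustrative sketch, not a call site in this file):
 *
 *	mpt3sas_base_pause_mq_polling(ioc);
 *	... flush outstanding IOs or perform the reset ...
 *	mpt3sas_base_resume_mq_polling(ioc);
 */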
   1591
   1592/**
   1593 * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues.
   1594 * @ioc: per adapter object
   1595 *
   1596 * Returns nothing.
   1597 */
   1598void
   1599mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc)
   1600{
   1601	int iopoll_q_count =
   1602	    ioc->reply_queue_count - ioc->iopoll_q_start_index;
   1603	int qid;
   1604
   1605	for (qid = 0; qid < iopoll_q_count; qid++)
   1606		atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0);
   1607}
   1608
   1609/**
   1610 * mpt3sas_base_mask_interrupts - disable interrupts
   1611 * @ioc: per adapter object
   1612 *
   1613 * Disabling ResetIRQ, Reply and Doorbell Interrupts
   1614 */
   1615void
   1616mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
   1617{
   1618	u32 him_register;
   1619
   1620	ioc->mask_interrupts = 1;
   1621	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
    1622	him_register |= MPI2_HIM_DIM | MPI2_HIM_RIM | MPI2_HIM_RESET_IRQ_MASK;
   1623	writel(him_register, &ioc->chip->HostInterruptMask);
   1624	ioc->base_readl(&ioc->chip->HostInterruptMask);
   1625}
   1626
   1627/**
   1628 * mpt3sas_base_unmask_interrupts - enable interrupts
   1629 * @ioc: per adapter object
   1630 *
   1631 * Enabling only Reply Interrupts
   1632 */
   1633void
   1634mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
   1635{
   1636	u32 him_register;
   1637
   1638	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
   1639	him_register &= ~MPI2_HIM_RIM;
   1640	writel(him_register, &ioc->chip->HostInterruptMask);
   1641	ioc->mask_interrupts = 0;
   1642}
   1643
   1644union reply_descriptor {
   1645	u64 word;
   1646	struct {
   1647		u32 low;
   1648		u32 high;
   1649	} u;
   1650};
   1651
   1652static u32 base_mod64(u64 dividend, u32 divisor)
   1653{
   1654	u32 remainder;
   1655
   1656	if (!divisor)
   1657		pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
   1658	remainder = do_div(dividend, divisor);
   1659	return remainder;
   1660}
   1661
   1662/**
   1663 * _base_process_reply_queue - Process reply descriptors from reply
   1664 *		descriptor post queue.
   1665 * @reply_q: per IRQ's reply queue object.
   1666 *
   1667 * Return: number of reply descriptors processed from reply
   1668 *		descriptor queue.
   1669 */
   1670static int
   1671_base_process_reply_queue(struct adapter_reply_queue *reply_q)
   1672{
   1673	union reply_descriptor rd;
   1674	u64 completed_cmds;
   1675	u8 request_descript_type;
   1676	u16 smid;
   1677	u8 cb_idx;
   1678	u32 reply;
   1679	u8 msix_index = reply_q->msix_index;
   1680	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
   1681	Mpi2ReplyDescriptorsUnion_t *rpf;
   1682	u8 rc;
   1683
   1684	completed_cmds = 0;
   1685	if (!atomic_add_unless(&reply_q->busy, 1, 1))
   1686		return completed_cmds;
   1687
   1688	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
   1689	request_descript_type = rpf->Default.ReplyFlags
   1690	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
   1691	if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
   1692		atomic_dec(&reply_q->busy);
   1693		return completed_cmds;
   1694	}
   1695
   1696	cb_idx = 0xFF;
   1697	do {
   1698		rd.word = le64_to_cpu(rpf->Words);
   1699		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
   1700			goto out;
   1701		reply = 0;
   1702		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
   1703		if (request_descript_type ==
   1704		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
   1705		    request_descript_type ==
   1706		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
   1707		    request_descript_type ==
   1708		    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
   1709			cb_idx = _base_get_cb_idx(ioc, smid);
   1710			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
   1711			    (likely(mpt_callbacks[cb_idx] != NULL))) {
   1712				rc = mpt_callbacks[cb_idx](ioc, smid,
   1713				    msix_index, 0);
   1714				if (rc)
   1715					mpt3sas_base_free_smid(ioc, smid);
   1716			}
   1717		} else if (request_descript_type ==
   1718		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
   1719			reply = le32_to_cpu(
   1720			    rpf->AddressReply.ReplyFrameAddress);
   1721			if (reply > ioc->reply_dma_max_address ||
   1722			    reply < ioc->reply_dma_min_address)
   1723				reply = 0;
   1724			if (smid) {
   1725				cb_idx = _base_get_cb_idx(ioc, smid);
   1726				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
   1727				    (likely(mpt_callbacks[cb_idx] != NULL))) {
   1728					rc = mpt_callbacks[cb_idx](ioc, smid,
   1729					    msix_index, reply);
   1730					if (reply)
   1731						_base_display_reply_info(ioc,
   1732						    smid, msix_index, reply);
   1733					if (rc)
   1734						mpt3sas_base_free_smid(ioc,
   1735						    smid);
   1736				}
   1737			} else {
   1738				_base_async_event(ioc, msix_index, reply);
   1739			}
   1740
   1741			/* reply free queue handling */
   1742			if (reply) {
   1743				ioc->reply_free_host_index =
   1744				    (ioc->reply_free_host_index ==
   1745				    (ioc->reply_free_queue_depth - 1)) ?
   1746				    0 : ioc->reply_free_host_index + 1;
   1747				ioc->reply_free[ioc->reply_free_host_index] =
   1748				    cpu_to_le32(reply);
   1749				if (ioc->is_mcpu_endpoint)
   1750					_base_clone_reply_to_sys_mem(ioc,
   1751						reply,
   1752						ioc->reply_free_host_index);
   1753				writel(ioc->reply_free_host_index,
   1754				    &ioc->chip->ReplyFreeHostIndex);
   1755			}
   1756		}
   1757
   1758		rpf->Words = cpu_to_le64(ULLONG_MAX);
   1759		reply_q->reply_post_host_index =
   1760		    (reply_q->reply_post_host_index ==
   1761		    (ioc->reply_post_queue_depth - 1)) ? 0 :
   1762		    reply_q->reply_post_host_index + 1;
   1763		request_descript_type =
   1764		    reply_q->reply_post_free[reply_q->reply_post_host_index].
   1765		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
   1766		completed_cmds++;
    1767		/* Update the reply post host index after continuously
    1768		 * processing the threshold number of Reply Descriptors,
    1769		 * so that the FW can find enough free entries to post new
    1770		 * Reply Descriptors in the reply descriptor post queue.
    1771		 */
   1772		if (completed_cmds >= ioc->thresh_hold) {
   1773			if (ioc->combined_reply_queue) {
   1774				writel(reply_q->reply_post_host_index |
   1775						((msix_index  & 7) <<
   1776						 MPI2_RPHI_MSIX_INDEX_SHIFT),
   1777				    ioc->replyPostRegisterIndex[msix_index/8]);
   1778			} else {
   1779				writel(reply_q->reply_post_host_index |
   1780						(msix_index <<
   1781						 MPI2_RPHI_MSIX_INDEX_SHIFT),
   1782						&ioc->chip->ReplyPostHostIndex);
   1783			}
   1784			if (!reply_q->is_iouring_poll_q &&
   1785			    !reply_q->irq_poll_scheduled) {
   1786				reply_q->irq_poll_scheduled = true;
   1787				irq_poll_sched(&reply_q->irqpoll);
   1788			}
   1789			atomic_dec(&reply_q->busy);
   1790			return completed_cmds;
   1791		}
   1792		if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
   1793			goto out;
   1794		if (!reply_q->reply_post_host_index)
   1795			rpf = reply_q->reply_post_free;
   1796		else
   1797			rpf++;
   1798	} while (1);
   1799
   1800 out:
   1801
   1802	if (!completed_cmds) {
   1803		atomic_dec(&reply_q->busy);
   1804		return completed_cmds;
   1805	}
   1806
   1807	if (ioc->is_warpdrive) {
   1808		writel(reply_q->reply_post_host_index,
   1809		ioc->reply_post_host_index[msix_index]);
   1810		atomic_dec(&reply_q->busy);
   1811		return completed_cmds;
   1812	}
   1813
   1814	/* Update Reply Post Host Index.
    1815	 * For those HBAs which support the combined reply queue feature:
    1816	 * 1. Get the correct Supplemental Reply Post Host Index Register,
    1817	 *    i.e. the (msix_index / 8)th entry from the Supplemental Reply Post
    1818	 *    Host Index Register address bank, i.e. replyPostRegisterIndex[].
    1819	 * 2. Then update this register with the new reply host index value
    1820	 *    in the ReplyPostIndex field and the MSIxIndex field with the
    1821	 *    msix_index value reduced to a value between 0 and 7
    1822	 *    using a modulo 8 operation, since each Supplemental Reply Post
    1823	 *    Host Index Register supports 8 MSI-X vectors.
    1824	 *
    1825	 * For other HBAs just update the Reply Post Host Index register with
    1826	 * the new reply host index value in the ReplyPostIndex field and the
    1827	 * msix_index value in the MSIxIndex field.
   1828	 */
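	/* Worked example: for msix_index = 13 the register written below is
	 * replyPostRegisterIndex[13 / 8] = replyPostRegisterIndex[1], and the
	 * MSIxIndex field carries 13 & 7 = 5.
	 */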
   1829	if (ioc->combined_reply_queue)
   1830		writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
   1831			MPI2_RPHI_MSIX_INDEX_SHIFT),
   1832			ioc->replyPostRegisterIndex[msix_index/8]);
   1833	else
   1834		writel(reply_q->reply_post_host_index | (msix_index <<
   1835			MPI2_RPHI_MSIX_INDEX_SHIFT),
   1836			&ioc->chip->ReplyPostHostIndex);
   1837	atomic_dec(&reply_q->busy);
   1838	return completed_cmds;
   1839}
   1840
   1841/**
   1842 * mpt3sas_blk_mq_poll - poll the blk mq poll queue
   1843 * @shost: Scsi_Host object
   1844 * @queue_num: hw ctx queue number
   1845 *
    1846 * Return the number of entries that have been processed from the poll queue.
   1847 */
   1848int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
   1849{
   1850	struct MPT3SAS_ADAPTER *ioc =
   1851	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
   1852	struct adapter_reply_queue *reply_q;
   1853	int num_entries = 0;
   1854	int qid = queue_num - ioc->iopoll_q_start_index;
   1855
   1856	if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) ||
   1857	    !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1))
   1858		return 0;
   1859
   1860	reply_q = ioc->io_uring_poll_queues[qid].reply_q;
   1861
   1862	num_entries = _base_process_reply_queue(reply_q);
   1863	atomic_dec(&ioc->io_uring_poll_queues[qid].busy);
   1864
   1865	return num_entries;
   1866}
   1867
   1868/**
   1869 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
   1870 * @irq: irq number (not used)
    1871 * @bus_id: bus identifier cookie == pointer to the adapter_reply_queue structure
   1872 *
   1873 * Return: IRQ_HANDLED if processed, else IRQ_NONE.
   1874 */
   1875static irqreturn_t
   1876_base_interrupt(int irq, void *bus_id)
   1877{
   1878	struct adapter_reply_queue *reply_q = bus_id;
   1879	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
   1880
   1881	if (ioc->mask_interrupts)
   1882		return IRQ_NONE;
   1883	if (reply_q->irq_poll_scheduled)
   1884		return IRQ_HANDLED;
   1885	return ((_base_process_reply_queue(reply_q) > 0) ?
   1886			IRQ_HANDLED : IRQ_NONE);
   1887}
   1888
   1889/**
   1890 * _base_irqpoll - IRQ poll callback handler
   1891 * @irqpoll: irq_poll object
   1892 * @budget: irq poll weight
   1893 *
   1894 * Return: number of reply descriptors processed
   1895 */
   1896static int
   1897_base_irqpoll(struct irq_poll *irqpoll, int budget)
   1898{
   1899	struct adapter_reply_queue *reply_q;
   1900	int num_entries = 0;
   1901
   1902	reply_q = container_of(irqpoll, struct adapter_reply_queue,
   1903			irqpoll);
   1904	if (reply_q->irq_line_enable) {
   1905		disable_irq_nosync(reply_q->os_irq);
   1906		reply_q->irq_line_enable = false;
   1907	}
   1908	num_entries = _base_process_reply_queue(reply_q);
   1909	if (num_entries < budget) {
   1910		irq_poll_complete(irqpoll);
   1911		reply_q->irq_poll_scheduled = false;
   1912		reply_q->irq_line_enable = true;
   1913		enable_irq(reply_q->os_irq);
   1914		/*
   1915		 * Go for one more round of processing the
   1916		 * reply descriptor post queue in case the HBA
   1917		 * Firmware has posted some reply descriptors
   1918		 * while reenabling the IRQ.
   1919		 */
   1920		_base_process_reply_queue(reply_q);
   1921	}
   1922
   1923	return num_entries;
   1924}
   1925
   1926/**
    1927 * _base_init_irqpolls - initialize IRQ polls
   1928 * @ioc: per adapter object
   1929 *
   1930 * Return: nothing
   1931 */
   1932static void
   1933_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
   1934{
   1935	struct adapter_reply_queue *reply_q, *next;
   1936
   1937	if (list_empty(&ioc->reply_queue_list))
   1938		return;
   1939
   1940	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
   1941		if (reply_q->is_iouring_poll_q)
   1942			continue;
   1943		irq_poll_init(&reply_q->irqpoll,
   1944			ioc->hba_queue_depth/4, _base_irqpoll);
   1945		reply_q->irq_poll_scheduled = false;
   1946		reply_q->irq_line_enable = true;
   1947		reply_q->os_irq = pci_irq_vector(ioc->pdev,
   1948		    reply_q->msix_index);
   1949	}
   1950}
   1951
   1952/**
    1953 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
   1954 * @ioc: per adapter object
   1955 *
   1956 * Return: Whether or not MSI/X is enabled.
   1957 */
   1958static inline int
   1959_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
   1960{
   1961	return (ioc->facts.IOCCapabilities &
   1962	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
   1963}
   1964
   1965/**
   1966 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
   1967 * @ioc: per adapter object
    1968 * @poll: poll over reply descriptor pools in case the interrupt for a
    1969 *		timed-out SCSI command got delayed
   1970 * Context: non-ISR context
   1971 *
   1972 * Called when a Task Management request has completed.
   1973 */
   1974void
   1975mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
   1976{
   1977	struct adapter_reply_queue *reply_q;
   1978
   1979	/* If MSIX capability is turned off
   1980	 * then multi-queues are not enabled
   1981	 */
   1982	if (!_base_is_controller_msix_enabled(ioc))
   1983		return;
   1984
   1985	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
   1986		if (ioc->shost_recovery || ioc->remove_host ||
   1987				ioc->pci_error_recovery)
   1988			return;
   1989		/* TMs are on msix_index == 0 */
   1990		if (reply_q->msix_index == 0)
   1991			continue;
   1992
   1993		if (reply_q->is_iouring_poll_q) {
   1994			_base_process_reply_queue(reply_q);
   1995			continue;
   1996		}
   1997
   1998		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
   1999		if (reply_q->irq_poll_scheduled) {
   2000			/* Calling irq_poll_disable will wait for any pending
   2001			 * callbacks to have completed.
   2002			 */
   2003			irq_poll_disable(&reply_q->irqpoll);
   2004			irq_poll_enable(&reply_q->irqpoll);
   2005			/* check how the scheduled poll has ended,
   2006			 * clean up only if necessary
   2007			 */
   2008			if (reply_q->irq_poll_scheduled) {
   2009				reply_q->irq_poll_scheduled = false;
   2010				reply_q->irq_line_enable = true;
   2011				enable_irq(reply_q->os_irq);
   2012			}
   2013		}
   2014
   2015		if (poll)
   2016			_base_process_reply_queue(reply_q);
   2017	}
   2018}
   2019
   2020/**
   2021 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
   2022 * @cb_idx: callback index
   2023 */
   2024void
   2025mpt3sas_base_release_callback_handler(u8 cb_idx)
   2026{
   2027	mpt_callbacks[cb_idx] = NULL;
   2028}
   2029
   2030/**
   2031 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
   2032 * @cb_func: callback function
   2033 *
   2034 * Return: Index of @cb_func.
   2035 */
   2036u8
   2037mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
   2038{
   2039	u8 cb_idx;
   2040
   2041	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
   2042		if (mpt_callbacks[cb_idx] == NULL)
   2043			break;
   2044
   2045	mpt_callbacks[cb_idx] = cb_func;
   2046	return cb_idx;
   2047}
   2048
   2049/**
   2050 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
   2051 */
   2052void
   2053mpt3sas_base_initialize_callback_handler(void)
   2054{
   2055	u8 cb_idx;
   2056
   2057	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
   2058		mpt3sas_base_release_callback_handler(cb_idx);
   2059}
   2060
   2061
   2062/**
   2063 * _base_build_zero_len_sge - build zero length sg entry
   2064 * @ioc: per adapter object
   2065 * @paddr: virtual address for SGE
   2066 *
    2067 * Create a zero length scatter gather entry to ensure the IOC's hardware has
   2068 * something to use if the target device goes brain dead and tries
   2069 * to send data even when none is asked for.
   2070 */
   2071static void
   2072_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
   2073{
   2074	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
   2075	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
   2076	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
   2077	    MPI2_SGE_FLAGS_SHIFT);
   2078	ioc->base_add_sg_single(paddr, flags_length, -1);
   2079}
   2080
   2081/**
   2082 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
   2083 * @paddr: virtual address for SGE
   2084 * @flags_length: SGE flags and data transfer length
   2085 * @dma_addr: Physical address
   2086 */
   2087static void
   2088_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
   2089{
   2090	Mpi2SGESimple32_t *sgel = paddr;
   2091
   2092	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
   2093	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
   2094	sgel->FlagsLength = cpu_to_le32(flags_length);
   2095	sgel->Address = cpu_to_le32(dma_addr);
   2096}
   2097
   2098
   2099/**
   2100 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
   2101 * @paddr: virtual address for SGE
   2102 * @flags_length: SGE flags and data transfer length
   2103 * @dma_addr: Physical address
   2104 */
   2105static void
   2106_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
   2107{
   2108	Mpi2SGESimple64_t *sgel = paddr;
   2109
   2110	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
   2111	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
   2112	sgel->FlagsLength = cpu_to_le32(flags_length);
   2113	sgel->Address = cpu_to_le64(dma_addr);
   2114}
   2115
   2116/**
   2117 * _base_get_chain_buffer_tracker - obtain chain tracker
   2118 * @ioc: per adapter object
   2119 * @scmd: SCSI commands of the IO request
   2120 *
   2121 * Return: chain tracker from chain_lookup table using key as
   2122 * smid and smid's chain_offset.
   2123 */
   2124static struct chain_tracker *
   2125_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
   2126			       struct scsi_cmnd *scmd)
   2127{
   2128	struct chain_tracker *chain_req;
   2129	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
   2130	u16 smid = st->smid;
   2131	u8 chain_offset =
   2132	   atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
   2133
   2134	if (chain_offset == ioc->chains_needed_per_io)
   2135		return NULL;
   2136
   2137	chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
   2138	atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
   2139	return chain_req;
   2140}
   2141
   2142
   2143/**
   2144 * _base_build_sg - build generic sg
   2145 * @ioc: per adapter object
   2146 * @psge: virtual address for SGE
   2147 * @data_out_dma: physical address for WRITES
   2148 * @data_out_sz: data xfer size for WRITES
   2149 * @data_in_dma: physical address for READS
   2150 * @data_in_sz: data xfer size for READS
   2151 */
   2152static void
   2153_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
   2154	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
   2155	size_t data_in_sz)
   2156{
   2157	u32 sgl_flags;
   2158
   2159	if (!data_out_sz && !data_in_sz) {
   2160		_base_build_zero_len_sge(ioc, psge);
   2161		return;
   2162	}
   2163
   2164	if (data_out_sz && data_in_sz) {
   2165		/* WRITE sgel first */
   2166		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
   2167		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
   2168		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
   2169		ioc->base_add_sg_single(psge, sgl_flags |
   2170		    data_out_sz, data_out_dma);
   2171
   2172		/* incr sgel */
   2173		psge += ioc->sge_size;
   2174
   2175		/* READ sgel last */
   2176		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
   2177		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
   2178		    MPI2_SGE_FLAGS_END_OF_LIST);
   2179		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
   2180		ioc->base_add_sg_single(psge, sgl_flags |
   2181		    data_in_sz, data_in_dma);
   2182	} else if (data_out_sz) /* WRITE */ {
   2183		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
   2184		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
   2185		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
   2186		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
   2187		ioc->base_add_sg_single(psge, sgl_flags |
   2188		    data_out_sz, data_out_dma);
   2189	} else if (data_in_sz) /* READ */ {
   2190		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
   2191		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
   2192		    MPI2_SGE_FLAGS_END_OF_LIST);
   2193		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
   2194		ioc->base_add_sg_single(psge, sgl_flags |
   2195		    data_in_sz, data_in_dma);
   2196	}
   2197}
   2198
   2199/* IEEE format sgls */
   2200
   2201/**
   2202 * _base_build_nvme_prp - This function is called for NVMe end devices to build
   2203 *                        a native SGL (NVMe PRP).
   2204 * @ioc: per adapter object
    2205 * @smid: system request message index for getting associated SGL
   2206 * @nvme_encap_request: the NVMe request msg frame pointer
   2207 * @data_out_dma: physical address for WRITES
   2208 * @data_out_sz: data xfer size for WRITES
   2209 * @data_in_dma: physical address for READS
   2210 * @data_in_sz: data xfer size for READS
   2211 *
   2212 * The native SGL is built starting in the first PRP
   2213 * entry of the NVMe message (PRP1).  If the data buffer is small enough to be
   2214 * described entirely using PRP1, then PRP2 is not used.  If needed, PRP2 is
   2215 * used to describe a larger data buffer.  If the data buffer is too large to
    2216 * describe using the two PRP entries inside the NVMe message, then PRP1
   2217 * describes the first data memory segment, and PRP2 contains a pointer to a PRP
   2218 * list located elsewhere in memory to describe the remaining data memory
   2219 * segments.  The PRP list will be contiguous.
   2220 *
   2221 * The native SGL for NVMe devices is a Physical Region Page (PRP).  A PRP
    2222 * consists of a list of PRP entries to describe a number of noncontiguous
   2223 * physical memory segments as a single memory buffer, just as a SGL does.  Note
   2224 * however, that this function is only used by the IOCTL call, so the memory
   2225 * given will be guaranteed to be contiguous.  There is no need to translate
   2226 * non-contiguous SGL into a PRP in this case.  All PRPs will describe
   2227 * contiguous space that is one page size each.
   2228 *
   2229 * Each NVMe message contains two PRP entries.  The first (PRP1) either contains
   2230 * a PRP list pointer or a PRP element, depending upon the command.  PRP2
   2231 * contains the second PRP element if the memory being described fits within 2
   2232 * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
   2233 *
   2234 * A PRP list pointer contains the address of a PRP list, structured as a linear
   2235 * array of PRP entries.  Each PRP entry in this list describes a segment of
   2236 * physical memory.
   2237 *
   2238 * Each 64-bit PRP entry comprises an address and an offset field.  The address
   2239 * always points at the beginning of a 4KB physical memory page, and the offset
   2240 * describes where within that 4KB page the memory segment begins.  Only the
   2241 * first element in a PRP list may contain a non-zero offset, implying that all
   2242 * memory segments following the first begin at the start of a 4KB page.
   2243 *
   2244 * Each PRP element normally describes 4KB of physical memory, with exceptions
   2245 * for the first and last elements in the list.  If the memory being described
   2246 * by the list begins at a non-zero offset within the first 4KB page, then the
   2247 * first PRP element will contain a non-zero offset indicating where the region
   2248 * begins within the 4KB page.  The last memory segment may end before the end
   2249 * of the 4KB segment, depending upon the overall size of the memory being
   2250 * described by the PRP list.
   2251 *
   2252 * Since PRP entries lack any indication of size, the overall data buffer length
   2253 * is used to determine where the end of the data memory buffer is located, and
   2254 * how many PRP entries are required to describe it.
   2255 */
   2256static void
   2257_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
   2258	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
   2259	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
   2260	size_t data_in_sz)
   2261{
   2262	int		prp_size = NVME_PRP_SIZE;
   2263	__le64		*prp_entry, *prp1_entry, *prp2_entry;
   2264	__le64		*prp_page;
   2265	dma_addr_t	prp_entry_dma, prp_page_dma, dma_addr;
   2266	u32		offset, entry_len;
   2267	u32		page_mask_result, page_mask;
   2268	size_t		length;
   2269	struct mpt3sas_nvme_cmd *nvme_cmd =
   2270		(void *)nvme_encap_request->NVMe_Command;
   2271
   2272	/*
   2273	 * Not all commands require a data transfer. If no data, just return
   2274	 * without constructing any PRP.
   2275	 */
   2276	if (!data_in_sz && !data_out_sz)
   2277		return;
   2278	prp1_entry = &nvme_cmd->prp1;
   2279	prp2_entry = &nvme_cmd->prp2;
   2280	prp_entry = prp1_entry;
   2281	/*
   2282	 * For the PRP entries, use the specially allocated buffer of
   2283	 * contiguous memory.
   2284	 */
   2285	prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
   2286	prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
   2287
   2288	/*
    2289	 * Check if we are within 1 entry of a page boundary; we don't
    2290	 * want our first entry to be a PRP List entry.
   2291	 */
   2292	page_mask = ioc->page_size - 1;
   2293	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
   2294	if (!page_mask_result) {
   2295		/* Bump up to next page boundary. */
   2296		prp_page = (__le64 *)((u8 *)prp_page + prp_size);
   2297		prp_page_dma = prp_page_dma + prp_size;
   2298	}
   2299
   2300	/*
   2301	 * Set PRP physical pointer, which initially points to the current PRP
   2302	 * DMA memory page.
   2303	 */
   2304	prp_entry_dma = prp_page_dma;
   2305
   2306	/* Get physical address and length of the data buffer. */
   2307	if (data_in_sz) {
   2308		dma_addr = data_in_dma;
   2309		length = data_in_sz;
   2310	} else {
   2311		dma_addr = data_out_dma;
   2312		length = data_out_sz;
   2313	}
   2314
   2315	/* Loop while the length is not zero. */
   2316	while (length) {
    2317		 * Check if we need to put a list pointer here, i.e. whether
    2318		 * we are prp_size (8 bytes) before a page boundary.
   2319		 * page boundary - prp_size (8 bytes).
   2320		 */
   2321		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
   2322		if (!page_mask_result) {
   2323			/*
   2324			 * This is the last entry in a PRP List, so we need to
   2325			 * put a PRP list pointer here.  What this does is:
   2326			 *   - bump the current memory pointer to the next
   2327			 *     address, which will be the next full page.
   2328			 *   - set the PRP Entry to point to that page.  This
   2329			 *     is now the PRP List pointer.
    2330			 *   - bump the PRP Entry pointer to the start of the
   2331			 *     next page.  Since all of this PRP memory is
   2332			 *     contiguous, no need to get a new page - it's
   2333			 *     just the next address.
   2334			 */
   2335			prp_entry_dma++;
   2336			*prp_entry = cpu_to_le64(prp_entry_dma);
   2337			prp_entry++;
   2338		}
   2339
    2340		/* Handle the case where the entry covers only part of a page. */
   2341		offset = dma_addr & page_mask;
   2342		entry_len = ioc->page_size - offset;
   2343
   2344		if (prp_entry == prp1_entry) {
   2345			/*
   2346			 * Must fill in the first PRP pointer (PRP1) before
   2347			 * moving on.
   2348			 */
   2349			*prp1_entry = cpu_to_le64(dma_addr);
   2350
   2351			/*
   2352			 * Now point to the second PRP entry within the
   2353			 * command (PRP2).
   2354			 */
   2355			prp_entry = prp2_entry;
   2356		} else if (prp_entry == prp2_entry) {
   2357			/*
   2358			 * Should the PRP2 entry be a PRP List pointer or just
    2359			 * a regular PRP pointer?  If more than one page of data
    2360			 * remains, a PRP List pointer must be used.
   2361			 */
   2362			if (length > ioc->page_size) {
   2363				/*
   2364				 * PRP2 will contain a PRP List pointer because
   2365				 * more PRP's are needed with this command. The
   2366				 * list will start at the beginning of the
   2367				 * contiguous buffer.
   2368				 */
   2369				*prp2_entry = cpu_to_le64(prp_entry_dma);
   2370
   2371				/*
   2372				 * The next PRP Entry will be the start of the
   2373				 * first PRP List.
   2374				 */
   2375				prp_entry = prp_page;
   2376			} else {
   2377				/*
   2378				 * After this, the PRP Entries are complete.
   2379				 * This command uses 2 PRP's and no PRP list.
   2380				 */
   2381				*prp2_entry = cpu_to_le64(dma_addr);
   2382			}
   2383		} else {
   2384			/*
   2385			 * Put entry in list and bump the addresses.
   2386			 *
   2387			 * After PRP1 and PRP2 are filled in, this will fill in
   2388			 * all remaining PRP entries in a PRP List, one per
   2389			 * each time through the loop.
   2390			 */
   2391			*prp_entry = cpu_to_le64(dma_addr);
   2392			prp_entry++;
   2393			prp_entry_dma++;
   2394		}
   2395
   2396		/*
   2397		 * Bump the phys address of the command's data buffer by the
   2398		 * entry_len.
   2399		 */
   2400		dma_addr += entry_len;
   2401
   2402		/* Decrement length accounting for last partial page. */
   2403		if (entry_len > length)
   2404			length = 0;
   2405		else
   2406			length -= entry_len;
   2407	}
   2408}
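/*
 * Worked example for the routine above (illustrative): with a 4 KB IOC page
 * size and an 8 KB page-aligned buffer, the first loop iteration puts the
 * buffer address in PRP1; 4 KB then remain, which is not more than one page,
 * so the next iteration stores the address of the second page directly in
 * PRP2 and no PRP list is needed.  For larger transfers PRP2 instead becomes
 * a pointer to the PRP list built in the contiguous buffer returned by
 * mpt3sas_base_get_pcie_sgl().
 */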
   2409
   2410/**
   2411 * base_make_prp_nvme - Prepare PRPs (Physical Region Page) -
   2412 *			SGLs specific to NVMe drives only
   2413 *
   2414 * @ioc:		per adapter object
   2415 * @scmd:		SCSI command from the mid-layer
   2416 * @mpi_request:	mpi request
   2417 * @smid:		msg Index
   2418 * @sge_count:		scatter gather element count.
   2419 *
    2420 * Return:		nothing; the routine always builds the PRPs.  The caller decides
    2421 *			beforehand, via base_is_prp_possible(), whether an IEEE SGL is needed.
   2422 */
   2423static void
   2424base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
   2425		struct scsi_cmnd *scmd,
   2426		Mpi25SCSIIORequest_t *mpi_request,
   2427		u16 smid, int sge_count)
   2428{
   2429	int sge_len, num_prp_in_chain = 0;
   2430	Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
   2431	__le64 *curr_buff;
   2432	dma_addr_t msg_dma, sge_addr, offset;
   2433	u32 page_mask, page_mask_result;
   2434	struct scatterlist *sg_scmd;
   2435	u32 first_prp_len;
   2436	int data_len = scsi_bufflen(scmd);
   2437	u32 nvme_pg_size;
   2438
   2439	nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
   2440	/*
    2441	 * NVMe has a very convoluted PRP format.  One PRP is required
    2442	 * for each page or partial page.  The driver needs to split up OS
    2443	 * sg_list entries if they are longer than one page or cross a page
    2444	 * boundary.  The driver also has to insert a PRP list pointer entry as
   2445	 * the last entry in each physical page of the PRP list.
   2446	 *
   2447	 * NOTE: The first PRP "entry" is actually placed in the first
   2448	 * SGL entry in the main message as IEEE 64 format.  The 2nd
   2449	 * entry in the main message is the chain element, and the rest
   2450	 * of the PRP entries are built in the contiguous pcie buffer.
   2451	 */
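	/*
	 * Resulting request layout (illustrative):
	 *   main message SGL entry 0: IEEE 64-bit simple SGE  = first PRP
	 *   main message SGL entry 1: IEEE chain element     -> contiguous
	 *      PCIe SGL buffer holding the remaining PRP entries (with a PRP
	 *      list pointer as the last entry of each of its pages).
	 */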
   2452	page_mask = nvme_pg_size - 1;
   2453
   2454	/*
   2455	 * Native SGL is needed.
   2456	 * Put a chain element in main message frame that points to the first
   2457	 * chain buffer.
   2458	 *
   2459	 * NOTE:  The ChainOffset field must be 0 when using a chain pointer to
   2460	 *        a native SGL.
   2461	 */
   2462
   2463	/* Set main message chain element pointer */
   2464	main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
   2465	/*
   2466	 * For NVMe the chain element needs to be the 2nd SG entry in the main
   2467	 * message.
   2468	 */
   2469	main_chain_element = (Mpi25IeeeSgeChain64_t *)
   2470		((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
   2471
   2472	/*
   2473	 * For the PRP entries, use the specially allocated buffer of
   2474	 * contiguous memory.  Normal chain buffers can't be used
   2475	 * because each chain buffer would need to be the size of an OS
   2476	 * page (4k).
   2477	 */
   2478	curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
   2479	msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
   2480
   2481	main_chain_element->Address = cpu_to_le64(msg_dma);
   2482	main_chain_element->NextChainOffset = 0;
   2483	main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
   2484			MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
   2485			MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
   2486
    2487	/* Build first PRP; the SGE need not be page aligned */
   2488	ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
   2489	sg_scmd = scsi_sglist(scmd);
   2490	sge_addr = sg_dma_address(sg_scmd);
   2491	sge_len = sg_dma_len(sg_scmd);
   2492
   2493	offset = sge_addr & page_mask;
   2494	first_prp_len = nvme_pg_size - offset;
   2495
   2496	ptr_first_sgl->Address = cpu_to_le64(sge_addr);
   2497	ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
   2498
   2499	data_len -= first_prp_len;
   2500
   2501	if (sge_len > first_prp_len) {
   2502		sge_addr += first_prp_len;
   2503		sge_len -= first_prp_len;
   2504	} else if (data_len && (sge_len == first_prp_len)) {
   2505		sg_scmd = sg_next(sg_scmd);
   2506		sge_addr = sg_dma_address(sg_scmd);
   2507		sge_len = sg_dma_len(sg_scmd);
   2508	}
   2509
   2510	for (;;) {
   2511		offset = sge_addr & page_mask;
   2512
    2513		/* Put a PRP list pointer at the page boundary */
   2514		page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
   2515		if (unlikely(!page_mask_result)) {
   2516			scmd_printk(KERN_NOTICE,
   2517				scmd, "page boundary curr_buff: 0x%p\n",
   2518				curr_buff);
   2519			msg_dma += 8;
   2520			*curr_buff = cpu_to_le64(msg_dma);
   2521			curr_buff++;
   2522			num_prp_in_chain++;
   2523		}
   2524
   2525		*curr_buff = cpu_to_le64(sge_addr);
   2526		curr_buff++;
   2527		msg_dma += 8;
   2528		num_prp_in_chain++;
   2529
   2530		sge_addr += nvme_pg_size;
   2531		sge_len -= nvme_pg_size;
   2532		data_len -= nvme_pg_size;
   2533
   2534		if (data_len <= 0)
   2535			break;
   2536
   2537		if (sge_len > 0)
   2538			continue;
   2539
   2540		sg_scmd = sg_next(sg_scmd);
   2541		sge_addr = sg_dma_address(sg_scmd);
   2542		sge_len = sg_dma_len(sg_scmd);
   2543	}
   2544
   2545	main_chain_element->Length =
   2546		cpu_to_le32(num_prp_in_chain * sizeof(u64));
   2547	return;
   2548}
   2549
   2550static bool
   2551base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
   2552	struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
   2553{
   2554	u32 data_length = 0;
   2555	bool build_prp = true;
   2556
   2557	data_length = scsi_bufflen(scmd);
   2558	if (pcie_device &&
   2559	    (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
   2560		build_prp = false;
   2561		return build_prp;
   2562	}
   2563
    2564	/* If the data length is <= 16K and the number of SGEs is <= 2,
    2565	 * we build an IEEE SGL
   2566	 */
   2567	if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
   2568		build_prp = false;
   2569
   2570	return build_prp;
   2571}
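/*
 * Illustrative outcomes of the check above (assuming NVME_PRP_PAGE_SIZE is
 * 4 KB): a 4 KB read mapped to a single SGE stays on the IEEE SGL path
 * (build_prp == false), while a 64 KB transfer mapped to many SGEs goes down
 * the NVMe PRP path (build_prp == true).
 */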
   2572
   2573/**
   2574 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
   2575 * determine if the driver needs to build a native SGL.  If so, that native
   2576 * SGL is built in the special contiguous buffers allocated especially for
   2577 * PCIe SGL creation.  If the driver will not build a native SGL, return
   2578 * TRUE and a normal IEEE SGL will be built.  Currently this routine
   2579 * supports NVMe.
   2580 * @ioc: per adapter object
   2581 * @mpi_request: mf request pointer
   2582 * @smid: system request message index
   2583 * @scmd: scsi command
   2584 * @pcie_device: points to the PCIe device's info
   2585 *
   2586 * Return: 0 if native SGL was built, 1 if no SGL was built
   2587 */
   2588static int
   2589_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
   2590	Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
   2591	struct _pcie_device *pcie_device)
   2592{
   2593	int sges_left;
   2594
   2595	/* Get the SG list pointer and info. */
   2596	sges_left = scsi_dma_map(scmd);
   2597	if (sges_left < 0)
   2598		return 1;
   2599
   2600	/* Check if we need to build a native SG list. */
   2601	if (!base_is_prp_possible(ioc, pcie_device,
   2602				scmd, sges_left)) {
    2603		/* PRP is not possible; fall back to building an IEEE SG list. */
   2604		goto out;
   2605	}
   2606
   2607	/*
   2608	 * Build native NVMe PRP.
   2609	 */
   2610	base_make_prp_nvme(ioc, scmd, mpi_request,
   2611			smid, sges_left);
   2612
   2613	return 0;
   2614out:
   2615	scsi_dma_unmap(scmd);
   2616	return 1;
   2617}
   2618
   2619/**
   2620 * _base_add_sg_single_ieee - add sg element for IEEE format
   2621 * @paddr: virtual address for SGE
   2622 * @flags: SGE flags
   2623 * @chain_offset: number of 128 byte elements from start of segment
   2624 * @length: data transfer length
   2625 * @dma_addr: Physical address
   2626 */
   2627static void
   2628_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
   2629	dma_addr_t dma_addr)
   2630{
   2631	Mpi25IeeeSgeChain64_t *sgel = paddr;
   2632
   2633	sgel->Flags = flags;
   2634	sgel->NextChainOffset = chain_offset;
   2635	sgel->Length = cpu_to_le32(length);
   2636	sgel->Address = cpu_to_le64(dma_addr);
   2637}
   2638
   2639/**
   2640 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
   2641 * @ioc: per adapter object
   2642 * @paddr: virtual address for SGE
   2643 *
    2644 * Create a zero length scatter gather entry to ensure the IOC's hardware has
   2645 * something to use if the target device goes brain dead and tries
   2646 * to send data even when none is asked for.
   2647 */
   2648static void
   2649_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
   2650{
   2651	u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
   2652		MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
   2653		MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
   2654
   2655	_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
   2656}
   2657
   2658/**
   2659 * _base_build_sg_scmd - main sg creation routine
   2660 *		pcie_device is unused here!
   2661 * @ioc: per adapter object
   2662 * @scmd: scsi command
   2663 * @smid: system request message index
   2664 * @unused: unused pcie_device pointer
   2665 * Context: none.
   2666 *
   2667 * The main routine that builds scatter gather table from a given
   2668 * scsi request sent via the .queuecommand main handler.
   2669 *
   2670 * Return: 0 success, anything else error
   2671 */
   2672static int
   2673_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
   2674	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
   2675{
   2676	Mpi2SCSIIORequest_t *mpi_request;
   2677	dma_addr_t chain_dma;
   2678	struct scatterlist *sg_scmd;
   2679	void *sg_local, *chain;
   2680	u32 chain_offset;
   2681	u32 chain_length;
   2682	u32 chain_flags;
   2683	int sges_left;
   2684	u32 sges_in_segment;
   2685	u32 sgl_flags;
   2686	u32 sgl_flags_last_element;
   2687	u32 sgl_flags_end_buffer;
   2688	struct chain_tracker *chain_req;
   2689
   2690	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
   2691
   2692	/* init scatter gather flags */
   2693	sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
   2694	if (scmd->sc_data_direction == DMA_TO_DEVICE)
   2695		sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
   2696	sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
   2697	    << MPI2_SGE_FLAGS_SHIFT;
   2698	sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
   2699	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
   2700	    << MPI2_SGE_FLAGS_SHIFT;
   2701	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
   2702
   2703	sg_scmd = scsi_sglist(scmd);
   2704	sges_left = scsi_dma_map(scmd);
   2705	if (sges_left < 0)
   2706		return -ENOMEM;
   2707
   2708	sg_local = &mpi_request->SGL;
   2709	sges_in_segment = ioc->max_sges_in_main_message;
   2710	if (sges_left <= sges_in_segment)
   2711		goto fill_in_last_segment;
   2712
   2713	mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
   2714	    (sges_in_segment * ioc->sge_size))/4;
   2715
   2716	/* fill in main message segment when there is a chain following */
   2717	while (sges_in_segment) {
   2718		if (sges_in_segment == 1)
   2719			ioc->base_add_sg_single(sg_local,
   2720			    sgl_flags_last_element | sg_dma_len(sg_scmd),
   2721			    sg_dma_address(sg_scmd));
   2722		else
   2723			ioc->base_add_sg_single(sg_local, sgl_flags |
   2724			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
   2725		sg_scmd = sg_next(sg_scmd);
   2726		sg_local += ioc->sge_size;
   2727		sges_left--;
   2728		sges_in_segment--;
   2729	}
   2730
   2731	/* initializing the chain flags and pointers */
   2732	chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
   2733	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
   2734	if (!chain_req)
   2735		return -1;
   2736	chain = chain_req->chain_buffer;
   2737	chain_dma = chain_req->chain_buffer_dma;
   2738	do {
   2739		sges_in_segment = (sges_left <=
   2740		    ioc->max_sges_in_chain_message) ? sges_left :
   2741		    ioc->max_sges_in_chain_message;
   2742		chain_offset = (sges_left == sges_in_segment) ?
   2743		    0 : (sges_in_segment * ioc->sge_size)/4;
   2744		chain_length = sges_in_segment * ioc->sge_size;
   2745		if (chain_offset) {
   2746			chain_offset = chain_offset <<
   2747			    MPI2_SGE_CHAIN_OFFSET_SHIFT;
   2748			chain_length += ioc->sge_size;
   2749		}
   2750		ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
   2751		    chain_length, chain_dma);
   2752		sg_local = chain;
   2753		if (!chain_offset)
   2754			goto fill_in_last_segment;
   2755
   2756		/* fill in chain segments */
   2757		while (sges_in_segment) {
   2758			if (sges_in_segment == 1)
   2759				ioc->base_add_sg_single(sg_local,
   2760				    sgl_flags_last_element |
   2761				    sg_dma_len(sg_scmd),
   2762				    sg_dma_address(sg_scmd));
   2763			else
   2764				ioc->base_add_sg_single(sg_local, sgl_flags |
   2765				    sg_dma_len(sg_scmd),
   2766				    sg_dma_address(sg_scmd));
   2767			sg_scmd = sg_next(sg_scmd);
   2768			sg_local += ioc->sge_size;
   2769			sges_left--;
   2770			sges_in_segment--;
   2771		}
   2772
   2773		chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
   2774		if (!chain_req)
   2775			return -1;
   2776		chain = chain_req->chain_buffer;
   2777		chain_dma = chain_req->chain_buffer_dma;
   2778	} while (1);
   2779
   2780
   2781 fill_in_last_segment:
   2782
   2783	/* fill the last segment */
   2784	while (sges_left) {
   2785		if (sges_left == 1)
   2786			ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
   2787			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
   2788		else
   2789			ioc->base_add_sg_single(sg_local, sgl_flags |
   2790			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
   2791		sg_scmd = sg_next(sg_scmd);
   2792		sg_local += ioc->sge_size;
   2793		sges_left--;
   2794	}
   2795
   2796	return 0;
   2797}
   2798
   2799/**
   2800 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
   2801 * @ioc: per adapter object
   2802 * @scmd: scsi command
   2803 * @smid: system request message index
   2804 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
    2805 * constructed if needed.
   2806 * Context: none.
   2807 *
   2808 * The main routine that builds scatter gather table from a given
   2809 * scsi request sent via the .queuecommand main handler.
   2810 *
   2811 * Return: 0 success, anything else error
   2812 */
   2813static int
   2814_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
   2815	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
   2816{
   2817	Mpi25SCSIIORequest_t *mpi_request;
   2818	dma_addr_t chain_dma;
   2819	struct scatterlist *sg_scmd;
   2820	void *sg_local, *chain;
   2821	u32 chain_offset;
   2822	u32 chain_length;
   2823	int sges_left;
   2824	u32 sges_in_segment;
   2825	u8 simple_sgl_flags;
   2826	u8 simple_sgl_flags_last;
   2827	u8 chain_sgl_flags;
   2828	struct chain_tracker *chain_req;
   2829
   2830	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
   2831
   2832	/* init scatter gather flags */
   2833	simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
   2834	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
   2835	simple_sgl_flags_last = simple_sgl_flags |
   2836	    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
   2837	chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
   2838	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
   2839
   2840	/* Check if we need to build a native SG list. */
   2841	if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
   2842			smid, scmd, pcie_device) == 0)) {
   2843		/* We built a native SG list, just return. */
   2844		return 0;
   2845	}
   2846
   2847	sg_scmd = scsi_sglist(scmd);
   2848	sges_left = scsi_dma_map(scmd);
   2849	if (sges_left < 0)
   2850		return -ENOMEM;
   2851
   2852	sg_local = &mpi_request->SGL;
   2853	sges_in_segment = (ioc->request_sz -
   2854		   offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
   2855	if (sges_left <= sges_in_segment)
   2856		goto fill_in_last_segment;
   2857
   2858	mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
   2859	    (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
   2860
   2861	/* fill in main message segment when there is a chain following */
   2862	while (sges_in_segment > 1) {
   2863		_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
   2864		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
   2865		sg_scmd = sg_next(sg_scmd);
   2866		sg_local += ioc->sge_size_ieee;
   2867		sges_left--;
   2868		sges_in_segment--;
   2869	}
   2870
   2871	/* initializing the pointers */
   2872	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
   2873	if (!chain_req)
   2874		return -1;
   2875	chain = chain_req->chain_buffer;
   2876	chain_dma = chain_req->chain_buffer_dma;
   2877	do {
   2878		sges_in_segment = (sges_left <=
   2879		    ioc->max_sges_in_chain_message) ? sges_left :
   2880		    ioc->max_sges_in_chain_message;
   2881		chain_offset = (sges_left == sges_in_segment) ?
   2882		    0 : sges_in_segment;
   2883		chain_length = sges_in_segment * ioc->sge_size_ieee;
   2884		if (chain_offset)
   2885			chain_length += ioc->sge_size_ieee;
   2886		_base_add_sg_single_ieee(sg_local, chain_sgl_flags,
   2887		    chain_offset, chain_length, chain_dma);
   2888
   2889		sg_local = chain;
   2890		if (!chain_offset)
   2891			goto fill_in_last_segment;
   2892
   2893		/* fill in chain segments */
   2894		while (sges_in_segment) {
   2895			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
   2896			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
   2897			sg_scmd = sg_next(sg_scmd);
   2898			sg_local += ioc->sge_size_ieee;
   2899			sges_left--;
   2900			sges_in_segment--;
   2901		}
   2902
   2903		chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
   2904		if (!chain_req)
   2905			return -1;
   2906		chain = chain_req->chain_buffer;
   2907		chain_dma = chain_req->chain_buffer_dma;
   2908	} while (1);
   2909
   2910
   2911 fill_in_last_segment:
   2912
   2913	/* fill the last segment */
   2914	while (sges_left > 0) {
   2915		if (sges_left == 1)
   2916			_base_add_sg_single_ieee(sg_local,
   2917			    simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
   2918			    sg_dma_address(sg_scmd));
   2919		else
   2920			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
   2921			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
   2922		sg_scmd = sg_next(sg_scmd);
   2923		sg_local += ioc->sge_size_ieee;
   2924		sges_left--;
   2925	}
   2926
   2927	return 0;
   2928}
   2929
   2930/**
   2931 * _base_build_sg_ieee - build generic sg for IEEE format
   2932 * @ioc: per adapter object
   2933 * @psge: virtual address for SGE
   2934 * @data_out_dma: physical address for WRITES
   2935 * @data_out_sz: data xfer size for WRITES
   2936 * @data_in_dma: physical address for READS
   2937 * @data_in_sz: data xfer size for READS
   2938 */
   2939static void
   2940_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
   2941	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
   2942	size_t data_in_sz)
   2943{
   2944	u8 sgl_flags;
   2945
   2946	if (!data_out_sz && !data_in_sz) {
   2947		_base_build_zero_len_sge_ieee(ioc, psge);
   2948		return;
   2949	}
   2950
   2951	if (data_out_sz && data_in_sz) {
   2952		/* WRITE sgel first */
   2953		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
   2954		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
   2955		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
   2956		    data_out_dma);
   2957
   2958		/* incr sgel */
   2959		psge += ioc->sge_size_ieee;
   2960
   2961		/* READ sgel last */
   2962		sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
   2963		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
   2964		    data_in_dma);
   2965	} else if (data_out_sz) /* WRITE */ {
   2966		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
   2967		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
   2968		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
   2969		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
   2970		    data_out_dma);
   2971	} else if (data_in_sz) /* READ */ {
   2972		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
   2973		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
   2974		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
   2975		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
   2976		    data_in_dma);
   2977	}
   2978}
   2979
   2980#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
   2981
   2982/**
   2983 * _base_config_dma_addressing - set dma addressing
   2984 * @ioc: per adapter object
   2985 * @pdev: PCI device struct
   2986 *
   2987 * Return: 0 for success, non-zero for failure.
   2988 */
   2989static int
   2990_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
   2991{
   2992	struct sysinfo s;
   2993
   2994	if (ioc->is_mcpu_endpoint ||
   2995	    sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
   2996	    dma_get_required_mask(&pdev->dev) <= 32)
   2997		ioc->dma_mask = 32;
   2998	/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
   2999	else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
   3000		ioc->dma_mask = 63;
   3001	else
   3002		ioc->dma_mask = 64;
   3003
   3004	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) ||
   3005	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)))
   3006		return -ENODEV;
   3007
   3008	if (ioc->dma_mask > 32) {
   3009		ioc->base_add_sg_single = &_base_add_sg_single_64;
   3010		ioc->sge_size = sizeof(Mpi2SGESimple64_t);
   3011	} else {
   3012		ioc->base_add_sg_single = &_base_add_sg_single_32;
   3013		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
   3014	}
   3015
   3016	si_meminfo(&s);
   3017	ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
   3018		ioc->dma_mask, convert_to_kb(s.totalram));
   3019
   3020	return 0;
   3021}
   3022
   3023/**
    3024 * _base_check_enable_msix - checks MSI-X capability.
   3025 * @ioc: per adapter object
   3026 *
   3027 * Check to see if card is capable of MSIX, and set number
   3028 * of available msix vectors
   3029 */
   3030static int
   3031_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
   3032{
   3033	int base;
   3034	u16 message_control;
   3035
    3036	/* Check whether the controller is a SAS2008 B0 controller;
    3037	 * if it is, use IO-APIC instead of MSI-X.
   3038	 */
   3039	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
   3040	    ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
   3041		return -EINVAL;
   3042	}
   3043
   3044	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
   3045	if (!base) {
   3046		dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
   3047		return -EINVAL;
   3048	}
   3049
   3050	/* get msix vector count */
   3051	/* NUMA_IO not supported for older controllers */
   3052	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
   3053	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
   3054	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
   3055	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
   3056	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
   3057	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
   3058	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
   3059		ioc->msix_vector_count = 1;
   3060	else {
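		/* The low bits of the MSI-X Message Control word encode the
		 * table size as (vector count - 1); e.g. a value of 0x000F
		 * yields (0xF & 0x3FF) + 1 = 16 vectors.
		 */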
   3061		pci_read_config_word(ioc->pdev, base + 2, &message_control);
   3062		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
   3063	}
   3064	dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
   3065				  ioc->msix_vector_count));
   3066	return 0;
   3067}
   3068
   3069/**
   3070 * mpt3sas_base_free_irq - free irq
   3071 * @ioc: per adapter object
   3072 *
   3073 * Freeing respective reply_queue from the list.
   3074 */
   3075void
   3076mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
   3077{
   3078	unsigned int irq;
   3079	struct adapter_reply_queue *reply_q, *next;
   3080
   3081	if (list_empty(&ioc->reply_queue_list))
   3082		return;
   3083
   3084	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
   3085		list_del(&reply_q->list);
   3086		if (reply_q->is_iouring_poll_q) {
   3087			kfree(reply_q);
   3088			continue;
   3089		}
   3090
   3091		if (ioc->smp_affinity_enable) {
   3092			irq = pci_irq_vector(ioc->pdev, reply_q->msix_index);
   3093			irq_update_affinity_hint(irq, NULL);
   3094		}
   3095		free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
   3096			 reply_q);
   3097		kfree(reply_q);
   3098	}
   3099}
   3100
   3101/**
   3102 * _base_request_irq - request irq
   3103 * @ioc: per adapter object
   3104 * @index: msix index into vector table
   3105 *
   3106 * Inserting respective reply_queue into the list.
   3107 */
   3108static int
   3109_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
   3110{
   3111	struct pci_dev *pdev = ioc->pdev;
   3112	struct adapter_reply_queue *reply_q;
   3113	int r, qid;
   3114
   3115	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
   3116	if (!reply_q) {
   3117		ioc_err(ioc, "unable to allocate memory %zu!\n",
   3118			sizeof(struct adapter_reply_queue));
   3119		return -ENOMEM;
   3120	}
   3121	reply_q->ioc = ioc;
   3122	reply_q->msix_index = index;
   3123
   3124	atomic_set(&reply_q->busy, 0);
   3125
   3126	if (index >= ioc->iopoll_q_start_index) {
   3127		qid = index - ioc->iopoll_q_start_index;
   3128		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d",
   3129		    ioc->driver_name, ioc->id, qid);
   3130		reply_q->is_iouring_poll_q = 1;
   3131		ioc->io_uring_poll_queues[qid].reply_q = reply_q;
   3132		goto out;
   3133	}
   3134
   3135
   3136	if (ioc->msix_enable)
   3137		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
   3138		    ioc->driver_name, ioc->id, index);
   3139	else
   3140		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
   3141		    ioc->driver_name, ioc->id);
   3142	r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
   3143			IRQF_SHARED, reply_q->name, reply_q);
   3144	if (r) {
   3145		pr_err("%s: unable to allocate interrupt %d!\n",
   3146		       reply_q->name, pci_irq_vector(pdev, index));
   3147		kfree(reply_q);
   3148		return -EBUSY;
   3149	}
   3150out:
   3151	INIT_LIST_HEAD(&reply_q->list);
   3152	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
   3153	return 0;
   3154}
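
/*
 * Illustrative sketch (not part of the driver): pci_irq_vector() maps a
 * vector index to the Linux IRQ number expected by request_irq()/free_irq(),
 * which is the pattern used in _base_request_irq() above and in
 * mpt3sas_base_free_irq(). The handler and names below are hypothetical.
 */
#if 0
static irqreturn_t example_handler(int irq, void *dev_id)
{
	/* A real handler would walk the reply descriptor post queue here. */
	return IRQ_HANDLED;
}

static int example_request_vector(struct pci_dev *pdev, int index, void *ctx)
{
	return request_irq(pci_irq_vector(pdev, index), example_handler,
			   IRQF_SHARED, "example-msix", ctx);
}
#endif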
   3155
   3156/**
   3157 * _base_assign_reply_queues - assigning msix index for each cpu
   3158 * @ioc: per adapter object
   3159 *
    3160	 * The end user would need to set the affinity via /proc/irq/#/smp_affinity
   3161 */
   3162static void
   3163_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
   3164{
   3165	unsigned int cpu, nr_cpus, nr_msix, index = 0, irq;
   3166	struct adapter_reply_queue *reply_q;
   3167	int iopoll_q_count = ioc->reply_queue_count -
   3168	    ioc->iopoll_q_start_index;
   3169	const struct cpumask *mask;
   3170
   3171	if (!_base_is_controller_msix_enabled(ioc))
   3172		return;
   3173
   3174	if (ioc->msix_load_balance)
   3175		return;
   3176
   3177	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
   3178
   3179	nr_cpus = num_online_cpus();
   3180	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
   3181					       ioc->facts.MaxMSIxVectors);
   3182	if (!nr_msix)
   3183		return;
   3184
   3185	if (ioc->smp_affinity_enable) {
   3186
   3187		/*
   3188		 * set irq affinity to local numa node for those irqs
   3189		 * corresponding to high iops queues.
   3190		 */
   3191		if (ioc->high_iops_queues) {
   3192			mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev));
   3193			for (index = 0; index < ioc->high_iops_queues;
   3194			    index++) {
   3195				irq = pci_irq_vector(ioc->pdev, index);
   3196				irq_set_affinity_and_hint(irq, mask);
   3197			}
   3198		}
   3199
   3200		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
   3201			const cpumask_t *mask;
   3202
   3203			if (reply_q->msix_index < ioc->high_iops_queues ||
   3204			    reply_q->msix_index >= ioc->iopoll_q_start_index)
   3205				continue;
   3206
   3207			mask = pci_irq_get_affinity(ioc->pdev,
   3208			    reply_q->msix_index);
   3209			if (!mask) {
   3210				ioc_warn(ioc, "no affinity for msi %x\n",
   3211					 reply_q->msix_index);
   3212				goto fall_back;
   3213			}
   3214
   3215			for_each_cpu_and(cpu, mask, cpu_online_mask) {
   3216				if (cpu >= ioc->cpu_msix_table_sz)
   3217					break;
   3218				ioc->cpu_msix_table[cpu] = reply_q->msix_index;
   3219			}
   3220		}
   3221		return;
   3222	}
   3223
   3224fall_back:
   3225	cpu = cpumask_first(cpu_online_mask);
   3226	nr_msix -= (ioc->high_iops_queues - iopoll_q_count);
   3227	index = 0;
   3228
   3229	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
   3230		unsigned int i, group = nr_cpus / nr_msix;
   3231
   3232		if (reply_q->msix_index < ioc->high_iops_queues ||
   3233		    reply_q->msix_index >= ioc->iopoll_q_start_index)
   3234			continue;
   3235
   3236		if (cpu >= nr_cpus)
   3237			break;
   3238
   3239		if (index < nr_cpus % nr_msix)
   3240			group++;
   3241
   3242		for (i = 0 ; i < group ; i++) {
   3243			ioc->cpu_msix_table[cpu] = reply_q->msix_index;
   3244			cpu = cpumask_next(cpu, cpu_online_mask);
   3245		}
   3246		index++;
   3247	}
   3248}
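
/*
 * Illustrative sketch (not part of the driver): the fall-back loop above
 * spreads nr_cpus across nr_msix reply queues, giving each queue
 * nr_cpus / nr_msix CPUs and handing one extra CPU to the first
 * nr_cpus % nr_msix queues. Hypothetical helper, shown only to make that
 * split explicit.
 */
#if 0
static inline unsigned int example_cpus_for_queue(unsigned int nr_cpus,
		unsigned int nr_msix, unsigned int queue_index)
{
	unsigned int group = nr_cpus / nr_msix;

	if (queue_index < nr_cpus % nr_msix)
		group++;	/* remainder CPUs land on the first queues */
	return group;
}
#endif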
   3249
   3250/**
   3251 * _base_check_and_enable_high_iops_queues - enable high iops mode
   3252 * @ioc: per adapter object
   3253 * @hba_msix_vector_count: msix vectors supported by HBA
   3254 *
   3255 * Enable high iops queues only if
   3256 *  - HBA is a SEA/AERO controller and
    3257 *  - the number of MSI-X vectors supported by the HBA is 128 and
    3258 *  - the total CPU count in the system is >= 16 and
    3259 *  - the driver is loaded with the default max_msix_vectors module parameter and
    3260 *  - the system is booted in non-kdump mode
   3261 *
   3262 * Return: nothing.
   3263 */
   3264static void
   3265_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
   3266		int hba_msix_vector_count)
   3267{
   3268	u16 lnksta, speed;
   3269
   3270	/*
    3271	 * Disable high iops queues if the perf mode is IOPS or latency, or if io uring poll queues are enabled.
   3272	 */
   3273	if (perf_mode == MPT_PERF_MODE_IOPS ||
   3274	    perf_mode == MPT_PERF_MODE_LATENCY ||
   3275	    ioc->io_uring_poll_queues) {
   3276		ioc->high_iops_queues = 0;
   3277		return;
   3278	}
   3279
   3280	if (perf_mode == MPT_PERF_MODE_DEFAULT) {
   3281
   3282		pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
   3283		speed = lnksta & PCI_EXP_LNKSTA_CLS;
   3284
   3285		if (speed < 0x4) {
   3286			ioc->high_iops_queues = 0;
   3287			return;
   3288		}
   3289	}
   3290
   3291	if (!reset_devices && ioc->is_aero_ioc &&
   3292	    hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
   3293	    num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
   3294	    max_msix_vectors == -1)
   3295		ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
   3296	else
   3297		ioc->high_iops_queues = 0;
   3298}
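
/*
 * Illustrative sketch (not part of the driver): the speed check above reads
 * the Current Link Speed field of the PCIe Link Status register; an encoding
 * of 0x4 corresponds to 16 GT/s, so high iops queues stay disabled on slower
 * links in the default perf mode. Hypothetical helper, for clarity only.
 */
#if 0
static bool example_link_is_16gt_or_faster(struct pci_dev *pdev)
{
	u16 lnksta = 0;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
	return (lnksta & PCI_EXP_LNKSTA_CLS) >= 0x4;
}
#endif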
   3299
   3300/**
   3301 * mpt3sas_base_disable_msix - disables msix
   3302 * @ioc: per adapter object
   3303 *
   3304 */
   3305void
   3306mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
   3307{
   3308	if (!ioc->msix_enable)
   3309		return;
   3310	pci_free_irq_vectors(ioc->pdev);
   3311	ioc->msix_enable = 0;
   3312	kfree(ioc->io_uring_poll_queues);
   3313}
   3314
   3315/**
   3316 * _base_alloc_irq_vectors - allocate msix vectors
   3317 * @ioc: per adapter object
   3318 *
   3319 */
   3320static int
   3321_base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
   3322{
   3323	int i, irq_flags = PCI_IRQ_MSIX;
   3324	struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
   3325	struct irq_affinity *descp = &desc;
   3326	/*
   3327	 * Don't allocate msix vectors for poll_queues.
    3328	 * msix_vectors is always within the range of FW-supported reply queues.
   3329	 */
   3330	int nr_msix_vectors = ioc->iopoll_q_start_index;
   3331
   3332
   3333	if (ioc->smp_affinity_enable)
   3334		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
   3335	else
   3336		descp = NULL;
   3337
    3338	ioc_info(ioc, "high_iops_queues(%d), reply_queue_count(%d), nr_msix_vectors(%d)\n",
    3339	    ioc->high_iops_queues, ioc->reply_queue_count, nr_msix_vectors);
   3340
   3341	i = pci_alloc_irq_vectors_affinity(ioc->pdev,
   3342	    ioc->high_iops_queues,
   3343	    nr_msix_vectors, irq_flags, descp);
   3344
   3345	return i;
   3346}
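
/*
 * Illustrative sketch (not part of the driver): struct irq_affinity's
 * pre_vectors field, as used above, excludes the first vectors from the
 * automatic affinity spreading done by PCI_IRQ_AFFINITY; the driver reserves
 * those for the high iops queues and pins them to the local NUMA node itself
 * in _base_assign_reply_queues(). The counts below are made-up examples.
 */
#if 0
static int example_alloc_vectors(struct pci_dev *pdev)
{
	struct irq_affinity desc = { .pre_vectors = 2 };

	/* Request between 2 and 16 MSI-X vectors, spreading the rest over CPUs. */
	return pci_alloc_irq_vectors_affinity(pdev, 2, 16,
			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
}
#endif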
   3347
   3348/**
    3349 * _base_enable_msix - enables msix, falls back to io_apic
   3350 * @ioc: per adapter object
   3351 *
   3352 */
   3353static int
   3354_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
   3355{
   3356	int r;
   3357	int i, local_max_msix_vectors;
   3358	u8 try_msix = 0;
   3359	int iopoll_q_count = 0;
   3360
   3361	ioc->msix_load_balance = false;
   3362
   3363	if (msix_disable == -1 || msix_disable == 0)
   3364		try_msix = 1;
   3365
   3366	if (!try_msix)
   3367		goto try_ioapic;
   3368
   3369	if (_base_check_enable_msix(ioc) != 0)
   3370		goto try_ioapic;
   3371
   3372	ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
   3373	pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
   3374		ioc->cpu_count, max_msix_vectors);
   3375
   3376	ioc->reply_queue_count =
   3377		min_t(int, ioc->cpu_count, ioc->msix_vector_count);
   3378
   3379	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
   3380		local_max_msix_vectors = (reset_devices) ? 1 : 8;
   3381	else
   3382		local_max_msix_vectors = max_msix_vectors;
   3383
   3384	if (local_max_msix_vectors == 0)
   3385		goto try_ioapic;
   3386
   3387	/*
   3388	 * Enable msix_load_balance only if combined reply queue mode is
   3389	 * disabled on SAS3 & above generation HBA devices.
   3390	 */
   3391	if (!ioc->combined_reply_queue &&
   3392	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
   3393		ioc_info(ioc,
   3394		    "combined ReplyQueue is off, Enabling msix load balance\n");
   3395		ioc->msix_load_balance = true;
   3396	}
   3397
   3398	/*
    3399	 * smp affinity setting is not needed when msix load balance
   3400	 * is enabled.
   3401	 */
   3402	if (ioc->msix_load_balance)
   3403		ioc->smp_affinity_enable = 0;
   3404
   3405	if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1)
   3406		ioc->shost->host_tagset = 0;
   3407
   3408	/*
   3409	 * Enable io uring poll queues only if host_tagset is enabled.
   3410	 */
   3411	if (ioc->shost->host_tagset)
   3412		iopoll_q_count = poll_queues;
   3413
   3414	if (iopoll_q_count) {
   3415		ioc->io_uring_poll_queues = kcalloc(iopoll_q_count,
   3416		    sizeof(struct io_uring_poll_queue), GFP_KERNEL);
   3417		if (!ioc->io_uring_poll_queues)
   3418			iopoll_q_count = 0;
   3419	}
   3420
   3421	if (ioc->is_aero_ioc)
   3422		_base_check_and_enable_high_iops_queues(ioc,
   3423		    ioc->msix_vector_count);
   3424
   3425	/*
   3426	 * Add high iops queues count to reply queue count if high iops queues
   3427	 * are enabled.
   3428	 */
   3429	ioc->reply_queue_count = min_t(int,
   3430	    ioc->reply_queue_count + ioc->high_iops_queues,
   3431	    ioc->msix_vector_count);
   3432
   3433	/*
    3434	 * Adjust the reply queue count in case it
    3435	 * exceeds the user-provided MSI-X vector count.
   3436	 */
   3437	if (local_max_msix_vectors > 0)
   3438		ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
   3439		    ioc->reply_queue_count);
   3440	/*
   3441	 * Add io uring poll queues count to reply queues count
   3442	 * if io uring is enabled in driver.
   3443	 */
   3444	if (iopoll_q_count) {
   3445		if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS))
   3446			iopoll_q_count = 0;
   3447		ioc->reply_queue_count = min_t(int,
   3448		    ioc->reply_queue_count + iopoll_q_count,
   3449		    ioc->msix_vector_count);
   3450	}
   3451
   3452	/*
   3453	 * Starting index of io uring poll queues in reply queue list.
   3454	 */
   3455	ioc->iopoll_q_start_index =
   3456	    ioc->reply_queue_count - iopoll_q_count;
   3457
   3458	r = _base_alloc_irq_vectors(ioc);
   3459	if (r < 0) {
   3460		ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
   3461		goto try_ioapic;
   3462	}
   3463
   3464	/*
   3465	 * Adjust the reply queue count if the allocated
    3466	 * MSI-X vectors are fewer than the requested number
    3467	 * of MSI-X vectors.
   3468	 */
   3469	if (r < ioc->iopoll_q_start_index) {
   3470		ioc->reply_queue_count = r + iopoll_q_count;
   3471		ioc->iopoll_q_start_index =
   3472		    ioc->reply_queue_count - iopoll_q_count;
   3473	}
   3474
   3475	ioc->msix_enable = 1;
   3476	for (i = 0; i < ioc->reply_queue_count; i++) {
   3477		r = _base_request_irq(ioc, i);
   3478		if (r) {
   3479			mpt3sas_base_free_irq(ioc);
   3480			mpt3sas_base_disable_msix(ioc);
   3481			goto try_ioapic;
   3482		}
   3483	}
   3484
   3485	ioc_info(ioc, "High IOPs queues : %s\n",
   3486			ioc->high_iops_queues ? "enabled" : "disabled");
   3487
   3488	return 0;
   3489
    3490/* fall back to io_apic interrupt routing */
   3491 try_ioapic:
   3492	ioc->high_iops_queues = 0;
   3493	ioc_info(ioc, "High IOPs queues : disabled\n");
   3494	ioc->reply_queue_count = 1;
   3495	ioc->iopoll_q_start_index = ioc->reply_queue_count - 0;
   3496	r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
   3497	if (r < 0) {
   3498		dfailprintk(ioc,
   3499			    ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
   3500				     r));
   3501	} else
   3502		r = _base_request_irq(ioc, 0);
   3503
   3504	return r;
   3505}
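
/*
 * Illustrative sketch (not part of the driver): after _base_enable_msix()
 * above, reply queue indexes [0, iopoll_q_start_index) are interrupt driven
 * and [iopoll_q_start_index, reply_queue_count) are io_uring poll queues
 * that never call request_irq(). Hypothetical helper, for clarity only.
 */
#if 0
static inline bool example_is_poll_queue(struct MPT3SAS_ADAPTER *ioc, u8 index)
{
	return index >= ioc->iopoll_q_start_index;
}
#endif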
   3506
   3507/**
   3508 * mpt3sas_base_unmap_resources - free controller resources
   3509 * @ioc: per adapter object
   3510 */
   3511static void
   3512mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
   3513{
   3514	struct pci_dev *pdev = ioc->pdev;
   3515
   3516	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   3517
   3518	mpt3sas_base_free_irq(ioc);
   3519	mpt3sas_base_disable_msix(ioc);
   3520
   3521	kfree(ioc->replyPostRegisterIndex);
   3522	ioc->replyPostRegisterIndex = NULL;
   3523
   3524
   3525	if (ioc->chip_phys) {
   3526		iounmap(ioc->chip);
   3527		ioc->chip_phys = 0;
   3528	}
   3529
   3530	if (pci_is_enabled(pdev)) {
   3531		pci_release_selected_regions(ioc->pdev, ioc->bars);
   3532		pci_disable_pcie_error_reporting(pdev);
   3533		pci_disable_device(pdev);
   3534	}
   3535}
   3536
   3537static int
   3538_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
   3539
   3540/**
   3541 * mpt3sas_base_check_for_fault_and_issue_reset - check if IOC is in fault state
    3542 *     and, if so, issue a diag reset.
   3543 * @ioc: per adapter object
   3544 *
   3545 * Return: 0 for success, non-zero for failure.
   3546 */
   3547int
   3548mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
   3549{
   3550	u32 ioc_state;
   3551	int rc = -EFAULT;
   3552
   3553	dinitprintk(ioc, pr_info("%s\n", __func__));
   3554	if (ioc->pci_error_recovery)
   3555		return 0;
   3556	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
   3557	dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
   3558
   3559	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
   3560		mpt3sas_print_fault_code(ioc, ioc_state &
   3561		    MPI2_DOORBELL_DATA_MASK);
   3562		mpt3sas_base_mask_interrupts(ioc);
   3563		rc = _base_diag_reset(ioc);
   3564	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
   3565	    MPI2_IOC_STATE_COREDUMP) {
   3566		mpt3sas_print_coredump_info(ioc, ioc_state &
   3567		     MPI2_DOORBELL_DATA_MASK);
   3568		mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
   3569		mpt3sas_base_mask_interrupts(ioc);
   3570		rc = _base_diag_reset(ioc);
   3571	}
   3572
   3573	return rc;
   3574}
   3575
   3576/**
   3577 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
   3578 * @ioc: per adapter object
   3579 *
   3580 * Return: 0 for success, non-zero for failure.
   3581 */
   3582int
   3583mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
   3584{
   3585	struct pci_dev *pdev = ioc->pdev;
   3586	u32 memap_sz;
   3587	u32 pio_sz;
   3588	int i, r = 0, rc;
   3589	u64 pio_chip = 0;
   3590	phys_addr_t chip_phys = 0;
   3591	struct adapter_reply_queue *reply_q;
   3592	int iopoll_q_count = 0;
   3593
   3594	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   3595
   3596	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
   3597	if (pci_enable_device_mem(pdev)) {
   3598		ioc_warn(ioc, "pci_enable_device_mem: failed\n");
   3599		ioc->bars = 0;
   3600		return -ENODEV;
   3601	}
   3602
   3603
   3604	if (pci_request_selected_regions(pdev, ioc->bars,
   3605	    ioc->driver_name)) {
   3606		ioc_warn(ioc, "pci_request_selected_regions: failed\n");
   3607		ioc->bars = 0;
   3608		r = -ENODEV;
   3609		goto out_fail;
   3610	}
   3611
   3612/* AER (Advanced Error Reporting) hooks */
   3613	pci_enable_pcie_error_reporting(pdev);
   3614
   3615	pci_set_master(pdev);
   3616
   3617
   3618	if (_base_config_dma_addressing(ioc, pdev) != 0) {
   3619		ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
   3620		r = -ENODEV;
   3621		goto out_fail;
   3622	}
   3623
   3624	for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
   3625	     (!memap_sz || !pio_sz); i++) {
   3626		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
   3627			if (pio_sz)
   3628				continue;
   3629			pio_chip = (u64)pci_resource_start(pdev, i);
   3630			pio_sz = pci_resource_len(pdev, i);
   3631		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
   3632			if (memap_sz)
   3633				continue;
   3634			ioc->chip_phys = pci_resource_start(pdev, i);
   3635			chip_phys = ioc->chip_phys;
   3636			memap_sz = pci_resource_len(pdev, i);
   3637			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
   3638		}
   3639	}
   3640
   3641	if (ioc->chip == NULL) {
   3642		ioc_err(ioc,
    3643		    "unable to map adapter memory or resource not found\n");
   3644		r = -EINVAL;
   3645		goto out_fail;
   3646	}
   3647
   3648	mpt3sas_base_mask_interrupts(ioc);
   3649
   3650	r = _base_get_ioc_facts(ioc);
   3651	if (r) {
   3652		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
   3653		if (rc || (_base_get_ioc_facts(ioc)))
   3654			goto out_fail;
   3655	}
   3656
   3657	if (!ioc->rdpq_array_enable_assigned) {
   3658		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
   3659		ioc->rdpq_array_enable_assigned = 1;
   3660	}
   3661
   3662	r = _base_enable_msix(ioc);
   3663	if (r)
   3664		goto out_fail;
   3665
   3666	iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
   3667	for (i = 0; i < iopoll_q_count; i++) {
   3668		atomic_set(&ioc->io_uring_poll_queues[i].busy, 0);
   3669		atomic_set(&ioc->io_uring_poll_queues[i].pause, 0);
   3670	}
   3671
   3672	if (!ioc->is_driver_loading)
   3673		_base_init_irqpolls(ioc);
   3674	/* Use the Combined reply queue feature only for SAS3 C0 & higher
   3675	 * revision HBAs and also only when reply queue count is greater than 8
   3676	 */
   3677	if (ioc->combined_reply_queue) {
    3678		/* Determine the Supplemental Reply Post Host Index Register
    3679		 * addresses. The Supplemental Reply Post Host Index Registers
    3680		 * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
    3681		 * each register is offset by
    3682		 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET bytes from the previous one.
   3683		 */
   3684		ioc->replyPostRegisterIndex = kcalloc(
   3685		     ioc->combined_reply_index_count,
   3686		     sizeof(resource_size_t *), GFP_KERNEL);
   3687		if (!ioc->replyPostRegisterIndex) {
   3688			ioc_err(ioc,
   3689			    "allocation for replyPostRegisterIndex failed!\n");
   3690			r = -ENOMEM;
   3691			goto out_fail;
   3692		}
   3693
   3694		for (i = 0; i < ioc->combined_reply_index_count; i++) {
   3695			ioc->replyPostRegisterIndex[i] =
   3696				(resource_size_t __iomem *)
   3697				((u8 __force *)&ioc->chip->Doorbell +
   3698				 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
   3699				 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
   3700		}
   3701	}
   3702
   3703	if (ioc->is_warpdrive) {
   3704		ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
   3705		    &ioc->chip->ReplyPostHostIndex;
   3706
   3707		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
   3708			ioc->reply_post_host_index[i] =
   3709			(resource_size_t __iomem *)
   3710			((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
   3711			* 4)));
   3712	}
   3713
   3714	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
   3715		if (reply_q->msix_index >= ioc->iopoll_q_start_index) {
   3716			pr_info("%s: enabled: index: %d\n",
   3717			    reply_q->name, reply_q->msix_index);
   3718			continue;
   3719		}
   3720
   3721		pr_info("%s: %s enabled: IRQ %d\n",
   3722			reply_q->name,
   3723			ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
   3724			pci_irq_vector(ioc->pdev, reply_q->msix_index));
   3725	}
   3726
   3727	ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
   3728		 &chip_phys, ioc->chip, memap_sz);
   3729	ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
   3730		 (unsigned long long)pio_chip, pio_sz);
   3731
   3732	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
   3733	pci_save_state(pdev);
   3734	return 0;
   3735
   3736 out_fail:
   3737	mpt3sas_base_unmap_resources(ioc);
   3738	return r;
   3739}
   3740
   3741/**
   3742 * mpt3sas_base_get_msg_frame - obtain request mf pointer
   3743 * @ioc: per adapter object
   3744 * @smid: system request message index(smid zero is invalid)
   3745 *
   3746 * Return: virt pointer to message frame.
   3747 */
   3748void *
   3749mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
   3750{
   3751	return (void *)(ioc->request + (smid * ioc->request_sz));
   3752}
   3753
   3754/**
   3755 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
   3756 * @ioc: per adapter object
   3757 * @smid: system request message index
   3758 *
   3759 * Return: virt pointer to sense buffer.
   3760 */
   3761void *
   3762mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
   3763{
   3764	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
   3765}
   3766
   3767/**
   3768 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
   3769 * @ioc: per adapter object
   3770 * @smid: system request message index
   3771 *
   3772 * Return: phys pointer to the low 32bit address of the sense buffer.
   3773 */
   3774__le32
   3775mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
   3776{
   3777	return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
   3778	    SCSI_SENSE_BUFFERSIZE));
   3779}
   3780
   3781/**
   3782 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
   3783 * @ioc: per adapter object
   3784 * @smid: system request message index
   3785 *
   3786 * Return: virt pointer to a PCIe SGL.
   3787 */
   3788void *
   3789mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
   3790{
   3791	return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
   3792}
   3793
   3794/**
   3795 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
   3796 * @ioc: per adapter object
   3797 * @smid: system request message index
   3798 *
   3799 * Return: phys pointer to the address of the PCIe buffer.
   3800 */
   3801dma_addr_t
   3802mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
   3803{
   3804	return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
   3805}
   3806
   3807/**
   3808 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
   3809 * @ioc: per adapter object
   3810 * @phys_addr: lower 32 physical addr of the reply
   3811 *
   3812 * Converts 32bit lower physical addr into a virt address.
   3813 */
   3814void *
   3815mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
   3816{
   3817	if (!phys_addr)
   3818		return NULL;
   3819	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
   3820}
   3821
   3822/**
   3823 * _base_get_msix_index - get the msix index
   3824 * @ioc: per adapter object
   3825 * @scmd: scsi_cmnd object
   3826 *
   3827 * Return: msix index of general reply queues,
   3828 * i.e. reply queue on which IO request's reply
   3829 * should be posted by the HBA firmware.
   3830 */
   3831static inline u8
   3832_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
   3833	struct scsi_cmnd *scmd)
   3834{
   3835	/* Enables reply_queue load balancing */
   3836	if (ioc->msix_load_balance)
   3837		return ioc->reply_queue_count ?
   3838		    base_mod64(atomic64_add_return(1,
   3839		    &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
   3840
   3841	if (scmd && ioc->shost->nr_hw_queues > 1) {
   3842		u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
   3843
   3844		return blk_mq_unique_tag_to_hwq(tag) +
   3845			ioc->high_iops_queues;
   3846	}
   3847
   3848	return ioc->cpu_msix_table[raw_smp_processor_id()];
   3849}
   3850
   3851/**
   3852 * _base_get_high_iops_msix_index - get the msix index of
   3853 *				high iops queues
   3854 * @ioc: per adapter object
   3855 * @scmd: scsi_cmnd object
   3856 *
   3857 * Return: msix index of high iops reply queues.
   3858 * i.e. high iops reply queue on which IO request's
   3859 * reply should be posted by the HBA firmware.
   3860 */
   3861static inline u8
   3862_base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
   3863	struct scsi_cmnd *scmd)
   3864{
    3865	/*
    3866	 * Round robin the IO interrupts among the high iops
    3867	 * reply queues in batches of 16 when the number of outstanding
    3868	 * IOs on the target device is >= 8.
    3869	 */
   3870
   3871	if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
   3872		return base_mod64((
   3873		    atomic64_add_return(1, &ioc->high_iops_outstanding) /
   3874		    MPT3SAS_HIGH_IOPS_BATCH_COUNT),
   3875		    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
   3876
   3877	return _base_get_msix_index(ioc, scmd);
   3878}
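
/*
 * Illustrative sketch (not part of the driver): the expression above keeps
 * each high iops reply queue busy for MPT3SAS_HIGH_IOPS_BATCH_COUNT
 * consecutive submissions before advancing; base_mod64() performs the 64-bit
 * modulo portably. With 32-bit counters the same mapping can be written
 * directly, as in this hypothetical helper.
 */
#if 0
static inline u8 example_batched_round_robin(u32 submission_count,
		u32 batch, u32 nr_queues)
{
	/* Stay on one queue for 'batch' submissions, then move to the next. */
	return (submission_count / batch) % nr_queues;
}
#endif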
   3879
   3880/**
   3881 * mpt3sas_base_get_smid - obtain a free smid from internal queue
   3882 * @ioc: per adapter object
   3883 * @cb_idx: callback index
   3884 *
   3885 * Return: smid (zero is invalid)
   3886 */
   3887u16
   3888mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
   3889{
   3890	unsigned long flags;
   3891	struct request_tracker *request;
   3892	u16 smid;
   3893
   3894	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
   3895	if (list_empty(&ioc->internal_free_list)) {
   3896		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
   3897		ioc_err(ioc, "%s: smid not available\n", __func__);
   3898		return 0;
   3899	}
   3900
   3901	request = list_entry(ioc->internal_free_list.next,
   3902	    struct request_tracker, tracker_list);
   3903	request->cb_idx = cb_idx;
   3904	smid = request->smid;
   3905	list_del(&request->tracker_list);
   3906	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
   3907	return smid;
   3908}
   3909
   3910/**
   3911 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
   3912 * @ioc: per adapter object
   3913 * @cb_idx: callback index
   3914 * @scmd: pointer to scsi command object
   3915 *
   3916 * Return: smid (zero is invalid)
   3917 */
   3918u16
   3919mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
   3920	struct scsi_cmnd *scmd)
   3921{
   3922	struct scsiio_tracker *request = scsi_cmd_priv(scmd);
   3923	u16 smid;
   3924	u32 tag, unique_tag;
   3925
   3926	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
   3927	tag = blk_mq_unique_tag_to_tag(unique_tag);
   3928
   3929	/*
   3930	 * Store hw queue number corresponding to the tag.
   3931	 * This hw queue number is used later to determine
   3932	 * the unique_tag using the logic below. This unique_tag
   3933	 * is used to retrieve the scmd pointer corresponding
   3934	 * to tag using scsi_host_find_tag() API.
   3935	 *
   3936	 * tag = smid - 1;
   3937	 * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
   3938	 */
   3939	ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag);
   3940
   3941	smid = tag + 1;
   3942	request->cb_idx = cb_idx;
   3943	request->smid = smid;
   3944	request->scmd = scmd;
   3945	INIT_LIST_HEAD(&request->chain_list);
   3946	return smid;
   3947}
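
/*
 * Illustrative sketch (not part of the driver): per the comment above, the
 * saved hw queue number lets an smid be turned back into the block layer
 * unique tag and resolved to its scsi_cmnd. A minimal sketch under those
 * assumptions; the driver's real lookup helper lives elsewhere.
 */
#if 0
static struct scsi_cmnd *example_scmd_from_smid(struct MPT3SAS_ADAPTER *ioc,
		u16 smid)
{
	u32 tag = smid - 1;
	u32 unique_tag = (ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS) | tag;

	return scsi_host_find_tag(ioc->shost, unique_tag);
}
#endif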
   3948
   3949/**
   3950 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
   3951 * @ioc: per adapter object
   3952 * @cb_idx: callback index
   3953 *
   3954 * Return: smid (zero is invalid)
   3955 */
   3956u16
   3957mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
   3958{
   3959	unsigned long flags;
   3960	struct request_tracker *request;
   3961	u16 smid;
   3962
   3963	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
   3964	if (list_empty(&ioc->hpr_free_list)) {
   3965		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
   3966		return 0;
   3967	}
   3968
   3969	request = list_entry(ioc->hpr_free_list.next,
   3970	    struct request_tracker, tracker_list);
   3971	request->cb_idx = cb_idx;
   3972	smid = request->smid;
   3973	list_del(&request->tracker_list);
   3974	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
   3975	return smid;
   3976}
   3977
   3978static void
   3979_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
   3980{
   3981	/*
   3982	 * See _wait_for_commands_to_complete() call with regards to this code.
   3983	 */
   3984	if (ioc->shost_recovery && ioc->pending_io_count) {
   3985		ioc->pending_io_count = scsi_host_busy(ioc->shost);
   3986		if (ioc->pending_io_count == 0)
   3987			wake_up(&ioc->reset_wq);
   3988	}
   3989}
   3990
   3991void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
   3992			   struct scsiio_tracker *st)
   3993{
   3994	if (WARN_ON(st->smid == 0))
   3995		return;
   3996	st->cb_idx = 0xFF;
   3997	st->direct_io = 0;
   3998	st->scmd = NULL;
   3999	atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
   4000	st->smid = 0;
   4001}
   4002
   4003/**
   4004 * mpt3sas_base_free_smid - put smid back on free_list
   4005 * @ioc: per adapter object
   4006 * @smid: system request message index
   4007 */
   4008void
   4009mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
   4010{
   4011	unsigned long flags;
   4012	int i;
   4013
   4014	if (smid < ioc->hi_priority_smid) {
   4015		struct scsiio_tracker *st;
   4016		void *request;
   4017
   4018		st = _get_st_from_smid(ioc, smid);
   4019		if (!st) {
   4020			_base_recovery_check(ioc);
   4021			return;
   4022		}
   4023
   4024		/* Clear MPI request frame */
   4025		request = mpt3sas_base_get_msg_frame(ioc, smid);
   4026		memset(request, 0, ioc->request_sz);
   4027
   4028		mpt3sas_base_clear_st(ioc, st);
   4029		_base_recovery_check(ioc);
   4030		ioc->io_queue_num[smid - 1] = 0;
   4031		return;
   4032	}
   4033
   4034	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
   4035	if (smid < ioc->internal_smid) {
   4036		/* hi-priority */
   4037		i = smid - ioc->hi_priority_smid;
   4038		ioc->hpr_lookup[i].cb_idx = 0xFF;
   4039		list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
   4040	} else if (smid <= ioc->hba_queue_depth) {
   4041		/* internal queue */
   4042		i = smid - ioc->internal_smid;
   4043		ioc->internal_lookup[i].cb_idx = 0xFF;
   4044		list_add(&ioc->internal_lookup[i].tracker_list,
   4045		    &ioc->internal_free_list);
   4046	}
   4047	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
   4048}
   4049
   4050/**
   4051 * _base_mpi_ep_writeq - 32 bit write to MMIO
   4052 * @b: data payload
   4053 * @addr: address in MMIO space
   4054 * @writeq_lock: spin lock
   4055 *
    4056 * This is special handling for the MPI endpoint to take care of 32 bit
    4057 * environments where it is not guaranteed that the entire word is sent
    4058 * in one transfer.
   4059 */
   4060static inline void
   4061_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
   4062					spinlock_t *writeq_lock)
   4063{
   4064	unsigned long flags;
   4065
   4066	spin_lock_irqsave(writeq_lock, flags);
   4067	__raw_writel((u32)(b), addr);
   4068	__raw_writel((u32)(b >> 32), (addr + 4));
   4069	spin_unlock_irqrestore(writeq_lock, flags);
   4070}
   4071
   4072/**
   4073 * _base_writeq - 64 bit write to MMIO
   4074 * @b: data payload
   4075 * @addr: address in MMIO space
   4076 * @writeq_lock: spin lock
   4077 *
   4078 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
    4079 * care of 32 bit environments where it is not guaranteed that the entire word
    4080 * is sent in one transfer.
   4081 */
   4082#if defined(writeq) && defined(CONFIG_64BIT)
   4083static inline void
   4084_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
   4085{
   4086	wmb();
   4087	__raw_writeq(b, addr);
   4088	barrier();
   4089}
   4090#else
   4091static inline void
   4092_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
   4093{
   4094	_base_mpi_ep_writeq(b, addr, writeq_lock);
   4095}
   4096#endif
   4097
   4098/**
   4099 * _base_set_and_get_msix_index - get the msix index and assign to msix_io
   4100 *                                variable of scsi tracker
   4101 * @ioc: per adapter object
   4102 * @smid: system request message index
   4103 *
   4104 * Return: msix index.
   4105 */
   4106static u8
   4107_base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
   4108{
   4109	struct scsiio_tracker *st = NULL;
   4110
   4111	if (smid < ioc->hi_priority_smid)
   4112		st = _get_st_from_smid(ioc, smid);
   4113
   4114	if (st == NULL)
   4115		return  _base_get_msix_index(ioc, NULL);
   4116
   4117	st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
   4118	return st->msix_io;
   4119}
   4120
   4121/**
   4122 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
   4123 * @ioc: per adapter object
   4124 * @smid: system request message index
   4125 * @handle: device handle
   4126 */
   4127static void
   4128_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
   4129	u16 smid, u16 handle)
   4130{
   4131	Mpi2RequestDescriptorUnion_t descriptor;
   4132	u64 *request = (u64 *)&descriptor;
   4133	void *mpi_req_iomem;
   4134	__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
   4135
   4136	_clone_sg_entries(ioc, (void *) mfp, smid);
   4137	mpi_req_iomem = (void __force *)ioc->chip +
   4138			MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
   4139	_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
   4140					ioc->request_sz);
   4141	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
   4142	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
   4143	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
   4144	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
   4145	descriptor.SCSIIO.LMID = 0;
   4146	_base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
   4147	    &ioc->scsi_lookup_lock);
   4148}
   4149
   4150/**
   4151 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
   4152 * @ioc: per adapter object
   4153 * @smid: system request message index
   4154 * @handle: device handle
   4155 */
   4156static void
   4157_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
   4158{
   4159	Mpi2RequestDescriptorUnion_t descriptor;
   4160	u64 *request = (u64 *)&descriptor;
   4161
   4162
   4163	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
   4164	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
   4165	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
   4166	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
   4167	descriptor.SCSIIO.LMID = 0;
   4168	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
   4169	    &ioc->scsi_lookup_lock);
   4170}
   4171
   4172/**
   4173 * _base_put_smid_fast_path - send fast path request to firmware
   4174 * @ioc: per adapter object
   4175 * @smid: system request message index
   4176 * @handle: device handle
   4177 */
   4178static void
   4179_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
   4180	u16 handle)
   4181{
   4182	Mpi2RequestDescriptorUnion_t descriptor;
   4183	u64 *request = (u64 *)&descriptor;
   4184
   4185	descriptor.SCSIIO.RequestFlags =
   4186	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
   4187	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
   4188	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
   4189	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
   4190	descriptor.SCSIIO.LMID = 0;
   4191	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
   4192	    &ioc->scsi_lookup_lock);
   4193}
   4194
   4195/**
   4196 * _base_put_smid_hi_priority - send Task Management request to firmware
   4197 * @ioc: per adapter object
   4198 * @smid: system request message index
    4199 * @msix_task: msix_task will be the same as the msix of the IO in case of task abort, else 0
   4200 */
   4201static void
   4202_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
   4203	u16 msix_task)
   4204{
   4205	Mpi2RequestDescriptorUnion_t descriptor;
   4206	void *mpi_req_iomem;
   4207	u64 *request;
   4208
   4209	if (ioc->is_mcpu_endpoint) {
   4210		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
   4211
   4212		/* TBD 256 is offset within sys register. */
   4213		mpi_req_iomem = (void __force *)ioc->chip
   4214					+ MPI_FRAME_START_OFFSET
   4215					+ (smid * ioc->request_sz);
   4216		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
   4217							ioc->request_sz);
   4218	}
   4219
   4220	request = (u64 *)&descriptor;
   4221
   4222	descriptor.HighPriority.RequestFlags =
   4223	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
   4224	descriptor.HighPriority.MSIxIndex =  msix_task;
   4225	descriptor.HighPriority.SMID = cpu_to_le16(smid);
   4226	descriptor.HighPriority.LMID = 0;
   4227	descriptor.HighPriority.Reserved1 = 0;
   4228	if (ioc->is_mcpu_endpoint)
   4229		_base_mpi_ep_writeq(*request,
   4230				&ioc->chip->RequestDescriptorPostLow,
   4231				&ioc->scsi_lookup_lock);
   4232	else
   4233		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
   4234		    &ioc->scsi_lookup_lock);
   4235}
   4236
   4237/**
   4238 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
   4239 *  firmware
   4240 * @ioc: per adapter object
   4241 * @smid: system request message index
   4242 */
   4243void
   4244mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
   4245{
   4246	Mpi2RequestDescriptorUnion_t descriptor;
   4247	u64 *request = (u64 *)&descriptor;
   4248
   4249	descriptor.Default.RequestFlags =
   4250		MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
   4251	descriptor.Default.MSIxIndex =  _base_set_and_get_msix_index(ioc, smid);
   4252	descriptor.Default.SMID = cpu_to_le16(smid);
   4253	descriptor.Default.LMID = 0;
   4254	descriptor.Default.DescriptorTypeDependent = 0;
   4255	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
   4256	    &ioc->scsi_lookup_lock);
   4257}
   4258
   4259/**
   4260 * _base_put_smid_default - Default, primarily used for config pages
   4261 * @ioc: per adapter object
   4262 * @smid: system request message index
   4263 */
   4264static void
   4265_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
   4266{
   4267	Mpi2RequestDescriptorUnion_t descriptor;
   4268	void *mpi_req_iomem;
   4269	u64 *request;
   4270
   4271	if (ioc->is_mcpu_endpoint) {
   4272		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
   4273
   4274		_clone_sg_entries(ioc, (void *) mfp, smid);
   4275		/* TBD 256 is offset within sys register */
   4276		mpi_req_iomem = (void __force *)ioc->chip +
   4277			MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
   4278		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
   4279							ioc->request_sz);
   4280	}
   4281	request = (u64 *)&descriptor;
   4282	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
   4283	descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
   4284	descriptor.Default.SMID = cpu_to_le16(smid);
   4285	descriptor.Default.LMID = 0;
   4286	descriptor.Default.DescriptorTypeDependent = 0;
   4287	if (ioc->is_mcpu_endpoint)
   4288		_base_mpi_ep_writeq(*request,
   4289				&ioc->chip->RequestDescriptorPostLow,
   4290				&ioc->scsi_lookup_lock);
   4291	else
   4292		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
   4293				&ioc->scsi_lookup_lock);
   4294}
   4295
   4296/**
   4297 * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
   4298 *   Atomic Request Descriptor
   4299 * @ioc: per adapter object
   4300 * @smid: system request message index
   4301 * @handle: device handle, unused in this function, for function type match
   4302 *
   4303 * Return: nothing.
   4304 */
   4305static void
   4306_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
   4307	u16 handle)
   4308{
   4309	Mpi26AtomicRequestDescriptor_t descriptor;
   4310	u32 *request = (u32 *)&descriptor;
   4311
   4312	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
   4313	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
   4314	descriptor.SMID = cpu_to_le16(smid);
   4315
   4316	writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
   4317}
   4318
   4319/**
   4320 * _base_put_smid_fast_path_atomic - send fast path request to firmware
   4321 * using Atomic Request Descriptor
   4322 * @ioc: per adapter object
   4323 * @smid: system request message index
   4324 * @handle: device handle, unused in this function, for function type match
   4325 * Return: nothing
   4326 */
   4327static void
   4328_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
   4329	u16 handle)
   4330{
   4331	Mpi26AtomicRequestDescriptor_t descriptor;
   4332	u32 *request = (u32 *)&descriptor;
   4333
   4334	descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
   4335	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
   4336	descriptor.SMID = cpu_to_le16(smid);
   4337
   4338	writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
   4339}
   4340
   4341/**
   4342 * _base_put_smid_hi_priority_atomic - send Task Management request to
   4343 * firmware using Atomic Request Descriptor
   4344 * @ioc: per adapter object
   4345 * @smid: system request message index
    4346 * @msix_task: msix_task will be the same as the msix of the IO in case of task abort, else 0
   4347 *
   4348 * Return: nothing.
   4349 */
   4350static void
   4351_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
   4352	u16 msix_task)
   4353{
   4354	Mpi26AtomicRequestDescriptor_t descriptor;
   4355	u32 *request = (u32 *)&descriptor;
   4356
   4357	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
   4358	descriptor.MSIxIndex = msix_task;
   4359	descriptor.SMID = cpu_to_le16(smid);
   4360
   4361	writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
   4362}
   4363
   4364/**
   4365 * _base_put_smid_default_atomic - Default, primarily used for config pages
   4366 * use Atomic Request Descriptor
   4367 * @ioc: per adapter object
   4368 * @smid: system request message index
   4369 *
   4370 * Return: nothing.
   4371 */
   4372static void
   4373_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
   4374{
   4375	Mpi26AtomicRequestDescriptor_t descriptor;
   4376	u32 *request = (u32 *)&descriptor;
   4377
   4378	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
   4379	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
   4380	descriptor.SMID = cpu_to_le16(smid);
   4381
   4382	writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
   4383}
   4384
   4385/**
   4386 * _base_display_OEMs_branding - Display branding string
   4387 * @ioc: per adapter object
   4388 */
   4389static void
   4390_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
   4391{
   4392	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
   4393		return;
   4394
   4395	switch (ioc->pdev->subsystem_vendor) {
   4396	case PCI_VENDOR_ID_INTEL:
   4397		switch (ioc->pdev->device) {
   4398		case MPI2_MFGPAGE_DEVID_SAS2008:
   4399			switch (ioc->pdev->subsystem_device) {
   4400			case MPT2SAS_INTEL_RMS2LL080_SSDID:
   4401				ioc_info(ioc, "%s\n",
   4402					 MPT2SAS_INTEL_RMS2LL080_BRANDING);
   4403				break;
   4404			case MPT2SAS_INTEL_RMS2LL040_SSDID:
   4405				ioc_info(ioc, "%s\n",
   4406					 MPT2SAS_INTEL_RMS2LL040_BRANDING);
   4407				break;
   4408			case MPT2SAS_INTEL_SSD910_SSDID:
   4409				ioc_info(ioc, "%s\n",
   4410					 MPT2SAS_INTEL_SSD910_BRANDING);
   4411				break;
   4412			default:
   4413				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
   4414					 ioc->pdev->subsystem_device);
   4415				break;
   4416			}
   4417			break;
   4418		case MPI2_MFGPAGE_DEVID_SAS2308_2:
   4419			switch (ioc->pdev->subsystem_device) {
   4420			case MPT2SAS_INTEL_RS25GB008_SSDID:
   4421				ioc_info(ioc, "%s\n",
   4422					 MPT2SAS_INTEL_RS25GB008_BRANDING);
   4423				break;
   4424			case MPT2SAS_INTEL_RMS25JB080_SSDID:
   4425				ioc_info(ioc, "%s\n",
   4426					 MPT2SAS_INTEL_RMS25JB080_BRANDING);
   4427				break;
   4428			case MPT2SAS_INTEL_RMS25JB040_SSDID:
   4429				ioc_info(ioc, "%s\n",
   4430					 MPT2SAS_INTEL_RMS25JB040_BRANDING);
   4431				break;
   4432			case MPT2SAS_INTEL_RMS25KB080_SSDID:
   4433				ioc_info(ioc, "%s\n",
   4434					 MPT2SAS_INTEL_RMS25KB080_BRANDING);
   4435				break;
   4436			case MPT2SAS_INTEL_RMS25KB040_SSDID:
   4437				ioc_info(ioc, "%s\n",
   4438					 MPT2SAS_INTEL_RMS25KB040_BRANDING);
   4439				break;
   4440			case MPT2SAS_INTEL_RMS25LB040_SSDID:
   4441				ioc_info(ioc, "%s\n",
   4442					 MPT2SAS_INTEL_RMS25LB040_BRANDING);
   4443				break;
   4444			case MPT2SAS_INTEL_RMS25LB080_SSDID:
   4445				ioc_info(ioc, "%s\n",
   4446					 MPT2SAS_INTEL_RMS25LB080_BRANDING);
   4447				break;
   4448			default:
   4449				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
   4450					 ioc->pdev->subsystem_device);
   4451				break;
   4452			}
   4453			break;
   4454		case MPI25_MFGPAGE_DEVID_SAS3008:
   4455			switch (ioc->pdev->subsystem_device) {
   4456			case MPT3SAS_INTEL_RMS3JC080_SSDID:
   4457				ioc_info(ioc, "%s\n",
   4458					 MPT3SAS_INTEL_RMS3JC080_BRANDING);
   4459				break;
   4460
   4461			case MPT3SAS_INTEL_RS3GC008_SSDID:
   4462				ioc_info(ioc, "%s\n",
   4463					 MPT3SAS_INTEL_RS3GC008_BRANDING);
   4464				break;
   4465			case MPT3SAS_INTEL_RS3FC044_SSDID:
   4466				ioc_info(ioc, "%s\n",
   4467					 MPT3SAS_INTEL_RS3FC044_BRANDING);
   4468				break;
   4469			case MPT3SAS_INTEL_RS3UC080_SSDID:
   4470				ioc_info(ioc, "%s\n",
   4471					 MPT3SAS_INTEL_RS3UC080_BRANDING);
   4472				break;
   4473			default:
   4474				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
   4475					 ioc->pdev->subsystem_device);
   4476				break;
   4477			}
   4478			break;
   4479		default:
   4480			ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
   4481				 ioc->pdev->subsystem_device);
   4482			break;
   4483		}
   4484		break;
   4485	case PCI_VENDOR_ID_DELL:
   4486		switch (ioc->pdev->device) {
   4487		case MPI2_MFGPAGE_DEVID_SAS2008:
   4488			switch (ioc->pdev->subsystem_device) {
   4489			case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
   4490				ioc_info(ioc, "%s\n",
   4491					 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
   4492				break;
   4493			case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
   4494				ioc_info(ioc, "%s\n",
   4495					 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
   4496				break;
   4497			case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
   4498				ioc_info(ioc, "%s\n",
   4499					 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
   4500				break;
   4501			case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
   4502				ioc_info(ioc, "%s\n",
   4503					 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
   4504				break;
   4505			case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
   4506				ioc_info(ioc, "%s\n",
   4507					 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
   4508				break;
   4509			case MPT2SAS_DELL_PERC_H200_SSDID:
   4510				ioc_info(ioc, "%s\n",
   4511					 MPT2SAS_DELL_PERC_H200_BRANDING);
   4512				break;
   4513			case MPT2SAS_DELL_6GBPS_SAS_SSDID:
   4514				ioc_info(ioc, "%s\n",
   4515					 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
   4516				break;
   4517			default:
   4518				ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
   4519					 ioc->pdev->subsystem_device);
   4520				break;
   4521			}
   4522			break;
   4523		case MPI25_MFGPAGE_DEVID_SAS3008:
   4524			switch (ioc->pdev->subsystem_device) {
   4525			case MPT3SAS_DELL_12G_HBA_SSDID:
   4526				ioc_info(ioc, "%s\n",
   4527					 MPT3SAS_DELL_12G_HBA_BRANDING);
   4528				break;
   4529			default:
   4530				ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
   4531					 ioc->pdev->subsystem_device);
   4532				break;
   4533			}
   4534			break;
   4535		default:
   4536			ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
   4537				 ioc->pdev->subsystem_device);
   4538			break;
   4539		}
   4540		break;
   4541	case PCI_VENDOR_ID_CISCO:
   4542		switch (ioc->pdev->device) {
   4543		case MPI25_MFGPAGE_DEVID_SAS3008:
   4544			switch (ioc->pdev->subsystem_device) {
   4545			case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
   4546				ioc_info(ioc, "%s\n",
   4547					 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
   4548				break;
   4549			case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
   4550				ioc_info(ioc, "%s\n",
   4551					 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
   4552				break;
   4553			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
   4554				ioc_info(ioc, "%s\n",
   4555					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
   4556				break;
   4557			default:
   4558				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
   4559					 ioc->pdev->subsystem_device);
   4560				break;
   4561			}
   4562			break;
   4563		case MPI25_MFGPAGE_DEVID_SAS3108_1:
   4564			switch (ioc->pdev->subsystem_device) {
   4565			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
   4566				ioc_info(ioc, "%s\n",
   4567					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
   4568				break;
   4569			case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
   4570				ioc_info(ioc, "%s\n",
   4571					 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
   4572				break;
   4573			default:
   4574				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
   4575					 ioc->pdev->subsystem_device);
   4576				break;
   4577			}
   4578			break;
   4579		default:
   4580			ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
   4581				 ioc->pdev->subsystem_device);
   4582			break;
   4583		}
   4584		break;
   4585	case MPT2SAS_HP_3PAR_SSVID:
   4586		switch (ioc->pdev->device) {
   4587		case MPI2_MFGPAGE_DEVID_SAS2004:
   4588			switch (ioc->pdev->subsystem_device) {
   4589			case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
   4590				ioc_info(ioc, "%s\n",
   4591					 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
   4592				break;
   4593			default:
   4594				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
   4595					 ioc->pdev->subsystem_device);
   4596				break;
   4597			}
   4598			break;
   4599		case MPI2_MFGPAGE_DEVID_SAS2308_2:
   4600			switch (ioc->pdev->subsystem_device) {
   4601			case MPT2SAS_HP_2_4_INTERNAL_SSDID:
   4602				ioc_info(ioc, "%s\n",
   4603					 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
   4604				break;
   4605			case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
   4606				ioc_info(ioc, "%s\n",
   4607					 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
   4608				break;
   4609			case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
   4610				ioc_info(ioc, "%s\n",
   4611					 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
   4612				break;
   4613			case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
   4614				ioc_info(ioc, "%s\n",
   4615					 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
   4616				break;
   4617			default:
   4618				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
   4619					 ioc->pdev->subsystem_device);
   4620				break;
   4621			}
   4622			break;
   4623		default:
   4624			ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
   4625				 ioc->pdev->subsystem_device);
   4626			break;
   4627		}
   4628		break;
   4629	default:
   4630		break;
   4631	}
   4632}
   4633
   4634/**
   4635 * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
   4636 *				version from FW Image Header.
   4637 * @ioc: per adapter object
   4638 *
   4639 * Return: 0 for success, non-zero for failure.
   4640 */
    4641static int
   4642_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
   4643{
   4644	Mpi2FWImageHeader_t *fw_img_hdr;
   4645	Mpi26ComponentImageHeader_t *cmp_img_hdr;
   4646	Mpi25FWUploadRequest_t *mpi_request;
   4647	Mpi2FWUploadReply_t mpi_reply;
   4648	int r = 0, issue_diag_reset = 0;
   4649	u32  package_version = 0;
   4650	void *fwpkg_data = NULL;
   4651	dma_addr_t fwpkg_data_dma;
   4652	u16 smid, ioc_status;
   4653	size_t data_length;
   4654
   4655	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   4656
   4657	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
   4658		ioc_err(ioc, "%s: internal command already in use\n", __func__);
   4659		return -EAGAIN;
   4660	}
   4661
   4662	data_length = sizeof(Mpi2FWImageHeader_t);
   4663	fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
   4664			&fwpkg_data_dma, GFP_KERNEL);
   4665	if (!fwpkg_data) {
   4666		ioc_err(ioc,
   4667		    "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
   4668			__FILE__, __LINE__, __func__);
   4669		return -ENOMEM;
   4670	}
   4671
   4672	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
   4673	if (!smid) {
   4674		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
   4675		r = -EAGAIN;
   4676		goto out;
   4677	}
   4678
   4679	ioc->base_cmds.status = MPT3_CMD_PENDING;
   4680	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
   4681	ioc->base_cmds.smid = smid;
   4682	memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
   4683	mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
   4684	mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
   4685	mpi_request->ImageSize = cpu_to_le32(data_length);
   4686	ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
   4687			data_length);
   4688	init_completion(&ioc->base_cmds.done);
   4689	ioc->put_smid_default(ioc, smid);
   4690	/* Wait for 15 seconds */
   4691	wait_for_completion_timeout(&ioc->base_cmds.done,
   4692			FW_IMG_HDR_READ_TIMEOUT*HZ);
   4693	ioc_info(ioc, "%s: complete\n", __func__);
   4694	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
   4695		ioc_err(ioc, "%s: timeout\n", __func__);
   4696		_debug_dump_mf(mpi_request,
   4697				sizeof(Mpi25FWUploadRequest_t)/4);
   4698		issue_diag_reset = 1;
   4699	} else {
   4700		memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
   4701		if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
   4702			memcpy(&mpi_reply, ioc->base_cmds.reply,
   4703					sizeof(Mpi2FWUploadReply_t));
   4704			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
   4705						MPI2_IOCSTATUS_MASK;
   4706			if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
   4707				fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
   4708				if (le32_to_cpu(fw_img_hdr->Signature) ==
   4709				    MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
   4710					cmp_img_hdr =
   4711					    (Mpi26ComponentImageHeader_t *)
   4712					    (fwpkg_data);
   4713					package_version =
   4714					    le32_to_cpu(
   4715					    cmp_img_hdr->ApplicationSpecific);
   4716				} else
   4717					package_version =
   4718					    le32_to_cpu(
   4719					    fw_img_hdr->PackageVersion.Word);
   4720				if (package_version)
   4721					ioc_info(ioc,
   4722					"FW Package Ver(%02d.%02d.%02d.%02d)\n",
   4723					((package_version) & 0xFF000000) >> 24,
   4724					((package_version) & 0x00FF0000) >> 16,
   4725					((package_version) & 0x0000FF00) >> 8,
   4726					(package_version) & 0x000000FF);
   4727			} else {
   4728				_debug_dump_mf(&mpi_reply,
   4729						sizeof(Mpi2FWUploadReply_t)/4);
   4730			}
   4731		}
   4732	}
   4733	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
   4734out:
   4735	if (fwpkg_data)
   4736		dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
   4737				fwpkg_data_dma);
   4738	if (issue_diag_reset) {
   4739		if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
   4740			return -EFAULT;
   4741		if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
   4742			return -EFAULT;
   4743		r = -EAGAIN;
   4744	}
   4745	return r;
   4746}
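
/*
 * Illustrative sketch (not part of the driver): the package version word
 * printed above packs four fields one per byte, most significant byte first,
 * which is what the shift-and-mask chain in _base_display_fwpkg_version()
 * expresses. Hypothetical helper, for clarity only.
 */
#if 0
static void example_print_package_version(struct MPT3SAS_ADAPTER *ioc,
		u32 package_version)
{
	ioc_info(ioc, "FW Package Ver(%02d.%02d.%02d.%02d)\n",
		 (package_version >> 24) & 0xFF,
		 (package_version >> 16) & 0xFF,
		 (package_version >> 8) & 0xFF,
		 package_version & 0xFF);
}
#endif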
   4747
   4748/**
   4749 * _base_display_ioc_capabilities - Display IOC's capabilities.
   4750 * @ioc: per adapter object
   4751 */
   4752static void
   4753_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
   4754{
   4755	int i = 0;
   4756	char desc[17] = {0};
   4757	u32 iounit_pg1_flags;
   4758	u32 bios_version;
   4759
   4760	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
   4761	strncpy(desc, ioc->manu_pg0.ChipName, 16);
   4762	ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
   4763		 desc,
   4764		 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
   4765		 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
   4766		 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
   4767		 ioc->facts.FWVersion.Word & 0x000000FF,
   4768		 ioc->pdev->revision,
   4769		 (bios_version & 0xFF000000) >> 24,
   4770		 (bios_version & 0x00FF0000) >> 16,
   4771		 (bios_version & 0x0000FF00) >> 8,
   4772		 bios_version & 0x000000FF);
   4773
   4774	_base_display_OEMs_branding(ioc);
   4775
   4776	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
   4777		pr_info("%sNVMe", i ? "," : "");
   4778		i++;
   4779	}
   4780
   4781	ioc_info(ioc, "Protocol=(");
   4782
   4783	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
   4784		pr_cont("Initiator");
   4785		i++;
   4786	}
   4787
   4788	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
   4789		pr_cont("%sTarget", i ? "," : "");
   4790		i++;
   4791	}
   4792
   4793	i = 0;
   4794	pr_cont("), Capabilities=(");
   4795
   4796	if (!ioc->hide_ir_msg) {
   4797		if (ioc->facts.IOCCapabilities &
   4798		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
   4799			pr_cont("Raid");
   4800			i++;
   4801		}
   4802	}
   4803
   4804	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
   4805		pr_cont("%sTLR", i ? "," : "");
   4806		i++;
   4807	}
   4808
   4809	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
   4810		pr_cont("%sMulticast", i ? "," : "");
   4811		i++;
   4812	}
   4813
   4814	if (ioc->facts.IOCCapabilities &
   4815	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
   4816		pr_cont("%sBIDI Target", i ? "," : "");
   4817		i++;
   4818	}
   4819
   4820	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
   4821		pr_cont("%sEEDP", i ? "," : "");
   4822		i++;
   4823	}
   4824
   4825	if (ioc->facts.IOCCapabilities &
   4826	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
   4827		pr_cont("%sSnapshot Buffer", i ? "," : "");
   4828		i++;
   4829	}
   4830
   4831	if (ioc->facts.IOCCapabilities &
   4832	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
   4833		pr_cont("%sDiag Trace Buffer", i ? "," : "");
   4834		i++;
   4835	}
   4836
   4837	if (ioc->facts.IOCCapabilities &
   4838	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
   4839		pr_cont("%sDiag Extended Buffer", i ? "," : "");
   4840		i++;
   4841	}
   4842
   4843	if (ioc->facts.IOCCapabilities &
   4844	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
   4845		pr_cont("%sTask Set Full", i ? "," : "");
   4846		i++;
   4847	}
   4848
   4849	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
   4850	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
   4851		pr_cont("%sNCQ", i ? "," : "");
   4852		i++;
   4853	}
   4854
   4855	pr_cont(")\n");
   4856}
   4857
   4858/**
   4859 * mpt3sas_base_update_missing_delay - change the missing delay timers
   4860 * @ioc: per adapter object
    4861 * @device_missing_delay: amount of time till the device is reported missing
    4862 * @io_missing_delay: interval after which IO is returned when a device is missing
    4863 *
    4864 * Both delay values are passed on the command line. This function modifies
    4865 * the device missing delay as well as the io missing delay, and should be
    4866 * called at driver load time.
   4867 */
   4868void
   4869mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
   4870	u16 device_missing_delay, u8 io_missing_delay)
   4871{
    4872	u16 dmd, dmd_new, dmd_original;
   4873	u8 io_missing_delay_original;
   4874	u16 sz;
   4875	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
   4876	Mpi2ConfigReply_t mpi_reply;
   4877	u8 num_phys = 0;
   4878	u16 ioc_status;
   4879
   4880	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
   4881	if (!num_phys)
   4882		return;
   4883
   4884	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
   4885	    sizeof(Mpi2SasIOUnit1PhyData_t));
   4886	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
   4887	if (!sas_iounit_pg1) {
   4888		ioc_err(ioc, "failure at %s:%d/%s()!\n",
   4889			__FILE__, __LINE__, __func__);
   4890		goto out;
   4891	}
   4892	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
   4893	    sas_iounit_pg1, sz))) {
   4894		ioc_err(ioc, "failure at %s:%d/%s()!\n",
   4895			__FILE__, __LINE__, __func__);
   4896		goto out;
   4897	}
   4898	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
   4899	    MPI2_IOCSTATUS_MASK;
   4900	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
   4901		ioc_err(ioc, "failure at %s:%d/%s()!\n",
   4902			__FILE__, __LINE__, __func__);
   4903		goto out;
   4904	}
   4905
   4906	/* device missing delay */
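        	/*
        	 * ReportDeviceMissingDelay encodes the delay either directly
        	 * (values up to 0x7F) or, when the UNIT_16 flag is set, in
        	 * units of 16, so the largest representable delay is 0x7F0.
        	 * Decode the current value here and re-encode the requested
        	 * value below.
        	 */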
   4907	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
   4908	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
   4909		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
   4910	else
   4911		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
    4912	dmd_original = dmd;
   4913	if (device_missing_delay > 0x7F) {
   4914		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
   4915		    device_missing_delay;
   4916		dmd = dmd / 16;
   4917		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
   4918	} else
   4919		dmd = device_missing_delay;
   4920	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
   4921
   4922	/* io missing delay */
   4923	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
   4924	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
   4925
   4926	if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
   4927	    sz)) {
   4928		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
   4929			dmd_new = (dmd &
   4930			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
   4931		else
   4932			dmd_new =
   4933		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
   4934		ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
    4935			 dmd_original, dmd_new);
   4936		ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
   4937			 io_missing_delay_original,
   4938			 io_missing_delay);
   4939		ioc->device_missing_delay = dmd_new;
   4940		ioc->io_missing_delay = io_missing_delay;
   4941	}
   4942
   4943out:
   4944	kfree(sas_iounit_pg1);
   4945}
   4946
   4947/**
   4948 * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
   4949 *    according to performance mode.
   4950 * @ioc : per adapter object
   4951 *
   4952 * Return: zero on success; otherwise return EAGAIN error code asking the
   4953 * caller to retry.
   4954 */
   4955static int
   4956_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
   4957{
   4958	Mpi2IOCPage1_t ioc_pg1;
   4959	Mpi2ConfigReply_t mpi_reply;
   4960	int rc;
   4961
   4962	rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
   4963	if (rc)
   4964		return rc;
   4965	memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
   4966
   4967	switch (perf_mode) {
   4968	case MPT_PERF_MODE_DEFAULT:
   4969	case MPT_PERF_MODE_BALANCED:
   4970		if (ioc->high_iops_queues) {
   4971			ioc_info(ioc,
   4972				"Enable interrupt coalescing only for first\t"
   4973				"%d reply queues\n",
   4974				MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
    4975			/*
    4976			 * If bit 31 is zero then interrupt coalescing is
    4977			 * enabled for all reply descriptor post queues.
    4978			 * If bit 31 is set then interrupt coalescing can be
    4979			 * enabled/disabled per group of 8 reply descriptor
    4980			 * post queues. So to enable interrupt coalescing only
    4981			 * on the first reply descriptor post queue group,
    4982			 * bit 31 and bit 0 are set.
    4983			 */
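        			/*
        			 * E.g. assuming MPT3SAS_HIGH_IOPS_REPLY_QUEUES
        			 * is 8, this evaluates to 0x80000001: bit 31
        			 * plus a group mask covering only the first
        			 * reply queue group.
        			 */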
   4984			ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
   4985			    ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
   4986			rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
   4987			if (rc)
   4988				return rc;
   4989			ioc_info(ioc, "performance mode: balanced\n");
   4990			return 0;
   4991		}
   4992		fallthrough;
   4993	case MPT_PERF_MODE_LATENCY:
   4994		/*
   4995		 * Enable interrupt coalescing on all reply queues
   4996		 * with timeout value 0xA
   4997		 */
   4998		ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
   4999		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
   5000		ioc_pg1.ProductSpecific = 0;
   5001		rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
   5002		if (rc)
   5003			return rc;
   5004		ioc_info(ioc, "performance mode: latency\n");
   5005		break;
   5006	case MPT_PERF_MODE_IOPS:
   5007		/*
   5008		 * Enable interrupt coalescing on all reply queues.
   5009		 */
   5010		ioc_info(ioc,
   5011		    "performance mode: iops with coalescing timeout: 0x%x\n",
   5012		    le32_to_cpu(ioc_pg1.CoalescingTimeout));
   5013		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
   5014		ioc_pg1.ProductSpecific = 0;
   5015		rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
   5016		if (rc)
   5017			return rc;
   5018		break;
   5019	}
   5020	return 0;
   5021}
   5022
   5023/**
   5024 * _base_get_event_diag_triggers - get event diag trigger values from
   5025 *				persistent pages
   5026 * @ioc : per adapter object
   5027 *
   5028 * Return: nothing.
   5029 */
   5030static int
   5031_base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
   5032{
   5033	Mpi26DriverTriggerPage2_t trigger_pg2;
   5034	struct SL_WH_EVENT_TRIGGER_T *event_tg;
   5035	MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY *mpi_event_tg;
   5036	Mpi2ConfigReply_t mpi_reply;
   5037	int r = 0, i = 0;
   5038	u16 count = 0;
   5039	u16 ioc_status;
   5040
   5041	r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
   5042	    &trigger_pg2);
   5043	if (r)
   5044		return r;
   5045
   5046	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
   5047	    MPI2_IOCSTATUS_MASK;
   5048	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
   5049		dinitprintk(ioc,
   5050		    ioc_err(ioc,
   5051		    "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
   5052		   __func__, ioc_status));
   5053		return 0;
   5054	}
   5055
   5056	if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) {
   5057		count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger);
   5058		count = min_t(u16, NUM_VALID_ENTRIES, count);
   5059		ioc->diag_trigger_event.ValidEntries = count;
   5060
   5061		event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0];
   5062		mpi_event_tg = &trigger_pg2.MPIEventTriggers[0];
   5063		for (i = 0; i < count; i++) {
   5064			event_tg->EventValue = le16_to_cpu(
   5065			    mpi_event_tg->MPIEventCode);
   5066			event_tg->LogEntryQualifier = le16_to_cpu(
   5067			    mpi_event_tg->MPIEventCodeSpecific);
   5068			event_tg++;
   5069			mpi_event_tg++;
   5070		}
   5071	}
   5072	return 0;
   5073}
   5074
   5075/**
   5076 * _base_get_scsi_diag_triggers - get scsi diag trigger values from
   5077 *				persistent pages
   5078 * @ioc : per adapter object
   5079 *
   5080 * Return: 0 on success; otherwise return failure status.
   5081 */
   5082static int
   5083_base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
   5084{
   5085	Mpi26DriverTriggerPage3_t trigger_pg3;
   5086	struct SL_WH_SCSI_TRIGGER_T *scsi_tg;
   5087	MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY *mpi_scsi_tg;
   5088	Mpi2ConfigReply_t mpi_reply;
   5089	int r = 0, i = 0;
   5090	u16 count = 0;
   5091	u16 ioc_status;
   5092
   5093	r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
   5094	    &trigger_pg3);
   5095	if (r)
   5096		return r;
   5097
   5098	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
   5099	    MPI2_IOCSTATUS_MASK;
   5100	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
   5101		dinitprintk(ioc,
   5102		    ioc_err(ioc,
   5103		    "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
   5104		    __func__, ioc_status));
   5105		return 0;
   5106	}
   5107
   5108	if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) {
   5109		count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger);
   5110		count = min_t(u16, NUM_VALID_ENTRIES, count);
   5111		ioc->diag_trigger_scsi.ValidEntries = count;
   5112
   5113		scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0];
   5114		mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0];
   5115		for (i = 0; i < count; i++) {
   5116			scsi_tg->ASCQ = mpi_scsi_tg->ASCQ;
   5117			scsi_tg->ASC = mpi_scsi_tg->ASC;
   5118			scsi_tg->SenseKey = mpi_scsi_tg->SenseKey;
   5119
   5120			scsi_tg++;
   5121			mpi_scsi_tg++;
   5122		}
   5123	}
   5124	return 0;
   5125}
   5126
   5127/**
   5128 * _base_get_mpi_diag_triggers - get mpi diag trigger values from
   5129 *				persistent pages
   5130 * @ioc : per adapter object
   5131 *
   5132 * Return: 0 on success; otherwise return failure status.
   5133 */
   5134static int
   5135_base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
   5136{
   5137	Mpi26DriverTriggerPage4_t trigger_pg4;
   5138	struct SL_WH_MPI_TRIGGER_T *status_tg;
   5139	MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY *mpi_status_tg;
   5140	Mpi2ConfigReply_t mpi_reply;
   5141	int r = 0, i = 0;
   5142	u16 count = 0;
   5143	u16 ioc_status;
   5144
   5145	r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
   5146	    &trigger_pg4);
   5147	if (r)
   5148		return r;
   5149
   5150	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
   5151	    MPI2_IOCSTATUS_MASK;
   5152	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
   5153		dinitprintk(ioc,
   5154		    ioc_err(ioc,
   5155		    "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
   5156		    __func__, ioc_status));
   5157		return 0;
   5158	}
   5159
   5160	if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) {
   5161		count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger);
   5162		count = min_t(u16, NUM_VALID_ENTRIES, count);
   5163		ioc->diag_trigger_mpi.ValidEntries = count;
   5164
   5165		status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0];
   5166		mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0];
   5167
   5168		for (i = 0; i < count; i++) {
   5169			status_tg->IOCStatus = le16_to_cpu(
   5170			    mpi_status_tg->IOCStatus);
   5171			status_tg->IocLogInfo = le32_to_cpu(
   5172			    mpi_status_tg->LogInfo);
   5173
   5174			status_tg++;
   5175			mpi_status_tg++;
   5176		}
   5177	}
   5178	return 0;
   5179}
   5180
   5181/**
   5182 * _base_get_master_diag_triggers - get master diag trigger values from
   5183 *				persistent pages
   5184 * @ioc : per adapter object
   5185 *
    5186 * Return: 0 on success; otherwise return failure status.
   5187 */
   5188static int
   5189_base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
   5190{
   5191	Mpi26DriverTriggerPage1_t trigger_pg1;
   5192	Mpi2ConfigReply_t mpi_reply;
   5193	int r;
   5194	u16 ioc_status;
   5195
   5196	r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
   5197	    &trigger_pg1);
   5198	if (r)
   5199		return r;
   5200
   5201	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
   5202	    MPI2_IOCSTATUS_MASK;
   5203	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
   5204		dinitprintk(ioc,
   5205		    ioc_err(ioc,
   5206		    "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
   5207		   __func__, ioc_status));
   5208		return 0;
   5209	}
   5210
   5211	if (le16_to_cpu(trigger_pg1.NumMasterTrigger))
   5212		ioc->diag_trigger_master.MasterData |=
   5213		    le32_to_cpu(
   5214		    trigger_pg1.MasterTriggers[0].MasterTriggerFlags);
   5215	return 0;
   5216}
   5217
   5218/**
   5219 * _base_check_for_trigger_pages_support - checks whether HBA FW supports
   5220 *					driver trigger pages or not
   5221 * @ioc : per adapter object
   5222 * @trigger_flags : address where trigger page0's TriggerFlags value is copied
   5223 *
    5224 * Return: 0 if the HBA FW supports driver trigger pages, with the TriggerFlags
    5225 * mask copied into @trigger_flags; %-EFAULT if driver trigger pages are not
    5226 * supported by the FW; or %-EAGAIN if a diag reset occurred due to an FW fault,
    5227 * asking the caller to retry the command.
   5228 *
   5229 */
   5230static int
   5231_base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags)
   5232{
   5233	Mpi26DriverTriggerPage0_t trigger_pg0;
   5234	int r = 0;
   5235	Mpi2ConfigReply_t mpi_reply;
   5236	u16 ioc_status;
   5237
   5238	r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
   5239	    &trigger_pg0);
   5240	if (r)
   5241		return r;
   5242
   5243	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
   5244	    MPI2_IOCSTATUS_MASK;
   5245	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
   5246		return -EFAULT;
   5247
   5248	*trigger_flags = le16_to_cpu(trigger_pg0.TriggerFlags);
   5249	return 0;
   5250}
   5251
   5252/**
   5253 * _base_get_diag_triggers - Retrieve diag trigger values from
   5254 *				persistent pages.
   5255 * @ioc : per adapter object
   5256 *
   5257 * Return: zero on success; otherwise return EAGAIN error codes
   5258 * asking the caller to retry.
   5259 */
   5260static int
   5261_base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
   5262{
   5263	int trigger_flags;
   5264	int r;
   5265
   5266	/*
   5267	 * Default setting of master trigger.
   5268	 */
   5269	ioc->diag_trigger_master.MasterData =
   5270	    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
   5271
   5272	r = _base_check_for_trigger_pages_support(ioc, &trigger_flags);
   5273	if (r) {
   5274		if (r == -EAGAIN)
   5275			return r;
   5276		/*
   5277		 * Don't go for error handling when FW doesn't support
   5278		 * driver trigger pages.
   5279		 */
   5280		return 0;
   5281	}
   5282
   5283	ioc->supports_trigger_pages = 1;
   5284
   5285	/*
   5286	 * Retrieve master diag trigger values from driver trigger pg1
   5287	 * if master trigger bit enabled in TriggerFlags.
   5288	 */
   5289	if ((u16)trigger_flags &
   5290	    MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) {
   5291		r = _base_get_master_diag_triggers(ioc);
   5292		if (r)
   5293			return r;
   5294	}
   5295
   5296	/*
   5297	 * Retrieve event diag trigger values from driver trigger pg2
   5298	 * if event trigger bit enabled in TriggerFlags.
   5299	 */
   5300	if ((u16)trigger_flags &
   5301	    MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) {
   5302		r = _base_get_event_diag_triggers(ioc);
   5303		if (r)
   5304			return r;
   5305	}
   5306
   5307	/*
   5308	 * Retrieve scsi diag trigger values from driver trigger pg3
   5309	 * if scsi trigger bit enabled in TriggerFlags.
   5310	 */
   5311	if ((u16)trigger_flags &
   5312	    MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) {
   5313		r = _base_get_scsi_diag_triggers(ioc);
   5314		if (r)
   5315			return r;
   5316	}
   5317	/*
   5318	 * Retrieve mpi error diag trigger values from driver trigger pg4
   5319	 * if loginfo trigger bit enabled in TriggerFlags.
   5320	 */
   5321	if ((u16)trigger_flags &
   5322	    MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) {
   5323		r = _base_get_mpi_diag_triggers(ioc);
   5324		if (r)
   5325			return r;
   5326	}
   5327	return 0;
   5328}
   5329
   5330/**
   5331 * _base_update_diag_trigger_pages - Update the driver trigger pages after
   5332 *			online FW update, in case updated FW supports driver
   5333 *			trigger pages.
   5334 * @ioc : per adapter object
   5335 *
   5336 * Return: nothing.
   5337 */
   5338static void
   5339_base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
   5340{
   5341
   5342	if (ioc->diag_trigger_master.MasterData)
   5343		mpt3sas_config_update_driver_trigger_pg1(ioc,
   5344		    &ioc->diag_trigger_master, 1);
   5345
   5346	if (ioc->diag_trigger_event.ValidEntries)
   5347		mpt3sas_config_update_driver_trigger_pg2(ioc,
   5348		    &ioc->diag_trigger_event, 1);
   5349
   5350	if (ioc->diag_trigger_scsi.ValidEntries)
   5351		mpt3sas_config_update_driver_trigger_pg3(ioc,
   5352		    &ioc->diag_trigger_scsi, 1);
   5353
   5354	if (ioc->diag_trigger_mpi.ValidEntries)
   5355		mpt3sas_config_update_driver_trigger_pg4(ioc,
   5356		    &ioc->diag_trigger_mpi, 1);
   5357}
   5358
   5359/**
   5360 * _base_assign_fw_reported_qd	- Get FW reported QD for SAS/SATA devices.
   5361 *				- On failure set default QD values.
   5362 * @ioc : per adapter object
   5363 *
   5364 * Returns 0 for success, non-zero for failure.
   5365 *
   5366 */
   5367static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
   5368{
   5369	Mpi2ConfigReply_t mpi_reply;
   5370	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
   5371	Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
   5372	u16 depth;
   5373	int sz;
   5374	int rc = 0;
   5375
   5376	ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
   5377	ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
   5378	ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH;
   5379	ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH;
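        	/*
        	 * Only Gen 3.5 controllers are queried below for FW-reported
        	 * queue depths; older controllers keep the driver defaults
        	 * assigned above.
        	 */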
   5380	if (!ioc->is_gen35_ioc)
   5381		goto out;
   5382	/* sas iounit page 1 */
   5383	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
   5384	sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL);
   5385	if (!sas_iounit_pg1) {
   5386		pr_err("%s: failure at %s:%d/%s()!\n",
   5387		    ioc->name, __FILE__, __LINE__, __func__);
   5388		return rc;
   5389	}
   5390	rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
   5391	    sas_iounit_pg1, sz);
   5392	if (rc) {
   5393		pr_err("%s: failure at %s:%d/%s()!\n",
   5394		    ioc->name, __FILE__, __LINE__, __func__);
   5395		goto out;
   5396	}
   5397
   5398	depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth);
   5399	ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
   5400
   5401	depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth);
   5402	ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
   5403
   5404	depth = sas_iounit_pg1->SATAMaxQDepth;
   5405	ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);
   5406
   5407	/* pcie iounit page 1 */
   5408	rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
   5409	    &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
   5410	if (rc) {
   5411		pr_err("%s: failure at %s:%d/%s()!\n",
   5412		    ioc->name, __FILE__, __LINE__, __func__);
   5413		goto out;
   5414	}
   5415	ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ?
   5416	    (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) :
   5417	    MPT3SAS_NVME_QUEUE_DEPTH;
   5418out:
   5419	dinitprintk(ioc, pr_err(
   5420	    "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n",
   5421	    ioc->max_wideport_qd, ioc->max_narrowport_qd,
   5422	    ioc->max_sata_qd, ioc->max_nvme_qd));
   5423	kfree(sas_iounit_pg1);
   5424	return rc;
   5425}
   5426
   5427/**
   5428 * _base_static_config_pages - static start of day config pages
   5429 * @ioc: per adapter object
   5430 */
   5431static int
   5432_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
   5433{
   5434	Mpi2ConfigReply_t mpi_reply;
   5435	u32 iounit_pg1_flags;
   5436	int tg_flags = 0;
   5437	int rc;
   5438	ioc->nvme_abort_timeout = 30;
   5439
   5440	rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply,
   5441	    &ioc->manu_pg0);
   5442	if (rc)
   5443		return rc;
   5444	if (ioc->ir_firmware) {
   5445		rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
   5446		    &ioc->manu_pg10);
   5447		if (rc)
   5448			return rc;
   5449	}
   5450	/*
   5451	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
   5452	 * flag unset in NVDATA.
   5453	 */
   5454	rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply,
   5455	    &ioc->manu_pg11);
   5456	if (rc)
   5457		return rc;
   5458	if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
   5459		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
   5460		    ioc->name);
   5461		ioc->manu_pg11.EEDPTagMode &= ~0x3;
   5462		ioc->manu_pg11.EEDPTagMode |= 0x1;
   5463		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
   5464		    &ioc->manu_pg11);
   5465	}
   5466	if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
   5467		ioc->tm_custom_handling = 1;
   5468	else {
   5469		ioc->tm_custom_handling = 0;
   5470		if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
   5471			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
   5472		else if (ioc->manu_pg11.NVMeAbortTO >
   5473					NVME_TASK_ABORT_MAX_TIMEOUT)
   5474			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
   5475		else
   5476			ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
   5477	}
   5478	ioc->time_sync_interval =
   5479	    ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK;
   5480	if (ioc->time_sync_interval) {
   5481		if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK)
   5482			ioc->time_sync_interval =
   5483			    ioc->time_sync_interval * SECONDS_PER_HOUR;
   5484		else
   5485			ioc->time_sync_interval =
   5486			    ioc->time_sync_interval * SECONDS_PER_MIN;
   5487		dinitprintk(ioc, ioc_info(ioc,
   5488		    "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n",
   5489		    ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval &
   5490		    MPT3SAS_TIMESYNC_UNIT_MASK) ? "Hour" : "Minute"));
   5491	} else {
   5492		if (ioc->is_gen35_ioc)
   5493			ioc_warn(ioc,
   5494			    "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
   5495	}
   5496	rc = _base_assign_fw_reported_qd(ioc);
   5497	if (rc)
   5498		return rc;
   5499	rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
   5500	if (rc)
   5501		return rc;
   5502	rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
   5503	if (rc)
   5504		return rc;
   5505	rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
   5506	if (rc)
   5507		return rc;
   5508	rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
   5509	if (rc)
   5510		return rc;
   5511	rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
   5512	if (rc)
   5513		return rc;
   5514	rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
   5515	if (rc)
   5516		return rc;
   5517	_base_display_ioc_capabilities(ioc);
   5518
   5519	/*
   5520	 * Enable task_set_full handling in iounit_pg1 when the
    5521	 * facts capabilities indicate that it's supported.
   5522	 */
   5523	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
   5524	if ((ioc->facts.IOCCapabilities &
   5525	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
   5526		iounit_pg1_flags &=
   5527		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
   5528	else
   5529		iounit_pg1_flags |=
   5530		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
   5531	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
   5532	rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
   5533	if (rc)
   5534		return rc;
   5535
   5536	if (ioc->iounit_pg8.NumSensors)
   5537		ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
   5538	if (ioc->is_aero_ioc) {
   5539		rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc);
   5540		if (rc)
   5541			return rc;
   5542	}
   5543	if (ioc->is_gen35_ioc) {
   5544		if (ioc->is_driver_loading) {
   5545			rc = _base_get_diag_triggers(ioc);
   5546			if (rc)
   5547				return rc;
   5548		} else {
   5549			/*
   5550			 * In case of online HBA FW update operation,
   5551			 * check whether updated FW supports the driver trigger
   5552			 * pages or not.
   5553			 * - If previous FW has not supported driver trigger
   5554			 *   pages and newer FW supports them then update these
   5555			 *   pages with current diag trigger values.
   5556			 * - If previous FW has supported driver trigger pages
   5557			 *   and new FW doesn't support them then disable
   5558			 *   support_trigger_pages flag.
   5559			 */
   5560			_base_check_for_trigger_pages_support(ioc, &tg_flags);
   5561			if (!ioc->supports_trigger_pages && tg_flags != -EFAULT)
   5562				_base_update_diag_trigger_pages(ioc);
   5563			else if (ioc->supports_trigger_pages &&
   5564			    tg_flags == -EFAULT)
   5565				ioc->supports_trigger_pages = 0;
   5566		}
   5567	}
   5568	return 0;
   5569}
   5570
   5571/**
   5572 * mpt3sas_free_enclosure_list - release memory
   5573 * @ioc: per adapter object
   5574 *
   5575 * Free memory allocated during enclosure add.
   5576 */
   5577void
   5578mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
   5579{
   5580	struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
   5581
   5582	/* Free enclosure list */
   5583	list_for_each_entry_safe(enclosure_dev,
   5584			enclosure_dev_next, &ioc->enclosure_list, list) {
   5585		list_del(&enclosure_dev->list);
   5586		kfree(enclosure_dev);
   5587	}
   5588}
   5589
   5590/**
   5591 * _base_release_memory_pools - release memory
   5592 * @ioc: per adapter object
   5593 *
   5594 * Free memory allocated from _base_allocate_memory_pools.
   5595 */
   5596static void
   5597_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
   5598{
   5599	int i = 0;
   5600	int j = 0;
   5601	int dma_alloc_count = 0;
   5602	struct chain_tracker *ct;
   5603	int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
   5604
   5605	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   5606
   5607	if (ioc->request) {
   5608		dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
   5609		    ioc->request,  ioc->request_dma);
   5610		dexitprintk(ioc,
   5611			    ioc_info(ioc, "request_pool(0x%p): free\n",
   5612				     ioc->request));
   5613		ioc->request = NULL;
   5614	}
   5615
   5616	if (ioc->sense) {
   5617		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
   5618		dma_pool_destroy(ioc->sense_dma_pool);
   5619		dexitprintk(ioc,
   5620			    ioc_info(ioc, "sense_pool(0x%p): free\n",
   5621				     ioc->sense));
   5622		ioc->sense = NULL;
   5623	}
   5624
   5625	if (ioc->reply) {
   5626		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
   5627		dma_pool_destroy(ioc->reply_dma_pool);
   5628		dexitprintk(ioc,
   5629			    ioc_info(ioc, "reply_pool(0x%p): free\n",
   5630				     ioc->reply));
   5631		ioc->reply = NULL;
   5632	}
   5633
   5634	if (ioc->reply_free) {
   5635		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
   5636		    ioc->reply_free_dma);
   5637		dma_pool_destroy(ioc->reply_free_dma_pool);
   5638		dexitprintk(ioc,
   5639			    ioc_info(ioc, "reply_free_pool(0x%p): free\n",
   5640				     ioc->reply_free));
   5641		ioc->reply_free = NULL;
   5642	}
   5643
   5644	if (ioc->reply_post) {
   5645		dma_alloc_count = DIV_ROUND_UP(count,
   5646				RDPQ_MAX_INDEX_IN_ONE_CHUNK);
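        		/*
        		 * Only the first queue of each RDPQ chunk owns a
        		 * dma_pool allocation (the remaining queues in the
        		 * chunk are offsets into it), so only those entries
        		 * are freed here.
        		 */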
   5647		for (i = 0; i < count; i++) {
   5648			if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
   5649			    && dma_alloc_count) {
   5650				if (ioc->reply_post[i].reply_post_free) {
   5651					dma_pool_free(
   5652					    ioc->reply_post_free_dma_pool,
   5653					    ioc->reply_post[i].reply_post_free,
   5654					ioc->reply_post[i].reply_post_free_dma);
   5655					dexitprintk(ioc, ioc_info(ioc,
   5656					   "reply_post_free_pool(0x%p): free\n",
   5657					   ioc->reply_post[i].reply_post_free));
   5658					ioc->reply_post[i].reply_post_free =
   5659									NULL;
   5660				}
   5661				--dma_alloc_count;
   5662			}
   5663		}
   5664		dma_pool_destroy(ioc->reply_post_free_dma_pool);
   5665		if (ioc->reply_post_free_array &&
   5666			ioc->rdpq_array_enable) {
   5667			dma_pool_free(ioc->reply_post_free_array_dma_pool,
   5668			    ioc->reply_post_free_array,
   5669			    ioc->reply_post_free_array_dma);
   5670			ioc->reply_post_free_array = NULL;
   5671		}
   5672		dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
   5673		kfree(ioc->reply_post);
   5674	}
   5675
   5676	if (ioc->pcie_sgl_dma_pool) {
   5677		for (i = 0; i < ioc->scsiio_depth; i++) {
   5678			dma_pool_free(ioc->pcie_sgl_dma_pool,
   5679					ioc->pcie_sg_lookup[i].pcie_sgl,
   5680					ioc->pcie_sg_lookup[i].pcie_sgl_dma);
   5681			ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
   5682		}
   5683		dma_pool_destroy(ioc->pcie_sgl_dma_pool);
   5684	}
   5685	if (ioc->config_page) {
   5686		dexitprintk(ioc,
   5687			    ioc_info(ioc, "config_page(0x%p): free\n",
   5688				     ioc->config_page));
   5689		dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
   5690		    ioc->config_page, ioc->config_page_dma);
   5691	}
   5692
   5693	kfree(ioc->hpr_lookup);
   5694	ioc->hpr_lookup = NULL;
   5695	kfree(ioc->internal_lookup);
   5696	ioc->internal_lookup = NULL;
   5697	if (ioc->chain_lookup) {
   5698		for (i = 0; i < ioc->scsiio_depth; i++) {
   5699			for (j = ioc->chains_per_prp_buffer;
   5700			    j < ioc->chains_needed_per_io; j++) {
   5701				ct = &ioc->chain_lookup[i].chains_per_smid[j];
   5702				if (ct && ct->chain_buffer)
   5703					dma_pool_free(ioc->chain_dma_pool,
   5704						ct->chain_buffer,
   5705						ct->chain_buffer_dma);
   5706			}
   5707			kfree(ioc->chain_lookup[i].chains_per_smid);
   5708		}
   5709		dma_pool_destroy(ioc->chain_dma_pool);
   5710		kfree(ioc->chain_lookup);
   5711		ioc->chain_lookup = NULL;
   5712	}
   5713
   5714	kfree(ioc->io_queue_num);
   5715	ioc->io_queue_num = NULL;
   5716}
   5717
   5718/**
    5719 * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set
    5720 *	have the same upper 32 bits in their base memory address.
    5721 * @start_address: Base address of a reply queue set
    5722 * @pool_sz: Size of a single Reply Descriptor Post Queue pool
    5723 *
    5724 * Return: 1 if the reply queues in a set have the same upper 32 bits in their
    5725 * base memory address, else 0.
   5726 */
   5727static int
   5728mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
   5729{
   5730	dma_addr_t end_address;
   5731
   5732	end_address = start_address + pool_sz - 1;
   5733
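        	/*
        	 * Example: start 0x1FFFFF000 with pool_sz 0x2000 ends at
        	 * 0x200000FFF; the upper 32 bits differ (0x1 vs 0x2), so the
        	 * pool straddles a 4GB boundary and 0 is returned.
        	 */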
   5734	if (upper_32_bits(start_address) == upper_32_bits(end_address))
   5735		return 1;
   5736	else
   5737		return 0;
   5738}
   5739
   5740/**
   5741 * _base_reduce_hba_queue_depth- Retry with reduced queue depth
   5742 * @ioc: Adapter object
   5743 *
   5744 * Return: 0 for success, non-zero for failure.
   5745 **/
   5746static inline int
   5747_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
   5748{
   5749	int reduce_sz = 64;
   5750
   5751	if ((ioc->hba_queue_depth - reduce_sz) >
   5752	    (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
   5753		ioc->hba_queue_depth -= reduce_sz;
   5754		return 0;
   5755	} else
   5756		return -ENOMEM;
   5757}
   5758
   5759/**
   5760 * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
   5761 *			for pcie sgl pools.
   5762 * @ioc: Adapter object
   5763 * @sz: DMA Pool size
   5764 *
   5765 * Return: 0 for success, non-zero for failure.
   5766 */
   5767
   5768static int
   5769_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
   5770{
   5771	int i = 0, j = 0;
   5772	struct chain_tracker *ct;
   5773
   5774	ioc->pcie_sgl_dma_pool =
   5775	    dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
   5776	    ioc->page_size, 0);
   5777	if (!ioc->pcie_sgl_dma_pool) {
   5778		ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
   5779		return -ENOMEM;
   5780	}
   5781
   5782	ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
   5783	ioc->chains_per_prp_buffer =
   5784	    min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
   5785	for (i = 0; i < ioc->scsiio_depth; i++) {
   5786		ioc->pcie_sg_lookup[i].pcie_sgl =
   5787		    dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
   5788		    &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
   5789		if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
   5790			ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
   5791			return -EAGAIN;
   5792		}
   5793
   5794		if (!mpt3sas_check_same_4gb_region(
   5795		    ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) {
   5796			ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
   5797			    ioc->pcie_sg_lookup[i].pcie_sgl,
   5798			    (unsigned long long)
   5799			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
   5800			ioc->use_32bit_dma = true;
   5801			return -EAGAIN;
   5802		}
   5803
   5804		for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
   5805			ct = &ioc->chain_lookup[i].chains_per_smid[j];
   5806			ct->chain_buffer =
   5807			    ioc->pcie_sg_lookup[i].pcie_sgl +
   5808			    (j * ioc->chain_segment_sz);
   5809			ct->chain_buffer_dma =
   5810			    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
   5811			    (j * ioc->chain_segment_sz);
   5812		}
   5813	}
   5814	dinitprintk(ioc, ioc_info(ioc,
   5815	    "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
   5816	    ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
   5817	dinitprintk(ioc, ioc_info(ioc,
   5818	    "Number of chains can fit in a PRP page(%d)\n",
   5819	    ioc->chains_per_prp_buffer));
   5820	return 0;
   5821}
   5822
   5823/**
   5824 * _base_allocate_chain_dma_pool - Allocating DMA'able memory
   5825 *			for chain dma pool.
   5826 * @ioc: Adapter object
   5827 * @sz: DMA Pool size
   5828 *
   5829 * Return: 0 for success, non-zero for failure.
   5830 */
   5831static int
   5832_base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
   5833{
   5834	int i = 0, j = 0;
   5835	struct chain_tracker *ctr;
   5836
   5837	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
   5838	    ioc->chain_segment_sz, 16, 0);
   5839	if (!ioc->chain_dma_pool)
   5840		return -ENOMEM;
   5841
   5842	for (i = 0; i < ioc->scsiio_depth; i++) {
   5843		for (j = ioc->chains_per_prp_buffer;
   5844		    j < ioc->chains_needed_per_io; j++) {
   5845			ctr = &ioc->chain_lookup[i].chains_per_smid[j];
   5846			ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
   5847			    GFP_KERNEL, &ctr->chain_buffer_dma);
   5848			if (!ctr->chain_buffer)
   5849				return -EAGAIN;
   5850			if (!mpt3sas_check_same_4gb_region(
   5851			    ctr->chain_buffer_dma, ioc->chain_segment_sz)) {
   5852				ioc_err(ioc,
   5853				    "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
   5854				    ctr->chain_buffer,
   5855				    (unsigned long long)ctr->chain_buffer_dma);
   5856				ioc->use_32bit_dma = true;
   5857				return -EAGAIN;
   5858			}
   5859		}
   5860	}
   5861	dinitprintk(ioc, ioc_info(ioc,
   5862	    "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n",
   5863	    ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
   5864	    (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
   5865	    ioc->chain_segment_sz))/1024));
   5866	return 0;
   5867}
   5868
   5869/**
   5870 * _base_allocate_sense_dma_pool - Allocating DMA'able memory
   5871 *			for sense dma pool.
   5872 * @ioc: Adapter object
   5873 * @sz: DMA Pool size
   5874 * Return: 0 for success, non-zero for failure.
   5875 */
   5876static int
   5877_base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
   5878{
   5879	ioc->sense_dma_pool =
   5880	    dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
   5881	if (!ioc->sense_dma_pool)
   5882		return -ENOMEM;
   5883	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
   5884	    GFP_KERNEL, &ioc->sense_dma);
   5885	if (!ioc->sense)
   5886		return -EAGAIN;
   5887	if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) {
   5888		dinitprintk(ioc, pr_err(
   5889		    "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
   5890		    ioc->sense, (unsigned long long) ioc->sense_dma));
   5891		ioc->use_32bit_dma = true;
   5892		return -EAGAIN;
   5893	}
   5894	ioc_info(ioc,
   5895	    "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n",
   5896	    ioc->sense, (unsigned long long)ioc->sense_dma,
   5897	    ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024);
   5898	return 0;
   5899}
   5900
   5901/**
   5902 * _base_allocate_reply_pool - Allocating DMA'able memory
   5903 *			for reply pool.
   5904 * @ioc: Adapter object
   5905 * @sz: DMA Pool size
   5906 * Return: 0 for success, non-zero for failure.
   5907 */
   5908static int
   5909_base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
   5910{
   5911	/* reply pool, 4 byte align */
   5912	ioc->reply_dma_pool = dma_pool_create("reply pool",
   5913	    &ioc->pdev->dev, sz, 4, 0);
   5914	if (!ioc->reply_dma_pool)
   5915		return -ENOMEM;
   5916	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
   5917	    &ioc->reply_dma);
   5918	if (!ioc->reply)
   5919		return -EAGAIN;
   5920	if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) {
   5921		dinitprintk(ioc, pr_err(
   5922		    "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
   5923		    ioc->reply, (unsigned long long) ioc->reply_dma));
   5924		ioc->use_32bit_dma = true;
   5925		return -EAGAIN;
   5926	}
   5927	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
   5928	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
   5929	ioc_info(ioc,
   5930	    "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n",
   5931	    ioc->reply, (unsigned long long)ioc->reply_dma,
   5932	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024);
   5933	return 0;
   5934}
   5935
   5936/**
   5937 * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory
   5938 *			for reply free dma pool.
   5939 * @ioc: Adapter object
   5940 * @sz: DMA Pool size
   5941 * Return: 0 for success, non-zero for failure.
   5942 */
   5943static int
   5944_base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
   5945{
   5946	/* reply free queue, 16 byte align */
   5947	ioc->reply_free_dma_pool = dma_pool_create(
   5948	    "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
   5949	if (!ioc->reply_free_dma_pool)
   5950		return -ENOMEM;
   5951	ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
   5952	    GFP_KERNEL, &ioc->reply_free_dma);
   5953	if (!ioc->reply_free)
   5954		return -EAGAIN;
   5955	if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) {
   5956		dinitprintk(ioc,
   5957		    pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
   5958		    ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
   5959		ioc->use_32bit_dma = true;
   5960		return -EAGAIN;
   5961	}
   5962	memset(ioc->reply_free, 0, sz);
   5963	dinitprintk(ioc, ioc_info(ioc,
   5964	    "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
   5965	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
   5966	dinitprintk(ioc, ioc_info(ioc,
   5967	    "reply_free_dma (0x%llx)\n",
   5968	    (unsigned long long)ioc->reply_free_dma));
   5969	return 0;
   5970}
   5971
   5972/**
   5973 * _base_allocate_reply_post_free_array - Allocating DMA'able memory
   5974 *			for reply post free array.
   5975 * @ioc: Adapter object
   5976 * @reply_post_free_array_sz: DMA Pool size
   5977 * Return: 0 for success, non-zero for failure.
   5978 */
   5979
   5980static int
   5981_base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
   5982	u32 reply_post_free_array_sz)
   5983{
   5984	ioc->reply_post_free_array_dma_pool =
   5985	    dma_pool_create("reply_post_free_array pool",
   5986	    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
   5987	if (!ioc->reply_post_free_array_dma_pool)
   5988		return -ENOMEM;
   5989	ioc->reply_post_free_array =
   5990	    dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
   5991	    GFP_KERNEL, &ioc->reply_post_free_array_dma);
   5992	if (!ioc->reply_post_free_array)
   5993		return -EAGAIN;
   5994	if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma,
   5995	    reply_post_free_array_sz)) {
    5996		dinitprintk(ioc, pr_err(
    5997		    "Bad Reply Post Free Array! Array (0x%p) Array dma = (0x%llx)\n",
    5998		    ioc->reply_post_free_array,
    5999		    (unsigned long long) ioc->reply_post_free_array_dma));
   6000		ioc->use_32bit_dma = true;
   6001		return -EAGAIN;
   6002	}
   6003	return 0;
   6004}
   6005/**
   6006 * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
   6007 *                     for reply queues.
   6008 * @ioc: per adapter object
   6009 * @sz: DMA Pool size
   6010 * Return: 0 for success, non-zero for failure.
   6011 */
   6012static int
   6013base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
   6014{
   6015	int i = 0;
   6016	u32 dma_alloc_count = 0;
   6017	int reply_post_free_sz = ioc->reply_post_queue_depth *
   6018		sizeof(Mpi2DefaultReplyDescriptor_t);
   6019	int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
   6020
   6021	ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
   6022			GFP_KERNEL);
   6023	if (!ioc->reply_post)
   6024		return -ENOMEM;
    6025	/*
    6026	 *  For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ..) and
    6027	 *  for VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ..)
    6028	 *  should be within a 4GB boundary, i.e. reply queues in a set must
    6029	 *  have the same upper 32 bits in their memory address. So here the
    6030	 *  driver allocates the DMA'able memory for reply queues accordingly.
    6031	 *  The driver uses the VENTURA_SERIES limitation to manage
    6032	 *  INVADER_SERIES as well.
    6033	 */
   6034	dma_alloc_count = DIV_ROUND_UP(count,
   6035				RDPQ_MAX_INDEX_IN_ONE_CHUNK);
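        	/*
        	 * E.g. assuming RDPQ_MAX_INDEX_IN_ONE_CHUNK is 16, 32 reply
        	 * queues need two chunk allocations below, each backing 16
        	 * contiguous reply descriptor post queues.
        	 */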
   6036	ioc->reply_post_free_dma_pool =
   6037		dma_pool_create("reply_post_free pool",
   6038		    &ioc->pdev->dev, sz, 16, 0);
   6039	if (!ioc->reply_post_free_dma_pool)
   6040		return -ENOMEM;
   6041	for (i = 0; i < count; i++) {
   6042		if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
   6043			ioc->reply_post[i].reply_post_free =
   6044			    dma_pool_zalloc(ioc->reply_post_free_dma_pool,
   6045				GFP_KERNEL,
   6046				&ioc->reply_post[i].reply_post_free_dma);
   6047			if (!ioc->reply_post[i].reply_post_free)
   6048				return -ENOMEM;
    6049			/*
    6050			 * Each set of the RDPQ pool must satisfy the 4GB
    6051			 * boundary restriction:
    6052			 * 1) Check if the allocated resources for the RDPQ
    6053			 *	pool are in the same 4GB range.
    6054			 * 2) If #1 is true, continue with 64 bit DMA.
    6055			 * 3) If #1 is false, return -EAGAIN: free all the
    6056			 *	resources, set a 32 bit DMA mask and reallocate.
    6057			 */
   6058			if (!mpt3sas_check_same_4gb_region(
   6059				ioc->reply_post[i].reply_post_free_dma, sz)) {
   6060				dinitprintk(ioc,
    6061				    ioc_err(ioc, "bad reply_post_free pool(0x%p) "
    6062				    "reply_post_free_dma = (0x%llx)\n",
   6063				    ioc->reply_post[i].reply_post_free,
   6064				    (unsigned long long)
   6065				    ioc->reply_post[i].reply_post_free_dma));
   6066				return -EAGAIN;
   6067			}
   6068			dma_alloc_count--;
   6069
   6070		} else {
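        			/*
        			 * Queues that are not at a chunk boundary reuse
        			 * the dma_pool allocation made for the first
        			 * queue of their chunk, offset by one
        			 * reply_post_free_sz per queue.
        			 */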
   6071			ioc->reply_post[i].reply_post_free =
   6072			    (Mpi2ReplyDescriptorsUnion_t *)
   6073			    ((long)ioc->reply_post[i-1].reply_post_free
   6074			    + reply_post_free_sz);
   6075			ioc->reply_post[i].reply_post_free_dma =
   6076			    (dma_addr_t)
   6077			    (ioc->reply_post[i-1].reply_post_free_dma +
   6078			    reply_post_free_sz);
   6079		}
   6080	}
   6081	return 0;
   6082}
   6083
   6084/**
   6085 * _base_allocate_memory_pools - allocate start of day memory pools
   6086 * @ioc: per adapter object
   6087 *
   6088 * Return: 0 success, anything else error.
   6089 */
   6090static int
   6091_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
   6092{
   6093	struct mpt3sas_facts *facts;
   6094	u16 max_sge_elements;
   6095	u16 chains_needed_per_io;
   6096	u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
   6097	u32 retry_sz;
   6098	u32 rdpq_sz = 0, sense_sz = 0;
   6099	u16 max_request_credit, nvme_blocks_needed;
   6100	unsigned short sg_tablesize;
   6101	u16 sge_size;
   6102	int i;
   6103	int ret = 0, rc = 0;
   6104
   6105	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   6106
   6107
   6108	retry_sz = 0;
   6109	facts = &ioc->facts;
   6110
   6111	/* command line tunables for max sgl entries */
   6112	if (max_sgl_entries != -1)
   6113		sg_tablesize = max_sgl_entries;
   6114	else {
   6115		if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
   6116			sg_tablesize = MPT2SAS_SG_DEPTH;
   6117		else
   6118			sg_tablesize = MPT3SAS_SG_DEPTH;
   6119	}
   6120
   6121	/* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
   6122	if (reset_devices)
   6123		sg_tablesize = min_t(unsigned short, sg_tablesize,
   6124		   MPT_KDUMP_MIN_PHYS_SEGMENTS);
   6125
   6126	if (ioc->is_mcpu_endpoint)
   6127		ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
   6128	else {
   6129		if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
   6130			sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
   6131		else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
   6132			sg_tablesize = min_t(unsigned short, sg_tablesize,
   6133					SG_MAX_SEGMENTS);
   6134			ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
   6135				 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
   6136		}
   6137		ioc->shost->sg_tablesize = sg_tablesize;
   6138	}
   6139
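        	/*
        	 * Size the internal queue to the firmware's high-priority
        	 * credits plus 5 internal command slots, capped at a quarter
        	 * of the total request credits.
        	 */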
   6140	ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
   6141		(facts->RequestCredit / 4));
   6142	if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
   6143		if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
   6144				INTERNAL_SCSIIO_CMDS_COUNT)) {
   6145			ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
   6146				facts->RequestCredit);
   6147			return -ENOMEM;
   6148		}
   6149		ioc->internal_depth = 10;
   6150	}
   6151
   6152	ioc->hi_priority_depth = ioc->internal_depth - (5);
   6153	/* command line tunables  for max controller queue depth */
   6154	if (max_queue_depth != -1 && max_queue_depth != 0) {
   6155		max_request_credit = min_t(u16, max_queue_depth +
   6156			ioc->internal_depth, facts->RequestCredit);
   6157		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
   6158			max_request_credit =  MAX_HBA_QUEUE_DEPTH;
   6159	} else if (reset_devices)
   6160		max_request_credit = min_t(u16, facts->RequestCredit,
   6161		    (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
   6162	else
   6163		max_request_credit = min_t(u16, facts->RequestCredit,
   6164		    MAX_HBA_QUEUE_DEPTH);
   6165
   6166	/* Firmware maintains additional facts->HighPriorityCredit number of
   6167	 * credits for HiPriprity Request messages, so hba queue depth will be
   6168	 * sum of max_request_credit and high priority queue depth.
   6169	 */
   6170	ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
   6171
   6172	/* request frame size */
   6173	ioc->request_sz = facts->IOCRequestFrameSize * 4;
   6174
   6175	/* reply frame size */
   6176	ioc->reply_sz = facts->ReplyFrameSize * 4;
   6177
   6178	/* chain segment size */
   6179	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
   6180		if (facts->IOCMaxChainSegmentSize)
   6181			ioc->chain_segment_sz =
   6182					facts->IOCMaxChainSegmentSize *
   6183					MAX_CHAIN_ELEMT_SZ;
   6184		else
   6185		/* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
   6186			ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
   6187						    MAX_CHAIN_ELEMT_SZ;
   6188	} else
   6189		ioc->chain_segment_sz = ioc->request_sz;
   6190
   6191	/* calculate the max scatter element size */
   6192	sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
   6193
   6194 retry_allocation:
   6195	total_sz = 0;
   6196	/* calculate number of sg elements left over in the 1st frame */
   6197	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
   6198	    sizeof(Mpi2SGEIOUnion_t)) + sge_size);
   6199	ioc->max_sges_in_main_message = max_sge_elements/sge_size;
   6200
   6201	/* now do the same for a chain buffer */
   6202	max_sge_elements = ioc->chain_segment_sz - sge_size;
   6203	ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
   6204
   6205	/*
   6206	 *  MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
   6207	 */
   6208	chains_needed_per_io = ((ioc->shost->sg_tablesize -
   6209	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
   6210	    + 1;
   6211	if (chains_needed_per_io > facts->MaxChainDepth) {
   6212		chains_needed_per_io = facts->MaxChainDepth;
   6213		ioc->shost->sg_tablesize = min_t(u16,
   6214		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
   6215		* chains_needed_per_io), ioc->shost->sg_tablesize);
   6216	}
   6217	ioc->chains_needed_per_io = chains_needed_per_io;
   6218
   6219	/* reply free queue sizing - taking into account for 64 FW events */
   6220	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
   6221
   6222	/* mCPU manage single counters for simplicity */
   6223	if (ioc->is_mcpu_endpoint)
   6224		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
   6225	else {
   6226		/* calculate reply descriptor post queue depth */
   6227		ioc->reply_post_queue_depth = ioc->hba_queue_depth +
   6228			ioc->reply_free_queue_depth +  1;
   6229		/* align the reply post queue on the next 16 count boundary */
   6230		if (ioc->reply_post_queue_depth % 16)
   6231			ioc->reply_post_queue_depth += 16 -
   6232				(ioc->reply_post_queue_depth % 16);
   6233	}
   6234
   6235	if (ioc->reply_post_queue_depth >
   6236	    facts->MaxReplyDescriptorPostQueueDepth) {
   6237		ioc->reply_post_queue_depth =
   6238				facts->MaxReplyDescriptorPostQueueDepth -
   6239		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
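        		/*
        		 * Invert reply_post_queue_depth = hba_queue_depth +
        		 * reply_free_queue_depth + 1 (with reply_free_queue_depth
        		 * being hba_queue_depth + 64) to derive the reduced
        		 * hba_queue_depth, rounding down for safety.
        		 */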
   6240		ioc->hba_queue_depth =
   6241				((ioc->reply_post_queue_depth - 64) / 2) - 1;
   6242		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
   6243	}
   6244
   6245	ioc_info(ioc,
   6246	    "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
   6247	    "sge_per_io(%d), chains_per_io(%d)\n",
   6248	    ioc->max_sges_in_main_message,
   6249	    ioc->max_sges_in_chain_message,
   6250	    ioc->shost->sg_tablesize,
   6251	    ioc->chains_needed_per_io);
   6252
   6253	/* reply post queue, 16 byte align */
   6254	reply_post_free_sz = ioc->reply_post_queue_depth *
   6255	    sizeof(Mpi2DefaultReplyDescriptor_t);
   6256	rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
   6257	if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
   6258	    || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK))
   6259		rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
   6260	ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
   6261	if (ret == -EAGAIN) {
   6262		/*
   6263		 * Free allocated bad RDPQ memory pools.
   6264		 * Change dma coherent mask to 32 bit and reallocate RDPQ
   6265		 */
   6266		_base_release_memory_pools(ioc);
   6267		ioc->use_32bit_dma = true;
   6268		if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
   6269			ioc_err(ioc,
   6270			    "32 DMA mask failed %s\n", pci_name(ioc->pdev));
   6271			return -ENODEV;
   6272		}
   6273		if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
   6274			return -ENOMEM;
   6275	} else if (ret == -ENOMEM)
   6276		return -ENOMEM;
   6277	total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
   6278	    DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
   6279	ioc->scsiio_depth = ioc->hba_queue_depth -
   6280	    ioc->hi_priority_depth - ioc->internal_depth;
   6281
   6282	/* set the scsi host can_queue depth
   6283	 * with some internal commands that could be outstanding
   6284	 */
   6285	ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
   6286	dinitprintk(ioc,
   6287		    ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
   6288			     ioc->shost->can_queue));
   6289
    6290	/* contiguous pool for request and chains, 16 byte align, one extra
    6291	 * frame for smid=0
    6292	 */
   6293	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
   6294	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
   6295
   6296	/* hi-priority queue */
   6297	sz += (ioc->hi_priority_depth * ioc->request_sz);
   6298
   6299	/* internal queue */
   6300	sz += (ioc->internal_depth * ioc->request_sz);
   6301
   6302	ioc->request_dma_sz = sz;
   6303	ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
   6304			&ioc->request_dma, GFP_KERNEL);
   6305	if (!ioc->request) {
   6306		ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
   6307			ioc->hba_queue_depth, ioc->chains_needed_per_io,
   6308			ioc->request_sz, sz / 1024);
   6309		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
   6310			goto out;
   6311		retry_sz = 64;
   6312		ioc->hba_queue_depth -= retry_sz;
   6313		_base_release_memory_pools(ioc);
   6314		goto retry_allocation;
   6315	}
   6316
   6317	if (retry_sz)
   6318		ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
   6319			ioc->hba_queue_depth, ioc->chains_needed_per_io,
   6320			ioc->request_sz, sz / 1024);
   6321
   6322	/* hi-priority queue */
   6323	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
   6324	    ioc->request_sz);
   6325	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
   6326	    ioc->request_sz);
   6327
   6328	/* internal queue */
   6329	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
   6330	    ioc->request_sz);
   6331	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
   6332	    ioc->request_sz);
   6333
   6334	ioc_info(ioc,
   6335	    "request pool(0x%p) - dma(0x%llx): "
   6336	    "depth(%d), frame_size(%d), pool_size(%d kB)\n",
   6337	    ioc->request, (unsigned long long) ioc->request_dma,
   6338	    ioc->hba_queue_depth, ioc->request_sz,
   6339	    (ioc->hba_queue_depth * ioc->request_sz) / 1024);
   6340
   6341	total_sz += sz;
   6342
   6343	dinitprintk(ioc,
   6344		    ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
   6345			     ioc->request, ioc->scsiio_depth));
   6346
   6347	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
   6348	sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
   6349	ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
   6350	if (!ioc->chain_lookup) {
    6351		ioc_err(ioc, "chain_lookup: kzalloc failed\n");
   6352		goto out;
   6353	}
   6354
   6355	sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
   6356	for (i = 0; i < ioc->scsiio_depth; i++) {
   6357		ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
   6358		if (!ioc->chain_lookup[i].chains_per_smid) {
   6359			ioc_err(ioc, "chain_lookup: kzalloc failed\n");
   6360			goto out;
   6361		}
   6362	}
   6363
   6364	/* initialize hi-priority queue smid's */
   6365	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
   6366	    sizeof(struct request_tracker), GFP_KERNEL);
   6367	if (!ioc->hpr_lookup) {
   6368		ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
   6369		goto out;
   6370	}
   6371	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
   6372	dinitprintk(ioc,
   6373		    ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
   6374			     ioc->hi_priority,
   6375			     ioc->hi_priority_depth, ioc->hi_priority_smid));
   6376
   6377	/* initialize internal queue smid's */
   6378	ioc->internal_lookup = kcalloc(ioc->internal_depth,
   6379	    sizeof(struct request_tracker), GFP_KERNEL);
   6380	if (!ioc->internal_lookup) {
   6381		ioc_err(ioc, "internal_lookup: kcalloc failed\n");
   6382		goto out;
   6383	}
   6384	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
   6385	dinitprintk(ioc,
   6386		    ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
   6387			     ioc->internal,
   6388			     ioc->internal_depth, ioc->internal_smid));
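        	/*
        	 * Resulting smid layout: 1..scsiio_depth are SCSI I/O requests,
        	 * the next hi_priority_depth smids (starting at scsiio_depth + 1)
        	 * are hi-priority requests, and the following internal_depth
        	 * smids are internal requests.
        	 */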
   6389
   6390	ioc->io_queue_num = kcalloc(ioc->scsiio_depth,
   6391	    sizeof(u16), GFP_KERNEL);
   6392	if (!ioc->io_queue_num)
   6393		goto out;
   6394	/*
   6395	 * The number of NVMe page sized blocks needed is:
   6396	 *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
   6397	 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
   6398	 * that is placed in the main message frame.  8 is the size of each PRP
   6399	 * entry or PRP list pointer entry.  8 is subtracted from page_size
   6400	 * because of the PRP list pointer entry at the end of a page, so this
    6401	 * is not counted as a PRP entry.  The added 1 rounds the result up.
   6402	 *
   6403	 * To avoid allocation failures due to the amount of memory that could
   6404	 * be required for NVMe PRP's, only each set of NVMe blocks will be
   6405	 * contiguous, so a new set is allocated for each possible I/O.
   6406	 */
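        	/*
        	 * Worked example of the formula above: with sg_tablesize = 2048 and
        	 * a 4096 byte page, ((2048 * 8) - 1) / (4096 - 8) + 1 =
        	 * 16383 / 4088 + 1 = 4 + 1 = 5 NVMe page sized blocks per I/O.
        	 */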
   6407
   6408	ioc->chains_per_prp_buffer = 0;
   6409	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
   6410		nvme_blocks_needed =
   6411			(ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
   6412		nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
   6413		nvme_blocks_needed++;
   6414
   6415		sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
   6416		ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
   6417		if (!ioc->pcie_sg_lookup) {
   6418			ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
   6419			goto out;
   6420		}
   6421		sz = nvme_blocks_needed * ioc->page_size;
   6422		rc = _base_allocate_pcie_sgl_pool(ioc, sz);
   6423		if (rc == -ENOMEM)
   6424			return -ENOMEM;
   6425		else if (rc == -EAGAIN)
   6426			goto try_32bit_dma;
   6427		total_sz += sz * ioc->scsiio_depth;
   6428	}
   6429
   6430	rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
   6431	if (rc == -ENOMEM)
   6432		return -ENOMEM;
   6433	else if (rc == -EAGAIN)
   6434		goto try_32bit_dma;
   6435	total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io -
   6436		ioc->chains_per_prp_buffer) * ioc->scsiio_depth);
   6437	dinitprintk(ioc,
   6438	    ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
   6439	    ioc->chain_depth, ioc->chain_segment_sz,
   6440	    (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
   6441	/* sense buffers, 4 byte align */
   6442	sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
   6443	rc = _base_allocate_sense_dma_pool(ioc, sense_sz);
   6444	if (rc  == -ENOMEM)
   6445		return -ENOMEM;
   6446	else if (rc == -EAGAIN)
   6447		goto try_32bit_dma;
   6448	total_sz += sense_sz;
    6449	ioc_info(ioc,
    6450	    "sense pool(0x%p) - dma(0x%llx): depth(%d), "
    6451	    "element_size(%d), pool_size(%d kB)\n",
    6452	    ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
    6453	    SCSI_SENSE_BUFFERSIZE, sense_sz / 1024);
   6454	/* reply pool, 4 byte align */
   6455	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
   6456	rc = _base_allocate_reply_pool(ioc, sz);
   6457	if (rc == -ENOMEM)
   6458		return -ENOMEM;
   6459	else if (rc == -EAGAIN)
   6460		goto try_32bit_dma;
   6461	total_sz += sz;
   6462
   6463	/* reply free queue, 16 byte align */
   6464	sz = ioc->reply_free_queue_depth * 4;
   6465	rc = _base_allocate_reply_free_dma_pool(ioc, sz);
   6466	if (rc  == -ENOMEM)
   6467		return -ENOMEM;
   6468	else if (rc == -EAGAIN)
   6469		goto try_32bit_dma;
   6470	dinitprintk(ioc,
   6471		    ioc_info(ioc, "reply_free_dma (0x%llx)\n",
   6472			     (unsigned long long)ioc->reply_free_dma));
   6473	total_sz += sz;
   6474	if (ioc->rdpq_array_enable) {
   6475		reply_post_free_array_sz = ioc->reply_queue_count *
   6476		    sizeof(Mpi2IOCInitRDPQArrayEntry);
   6477		rc = _base_allocate_reply_post_free_array(ioc,
   6478		    reply_post_free_array_sz);
   6479		if (rc == -ENOMEM)
   6480			return -ENOMEM;
   6481		else if (rc == -EAGAIN)
   6482			goto try_32bit_dma;
   6483	}
   6484	ioc->config_page_sz = 512;
   6485	ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
   6486			ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
   6487	if (!ioc->config_page) {
   6488		ioc_err(ioc, "config page: dma_pool_alloc failed\n");
   6489		goto out;
   6490	}
   6491
   6492	ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
   6493	    ioc->config_page, (unsigned long long)ioc->config_page_dma,
   6494	    ioc->config_page_sz);
   6495	total_sz += ioc->config_page_sz;
   6496
   6497	ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
   6498		 total_sz / 1024);
   6499	ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
   6500		 ioc->shost->can_queue, facts->RequestCredit);
   6501	ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
   6502		 ioc->shost->sg_tablesize);
   6503	return 0;
   6504
   6505try_32bit_dma:
   6506	_base_release_memory_pools(ioc);
   6507	if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
   6508		/* Change dma coherent mask to 32 bit and reallocate */
   6509		if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
   6510			pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
   6511			    pci_name(ioc->pdev));
   6512			return -ENODEV;
   6513		}
   6514	} else if (_base_reduce_hba_queue_depth(ioc) != 0)
   6515		return -ENOMEM;
   6516	goto retry_allocation;
   6517
   6518 out:
   6519	return -ENOMEM;
   6520}
   6521
   6522/**
   6523 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
   6524 * @ioc: Pointer to MPT_ADAPTER structure
   6525 * @cooked: Request raw or cooked IOC state
   6526 *
   6527 * Return: all IOC Doorbell register bits if cooked==0, else just the
    6528	 * Doorbell bits in MPI2_IOC_STATE_MASK.
   6529 */
   6530u32
   6531mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
   6532{
   6533	u32 s, sc;
   6534
   6535	s = ioc->base_readl(&ioc->chip->Doorbell);
   6536	sc = s & MPI2_IOC_STATE_MASK;
   6537	return cooked ? sc : s;
   6538}
   6539
   6540/**
   6541 * _base_wait_on_iocstate - waiting on a particular ioc state
    6542	 * @ioc: per adapter object
   6543 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
    6544	 * @timeout: timeout in seconds
   6545 *
   6546 * Return: 0 for success, non-zero for failure.
   6547 */
   6548static int
   6549_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
   6550{
   6551	u32 count, cntdn;
   6552	u32 current_state;
   6553
   6554	count = 0;
   6555	cntdn = 1000 * timeout;
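        	/* cntdn counts ~1 ms polls, so the loop spans roughly 'timeout' seconds */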
   6556	do {
   6557		current_state = mpt3sas_base_get_iocstate(ioc, 1);
   6558		if (current_state == ioc_state)
   6559			return 0;
   6560		if (count && current_state == MPI2_IOC_STATE_FAULT)
   6561			break;
   6562		if (count && current_state == MPI2_IOC_STATE_COREDUMP)
   6563			break;
   6564
   6565		usleep_range(1000, 1500);
   6566		count++;
   6567	} while (--cntdn);
   6568
   6569	return current_state;
   6570}
   6571
   6572/**
    6573	 * _base_dump_reg_set -	print a hexdump of the system register set.
   6574 * @ioc: per adapter object
   6575 *
   6576 * Return: nothing.
   6577 */
   6578static inline void
   6579_base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
   6580{
   6581	unsigned int i, sz = 256;
   6582	u32 __iomem *reg = (u32 __iomem *)ioc->chip;
   6583
   6584	ioc_info(ioc, "System Register set:\n");
   6585	for (i = 0; i < (sz / sizeof(u32)); i++)
   6586		pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
   6587}
   6588
   6589/**
    6590	 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by
   6591 * a write to the doorbell)
   6592 * @ioc: per adapter object
   6593 * @timeout: timeout in seconds
   6594 *
   6595 * Return: 0 for success, non-zero for failure.
   6596 *
   6597 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
   6598 */
   6599
   6600static int
   6601_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
   6602{
   6603	u32 cntdn, count;
   6604	u32 int_status;
   6605
   6606	count = 0;
   6607	cntdn = 1000 * timeout;
   6608	do {
   6609		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
   6610		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
   6611			dhsprintk(ioc,
   6612				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
   6613					   __func__, count, timeout));
   6614			return 0;
   6615		}
   6616
   6617		usleep_range(1000, 1500);
   6618		count++;
   6619	} while (--cntdn);
   6620
   6621	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
   6622		__func__, count, int_status);
   6623	return -EFAULT;
   6624}
   6625
   6626static int
   6627_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
   6628{
   6629	u32 cntdn, count;
   6630	u32 int_status;
   6631
   6632	count = 0;
   6633	cntdn = 2000 * timeout;
   6634	do {
   6635		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
   6636		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
   6637			dhsprintk(ioc,
   6638				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
   6639					   __func__, count, timeout));
   6640			return 0;
   6641		}
   6642
   6643		udelay(500);
   6644		count++;
   6645	} while (--cntdn);
   6646
   6647	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
   6648		__func__, count, int_status);
   6649	return -EFAULT;
   6650
   6651}
   6652
   6653/**
   6654 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
   6655 * @ioc: per adapter object
    6656	 * @timeout: timeout in seconds
   6657 *
   6658 * Return: 0 for success, non-zero for failure.
   6659 *
   6660 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
   6661 * doorbell.
   6662 */
   6663static int
   6664_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
   6665{
   6666	u32 cntdn, count;
   6667	u32 int_status;
   6668	u32 doorbell;
   6669
   6670	count = 0;
   6671	cntdn = 1000 * timeout;
   6672	do {
   6673		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
   6674		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
   6675			dhsprintk(ioc,
   6676				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
   6677					   __func__, count, timeout));
   6678			return 0;
   6679		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
   6680			doorbell = ioc->base_readl(&ioc->chip->Doorbell);
   6681			if ((doorbell & MPI2_IOC_STATE_MASK) ==
   6682			    MPI2_IOC_STATE_FAULT) {
   6683				mpt3sas_print_fault_code(ioc, doorbell);
   6684				return -EFAULT;
   6685			}
   6686			if ((doorbell & MPI2_IOC_STATE_MASK) ==
   6687			    MPI2_IOC_STATE_COREDUMP) {
   6688				mpt3sas_print_coredump_info(ioc, doorbell);
   6689				return -EFAULT;
   6690			}
   6691		} else if (int_status == 0xFFFFFFFF)
   6692			goto out;
   6693
   6694		usleep_range(1000, 1500);
   6695		count++;
   6696	} while (--cntdn);
   6697
   6698 out:
   6699	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
   6700		__func__, count, int_status);
   6701	return -EFAULT;
   6702}
   6703
   6704/**
   6705 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
   6706 * @ioc: per adapter object
    6707	 * @timeout: timeout in seconds
   6708 *
   6709 * Return: 0 for success, non-zero for failure.
   6710 */
   6711static int
   6712_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
   6713{
   6714	u32 cntdn, count;
   6715	u32 doorbell_reg;
   6716
   6717	count = 0;
   6718	cntdn = 1000 * timeout;
   6719	do {
   6720		doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
   6721		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
   6722			dhsprintk(ioc,
   6723				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
   6724					   __func__, count, timeout));
   6725			return 0;
   6726		}
   6727
   6728		usleep_range(1000, 1500);
   6729		count++;
   6730	} while (--cntdn);
   6731
   6732	ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
   6733		__func__, count, doorbell_reg);
   6734	return -EFAULT;
   6735}
   6736
   6737/**
   6738 * _base_send_ioc_reset - send doorbell reset
   6739 * @ioc: per adapter object
   6740 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
    6741	 * @timeout: timeout in seconds
   6742 *
   6743 * Return: 0 for success, non-zero for failure.
   6744 */
   6745static int
   6746_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
   6747{
   6748	u32 ioc_state;
   6749	int r = 0;
   6750	unsigned long flags;
   6751
   6752	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
   6753		ioc_err(ioc, "%s: unknown reset_type\n", __func__);
   6754		return -EFAULT;
   6755	}
   6756
   6757	if (!(ioc->facts.IOCCapabilities &
   6758	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
   6759		return -EFAULT;
   6760
   6761	ioc_info(ioc, "sending message unit reset !!\n");
   6762
   6763	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
   6764	    &ioc->chip->Doorbell);
   6765	if ((_base_wait_for_doorbell_ack(ioc, 15))) {
   6766		r = -EFAULT;
   6767		goto out;
   6768	}
   6769
   6770	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
   6771	if (ioc_state) {
   6772		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
   6773			__func__, ioc_state);
   6774		r = -EFAULT;
   6775		goto out;
   6776	}
   6777 out:
   6778	if (r != 0) {
   6779		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
   6780		spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
   6781		/*
   6782		 * Wait for IOC state CoreDump to clear only during
   6783		 * HBA initialization & release time.
   6784		 */
   6785		if ((ioc_state & MPI2_IOC_STATE_MASK) ==
   6786		    MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
   6787		    ioc->fault_reset_work_q == NULL)) {
   6788			spin_unlock_irqrestore(
   6789			    &ioc->ioc_reset_in_progress_lock, flags);
   6790			mpt3sas_print_coredump_info(ioc, ioc_state);
   6791			mpt3sas_base_wait_for_coredump_completion(ioc,
   6792			    __func__);
   6793			spin_lock_irqsave(
   6794			    &ioc->ioc_reset_in_progress_lock, flags);
   6795		}
   6796		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
   6797	}
   6798	ioc_info(ioc, "message unit reset: %s\n",
   6799		 r == 0 ? "SUCCESS" : "FAILED");
   6800	return r;
   6801}
   6802
   6803/**
    6804	 * mpt3sas_wait_for_ioc - wait for the IOC to become operational
   6805 * @ioc: per adapter object
   6806 * @timeout: timeout in seconds
   6807 *
    6808	 * Return: Waits up to timeout seconds for the IOC to
    6809	 * become operational. Returns 0 if the IOC is present and operational,
    6810	 * %-ETIME if the driver is still loading, otherwise %-EFAULT.
   6811 */
   6812
   6813int
   6814mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
   6815{
   6816	int wait_state_count = 0;
   6817	u32 ioc_state;
   6818
   6819	do {
   6820		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
   6821		if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
   6822			break;
   6823
    6824		/*
    6825		 * The watchdog thread is only started after IOC initialization,
    6826		 * so there is no need to wait here for the IOC to become
    6827		 * operational while initialization is still in progress. Instead
    6828		 * the driver returns ETIME status, so that the calling function
    6829		 * can issue a diag reset operation and retry the command.
    6830		 */
   6831		if (ioc->is_driver_loading)
   6832			return -ETIME;
   6833
   6834		ssleep(1);
   6835		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
   6836				__func__, ++wait_state_count);
   6837	} while (--timeout);
   6838	if (!timeout) {
   6839		ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
   6840		return -EFAULT;
   6841	}
   6842	if (wait_state_count)
   6843		ioc_info(ioc, "ioc is operational\n");
   6844	return 0;
   6845}
   6846
   6847/**
    6848	 * _base_handshake_req_reply_wait - send request through doorbell interface
    6849	 * @ioc: per adapter object
    6850	 * @request_bytes: request length
    6851	 * @request: pointer to request payload
   6852 * @reply_bytes: reply length
   6853 * @reply: pointer to reply payload
    6854	 * @timeout: timeout in seconds
   6855 *
   6856 * Return: 0 for success, non-zero for failure.
   6857 */
   6858static int
   6859_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
   6860	u32 *request, int reply_bytes, u16 *reply, int timeout)
   6861{
   6862	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
   6863	int i;
   6864	u8 failed;
   6865	__le32 *mfp;
   6866
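        	/*
        	 * Handshake outline (as implemented below): write the handshake
        	 * function and dword count to the Doorbell, wait for the IOC2SYS
        	 * interrupt and the ack, stream the request 32 bits at a time
        	 * through the Doorbell, then read the reply back 16 bits at a
        	 * time, acking each word by clearing HostInterruptStatus.
        	 */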
   6867	/* make sure doorbell is not in use */
   6868	if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
   6869		ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
   6870		return -EFAULT;
   6871	}
   6872
   6873	/* clear pending doorbell interrupts from previous state changes */
   6874	if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
   6875	    MPI2_HIS_IOC2SYS_DB_STATUS)
   6876		writel(0, &ioc->chip->HostInterruptStatus);
   6877
   6878	/* send message to ioc */
   6879	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
   6880	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
   6881	    &ioc->chip->Doorbell);
   6882
   6883	if ((_base_spin_on_doorbell_int(ioc, 5))) {
   6884		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
   6885			__LINE__);
   6886		return -EFAULT;
   6887	}
   6888	writel(0, &ioc->chip->HostInterruptStatus);
   6889
   6890	if ((_base_wait_for_doorbell_ack(ioc, 5))) {
   6891		ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
   6892			__LINE__);
   6893		return -EFAULT;
   6894	}
   6895
   6896	/* send message 32-bits at a time */
   6897	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
   6898		writel(request[i], &ioc->chip->Doorbell);
   6899		if ((_base_wait_for_doorbell_ack(ioc, 5)))
   6900			failed = 1;
   6901	}
   6902
   6903	if (failed) {
   6904		ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
   6905			__LINE__);
   6906		return -EFAULT;
   6907	}
   6908
   6909	/* now wait for the reply */
   6910	if ((_base_wait_for_doorbell_int(ioc, timeout))) {
   6911		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
   6912			__LINE__);
   6913		return -EFAULT;
   6914	}
   6915
    6916	/* read the first two 16-bit words; they give the total length of the reply */
   6917	reply[0] = ioc->base_readl(&ioc->chip->Doorbell)
   6918		& MPI2_DOORBELL_DATA_MASK;
   6919	writel(0, &ioc->chip->HostInterruptStatus);
   6920	if ((_base_wait_for_doorbell_int(ioc, 5))) {
   6921		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
   6922			__LINE__);
   6923		return -EFAULT;
   6924	}
   6925	reply[1] = ioc->base_readl(&ioc->chip->Doorbell)
   6926		& MPI2_DOORBELL_DATA_MASK;
   6927	writel(0, &ioc->chip->HostInterruptStatus);
   6928
   6929	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
   6930		if ((_base_wait_for_doorbell_int(ioc, 5))) {
   6931			ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
   6932				__LINE__);
   6933			return -EFAULT;
   6934		}
   6935		if (i >=  reply_bytes/2) /* overflow case */
   6936			ioc->base_readl(&ioc->chip->Doorbell);
   6937		else
   6938			reply[i] = ioc->base_readl(&ioc->chip->Doorbell)
   6939				& MPI2_DOORBELL_DATA_MASK;
   6940		writel(0, &ioc->chip->HostInterruptStatus);
   6941	}
   6942
   6943	_base_wait_for_doorbell_int(ioc, 5);
   6944	if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
   6945		dhsprintk(ioc,
   6946			  ioc_info(ioc, "doorbell is in use (line=%d)\n",
   6947				   __LINE__));
   6948	}
   6949	writel(0, &ioc->chip->HostInterruptStatus);
   6950
   6951	if (ioc->logging_level & MPT_DEBUG_INIT) {
   6952		mfp = (__le32 *)reply;
   6953		pr_info("\toffset:data\n");
   6954		for (i = 0; i < reply_bytes/4; i++)
   6955			ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
   6956			    le32_to_cpu(mfp[i]));
   6957	}
   6958	return 0;
   6959}
   6960
   6961/**
   6962 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
   6963 * @ioc: per adapter object
   6964 * @mpi_reply: the reply payload from FW
   6965 * @mpi_request: the request payload sent to FW
   6966 *
    6967	 * The SAS IO Unit Control Request message allows the host to perform low-level
    6968	 * operations, such as resets on the PHYs of the IO Unit. It also allows the
    6969	 * host to obtain the IOC-assigned device handle for a device when it has other
    6970	 * identifying information about that device, and to remove IOC resources
    6971	 * associated with the device.
   6972 *
   6973 * Return: 0 for success, non-zero for failure.
   6974 */
   6975int
   6976mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
   6977	Mpi2SasIoUnitControlReply_t *mpi_reply,
   6978	Mpi2SasIoUnitControlRequest_t *mpi_request)
   6979{
   6980	u16 smid;
   6981	u8 issue_reset = 0;
   6982	int rc;
   6983	void *request;
   6984
   6985	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   6986
   6987	mutex_lock(&ioc->base_cmds.mutex);
   6988
   6989	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
   6990		ioc_err(ioc, "%s: base_cmd in use\n", __func__);
   6991		rc = -EAGAIN;
   6992		goto out;
   6993	}
   6994
   6995	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
   6996	if (rc)
   6997		goto out;
   6998
   6999	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
   7000	if (!smid) {
   7001		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
   7002		rc = -EAGAIN;
   7003		goto out;
   7004	}
   7005
   7006	rc = 0;
   7007	ioc->base_cmds.status = MPT3_CMD_PENDING;
   7008	request = mpt3sas_base_get_msg_frame(ioc, smid);
   7009	ioc->base_cmds.smid = smid;
   7010	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
   7011	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
   7012	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
   7013		ioc->ioc_link_reset_in_progress = 1;
   7014	init_completion(&ioc->base_cmds.done);
   7015	ioc->put_smid_default(ioc, smid);
   7016	wait_for_completion_timeout(&ioc->base_cmds.done,
   7017	    msecs_to_jiffies(10000));
   7018	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
   7019	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
   7020	    ioc->ioc_link_reset_in_progress)
   7021		ioc->ioc_link_reset_in_progress = 0;
   7022	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
   7023		mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
   7024		    mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
   7025		    issue_reset);
   7026		goto issue_host_reset;
   7027	}
   7028	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
   7029		memcpy(mpi_reply, ioc->base_cmds.reply,
   7030		    sizeof(Mpi2SasIoUnitControlReply_t));
   7031	else
   7032		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
   7033	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
   7034	goto out;
   7035
   7036 issue_host_reset:
   7037	if (issue_reset)
   7038		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
   7039	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
   7040	rc = -EFAULT;
   7041 out:
   7042	mutex_unlock(&ioc->base_cmds.mutex);
   7043	return rc;
   7044}
   7045
   7046/**
   7047 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
   7048 * @ioc: per adapter object
   7049 * @mpi_reply: the reply payload from FW
   7050 * @mpi_request: the request payload sent to FW
   7051 *
   7052 * The SCSI Enclosure Processor request message causes the IOC to
   7053 * communicate with SES devices to control LED status signals.
   7054 *
   7055 * Return: 0 for success, non-zero for failure.
   7056 */
   7057int
   7058mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
   7059	Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
   7060{
   7061	u16 smid;
   7062	u8 issue_reset = 0;
   7063	int rc;
   7064	void *request;
   7065
   7066	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   7067
   7068	mutex_lock(&ioc->base_cmds.mutex);
   7069
   7070	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
   7071		ioc_err(ioc, "%s: base_cmd in use\n", __func__);
   7072		rc = -EAGAIN;
   7073		goto out;
   7074	}
   7075
   7076	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
   7077	if (rc)
   7078		goto out;
   7079
   7080	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
   7081	if (!smid) {
   7082		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
   7083		rc = -EAGAIN;
   7084		goto out;
   7085	}
   7086
   7087	rc = 0;
   7088	ioc->base_cmds.status = MPT3_CMD_PENDING;
   7089	request = mpt3sas_base_get_msg_frame(ioc, smid);
   7090	ioc->base_cmds.smid = smid;
   7091	memset(request, 0, ioc->request_sz);
   7092	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
   7093	init_completion(&ioc->base_cmds.done);
   7094	ioc->put_smid_default(ioc, smid);
   7095	wait_for_completion_timeout(&ioc->base_cmds.done,
   7096	    msecs_to_jiffies(10000));
   7097	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
   7098		mpt3sas_check_cmd_timeout(ioc,
   7099		    ioc->base_cmds.status, mpi_request,
   7100		    sizeof(Mpi2SepRequest_t)/4, issue_reset);
   7101		goto issue_host_reset;
   7102	}
   7103	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
   7104		memcpy(mpi_reply, ioc->base_cmds.reply,
   7105		    sizeof(Mpi2SepReply_t));
   7106	else
   7107		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
   7108	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
   7109	goto out;
   7110
   7111 issue_host_reset:
   7112	if (issue_reset)
   7113		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
   7114	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
   7115	rc = -EFAULT;
   7116 out:
   7117	mutex_unlock(&ioc->base_cmds.mutex);
   7118	return rc;
   7119}
   7120
   7121/**
   7122 * _base_get_port_facts - obtain port facts reply and save in ioc
   7123 * @ioc: per adapter object
    7124	 * @port: port number
   7125 *
   7126 * Return: 0 for success, non-zero for failure.
   7127 */
   7128static int
   7129_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
   7130{
   7131	Mpi2PortFactsRequest_t mpi_request;
   7132	Mpi2PortFactsReply_t mpi_reply;
   7133	struct mpt3sas_port_facts *pfacts;
   7134	int mpi_reply_sz, mpi_request_sz, r;
   7135
   7136	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   7137
   7138	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
   7139	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
   7140	memset(&mpi_request, 0, mpi_request_sz);
   7141	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
   7142	mpi_request.PortNumber = port;
   7143	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
   7144	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
   7145
   7146	if (r != 0) {
   7147		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
   7148		return r;
   7149	}
   7150
   7151	pfacts = &ioc->pfacts[port];
   7152	memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
   7153	pfacts->PortNumber = mpi_reply.PortNumber;
   7154	pfacts->VP_ID = mpi_reply.VP_ID;
   7155	pfacts->VF_ID = mpi_reply.VF_ID;
   7156	pfacts->MaxPostedCmdBuffers =
   7157	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
   7158
   7159	return 0;
   7160}
   7161
   7162/**
   7163 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
   7164 * @ioc: per adapter object
    7165	 * @timeout: timeout in seconds
   7166 *
   7167 * Return: 0 for success, non-zero for failure.
   7168 */
   7169static int
   7170_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
   7171{
   7172	u32 ioc_state;
   7173	int rc;
   7174
   7175	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   7176
   7177	if (ioc->pci_error_recovery) {
   7178		dfailprintk(ioc,
   7179			    ioc_info(ioc, "%s: host in pci error recovery\n",
   7180				     __func__));
   7181		return -EFAULT;
   7182	}
   7183
   7184	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
   7185	dhsprintk(ioc,
   7186		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
   7187			   __func__, ioc_state));
   7188
   7189	if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
   7190	    (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
   7191		return 0;
   7192
   7193	if (ioc_state & MPI2_DOORBELL_USED) {
   7194		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
   7195		goto issue_diag_reset;
   7196	}
   7197
   7198	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
   7199		mpt3sas_print_fault_code(ioc, ioc_state &
   7200		    MPI2_DOORBELL_DATA_MASK);
   7201		goto issue_diag_reset;
   7202	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
   7203	    MPI2_IOC_STATE_COREDUMP) {
   7204		ioc_info(ioc,
   7205		    "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
   7206		    __func__, ioc_state);
   7207		return -EFAULT;
   7208	}
   7209
   7210	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
   7211	if (ioc_state) {
   7212		dfailprintk(ioc,
   7213			    ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
   7214				     __func__, ioc_state));
   7215		return -EFAULT;
   7216	}
   7217
   7218 issue_diag_reset:
   7219	rc = _base_diag_reset(ioc);
   7220	return rc;
   7221}
   7222
   7223/**
   7224 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
   7225 * @ioc: per adapter object
   7226 *
   7227 * Return: 0 for success, non-zero for failure.
   7228 */
   7229static int
   7230_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
   7231{
   7232	Mpi2IOCFactsRequest_t mpi_request;
   7233	Mpi2IOCFactsReply_t mpi_reply;
   7234	struct mpt3sas_facts *facts;
   7235	int mpi_reply_sz, mpi_request_sz, r;
   7236
   7237	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   7238
   7239	r = _base_wait_for_iocstate(ioc, 10);
   7240	if (r) {
   7241		dfailprintk(ioc,
   7242			    ioc_info(ioc, "%s: failed getting to correct state\n",
   7243				     __func__));
   7244		return r;
   7245	}
   7246	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
   7247	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
   7248	memset(&mpi_request, 0, mpi_request_sz);
   7249	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
   7250	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
   7251	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
   7252
   7253	if (r != 0) {
   7254		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
   7255		return r;
   7256	}
   7257
   7258	facts = &ioc->facts;
   7259	memset(facts, 0, sizeof(struct mpt3sas_facts));
   7260	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
   7261	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
   7262	facts->VP_ID = mpi_reply.VP_ID;
   7263	facts->VF_ID = mpi_reply.VF_ID;
   7264	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
   7265	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
   7266	facts->WhoInit = mpi_reply.WhoInit;
   7267	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
   7268	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
   7269	if (ioc->msix_enable && (facts->MaxMSIxVectors <=
   7270	    MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
   7271		ioc->combined_reply_queue = 0;
   7272	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
   7273	facts->MaxReplyDescriptorPostQueueDepth =
   7274	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
   7275	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
   7276	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
   7277	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
   7278		ioc->ir_firmware = 1;
   7279	if ((facts->IOCCapabilities &
   7280	      MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
   7281		ioc->rdpq_array_capable = 1;
   7282	if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
   7283	    && ioc->is_aero_ioc)
   7284		ioc->atomic_desc_capable = 1;
   7285	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
   7286	facts->IOCRequestFrameSize =
   7287	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
   7288	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
   7289		facts->IOCMaxChainSegmentSize =
   7290			le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
   7291	}
   7292	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
   7293	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
   7294	ioc->shost->max_id = -1;
   7295	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
   7296	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
   7297	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
   7298	facts->HighPriorityCredit =
   7299	    le16_to_cpu(mpi_reply.HighPriorityCredit);
   7300	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
   7301	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
   7302	facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
   7303
   7304	/*
   7305	 * Get the Page Size from IOC Facts. If it's 0, default to 4k.
   7306	 */
   7307	ioc->page_size = 1 << facts->CurrentHostPageSize;
   7308	if (ioc->page_size == 1) {
   7309		ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
   7310		ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
   7311	}
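        	/*
        	 * For example, a CurrentHostPageSize of 12 yields 1 << 12 = 4096
        	 * bytes; MPT3SAS_HOST_PAGE_SIZE_4K is assumed to encode the same
        	 * 4 KiB default.
        	 */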
   7312	dinitprintk(ioc,
   7313		    ioc_info(ioc, "CurrentHostPageSize(%d)\n",
   7314			     facts->CurrentHostPageSize));
   7315
   7316	dinitprintk(ioc,
   7317		    ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
   7318			     facts->RequestCredit, facts->MaxChainDepth));
   7319	dinitprintk(ioc,
   7320		    ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
   7321			     facts->IOCRequestFrameSize * 4,
   7322			     facts->ReplyFrameSize * 4));
   7323	return 0;
   7324}
   7325
   7326/**
   7327 * _base_send_ioc_init - send ioc_init to firmware
   7328 * @ioc: per adapter object
   7329 *
   7330 * Return: 0 for success, non-zero for failure.
   7331 */
   7332static int
   7333_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
   7334{
   7335	Mpi2IOCInitRequest_t mpi_request;
   7336	Mpi2IOCInitReply_t mpi_reply;
   7337	int i, r = 0;
   7338	ktime_t current_time;
   7339	u16 ioc_status;
   7340	u32 reply_post_free_array_sz = 0;
   7341
   7342	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   7343
   7344	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
   7345	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
   7346	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
   7347	mpi_request.VF_ID = 0; /* TODO */
   7348	mpi_request.VP_ID = 0;
   7349	mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
   7350	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
   7351	mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
   7352
   7353	if (_base_is_controller_msix_enabled(ioc))
   7354		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
   7355	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
   7356	mpi_request.ReplyDescriptorPostQueueDepth =
   7357	    cpu_to_le16(ioc->reply_post_queue_depth);
   7358	mpi_request.ReplyFreeQueueDepth =
   7359	    cpu_to_le16(ioc->reply_free_queue_depth);
   7360
   7361	mpi_request.SenseBufferAddressHigh =
   7362	    cpu_to_le32((u64)ioc->sense_dma >> 32);
   7363	mpi_request.SystemReplyAddressHigh =
   7364	    cpu_to_le32((u64)ioc->reply_dma >> 32);
   7365	mpi_request.SystemRequestFrameBaseAddress =
   7366	    cpu_to_le64((u64)ioc->request_dma);
   7367	mpi_request.ReplyFreeQueueAddress =
   7368	    cpu_to_le64((u64)ioc->reply_free_dma);
   7369
   7370	if (ioc->rdpq_array_enable) {
   7371		reply_post_free_array_sz = ioc->reply_queue_count *
   7372		    sizeof(Mpi2IOCInitRDPQArrayEntry);
   7373		memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
   7374		for (i = 0; i < ioc->reply_queue_count; i++)
   7375			ioc->reply_post_free_array[i].RDPQBaseAddress =
   7376			    cpu_to_le64(
   7377				(u64)ioc->reply_post[i].reply_post_free_dma);
   7378		mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
   7379		mpi_request.ReplyDescriptorPostQueueAddress =
   7380		    cpu_to_le64((u64)ioc->reply_post_free_array_dma);
   7381	} else {
   7382		mpi_request.ReplyDescriptorPostQueueAddress =
   7383		    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
   7384	}
   7385
   7386	/*
   7387	 * Set the flag to enable CoreDump state feature in IOC firmware.
   7388	 */
   7389	mpi_request.ConfigurationFlags |=
   7390	    cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);
   7391
    7392	/* This time stamp specifies the number of milliseconds
    7393	 * since the epoch (midnight, January 1, 1970 UTC).
    7394	 */
   7395	current_time = ktime_get_real();
   7396	mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
   7397
   7398	if (ioc->logging_level & MPT_DEBUG_INIT) {
   7399		__le32 *mfp;
   7400		int i;
   7401
   7402		mfp = (__le32 *)&mpi_request;
   7403		ioc_info(ioc, "\toffset:data\n");
   7404		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
   7405			ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
   7406			    le32_to_cpu(mfp[i]));
   7407	}
   7408
   7409	r = _base_handshake_req_reply_wait(ioc,
   7410	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
   7411	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
   7412
   7413	if (r != 0) {
   7414		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
   7415		return r;
   7416	}
   7417
   7418	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
   7419	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
   7420	    mpi_reply.IOCLogInfo) {
   7421		ioc_err(ioc, "%s: failed\n", __func__);
   7422		r = -EIO;
   7423	}
   7424
   7425	/* Reset TimeSync Counter*/
   7426	ioc->timestamp_update_count = 0;
   7427	return r;
   7428}
   7429
   7430/**
   7431 * mpt3sas_port_enable_done - command completion routine for port enable
   7432 * @ioc: per adapter object
   7433 * @smid: system request message index
   7434 * @msix_index: MSIX table index supplied by the OS
   7435 * @reply: reply message frame(lower 32bit addr)
   7436 *
   7437 * Return: 1 meaning mf should be freed from _base_interrupt
   7438 *          0 means the mf is freed from this function.
   7439 */
   7440u8
   7441mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
   7442	u32 reply)
   7443{
   7444	MPI2DefaultReply_t *mpi_reply;
   7445	u16 ioc_status;
   7446
   7447	if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
   7448		return 1;
   7449
   7450	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
   7451	if (!mpi_reply)
   7452		return 1;
   7453
   7454	if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
   7455		return 1;
   7456
   7457	ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
   7458	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
   7459	ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
   7460	memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
   7461	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
   7462	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
   7463		ioc->port_enable_failed = 1;
   7464
   7465	if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) {
   7466		ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC;
   7467		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
   7468			mpt3sas_port_enable_complete(ioc);
   7469			return 1;
   7470		} else {
   7471			ioc->start_scan_failed = ioc_status;
   7472			ioc->start_scan = 0;
   7473			return 1;
   7474		}
   7475	}
   7476	complete(&ioc->port_enable_cmds.done);
   7477	return 1;
   7478}
   7479
   7480/**
   7481 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
   7482 * @ioc: per adapter object
   7483 *
   7484 * Return: 0 for success, non-zero for failure.
   7485 */
   7486static int
   7487_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
   7488{
   7489	Mpi2PortEnableRequest_t *mpi_request;
   7490	Mpi2PortEnableReply_t *mpi_reply;
   7491	int r = 0;
   7492	u16 smid;
   7493	u16 ioc_status;
   7494
   7495	ioc_info(ioc, "sending port enable !!\n");
   7496
   7497	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
   7498		ioc_err(ioc, "%s: internal command already in use\n", __func__);
   7499		return -EAGAIN;
   7500	}
   7501
   7502	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
   7503	if (!smid) {
   7504		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
   7505		return -EAGAIN;
   7506	}
   7507
   7508	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
   7509	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
   7510	ioc->port_enable_cmds.smid = smid;
   7511	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
   7512	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
   7513
   7514	init_completion(&ioc->port_enable_cmds.done);
   7515	ioc->put_smid_default(ioc, smid);
   7516	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
   7517	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
   7518		ioc_err(ioc, "%s: timeout\n", __func__);
   7519		_debug_dump_mf(mpi_request,
   7520		    sizeof(Mpi2PortEnableRequest_t)/4);
   7521		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
   7522			r = -EFAULT;
   7523		else
   7524			r = -ETIME;
   7525		goto out;
   7526	}
   7527
   7528	mpi_reply = ioc->port_enable_cmds.reply;
   7529	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
   7530	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
   7531		ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
   7532			__func__, ioc_status);
   7533		r = -EFAULT;
   7534		goto out;
   7535	}
   7536
   7537 out:
   7538	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
   7539	ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
   7540	return r;
   7541}
   7542
   7543/**
   7544 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
   7545 * @ioc: per adapter object
   7546 *
   7547 * Return: 0 for success, non-zero for failure.
   7548 */
   7549int
   7550mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
   7551{
   7552	Mpi2PortEnableRequest_t *mpi_request;
   7553	u16 smid;
   7554
   7555	ioc_info(ioc, "sending port enable !!\n");
   7556
   7557	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
   7558		ioc_err(ioc, "%s: internal command already in use\n", __func__);
   7559		return -EAGAIN;
   7560	}
   7561
   7562	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
   7563	if (!smid) {
   7564		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
   7565		return -EAGAIN;
   7566	}
   7567	ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED;
   7568	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
   7569	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC;
   7570	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
   7571	ioc->port_enable_cmds.smid = smid;
   7572	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
   7573	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
   7574
   7575	ioc->put_smid_default(ioc, smid);
   7576	return 0;
   7577}
   7578
   7579/**
    7580	 * _base_determine_wait_on_discovery - decide whether to wait for discovery
   7581 * @ioc: per adapter object
   7582 *
   7583 * Decide whether to wait on discovery to complete. Used to either
   7584 * locate boot device, or report volumes ahead of physical devices.
   7585 *
   7586 * Return: 1 for wait, 0 for don't wait.
   7587 */
   7588static int
   7589_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
   7590{
    7591	/* We wait for discovery to complete if IR firmware is loaded.
    7592	 * The sas topology events arrive before PD events, so we need time to
    7593	 * turn on the bit in ioc->pd_handles to indicate a PD.
    7594	 * Also, it may be required to report Volumes ahead of physical
    7595	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
    7596	 */
   7597	if (ioc->ir_firmware)
   7598		return 1;
   7599
   7600	/* if no Bios, then we don't need to wait */
   7601	if (!ioc->bios_pg3.BiosVersion)
   7602		return 0;
   7603
    7604	/* The Bios is present, so we drop down here.
    7605	 *
    7606	 * If there are any entries in the Bios Page 2, then we wait
    7607	 * for discovery to complete.
    7608	 */
   7609
   7610	/* Current Boot Device */
   7611	if ((ioc->bios_pg2.CurrentBootDeviceForm &
   7612	    MPI2_BIOSPAGE2_FORM_MASK) ==
   7613	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
   7614	/* Request Boot Device */
   7615	   (ioc->bios_pg2.ReqBootDeviceForm &
   7616	    MPI2_BIOSPAGE2_FORM_MASK) ==
   7617	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
   7618	/* Alternate Request Boot Device */
   7619	   (ioc->bios_pg2.ReqAltBootDeviceForm &
   7620	    MPI2_BIOSPAGE2_FORM_MASK) ==
   7621	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
   7622		return 0;
   7623
   7624	return 1;
   7625}
   7626
   7627/**
   7628 * _base_unmask_events - turn on notification for this event
   7629 * @ioc: per adapter object
   7630 * @event: firmware event
   7631 *
   7632 * The mask is stored in ioc->event_masks.
   7633 */
   7634static void
   7635_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
   7636{
   7637	u32 desired_event;
   7638
   7639	if (event >= 128)
   7640		return;
   7641
   7642	desired_event = (1 << (event % 32));
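        	/*
        	 * e.g. event 70: desired_event = 1 << (70 % 32) = 0x40, and the
        	 * range checks below clear bit 6 in event_masks[2].
        	 */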
   7643
   7644	if (event < 32)
   7645		ioc->event_masks[0] &= ~desired_event;
   7646	else if (event < 64)
   7647		ioc->event_masks[1] &= ~desired_event;
   7648	else if (event < 96)
   7649		ioc->event_masks[2] &= ~desired_event;
   7650	else if (event < 128)
   7651		ioc->event_masks[3] &= ~desired_event;
   7652}
   7653
   7654/**
   7655 * _base_event_notification - send event notification
   7656 * @ioc: per adapter object
   7657 *
   7658 * Return: 0 for success, non-zero for failure.
   7659 */
   7660static int
   7661_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
   7662{
   7663	Mpi2EventNotificationRequest_t *mpi_request;
   7664	u16 smid;
   7665	int r = 0;
   7666	int i, issue_diag_reset = 0;
   7667
   7668	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   7669
   7670	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
   7671		ioc_err(ioc, "%s: internal command already in use\n", __func__);
   7672		return -EAGAIN;
   7673	}
   7674
   7675	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
   7676	if (!smid) {
   7677		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
   7678		return -EAGAIN;
   7679	}
   7680	ioc->base_cmds.status = MPT3_CMD_PENDING;
   7681	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
   7682	ioc->base_cmds.smid = smid;
   7683	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
   7684	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
   7685	mpi_request->VF_ID = 0; /* TODO */
   7686	mpi_request->VP_ID = 0;
   7687	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
   7688		mpi_request->EventMasks[i] =
   7689		    cpu_to_le32(ioc->event_masks[i]);
   7690	init_completion(&ioc->base_cmds.done);
   7691	ioc->put_smid_default(ioc, smid);
   7692	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
   7693	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
   7694		ioc_err(ioc, "%s: timeout\n", __func__);
   7695		_debug_dump_mf(mpi_request,
   7696		    sizeof(Mpi2EventNotificationRequest_t)/4);
   7697		if (ioc->base_cmds.status & MPT3_CMD_RESET)
   7698			r = -EFAULT;
   7699		else
   7700			issue_diag_reset = 1;
   7701
   7702	} else
   7703		dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
   7704	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
   7705
   7706	if (issue_diag_reset) {
   7707		if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
   7708			return -EFAULT;
   7709		if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
   7710			return -EFAULT;
   7711		r = -EAGAIN;
   7712	}
   7713	return r;
   7714}
   7715
   7716/**
   7717 * mpt3sas_base_validate_event_type - validating event types
   7718 * @ioc: per adapter object
   7719 * @event_type: firmware event
   7720 *
    7721	 * This will turn on firmware event notification when the application
    7722	 * asks for that event. We don't mask events that are already enabled.
   7723 */
   7724void
   7725mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
   7726{
   7727	int i, j;
   7728	u32 event_mask, desired_event;
   7729	u8 send_update_to_fw;
   7730
   7731	for (i = 0, send_update_to_fw = 0; i <
   7732	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
   7733		event_mask = ~event_type[i];
   7734		desired_event = 1;
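        		/*
        		 * A bit set in event_type[] means the application wants that
        		 * event; a bit set in ioc->event_masks[] means the event is
        		 * currently masked off.  Unmask (and later push to FW) only
        		 * the events the application asked for that are still masked.
        		 */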
   7735		for (j = 0; j < 32; j++) {
   7736			if (!(event_mask & desired_event) &&
   7737			    (ioc->event_masks[i] & desired_event)) {
   7738				ioc->event_masks[i] &= ~desired_event;
   7739				send_update_to_fw = 1;
   7740			}
   7741			desired_event = (desired_event << 1);
   7742		}
   7743	}
   7744
   7745	if (!send_update_to_fw)
   7746		return;
   7747
   7748	mutex_lock(&ioc->base_cmds.mutex);
   7749	_base_event_notification(ioc);
   7750	mutex_unlock(&ioc->base_cmds.mutex);
   7751}
   7752
   7753/**
   7754 * _base_diag_reset - the "big hammer" start of day reset
   7755 * @ioc: per adapter object
   7756 *
   7757 * Return: 0 for success, non-zero for failure.
   7758 */
   7759static int
   7760_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
   7761{
   7762	u32 host_diagnostic;
   7763	u32 ioc_state;
   7764	u32 count;
   7765	u32 hcb_size;
   7766
   7767	ioc_info(ioc, "sending diag reset !!\n");
   7768
   7769	pci_cfg_access_lock(ioc->pdev);
   7770
   7771	drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
   7772
   7773	count = 0;
   7774	do {
   7775		/* Write magic sequence to WriteSequence register
   7776		 * Loop until in diagnostic mode
   7777		 */
   7778		drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
   7779		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
   7780		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
   7781		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
   7782		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
   7783		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
   7784		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
   7785		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
   7786
   7787		/* wait 100 msec */
   7788		msleep(100);
   7789
   7790		if (count++ > 20) {
   7791			ioc_info(ioc,
   7792			    "Stop writing magic sequence after 20 retries\n");
   7793			_base_dump_reg_set(ioc);
   7794			goto out;
   7795		}
   7796
   7797		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
   7798		drsprintk(ioc,
   7799			  ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
   7800				   count, host_diagnostic));
   7801
   7802	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
   7803
   7804	hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
   7805
   7806	drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
   7807	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
   7808	     &ioc->chip->HostDiagnostic);
   7809
    7810	/* This delay allows the chip PCIe hardware time to finish reset tasks */
   7811	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
   7812
    7813	/* Approximately 300 seconds max wait */
   7814	for (count = 0; count < (300000000 /
   7815		MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
   7816
   7817		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
   7818
   7819		if (host_diagnostic == 0xFFFFFFFF) {
   7820			ioc_info(ioc,
   7821			    "Invalid host diagnostic register value\n");
   7822			_base_dump_reg_set(ioc);
   7823			goto out;
   7824		}
   7825		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
   7826			break;
   7827
   7828		msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
   7829	}
   7830
   7831	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
   7832
   7833		drsprintk(ioc,
   7834			  ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
   7835		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
   7836		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
   7837		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
   7838
   7839		drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
   7840		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
   7841		    &ioc->chip->HCBSize);
   7842	}
   7843
   7844	drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
   7845	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
   7846	    &ioc->chip->HostDiagnostic);
   7847
   7848	drsprintk(ioc,
   7849		  ioc_info(ioc, "disable writes to the diagnostic register\n"));
   7850	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
   7851
   7852	drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
   7853	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
   7854	if (ioc_state) {
   7855		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
   7856			__func__, ioc_state);
   7857		_base_dump_reg_set(ioc);
   7858		goto out;
   7859	}
   7860
   7861	pci_cfg_access_unlock(ioc->pdev);
   7862	ioc_info(ioc, "diag reset: SUCCESS\n");
   7863	return 0;
   7864
   7865 out:
   7866	pci_cfg_access_unlock(ioc->pdev);
   7867	ioc_err(ioc, "diag reset: FAILED\n");
   7868	return -EFAULT;
   7869}
   7870
   7871/**
   7872 * mpt3sas_base_make_ioc_ready - put controller in READY state
   7873 * @ioc: per adapter object
   7874 * @type: FORCE_BIG_HAMMER or SOFT_RESET
   7875 *
   7876 * Return: 0 for success, non-zero for failure.
   7877 */
   7878int
   7879mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
   7880{
   7881	u32 ioc_state;
   7882	int rc;
   7883	int count;
   7884
   7885	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   7886
   7887	if (ioc->pci_error_recovery)
   7888		return 0;
   7889
   7890	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
   7891	dhsprintk(ioc,
   7892		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
   7893			   __func__, ioc_state));
   7894
   7895	/* if in RESET state, it should move to READY state shortly */
   7896	count = 0;
   7897	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
   7898		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
   7899		    MPI2_IOC_STATE_READY) {
   7900			if (count++ == 10) {
   7901				ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
   7902					__func__, ioc_state);
   7903				return -EFAULT;
   7904			}
   7905			ssleep(1);
   7906			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
   7907		}
   7908	}
   7909
   7910	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
   7911		return 0;
   7912
   7913	if (ioc_state & MPI2_DOORBELL_USED) {
   7914		ioc_info(ioc, "unexpected doorbell active!\n");
   7915		goto issue_diag_reset;
   7916	}
   7917
   7918	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
   7919		mpt3sas_print_fault_code(ioc, ioc_state &
   7920		    MPI2_DOORBELL_DATA_MASK);
   7921		goto issue_diag_reset;
   7922	}
   7923
   7924	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
   7925		/*
    7926		 * If a host reset is invoked while the watchdog thread is
    7927		 * waiting for the IOC state to change to Fault, the driver has
    7928		 * to wait here for the CoreDump state to clear; otherwise a
    7929		 * reset would be issued and the FW would move the IOC to the
    7930		 * Reset state without copying its logs to the coredump region.
   7931		 */
   7932		if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
   7933			mpt3sas_print_coredump_info(ioc, ioc_state &
   7934			    MPI2_DOORBELL_DATA_MASK);
   7935			mpt3sas_base_wait_for_coredump_completion(ioc,
   7936			    __func__);
   7937		}
   7938		goto issue_diag_reset;
   7939	}
   7940
   7941	if (type == FORCE_BIG_HAMMER)
   7942		goto issue_diag_reset;
   7943
   7944	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
    7945		if (!(_base_send_ioc_reset(ioc,
    7946		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
    7947			return 0;
    7948		}
   7949
   7950 issue_diag_reset:
   7951	rc = _base_diag_reset(ioc);
   7952	return rc;
   7953}
   7954
   7955/**
   7956 * _base_make_ioc_operational - put controller in OPERATIONAL state
   7957 * @ioc: per adapter object
   7958 *
   7959 * Return: 0 for success, non-zero for failure.
   7960 */
   7961static int
   7962_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
   7963{
   7964	int r, i, index, rc;
   7965	unsigned long	flags;
   7966	u32 reply_address;
   7967	u16 smid;
   7968	struct _tr_list *delayed_tr, *delayed_tr_next;
   7969	struct _sc_list *delayed_sc, *delayed_sc_next;
   7970	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
   7971	u8 hide_flag;
   7972	struct adapter_reply_queue *reply_q;
   7973	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
   7974
   7975	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   7976
   7977	/* clean the delayed target reset list */
   7978	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
   7979	    &ioc->delayed_tr_list, list) {
   7980		list_del(&delayed_tr->list);
   7981		kfree(delayed_tr);
   7982	}
   7983
   7984
   7985	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
   7986	    &ioc->delayed_tr_volume_list, list) {
   7987		list_del(&delayed_tr->list);
   7988		kfree(delayed_tr);
   7989	}
   7990
   7991	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
   7992	    &ioc->delayed_sc_list, list) {
   7993		list_del(&delayed_sc->list);
   7994		kfree(delayed_sc);
   7995	}
   7996
   7997	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
   7998	    &ioc->delayed_event_ack_list, list) {
   7999		list_del(&delayed_event_ack->list);
   8000		kfree(delayed_event_ack);
   8001	}
   8002
   8003	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
   8004
   8005	/* hi-priority queue */
   8006	INIT_LIST_HEAD(&ioc->hpr_free_list);
   8007	smid = ioc->hi_priority_smid;
   8008	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
   8009		ioc->hpr_lookup[i].cb_idx = 0xFF;
   8010		ioc->hpr_lookup[i].smid = smid;
   8011		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
   8012		    &ioc->hpr_free_list);
   8013	}
   8014
   8015	/* internal queue */
   8016	INIT_LIST_HEAD(&ioc->internal_free_list);
   8017	smid = ioc->internal_smid;
   8018	for (i = 0; i < ioc->internal_depth; i++, smid++) {
   8019		ioc->internal_lookup[i].cb_idx = 0xFF;
   8020		ioc->internal_lookup[i].smid = smid;
   8021		list_add_tail(&ioc->internal_lookup[i].tracker_list,
   8022		    &ioc->internal_free_list);
   8023	}
   8024
   8025	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
   8026
   8027	/* initialize Reply Free Queue */
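	/* each entry holds the DMA address of one reply frame handed to the firmware */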
   8028	for (i = 0, reply_address = (u32)ioc->reply_dma ;
   8029	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
   8030	    ioc->reply_sz) {
   8031		ioc->reply_free[i] = cpu_to_le32(reply_address);
   8032		if (ioc->is_mcpu_endpoint)
   8033			_base_clone_reply_to_sys_mem(ioc,
   8034					reply_address, i);
   8035	}
   8036
   8037	/* initialize reply queues */
   8038	if (ioc->is_driver_loading)
   8039		_base_assign_reply_queues(ioc);
   8040
   8041	/* initialize Reply Post Free Queue */
   8042	index = 0;
   8043	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
   8044	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
   8045		/*
   8046		 * If RDPQ is enabled, switch to the next allocation.
   8047		 * Otherwise advance within the contiguous region.
   8048		 */
   8049		if (ioc->rdpq_array_enable) {
   8050			reply_q->reply_post_free =
   8051				ioc->reply_post[index++].reply_post_free;
   8052		} else {
   8053			reply_q->reply_post_free = reply_post_free_contig;
   8054			reply_post_free_contig += ioc->reply_post_queue_depth;
   8055		}
   8056
   8057		reply_q->reply_post_host_index = 0;
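		/* an all-ones descriptor marks the slot as unused until FW writes a reply */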
   8058		for (i = 0; i < ioc->reply_post_queue_depth; i++)
   8059			reply_q->reply_post_free[i].Words =
   8060			    cpu_to_le64(ULLONG_MAX);
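		/* with MSI-X disabled there is only one reply queue, so stop after the first */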
   8061		if (!_base_is_controller_msix_enabled(ioc))
   8062			goto skip_init_reply_post_free_queue;
   8063	}
   8064 skip_init_reply_post_free_queue:
   8065
   8066	r = _base_send_ioc_init(ioc);
   8067	if (r) {
   8068		/*
    8069		 * There is no need to check the IOC for a fault state and
    8070		 * issue a diag reset during host reset; this check is needed
    8071		 * only at driver load time.
   8072		 */
   8073		if (!ioc->is_driver_loading)
   8074			return r;
   8075
   8076		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
   8077		if (rc || (_base_send_ioc_init(ioc)))
   8078			return r;
   8079	}
   8080
   8081	/* initialize reply free host index */
   8082	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
   8083	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
   8084
   8085	/* initialize reply post host index */
   8086	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
   8087		if (ioc->combined_reply_queue)
    8088			writel((reply_q->msix_index & 7) <<
   8089			   MPI2_RPHI_MSIX_INDEX_SHIFT,
   8090			   ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
   8091		else
   8092			writel(reply_q->msix_index <<
   8093				MPI2_RPHI_MSIX_INDEX_SHIFT,
   8094				&ioc->chip->ReplyPostHostIndex);
   8095
   8096		if (!_base_is_controller_msix_enabled(ioc))
   8097			goto skip_init_reply_post_host_index;
   8098	}
   8099
   8100 skip_init_reply_post_host_index:
   8101
   8102	mpt3sas_base_unmask_interrupts(ioc);
   8103
   8104	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
   8105		r = _base_display_fwpkg_version(ioc);
   8106		if (r)
   8107			return r;
   8108	}
   8109
   8110	r = _base_static_config_pages(ioc);
   8111	if (r)
   8112		return r;
   8113
   8114	r = _base_event_notification(ioc);
   8115	if (r)
   8116		return r;
   8117
   8118	if (!ioc->shost_recovery) {
   8119
   8120		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
   8121		    == 0x80) {
   8122			hide_flag = (u8) (
   8123			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
   8124			    MFG_PAGE10_HIDE_SSDS_MASK);
   8125			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
   8126				ioc->mfg_pg10_hide_flag = hide_flag;
   8127		}
   8128
   8129		ioc->wait_for_discovery_to_complete =
   8130		    _base_determine_wait_on_discovery(ioc);
   8131
   8132		return r; /* scan_start and scan_finished support */
   8133	}
   8134
   8135	r = _base_send_port_enable(ioc);
   8136	if (r)
   8137		return r;
   8138
   8139	return r;
   8140}
   8141
   8142/**
    8143 * mpt3sas_base_free_resources - free controller resources
   8144 * @ioc: per adapter object
   8145 */
   8146void
   8147mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
   8148{
   8149	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   8150
    8151	/* synchronize freeing of resources with the pci_access_mutex lock */
   8152	mutex_lock(&ioc->pci_access_mutex);
   8153	if (ioc->chip_phys && ioc->chip) {
   8154		mpt3sas_base_mask_interrupts(ioc);
   8155		ioc->shost_recovery = 1;
   8156		mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
   8157		ioc->shost_recovery = 0;
   8158	}
   8159
   8160	mpt3sas_base_unmap_resources(ioc);
   8161	mutex_unlock(&ioc->pci_access_mutex);
   8162	return;
   8163}
   8164
   8165/**
   8166 * mpt3sas_base_attach - attach controller instance
   8167 * @ioc: per adapter object
   8168 *
   8169 * Return: 0 for success, non-zero for failure.
   8170 */
   8171int
   8172mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
   8173{
   8174	int r, i, rc;
   8175	int cpu_id, last_cpu_id = 0;
   8176
   8177	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   8178
   8179	/* setup cpu_msix_table */
   8180	ioc->cpu_count = num_online_cpus();
   8181	for_each_online_cpu(cpu_id)
   8182		last_cpu_id = cpu_id;
   8183	ioc->cpu_msix_table_sz = last_cpu_id + 1;
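	/* the table is indexed by CPU id, so size it to the highest online CPU id + 1 */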
   8184	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
   8185	ioc->reply_queue_count = 1;
   8186	if (!ioc->cpu_msix_table) {
   8187		ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
   8188		r = -ENOMEM;
   8189		goto out_free_resources;
   8190	}
   8191
   8192	if (ioc->is_warpdrive) {
   8193		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
   8194		    sizeof(resource_size_t *), GFP_KERNEL);
   8195		if (!ioc->reply_post_host_index) {
   8196			ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
   8197			r = -ENOMEM;
   8198			goto out_free_resources;
   8199		}
   8200	}
   8201
   8202	ioc->smp_affinity_enable = smp_affinity_enable;
   8203
   8204	ioc->rdpq_array_enable_assigned = 0;
   8205	ioc->use_32bit_dma = false;
   8206	ioc->dma_mask = 64;
   8207	if (ioc->is_aero_ioc)
   8208		ioc->base_readl = &_base_readl_aero;
   8209	else
   8210		ioc->base_readl = &_base_readl;
   8211	r = mpt3sas_base_map_resources(ioc);
   8212	if (r)
   8213		goto out_free_resources;
   8214
   8215	pci_set_drvdata(ioc->pdev, ioc->shost);
   8216	r = _base_get_ioc_facts(ioc);
   8217	if (r) {
   8218		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
   8219		if (rc || (_base_get_ioc_facts(ioc)))
   8220			goto out_free_resources;
   8221	}
   8222
   8223	switch (ioc->hba_mpi_version_belonged) {
   8224	case MPI2_VERSION:
   8225		ioc->build_sg_scmd = &_base_build_sg_scmd;
   8226		ioc->build_sg = &_base_build_sg;
   8227		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
   8228		ioc->get_msix_index_for_smlio = &_base_get_msix_index;
   8229		break;
   8230	case MPI25_VERSION:
   8231	case MPI26_VERSION:
   8232		/*
   8233		 * In SAS3.0,
   8234		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
   8235		 * Target Status - all require the IEEE formatted scatter gather
   8236		 * elements.
   8237		 */
   8238		ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
   8239		ioc->build_sg = &_base_build_sg_ieee;
   8240		ioc->build_nvme_prp = &_base_build_nvme_prp;
   8241		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
   8242		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
   8243		if (ioc->high_iops_queues)
   8244			ioc->get_msix_index_for_smlio =
   8245					&_base_get_high_iops_msix_index;
   8246		else
   8247			ioc->get_msix_index_for_smlio = &_base_get_msix_index;
   8248		break;
   8249	}
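	/* pick request-posting helpers: atomic descriptor variants when supported, legacy ones otherwise */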
   8250	if (ioc->atomic_desc_capable) {
   8251		ioc->put_smid_default = &_base_put_smid_default_atomic;
   8252		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
   8253		ioc->put_smid_fast_path =
   8254				&_base_put_smid_fast_path_atomic;
   8255		ioc->put_smid_hi_priority =
   8256				&_base_put_smid_hi_priority_atomic;
   8257	} else {
   8258		ioc->put_smid_default = &_base_put_smid_default;
   8259		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
   8260		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
   8261		if (ioc->is_mcpu_endpoint)
   8262			ioc->put_smid_scsi_io =
   8263				&_base_put_smid_mpi_ep_scsi_io;
   8264		else
   8265			ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
   8266	}
   8267	/*
    8268	 * These function pointers are for other requests that
    8269	 * don't require IEEE scatter gather elements.
   8270	 *
   8271	 * For example Configuration Pages and SAS IOUNIT Control don't.
   8272	 */
   8273	ioc->build_sg_mpi = &_base_build_sg;
   8274	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
   8275
   8276	r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
   8277	if (r)
   8278		goto out_free_resources;
   8279
   8280	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
   8281	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
   8282	if (!ioc->pfacts) {
   8283		r = -ENOMEM;
   8284		goto out_free_resources;
   8285	}
   8286
   8287	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
   8288		r = _base_get_port_facts(ioc, i);
   8289		if (r) {
   8290			rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
   8291			if (rc || (_base_get_port_facts(ioc, i)))
   8292				goto out_free_resources;
   8293		}
   8294	}
   8295
   8296	r = _base_allocate_memory_pools(ioc);
   8297	if (r)
   8298		goto out_free_resources;
   8299
   8300	if (irqpoll_weight > 0)
   8301		ioc->thresh_hold = irqpoll_weight;
   8302	else
   8303		ioc->thresh_hold = ioc->hba_queue_depth/4;
   8304
   8305	_base_init_irqpolls(ioc);
   8306	init_waitqueue_head(&ioc->reset_wq);
   8307
   8308	/* allocate memory pd handle bitmask list */
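	/* one bit per possible device handle, rounded up to whole bytes */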
   8309	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
   8310	if (ioc->facts.MaxDevHandle % 8)
   8311		ioc->pd_handles_sz++;
   8312	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
   8313	    GFP_KERNEL);
   8314	if (!ioc->pd_handles) {
   8315		r = -ENOMEM;
   8316		goto out_free_resources;
   8317	}
   8318	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
   8319	    GFP_KERNEL);
   8320	if (!ioc->blocking_handles) {
   8321		r = -ENOMEM;
   8322		goto out_free_resources;
   8323	}
   8324
   8325	/* allocate memory for pending OS device add list */
   8326	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
   8327	if (ioc->facts.MaxDevHandle % 8)
   8328		ioc->pend_os_device_add_sz++;
   8329	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
   8330	    GFP_KERNEL);
   8331	if (!ioc->pend_os_device_add) {
   8332		r = -ENOMEM;
   8333		goto out_free_resources;
   8334	}
   8335
   8336	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
   8337	ioc->device_remove_in_progress =
   8338		kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
   8339	if (!ioc->device_remove_in_progress) {
   8340		r = -ENOMEM;
   8341		goto out_free_resources;
   8342	}
   8343
   8344	ioc->fwfault_debug = mpt3sas_fwfault_debug;
   8345
   8346	/* base internal command bits */
   8347	mutex_init(&ioc->base_cmds.mutex);
   8348	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
   8349	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
   8350
   8351	/* port_enable command bits */
   8352	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
   8353	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
   8354
   8355	/* transport internal command bits */
   8356	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
   8357	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
   8358	mutex_init(&ioc->transport_cmds.mutex);
   8359
   8360	/* scsih internal command bits */
   8361	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
   8362	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
   8363	mutex_init(&ioc->scsih_cmds.mutex);
   8364
   8365	/* task management internal command bits */
   8366	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
   8367	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
   8368	mutex_init(&ioc->tm_cmds.mutex);
   8369
   8370	/* config page internal command bits */
   8371	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
   8372	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
   8373	mutex_init(&ioc->config_cmds.mutex);
   8374
   8375	/* ctl module internal command bits */
   8376	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
   8377	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
   8378	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
   8379	mutex_init(&ioc->ctl_cmds.mutex);
   8380
   8381	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
   8382	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
   8383	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
   8384	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
   8385		r = -ENOMEM;
   8386		goto out_free_resources;
   8387	}
   8388
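	/* start with every event masked; the events of interest are unmasked below */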
   8389	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
   8390		ioc->event_masks[i] = -1;
   8391
   8392	/* here we enable the events we care about */
   8393	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
   8394	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
   8395	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
   8396	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
   8397	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
   8398	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
   8399	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
   8400	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
   8401	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
   8402	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
   8403	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
   8404	_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
   8405	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
   8406	if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
   8407		if (ioc->is_gen35_ioc) {
   8408			_base_unmask_events(ioc,
   8409				MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
   8410			_base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
   8411			_base_unmask_events(ioc,
   8412				MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
   8413		}
   8414	}
   8415	r = _base_make_ioc_operational(ioc);
   8416	if (r == -EAGAIN) {
   8417		r = _base_make_ioc_operational(ioc);
   8418		if (r)
   8419			goto out_free_resources;
   8420	}
   8421
   8422	/*
    8423	 * Save the current copy of IOCFacts in prev_fw_facts;
    8424	 * it is used during online firmware upgrade.
   8425	 */
   8426	memcpy(&ioc->prev_fw_facts, &ioc->facts,
   8427	    sizeof(struct mpt3sas_facts));
   8428
   8429	ioc->non_operational_loop = 0;
   8430	ioc->ioc_coredump_loop = 0;
   8431	ioc->got_task_abort_from_ioctl = 0;
   8432	return 0;
   8433
   8434 out_free_resources:
   8435
   8436	ioc->remove_host = 1;
   8437
   8438	mpt3sas_base_free_resources(ioc);
   8439	_base_release_memory_pools(ioc);
   8440	pci_set_drvdata(ioc->pdev, NULL);
   8441	kfree(ioc->cpu_msix_table);
   8442	if (ioc->is_warpdrive)
   8443		kfree(ioc->reply_post_host_index);
   8444	kfree(ioc->pd_handles);
   8445	kfree(ioc->blocking_handles);
   8446	kfree(ioc->device_remove_in_progress);
   8447	kfree(ioc->pend_os_device_add);
   8448	kfree(ioc->tm_cmds.reply);
   8449	kfree(ioc->transport_cmds.reply);
   8450	kfree(ioc->scsih_cmds.reply);
   8451	kfree(ioc->config_cmds.reply);
   8452	kfree(ioc->base_cmds.reply);
   8453	kfree(ioc->port_enable_cmds.reply);
   8454	kfree(ioc->ctl_cmds.reply);
   8455	kfree(ioc->ctl_cmds.sense);
   8456	kfree(ioc->pfacts);
   8457	ioc->ctl_cmds.reply = NULL;
   8458	ioc->base_cmds.reply = NULL;
   8459	ioc->tm_cmds.reply = NULL;
   8460	ioc->scsih_cmds.reply = NULL;
   8461	ioc->transport_cmds.reply = NULL;
   8462	ioc->config_cmds.reply = NULL;
   8463	ioc->pfacts = NULL;
   8464	return r;
   8465}
   8466
   8467
   8468/**
   8469 * mpt3sas_base_detach - remove controller instance
   8470 * @ioc: per adapter object
   8471 */
   8472void
   8473mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
   8474{
   8475	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
   8476
   8477	mpt3sas_base_stop_watchdog(ioc);
   8478	mpt3sas_base_free_resources(ioc);
   8479	_base_release_memory_pools(ioc);
   8480	mpt3sas_free_enclosure_list(ioc);
   8481	pci_set_drvdata(ioc->pdev, NULL);
   8482	kfree(ioc->cpu_msix_table);
   8483	if (ioc->is_warpdrive)
   8484		kfree(ioc->reply_post_host_index);
   8485	kfree(ioc->pd_handles);
   8486	kfree(ioc->blocking_handles);
   8487	kfree(ioc->device_remove_in_progress);
   8488	kfree(ioc->pend_os_device_add);
   8489	kfree(ioc->pfacts);
   8490	kfree(ioc->ctl_cmds.reply);
   8491	kfree(ioc->ctl_cmds.sense);
   8492	kfree(ioc->base_cmds.reply);
   8493	kfree(ioc->port_enable_cmds.reply);
   8494	kfree(ioc->tm_cmds.reply);
   8495	kfree(ioc->transport_cmds.reply);
   8496	kfree(ioc->scsih_cmds.reply);
   8497	kfree(ioc->config_cmds.reply);
   8498}
   8499
   8500/**
   8501 * _base_pre_reset_handler - pre reset handler
   8502 * @ioc: per adapter object
   8503 */
   8504static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
   8505{
   8506	mpt3sas_scsih_pre_reset_handler(ioc);
   8507	mpt3sas_ctl_pre_reset_handler(ioc);
   8508	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
   8509}
   8510
   8511/**
   8512 * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
   8513 * @ioc: per adapter object
   8514 */
   8515static void
   8516_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
   8517{
   8518	dtmprintk(ioc,
   8519	    ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
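	/* for each pending internal command: flag it as reset, free its smid, and wake any waiter */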
   8520	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
   8521		ioc->transport_cmds.status |= MPT3_CMD_RESET;
   8522		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
   8523		complete(&ioc->transport_cmds.done);
   8524	}
   8525	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
   8526		ioc->base_cmds.status |= MPT3_CMD_RESET;
   8527		mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
   8528		complete(&ioc->base_cmds.done);
   8529	}
   8530	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
   8531		ioc->port_enable_failed = 1;
   8532		ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
   8533		mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
   8534		if (ioc->is_driver_loading) {
   8535			ioc->start_scan_failed =
   8536				MPI2_IOCSTATUS_INTERNAL_ERROR;
   8537			ioc->start_scan = 0;
   8538		} else {
   8539			complete(&ioc->port_enable_cmds.done);
   8540		}
   8541	}
   8542	if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
   8543		ioc->config_cmds.status |= MPT3_CMD_RESET;
   8544		mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
   8545		ioc->config_cmds.smid = USHRT_MAX;
   8546		complete(&ioc->config_cmds.done);
   8547	}
   8548}
   8549
   8550/**
   8551 * _base_clear_outstanding_commands - clear all outstanding commands
   8552 * @ioc: per adapter object
   8553 */
   8554static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
   8555{
   8556	mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
   8557	mpt3sas_ctl_clear_outstanding_ioctls(ioc);
   8558	_base_clear_outstanding_mpt_commands(ioc);
   8559}
   8560
   8561/**
   8562 * _base_reset_done_handler - reset done handler
   8563 * @ioc: per adapter object
   8564 */
   8565static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
   8566{
   8567	mpt3sas_scsih_reset_done_handler(ioc);
   8568	mpt3sas_ctl_reset_done_handler(ioc);
   8569	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
   8570}
   8571
   8572/**
    8573 * mpt3sas_wait_for_commands_to_complete - wait for pending commands
    8574 * @ioc: Pointer to MPT_ADAPTER structure
    8575 *
    8576 * This function waits up to 10 seconds for all pending commands to
    8577 * complete before the controller is put into reset.
   8578 */
   8579void
   8580mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
   8581{
   8582	u32 ioc_state;
   8583
   8584	ioc->pending_io_count = 0;
   8585
   8586	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
   8587	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
   8588		return;
   8589
   8590	/* pending command count */
   8591	ioc->pending_io_count = scsi_host_busy(ioc->shost);
   8592
   8593	if (!ioc->pending_io_count)
   8594		return;
   8595
   8596	/* wait for pending commands to complete */
   8597	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
   8598}
   8599
   8600/**
   8601 * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
   8602 *     attributes during online firmware upgrade and update the corresponding
   8603 *     IOC variables accordingly.
   8604 *
   8605 * @ioc: Pointer to MPT_ADAPTER structure
   8606 */
   8607static int
   8608_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
   8609{
   8610	u16 pd_handles_sz;
   8611	void *pd_handles = NULL, *blocking_handles = NULL;
   8612	void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
   8613	struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;
   8614
   8615	if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
   8616		pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
   8617		if (ioc->facts.MaxDevHandle % 8)
   8618			pd_handles_sz++;
   8619
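		/* grow each per-handle bitmap and zero only the newly added bytes */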
   8620		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
   8621		    GFP_KERNEL);
   8622		if (!pd_handles) {
   8623			ioc_info(ioc,
   8624			    "Unable to allocate the memory for pd_handles of sz: %d\n",
   8625			    pd_handles_sz);
   8626			return -ENOMEM;
   8627		}
   8628		memset(pd_handles + ioc->pd_handles_sz, 0,
   8629		    (pd_handles_sz - ioc->pd_handles_sz));
   8630		ioc->pd_handles = pd_handles;
   8631
   8632		blocking_handles = krealloc(ioc->blocking_handles,
   8633		    pd_handles_sz, GFP_KERNEL);
   8634		if (!blocking_handles) {
   8635			ioc_info(ioc,
   8636			    "Unable to allocate the memory for "
   8637			    "blocking_handles of sz: %d\n",
   8638			    pd_handles_sz);
   8639			return -ENOMEM;
   8640		}
   8641		memset(blocking_handles + ioc->pd_handles_sz, 0,
   8642		    (pd_handles_sz - ioc->pd_handles_sz));
   8643		ioc->blocking_handles = blocking_handles;
   8644		ioc->pd_handles_sz = pd_handles_sz;
   8645
   8646		pend_os_device_add = krealloc(ioc->pend_os_device_add,
   8647		    pd_handles_sz, GFP_KERNEL);
   8648		if (!pend_os_device_add) {
   8649			ioc_info(ioc,
   8650			    "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
   8651			    pd_handles_sz);
   8652			return -ENOMEM;
   8653		}
   8654		memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
   8655		    (pd_handles_sz - ioc->pend_os_device_add_sz));
   8656		ioc->pend_os_device_add = pend_os_device_add;
   8657		ioc->pend_os_device_add_sz = pd_handles_sz;
   8658
   8659		device_remove_in_progress = krealloc(
   8660		    ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
   8661		if (!device_remove_in_progress) {
   8662			ioc_info(ioc,
   8663			    "Unable to allocate the memory for "
    8664			    "device_remove_in_progress of sz: %d\n",
    8665			    pd_handles_sz);
   8666			return -ENOMEM;
   8667		}
   8668		memset(device_remove_in_progress +
   8669		    ioc->device_remove_in_progress_sz, 0,
   8670		    (pd_handles_sz - ioc->device_remove_in_progress_sz));
   8671		ioc->device_remove_in_progress = device_remove_in_progress;
   8672		ioc->device_remove_in_progress_sz = pd_handles_sz;
   8673	}
   8674
   8675	memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
   8676	return 0;
   8677}
   8678
   8679/**
   8680 * mpt3sas_base_hard_reset_handler - reset controller
   8681 * @ioc: Pointer to MPT_ADAPTER structure
   8682 * @type: FORCE_BIG_HAMMER or SOFT_RESET
   8683 *
   8684 * Return: 0 for success, non-zero for failure.
   8685 */
   8686int
   8687mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
   8688	enum reset_type type)
   8689{
   8690	int r;
   8691	unsigned long flags;
   8692	u32 ioc_state;
   8693	u8 is_fault = 0, is_trigger = 0;
   8694
   8695	dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
   8696
   8697	if (ioc->pci_error_recovery) {
   8698		ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
   8699		r = 0;
   8700		goto out_unlocked;
   8701	}
   8702
   8703	if (mpt3sas_fwfault_debug)
   8704		mpt3sas_halt_firmware(ioc);
   8705
   8706	/* wait for an active reset in progress to complete */
   8707	mutex_lock(&ioc->reset_in_progress_mutex);
   8708
   8709	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
   8710	ioc->shost_recovery = 1;
   8711	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
   8712
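	/* record whether a registered, unreleased trace buffer should fire a diag trigger once the reset finishes */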
   8713	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
   8714	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
   8715	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
   8716	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
   8717		is_trigger = 1;
   8718		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
   8719		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
   8720		    (ioc_state & MPI2_IOC_STATE_MASK) ==
   8721		    MPI2_IOC_STATE_COREDUMP) {
   8722			is_fault = 1;
   8723			ioc->htb_rel.trigger_info_dwords[1] =
   8724			    (ioc_state & MPI2_DOORBELL_DATA_MASK);
   8725		}
   8726	}
   8727	_base_pre_reset_handler(ioc);
   8728	mpt3sas_wait_for_commands_to_complete(ioc);
   8729	mpt3sas_base_mask_interrupts(ioc);
   8730	mpt3sas_base_pause_mq_polling(ioc);
   8731	r = mpt3sas_base_make_ioc_ready(ioc, type);
   8732	if (r)
   8733		goto out;
   8734	_base_clear_outstanding_commands(ioc);
   8735
   8736	/* If this hard reset is called while port enable is active, then
   8737	 * there is no reason to call make_ioc_operational
   8738	 */
   8739	if (ioc->is_driver_loading && ioc->port_enable_failed) {
   8740		ioc->remove_host = 1;
   8741		r = -EFAULT;
   8742		goto out;
   8743	}
   8744	r = _base_get_ioc_facts(ioc);
   8745	if (r)
   8746		goto out;
   8747
   8748	r = _base_check_ioc_facts_changes(ioc);
   8749	if (r) {
   8750		ioc_info(ioc,
    8751		    "Some parameters changed in the new firmware image;"
    8752		    " a system reboot is required\n");
   8753		goto out;
   8754	}
   8755	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
    8756		panic("%s: Issue occurred while flashing controller firmware. "
    8757		      "Please reboot the system and ensure that the correct"
    8758		      " firmware version is running\n", ioc->name);
   8759
   8760	r = _base_make_ioc_operational(ioc);
   8761	if (!r)
   8762		_base_reset_done_handler(ioc);
   8763
   8764 out:
   8765	ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");
   8766
   8767	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
   8768	ioc->shost_recovery = 0;
   8769	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
   8770	ioc->ioc_reset_count++;
   8771	mutex_unlock(&ioc->reset_in_progress_mutex);
   8772	mpt3sas_base_resume_mq_polling(ioc);
   8773
   8774 out_unlocked:
   8775	if ((r == 0) && is_trigger) {
   8776		if (is_fault)
   8777			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
   8778		else
   8779			mpt3sas_trigger_master(ioc,
   8780			    MASTER_TRIGGER_ADAPTER_RESET);
   8781	}
   8782	dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
   8783	return r;
   8784}