cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qla_nx.c (117423B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * QLogic Fibre Channel HBA Driver
      4 * Copyright (c)  2003-2014 QLogic Corporation
      5 */
      6#include "qla_def.h"
      7#include <linux/delay.h>
      8#include <linux/io-64-nonatomic-lo-hi.h>
      9#include <linux/pci.h>
     10#include <linux/ratelimit.h>
     11#include <linux/vmalloc.h>
     12#include <scsi/scsi_tcq.h>
     13
     14#define MASK(n)			((1ULL<<(n))-1)
     15#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
     16	((addr >> 25) & 0x3ff))
     17#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
     18	((addr >> 25) & 0x3ff))
     19#define MS_WIN(addr) (addr & 0x0ffc0000)
     20#define QLA82XX_PCI_MN_2M   (0)
     21#define QLA82XX_PCI_MS_2M   (0x80000)
     22#define QLA82XX_PCI_OCM0_2M (0xc0000)
     23#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
     24#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
     25#define BLOCK_PROTECT_BITS 0x0F
     26
     27/* CRB window related */
     28#define CRB_BLK(off)	((off >> 20) & 0x3f)
     29#define CRB_SUBBLK(off)	((off >> 16) & 0xf)
     30#define CRB_WINDOW_2M	(0x130060)
     31#define QLA82XX_PCI_CAMQM_2M_END	(0x04800800UL)
     32#define CRB_HI(off)	((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
     33			((off) & 0xf0000))
     34#define QLA82XX_PCI_CAMQM_2M_BASE	(0x000ff800UL)
     35#define CRB_INDIRECT_2M	(0x1e0000UL)
     36
     37#define MAX_CRB_XFORM 60
     38static unsigned long crb_addr_xform[MAX_CRB_XFORM];
     39static int qla82xx_crb_table_initialized;
     40
     41#define qla82xx_crb_addr_transform(name) \
     42	(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
     43	QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
     44
     45const int MD_MIU_TEST_AGT_RDDATA[] = {
     46	0x410000A8, 0x410000AC,
     47	0x410000B8, 0x410000BC
     48};
     49
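       /*
        * Each qla82xx_crb_addr_transform(NAME) call below records the hub/agent
        * address of CRB block NAME (shifted into the top 12 bits) in
        * crb_addr_xform[]; qla82xx_decode_crb_addr() uses that table to translate
        * CRB addresses read from the flash init sequence into PCI offsets.
        */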
     50static void qla82xx_crb_addr_transform_setup(void)
     51{
     52	qla82xx_crb_addr_transform(XDMA);
     53	qla82xx_crb_addr_transform(TIMR);
     54	qla82xx_crb_addr_transform(SRE);
     55	qla82xx_crb_addr_transform(SQN3);
     56	qla82xx_crb_addr_transform(SQN2);
     57	qla82xx_crb_addr_transform(SQN1);
     58	qla82xx_crb_addr_transform(SQN0);
     59	qla82xx_crb_addr_transform(SQS3);
     60	qla82xx_crb_addr_transform(SQS2);
     61	qla82xx_crb_addr_transform(SQS1);
     62	qla82xx_crb_addr_transform(SQS0);
     63	qla82xx_crb_addr_transform(RPMX7);
     64	qla82xx_crb_addr_transform(RPMX6);
     65	qla82xx_crb_addr_transform(RPMX5);
     66	qla82xx_crb_addr_transform(RPMX4);
     67	qla82xx_crb_addr_transform(RPMX3);
     68	qla82xx_crb_addr_transform(RPMX2);
     69	qla82xx_crb_addr_transform(RPMX1);
     70	qla82xx_crb_addr_transform(RPMX0);
     71	qla82xx_crb_addr_transform(ROMUSB);
     72	qla82xx_crb_addr_transform(SN);
     73	qla82xx_crb_addr_transform(QMN);
     74	qla82xx_crb_addr_transform(QMS);
     75	qla82xx_crb_addr_transform(PGNI);
     76	qla82xx_crb_addr_transform(PGND);
     77	qla82xx_crb_addr_transform(PGN3);
     78	qla82xx_crb_addr_transform(PGN2);
     79	qla82xx_crb_addr_transform(PGN1);
     80	qla82xx_crb_addr_transform(PGN0);
     81	qla82xx_crb_addr_transform(PGSI);
     82	qla82xx_crb_addr_transform(PGSD);
     83	qla82xx_crb_addr_transform(PGS3);
     84	qla82xx_crb_addr_transform(PGS2);
     85	qla82xx_crb_addr_transform(PGS1);
     86	qla82xx_crb_addr_transform(PGS0);
     87	qla82xx_crb_addr_transform(PS);
     88	qla82xx_crb_addr_transform(PH);
     89	qla82xx_crb_addr_transform(NIU);
     90	qla82xx_crb_addr_transform(I2Q);
     91	qla82xx_crb_addr_transform(EG);
     92	qla82xx_crb_addr_transform(MN);
     93	qla82xx_crb_addr_transform(MS);
     94	qla82xx_crb_addr_transform(CAS2);
     95	qla82xx_crb_addr_transform(CAS1);
     96	qla82xx_crb_addr_transform(CAS0);
     97	qla82xx_crb_addr_transform(CAM);
     98	qla82xx_crb_addr_transform(C2C1);
     99	qla82xx_crb_addr_transform(C2C0);
    100	qla82xx_crb_addr_transform(SMB);
    101	qla82xx_crb_addr_transform(OCM0);
    102	/*
    103	 * Used only in P3; just define it for P2 also.
    104	 */
    105	qla82xx_crb_addr_transform(I2C0);
    106
    107	qla82xx_crb_table_initialized = 1;
    108}
    109
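       /*
        * Direct 128M -> 2M mapping table: one entry per 1M CRB block, each with up
        * to sixteen 64K sub-blocks of {valid, start_128M, end_128M, start_2M}.
        * Offsets not covered here are reached through the sliding CRB window
        * instead (see qla82xx_pci_get_crb_addr_2M()).
        */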
    110static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
    111	{{{0, 0,         0,         0} } },
    112	{{{1, 0x0100000, 0x0102000, 0x120000},
    113	{1, 0x0110000, 0x0120000, 0x130000},
    114	{1, 0x0120000, 0x0122000, 0x124000},
    115	{1, 0x0130000, 0x0132000, 0x126000},
    116	{1, 0x0140000, 0x0142000, 0x128000},
    117	{1, 0x0150000, 0x0152000, 0x12a000},
    118	{1, 0x0160000, 0x0170000, 0x110000},
    119	{1, 0x0170000, 0x0172000, 0x12e000},
    120	{0, 0x0000000, 0x0000000, 0x000000},
    121	{0, 0x0000000, 0x0000000, 0x000000},
    122	{0, 0x0000000, 0x0000000, 0x000000},
    123	{0, 0x0000000, 0x0000000, 0x000000},
    124	{0, 0x0000000, 0x0000000, 0x000000},
    125	{0, 0x0000000, 0x0000000, 0x000000},
    126	{1, 0x01e0000, 0x01e0800, 0x122000},
    127	{0, 0x0000000, 0x0000000, 0x000000} } } ,
    128	{{{1, 0x0200000, 0x0210000, 0x180000} } },
    129	{{{0, 0,         0,         0} } },
    130	{{{1, 0x0400000, 0x0401000, 0x169000} } },
    131	{{{1, 0x0500000, 0x0510000, 0x140000} } },
    132	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },
    133	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },
    134	{{{1, 0x0800000, 0x0802000, 0x170000},
    135	{0, 0x0000000, 0x0000000, 0x000000},
    136	{0, 0x0000000, 0x0000000, 0x000000},
    137	{0, 0x0000000, 0x0000000, 0x000000},
    138	{0, 0x0000000, 0x0000000, 0x000000},
    139	{0, 0x0000000, 0x0000000, 0x000000},
    140	{0, 0x0000000, 0x0000000, 0x000000},
    141	{0, 0x0000000, 0x0000000, 0x000000},
    142	{0, 0x0000000, 0x0000000, 0x000000},
    143	{0, 0x0000000, 0x0000000, 0x000000},
    144	{0, 0x0000000, 0x0000000, 0x000000},
    145	{0, 0x0000000, 0x0000000, 0x000000},
    146	{0, 0x0000000, 0x0000000, 0x000000},
    147	{0, 0x0000000, 0x0000000, 0x000000},
    148	{0, 0x0000000, 0x0000000, 0x000000},
    149	{1, 0x08f0000, 0x08f2000, 0x172000} } },
    150	{{{1, 0x0900000, 0x0902000, 0x174000},
    151	{0, 0x0000000, 0x0000000, 0x000000},
    152	{0, 0x0000000, 0x0000000, 0x000000},
    153	{0, 0x0000000, 0x0000000, 0x000000},
    154	{0, 0x0000000, 0x0000000, 0x000000},
    155	{0, 0x0000000, 0x0000000, 0x000000},
    156	{0, 0x0000000, 0x0000000, 0x000000},
    157	{0, 0x0000000, 0x0000000, 0x000000},
    158	{0, 0x0000000, 0x0000000, 0x000000},
    159	{0, 0x0000000, 0x0000000, 0x000000},
    160	{0, 0x0000000, 0x0000000, 0x000000},
    161	{0, 0x0000000, 0x0000000, 0x000000},
    162	{0, 0x0000000, 0x0000000, 0x000000},
    163	{0, 0x0000000, 0x0000000, 0x000000},
    164	{0, 0x0000000, 0x0000000, 0x000000},
    165	{1, 0x09f0000, 0x09f2000, 0x176000} } },
    166	{{{0, 0x0a00000, 0x0a02000, 0x178000},
    167	{0, 0x0000000, 0x0000000, 0x000000},
    168	{0, 0x0000000, 0x0000000, 0x000000},
    169	{0, 0x0000000, 0x0000000, 0x000000},
    170	{0, 0x0000000, 0x0000000, 0x000000},
    171	{0, 0x0000000, 0x0000000, 0x000000},
    172	{0, 0x0000000, 0x0000000, 0x000000},
    173	{0, 0x0000000, 0x0000000, 0x000000},
    174	{0, 0x0000000, 0x0000000, 0x000000},
    175	{0, 0x0000000, 0x0000000, 0x000000},
    176	{0, 0x0000000, 0x0000000, 0x000000},
    177	{0, 0x0000000, 0x0000000, 0x000000},
    178	{0, 0x0000000, 0x0000000, 0x000000},
    179	{0, 0x0000000, 0x0000000, 0x000000},
    180	{0, 0x0000000, 0x0000000, 0x000000},
    181	{1, 0x0af0000, 0x0af2000, 0x17a000} } },
    182	{{{0, 0x0b00000, 0x0b02000, 0x17c000},
    183	{0, 0x0000000, 0x0000000, 0x000000},
    184	{0, 0x0000000, 0x0000000, 0x000000},
    185	{0, 0x0000000, 0x0000000, 0x000000},
    186	{0, 0x0000000, 0x0000000, 0x000000},
    187	{0, 0x0000000, 0x0000000, 0x000000},
    188	{0, 0x0000000, 0x0000000, 0x000000},
    189	{0, 0x0000000, 0x0000000, 0x000000},
    190	{0, 0x0000000, 0x0000000, 0x000000},
    191	{0, 0x0000000, 0x0000000, 0x000000},
    192	{0, 0x0000000, 0x0000000, 0x000000},
    193	{0, 0x0000000, 0x0000000, 0x000000},
    194	{0, 0x0000000, 0x0000000, 0x000000},
    195	{0, 0x0000000, 0x0000000, 0x000000},
    196	{0, 0x0000000, 0x0000000, 0x000000},
    197	{1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
    198	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
    199	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
    200	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
    201	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },
    202	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },
    203	{{{1, 0x1100000, 0x1101000, 0x160000} } },
    204	{{{1, 0x1200000, 0x1201000, 0x161000} } },
    205	{{{1, 0x1300000, 0x1301000, 0x162000} } },
    206	{{{1, 0x1400000, 0x1401000, 0x163000} } },
    207	{{{1, 0x1500000, 0x1501000, 0x165000} } },
    208	{{{1, 0x1600000, 0x1601000, 0x166000} } },
    209	{{{0, 0,         0,         0} } },
    210	{{{0, 0,         0,         0} } },
    211	{{{0, 0,         0,         0} } },
    212	{{{0, 0,         0,         0} } },
    213	{{{0, 0,         0,         0} } },
    214	{{{0, 0,         0,         0} } },
    215	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },
    216	{{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
    217	{{{1, 0x1f00000, 0x1f10000, 0x150000} } },
    218	{{{0} } },
    219	{{{1, 0x2100000, 0x2102000, 0x120000},
    220	{1, 0x2110000, 0x2120000, 0x130000},
    221	{1, 0x2120000, 0x2122000, 0x124000},
    222	{1, 0x2130000, 0x2132000, 0x126000},
    223	{1, 0x2140000, 0x2142000, 0x128000},
    224	{1, 0x2150000, 0x2152000, 0x12a000},
    225	{1, 0x2160000, 0x2170000, 0x110000},
    226	{1, 0x2170000, 0x2172000, 0x12e000},
    227	{0, 0x0000000, 0x0000000, 0x000000},
    228	{0, 0x0000000, 0x0000000, 0x000000},
    229	{0, 0x0000000, 0x0000000, 0x000000},
    230	{0, 0x0000000, 0x0000000, 0x000000},
    231	{0, 0x0000000, 0x0000000, 0x000000},
    232	{0, 0x0000000, 0x0000000, 0x000000},
    233	{0, 0x0000000, 0x0000000, 0x000000},
    234	{0, 0x0000000, 0x0000000, 0x000000} } },
    235	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },
    236	{{{0} } },
    237	{{{0} } },
    238	{{{0} } },
    239	{{{0} } },
    240	{{{0} } },
    241	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },
    242	{{{1, 0x2900000, 0x2901000, 0x16b000} } },
    243	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
    244	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
    245	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
    246	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
    247	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
    248	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
    249	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },
    250	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },
    251	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },
    252	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },
    253	{{{0} } },
    254	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },
    255	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },
    256	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },
    257	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },
    258	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },
    259	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
    260	{{{0} } },
    261	{{{0} } },
    262	{{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
    263	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },
    264	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }
    265};
    266
    267/*
    268 * top 12 bits of crb internal address (hub, agent)
    269 */
    270static unsigned qla82xx_crb_hub_agt[64] = {
    271	0,
    272	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
    273	QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
    274	QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
    275	0,
    276	QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
    277	QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
    278	QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
    279	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
    280	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
    281	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
    282	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
    283	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
    284	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
    285	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
    286	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
    287	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
    288	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
    289	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
    290	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
    291	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
    292	QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
    293	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
    294	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
    295	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
    296	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
    297	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
    298	0,
    299	QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
    300	QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
    301	0,
    302	QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
    303	0,
    304	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
    305	QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
    306	0,
    307	0,
    308	0,
    309	0,
    310	0,
    311	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
    312	0,
    313	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
    314	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
    315	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
    316	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
    317	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
    318	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
    319	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
    320	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
    321	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
    322	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
    323	0,
    324	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
    325	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
    326	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
    327	QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
    328	0,
    329	QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
    330	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
    331	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
    332	0,
    333	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
    334	0,
    335};
    336
    337/* Device states */
    338static const char *const q_dev_state[] = {
    339	[QLA8XXX_DEV_UNKNOWN]		= "Unknown",
    340	[QLA8XXX_DEV_COLD]		= "Cold/Re-init",
    341	[QLA8XXX_DEV_INITIALIZING]	= "Initializing",
    342	[QLA8XXX_DEV_READY]		= "Ready",
    343	[QLA8XXX_DEV_NEED_RESET]	= "Need Reset",
    344	[QLA8XXX_DEV_NEED_QUIESCENT]	= "Need Quiescent",
    345	[QLA8XXX_DEV_FAILED]		= "Failed",
    346	[QLA8XXX_DEV_QUIESCENT]		= "Quiescent",
    347};
    348
    349const char *qdev_state(uint32_t dev_state)
    350{
    351	return (dev_state < MAX_STATES) ? q_dev_state[dev_state] : "Unknown";
    352}
    353
    354/*
    355 * In: 'off_in' is offset from CRB space in 128M pci map
    356 * Out: 'off_out' is 2M pci map addr
    357 * side effect: lock crb window
    358 */
    359static void
    360qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
    361			     void __iomem **off_out)
    362{
    363	u32 win_read;
    364	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
    365
    366	ha->crb_win = CRB_HI(off_in);
    367	writel(ha->crb_win, CRB_WINDOW_2M + ha->nx_pcibase);
    368
    369	/* Read back value to make sure write has gone through before trying
    370	 * to use it.
    371	 */
    372	win_read = rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase);
    373	if (win_read != ha->crb_win) {
    374		ql_dbg(ql_dbg_p3p, vha, 0xb000,
    375		    "%s: Written crbwin (0x%x) "
    376		    "!= Read crbwin (0x%x), off=0x%lx.\n",
    377		    __func__, ha->crb_win, win_read, off_in);
    378	}
    379	*off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
    380}
    381
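       /*
        * Translate a CRB offset in the 128M PCI map to an address in the 2M map.
        * Returns 0 when the offset is covered by the direct mapping (or the CAMQM
        * range), 1 when the caller must program the CRB window first (off_out then
        * still holds the CRB-relative offset), and -1 for an out-of-range offset.
        */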
    382static int
    383qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
    384			    void __iomem **off_out)
    385{
    386	struct crb_128M_2M_sub_block_map *m;
    387
    388	if (off_in >= QLA82XX_CRB_MAX)
    389		return -1;
    390
    391	if (off_in >= QLA82XX_PCI_CAMQM && off_in < QLA82XX_PCI_CAMQM_2M_END) {
    392		*off_out = (off_in - QLA82XX_PCI_CAMQM) +
    393		    QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
    394		return 0;
    395	}
    396
    397	if (off_in < QLA82XX_PCI_CRBSPACE)
    398		return -1;
    399
    400	off_in -= QLA82XX_PCI_CRBSPACE;
    401
    402	/* Try direct map */
    403	m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)];
    404
    405	if (m->valid && (m->start_128M <= off_in) && (m->end_128M > off_in)) {
    406		*off_out = off_in + m->start_2M - m->start_128M + ha->nx_pcibase;
    407		return 0;
    408	}
    409	/* Not in direct map, use crb window */
    410	*off_out = (void __iomem *)off_in;
    411	return 1;
    412}
    413
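       /*
        * The CRB window is shared between PCI functions: reading PCIE_SEM7_LOCK
        * returns 1 once the hardware semaphore is owned, and it is released by
        * reading PCIE_SEM7_UNLOCK (see qla82xx_wr_32()/qla82xx_rd_32()).
        */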
    414#define CRB_WIN_LOCK_TIMEOUT 100000000
    415static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
    416{
    417	int done = 0, timeout = 0;
    418
    419	while (!done) {
    420		/* acquire semaphore3 from PCI HW block */
    421		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
    422		if (done == 1)
    423			break;
    424		if (timeout >= CRB_WIN_LOCK_TIMEOUT)
    425			return -1;
    426		timeout++;
    427	}
    428	qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
    429	return 0;
    430}
    431
    432int
    433qla82xx_wr_32(struct qla_hw_data *ha, ulong off_in, u32 data)
    434{
    435	void __iomem *off;
    436	unsigned long flags = 0;
    437	int rv;
    438
    439	rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off);
    440
    441	BUG_ON(rv == -1);
    442
    443	if (rv == 1) {
    444#ifndef __CHECKER__
    445		write_lock_irqsave(&ha->hw_lock, flags);
    446#endif
    447		qla82xx_crb_win_lock(ha);
    448		qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
    449	}
    450
    451	writel(data, (void __iomem *)off);
    452
    453	if (rv == 1) {
    454		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
    455#ifndef __CHECKER__
    456		write_unlock_irqrestore(&ha->hw_lock, flags);
    457#endif
    458	}
    459	return 0;
    460}
    461
    462int
    463qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in)
    464{
    465	void __iomem *off;
    466	unsigned long flags = 0;
    467	int rv;
    468	u32 data;
    469
    470	rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off);
    471
    472	BUG_ON(rv == -1);
    473
    474	if (rv == 1) {
    475#ifndef __CHECKER__
    476		write_lock_irqsave(&ha->hw_lock, flags);
    477#endif
    478		qla82xx_crb_win_lock(ha);
    479		qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
    480	}
    481	data = rd_reg_dword(off);
    482
    483	if (rv == 1) {
    484		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
    485#ifndef __CHECKER__
    486		write_unlock_irqrestore(&ha->hw_lock, flags);
    487#endif
    488	}
    489	return data;
    490}
    491
    492/*
    493 * Context: task, might sleep
    494 */
    495int qla82xx_idc_lock(struct qla_hw_data *ha)
    496{
    497	const int delay_ms = 100, timeout_ms = 2000;
    498	int done, total = 0;
    499
    500	might_sleep();
    501
    502	while (true) {
    503		/* acquire semaphore5 from PCI HW block */
    504		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
    505		if (done == 1)
    506			break;
    507		if (WARN_ON_ONCE(total >= timeout_ms))
    508			return -1;
    509
    510		total += delay_ms;
    511		msleep(delay_ms);
    512	}
    513
    514	return 0;
    515}
    516
    517void qla82xx_idc_unlock(struct qla_hw_data *ha)
    518{
    519	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
    520}
    521
    522/*
    523 * Check memory access boundary.
    524 * Used by the test agent; supports DDR access only for now.
    525 */
    526static unsigned long
    527qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
    528	unsigned long long addr, int size)
    529{
    530	if (!addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
    531		QLA82XX_ADDR_DDR_NET_MAX) ||
    532		!addr_in_range(addr + size - 1, QLA82XX_ADDR_DDR_NET,
    533		QLA82XX_ADDR_DDR_NET_MAX) ||
    534		((size != 1) && (size != 2) && (size != 4) && (size != 8)))
    535			return 0;
    536	else
    537		return 1;
    538}
    539
    540static int qla82xx_pci_set_window_warning_count;
    541
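       /*
        * Program the DDR/OCM/QDR memory window covering 'addr' and return the
        * matching offset within the 2M PCI map, or -1UL if the address does not
        * fall into any known range.
        */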
    542static unsigned long
    543qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
    544{
    545	int window;
    546	u32 win_read;
    547	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
    548
    549	if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
    550		QLA82XX_ADDR_DDR_NET_MAX)) {
    551		/* DDR network side */
    552		window = MN_WIN(addr);
    553		ha->ddr_mn_window = window;
    554		qla82xx_wr_32(ha,
    555			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
    556		win_read = qla82xx_rd_32(ha,
    557			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
    558		if ((win_read << 17) != window) {
    559			ql_dbg(ql_dbg_p3p, vha, 0xb003,
    560			    "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n",
    561			    __func__, window, win_read);
    562		}
    563		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
    564	} else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
    565		QLA82XX_ADDR_OCM0_MAX)) {
    566		unsigned int temp1;
    567
    568		if ((addr & 0x00ff800) == 0xff800) {
    569			ql_log(ql_log_warn, vha, 0xb004,
    570			    "%s: QM access not handled.\n", __func__);
    571			addr = -1UL;
    572		}
    573		window = OCM_WIN(addr);
    574		ha->ddr_mn_window = window;
    575		qla82xx_wr_32(ha,
    576			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
    577		win_read = qla82xx_rd_32(ha,
    578			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
    579		temp1 = ((window & 0x1FF) << 7) |
    580		    ((window & 0x0FFFE0000) >> 17);
    581		if (win_read != temp1) {
    582			ql_log(ql_log_warn, vha, 0xb005,
    583			    "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n",
    584			    __func__, temp1, win_read);
    585		}
    586		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
    587
    588	} else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET,
    589		QLA82XX_P3_ADDR_QDR_NET_MAX)) {
    590		/* QDR network side */
    591		window = MS_WIN(addr);
    592		ha->qdr_sn_window = window;
    593		qla82xx_wr_32(ha,
    594			ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
    595		win_read = qla82xx_rd_32(ha,
    596			ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
    597		if (win_read != window) {
    598			ql_log(ql_log_warn, vha, 0xb006,
    599			    "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n",
    600			    __func__, window, win_read);
    601		}
    602		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
    603	} else {
    604		/*
    605		 * peg gdb frequently accesses memory that doesn't exist,
    606		 * this limits the chit chat so debugging isn't slowed down.
    607		 */
    608		if ((qla82xx_pci_set_window_warning_count++ < 8) ||
    609		    (qla82xx_pci_set_window_warning_count%64 == 0)) {
    610			ql_log(ql_log_warn, vha, 0xb007,
    611			    "%s: Warning:%s Unknown address range!.\n",
    612			    __func__, QLA2XXX_DRIVER_NAME);
    613		}
    614		addr = -1UL;
    615	}
    616	return addr;
    617}
    618
    619/* check if address is in the same window as the previous access */
    620static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
    621	unsigned long long addr)
    622{
    623	int			window;
    624	unsigned long long	qdr_max;
    625
    626	qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
    627
    628	/* DDR network side */
    629	if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
    630		QLA82XX_ADDR_DDR_NET_MAX))
    631		BUG();
    632	else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
    633		QLA82XX_ADDR_OCM0_MAX))
    634		return 1;
    635	else if (addr_in_range(addr, QLA82XX_ADDR_OCM1,
    636		QLA82XX_ADDR_OCM1_MAX))
    637		return 1;
    638	else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
    639		/* QDR network side */
    640		window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
    641		if (ha->qdr_sn_window == window)
    642			return 1;
    643	}
    644	return 0;
    645}
    646
    647static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
    648	u64 off, void *data, int size)
    649{
    650	unsigned long   flags;
    651	void __iomem *addr = NULL;
    652	int             ret = 0;
    653	u64             start;
    654	uint8_t __iomem  *mem_ptr = NULL;
    655	unsigned long   mem_base;
    656	unsigned long   mem_page;
    657	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
    658
    659	write_lock_irqsave(&ha->hw_lock, flags);
    660
    661	/*
    662	 * If attempting to access unknown address or straddle hw windows,
    663	 * do not access.
    664	 */
    665	start = qla82xx_pci_set_window(ha, off);
    666	if ((start == -1UL) ||
    667		(qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
    668		write_unlock_irqrestore(&ha->hw_lock, flags);
    669		ql_log(ql_log_fatal, vha, 0xb008,
    670		    "%s out of bound pci memory "
    671		    "access, offset is 0x%llx.\n",
    672		    QLA2XXX_DRIVER_NAME, off);
    673		return -1;
    674	}
    675
    676	write_unlock_irqrestore(&ha->hw_lock, flags);
    677	mem_base = pci_resource_start(ha->pdev, 0);
    678	mem_page = start & PAGE_MASK;
    679	/* Map two pages whenever user tries to access addresses in two
    680	 * consecutive pages.
    681	 */
    682	if (mem_page != ((start + size - 1) & PAGE_MASK))
    683		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
    684	else
    685		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
    686	if (mem_ptr == NULL) {
    687		*(u8  *)data = 0;
    688		return -1;
    689	}
    690	addr = mem_ptr;
    691	addr += start & (PAGE_SIZE - 1);
    692	write_lock_irqsave(&ha->hw_lock, flags);
    693
    694	switch (size) {
    695	case 1:
    696		*(u8  *)data = readb(addr);
    697		break;
    698	case 2:
    699		*(u16 *)data = readw(addr);
    700		break;
    701	case 4:
    702		*(u32 *)data = readl(addr);
    703		break;
    704	case 8:
    705		*(u64 *)data = readq(addr);
    706		break;
    707	default:
    708		ret = -1;
    709		break;
    710	}
    711	write_unlock_irqrestore(&ha->hw_lock, flags);
    712
    713	if (mem_ptr)
    714		iounmap(mem_ptr);
    715	return ret;
    716}
    717
    718static int
    719qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
    720	u64 off, void *data, int size)
    721{
    722	unsigned long   flags;
    723	void  __iomem *addr = NULL;
    724	int             ret = 0;
    725	u64             start;
    726	uint8_t __iomem *mem_ptr = NULL;
    727	unsigned long   mem_base;
    728	unsigned long   mem_page;
    729	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
    730
    731	write_lock_irqsave(&ha->hw_lock, flags);
    732
    733	/*
    734	 * If attempting to access unknown address or straddle hw windows,
    735	 * do not access.
    736	 */
    737	start = qla82xx_pci_set_window(ha, off);
    738	if ((start == -1UL) ||
    739		(qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
    740		write_unlock_irqrestore(&ha->hw_lock, flags);
    741		ql_log(ql_log_fatal, vha, 0xb009,
    742		    "%s out of bound memory "
    743		    "access, offset is 0x%llx.\n",
    744		    QLA2XXX_DRIVER_NAME, off);
    745		return -1;
    746	}
    747
    748	write_unlock_irqrestore(&ha->hw_lock, flags);
    749	mem_base = pci_resource_start(ha->pdev, 0);
    750	mem_page = start & PAGE_MASK;
    751	/* Map two pages whenever user tries to access addresses in two
    752	 * consecutive pages.
    753	 */
    754	if (mem_page != ((start + size - 1) & PAGE_MASK))
    755		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
    756	else
    757		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
    758	if (mem_ptr == NULL)
    759		return -1;
    760
    761	addr = mem_ptr;
    762	addr += start & (PAGE_SIZE - 1);
    763	write_lock_irqsave(&ha->hw_lock, flags);
    764
    765	switch (size) {
    766	case 1:
    767		writeb(*(u8  *)data, addr);
    768		break;
    769	case 2:
    770		writew(*(u16 *)data, addr);
    771		break;
    772	case 4:
    773		writel(*(u32 *)data, addr);
    774		break;
    775	case 8:
    776		writeq(*(u64 *)data, addr);
    777		break;
    778	default:
    779		ret = -1;
    780		break;
    781	}
    782	write_unlock_irqrestore(&ha->hw_lock, flags);
    783	if (mem_ptr)
    784		iounmap(mem_ptr);
    785	return ret;
    786}
    787
    788#define MTU_FUDGE_FACTOR 100
    789static unsigned long
    790qla82xx_decode_crb_addr(unsigned long addr)
    791{
    792	int i;
    793	unsigned long base_addr, offset, pci_base;
    794
    795	if (!qla82xx_crb_table_initialized)
    796		qla82xx_crb_addr_transform_setup();
    797
    798	pci_base = ADDR_ERROR;
    799	base_addr = addr & 0xfff00000;
    800	offset = addr & 0x000fffff;
    801
    802	for (i = 0; i < MAX_CRB_XFORM; i++) {
    803		if (crb_addr_xform[i] == base_addr) {
    804			pci_base = i << 20;
    805			break;
    806		}
    807	}
    808	if (pci_base == ADDR_ERROR)
    809		return pci_base;
    810	return pci_base + offset;
    811}
    812
    813static long rom_max_timeout = 100;
    814static long qla82xx_rom_lock_timeout = 100;
    815
    816static int
    817qla82xx_rom_lock(struct qla_hw_data *ha)
    818{
    819	int done = 0, timeout = 0;
    820	uint32_t lock_owner = 0;
    821	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
    822
    823	while (!done) {
    824		/* acquire semaphore2 from PCI HW block */
    825		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
    826		if (done == 1)
    827			break;
    828		if (timeout >= qla82xx_rom_lock_timeout) {
    829			lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
    830			ql_dbg(ql_dbg_p3p, vha, 0xb157,
    831			    "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
    832			    __func__, ha->portnum, lock_owner);
    833			return -1;
    834		}
    835		timeout++;
    836	}
    837	qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum);
    838	return 0;
    839}
    840
    841static void
    842qla82xx_rom_unlock(struct qla_hw_data *ha)
    843{
    844	qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff);
    845	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
    846}
    847
    848static int
    849qla82xx_wait_rom_busy(struct qla_hw_data *ha)
    850{
    851	long timeout = 0;
    852	long done = 0;
    853	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
    854
    855	while (done == 0) {
    856		done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
    857		done &= 4;
    858		timeout++;
    859		if (timeout >= rom_max_timeout) {
    860			ql_dbg(ql_dbg_p3p, vha, 0xb00a,
    861			    "%s: Timeout reached waiting for rom busy.\n",
    862			    QLA2XXX_DRIVER_NAME);
    863			return -1;
    864		}
    865	}
    866	return 0;
    867}
    868
    869static int
    870qla82xx_wait_rom_done(struct qla_hw_data *ha)
    871{
    872	long timeout = 0;
    873	long done = 0;
    874	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
    875
    876	while (done == 0) {
    877		done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
    878		done &= 2;
    879		timeout++;
    880		if (timeout >= rom_max_timeout) {
    881			ql_dbg(ql_dbg_p3p, vha, 0xb00b,
    882			    "%s: Timeout reached waiting for rom done.\n",
    883			    QLA2XXX_DRIVER_NAME);
    884			return -1;
    885		}
    886	}
    887	return 0;
    888}
    889
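       /*
        * Indirect CRB access used by the ROM fast-read helpers below: select the
        * 64K CRB window via CRB_WINDOW_2M, then write 'data' (flag != 0) or read
        * (flag == 0) at the window-relative offset through CRB_INDIRECT_2M.
        */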
    890static int
    891qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
    892{
    893	uint32_t  off_value, rval = 0;
    894
    895	wrt_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
    896
    897	/* Read back value to make sure write has gone through */
    898	rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase);
    899	off_value  = (off & 0x0000FFFF);
    900
    901	if (flag)
    902		wrt_reg_dword(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
    903			      data);
    904	else
    905		rval = rd_reg_dword(off_value + CRB_INDIRECT_2M +
    906				    ha->nx_pcibase);
    907
    908	return rval;
    909}
    910
    911static int
    912qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
    913{
    914	/* Dword reads to flash. */
    915	qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (addr & 0xFFFF0000), 1);
    916	*valp = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE +
    917	    (addr & 0x0000FFFF), 0, 0);
    918
    919	return 0;
    920}
    921
    922static int
    923qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
    924{
    925	int ret, loops = 0;
    926	uint32_t lock_owner = 0;
    927	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
    928
    929	while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
    930		udelay(100);
    931		schedule();
    932		loops++;
    933	}
    934	if (loops >= 50000) {
    935		lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
    936		ql_log(ql_log_fatal, vha, 0x00b9,
    937		    "Failed to acquire SEM2 lock, Lock Owner %u.\n",
    938		    lock_owner);
    939		return -1;
    940	}
    941	ret = qla82xx_do_rom_fast_read(ha, addr, valp);
    942	qla82xx_rom_unlock(ha);
    943	return ret;
    944}
    945
    946static int
    947qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
    948{
    949	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
    950
    951	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
    952	qla82xx_wait_rom_busy(ha);
    953	if (qla82xx_wait_rom_done(ha)) {
    954		ql_log(ql_log_warn, vha, 0xb00c,
    955		    "Error waiting for rom done.\n");
    956		return -1;
    957	}
    958	*val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
    959	return 0;
    960}
    961
    962static int
    963qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
    964{
    965	uint32_t val = 0;
    966	int i, ret;
    967	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
    968
    969	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
    970	for (i = 0; i < 50000; i++) {
    971		ret = qla82xx_read_status_reg(ha, &val);
    972		if (ret < 0 || (val & 1) == 0)
    973			return ret;
    974		udelay(10);
    975		cond_resched();
    976	}
    977	ql_log(ql_log_warn, vha, 0xb00d,
    978	       "Timeout reached waiting for write finish.\n");
    979	return -1;
    980}
    981
    982static int
    983qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
    984{
    985	uint32_t val;
    986
    987	qla82xx_wait_rom_busy(ha);
    988	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
    989	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
    990	qla82xx_wait_rom_busy(ha);
    991	if (qla82xx_wait_rom_done(ha))
    992		return -1;
    993	if (qla82xx_read_status_reg(ha, &val) != 0)
    994		return -1;
    995	if ((val & 2) != 2)
    996		return -1;
    997	return 0;
    998}
    999
   1000static int
   1001qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
   1002{
   1003	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
   1004
   1005	if (qla82xx_flash_set_write_enable(ha))
   1006		return -1;
   1007	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
   1008	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
   1009	if (qla82xx_wait_rom_done(ha)) {
   1010		ql_log(ql_log_warn, vha, 0xb00e,
   1011		    "Error waiting for rom done.\n");
   1012		return -1;
   1013	}
   1014	return qla82xx_flash_wait_write_finish(ha);
   1015}
   1016
   1017static int
   1018qla82xx_write_disable_flash(struct qla_hw_data *ha)
   1019{
   1020	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
   1021
   1022	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
   1023	if (qla82xx_wait_rom_done(ha)) {
   1024		ql_log(ql_log_warn, vha, 0xb00f,
   1025		    "Error waiting for rom done.\n");
   1026		return -1;
   1027	}
   1028	return 0;
   1029}
   1030
   1031static int
   1032ql82xx_rom_lock_d(struct qla_hw_data *ha)
   1033{
   1034	int loops = 0;
   1035	uint32_t lock_owner = 0;
   1036	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
   1037
   1038	while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
   1039		udelay(100);
   1040		cond_resched();
   1041		loops++;
   1042	}
   1043	if (loops >= 50000) {
   1044		lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
   1045		ql_log(ql_log_warn, vha, 0xb010,
   1046		    "ROM lock failed, Lock Owner %u.\n", lock_owner);
   1047		return -1;
   1048	}
   1049	return 0;
   1050}
   1051
   1052static int
   1053qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
   1054	uint32_t data)
   1055{
   1056	int ret = 0;
   1057	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
   1058
   1059	ret = ql82xx_rom_lock_d(ha);
   1060	if (ret < 0) {
   1061		ql_log(ql_log_warn, vha, 0xb011,
   1062		    "ROM lock failed.\n");
   1063		return ret;
   1064	}
   1065
   1066	ret = qla82xx_flash_set_write_enable(ha);
   1067	if (ret < 0)
   1068		goto done_write;
   1069
   1070	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
   1071	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
   1072	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
   1073	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
   1074	qla82xx_wait_rom_busy(ha);
   1075	if (qla82xx_wait_rom_done(ha)) {
   1076		ql_log(ql_log_warn, vha, 0xb012,
   1077		    "Error waiting for rom done.\n");
   1078		ret = -1;
   1079		goto done_write;
   1080	}
   1081
   1082	ret = qla82xx_flash_wait_write_finish(ha);
   1083
   1084done_write:
   1085	qla82xx_rom_unlock(ha);
   1086	return ret;
   1087}
   1088
   1089/* This routine performs the CRB initialization sequence
   1090 * to put the ISP into an operational state
   1091 */
   1092static int
   1093qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
   1094{
   1095	int addr, val;
   1096	int i;
   1097	struct crb_addr_pair *buf;
   1098	unsigned long off;
   1099	unsigned offset, n;
   1100	struct qla_hw_data *ha = vha->hw;
   1101
   1102	struct crb_addr_pair {
   1103		long addr;
   1104		long data;
   1105	};
   1106
   1107	/* Halt all the individual PEGs and other blocks of the ISP */
   1108	qla82xx_rom_lock(ha);
   1109
   1110	/* disable all I2Q */
   1111	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
   1112	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
   1113	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
   1114	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
   1115	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
   1116	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
   1117
   1118	/* disable all niu interrupts */
   1119	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
   1120	/* disable xge rx/tx */
   1121	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
   1122	/* disable xg1 rx/tx */
   1123	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
   1124	/* disable sideband mac */
   1125	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
   1126	/* disable ap0 mac */
   1127	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
   1128	/* disable ap1 mac */
   1129	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
   1130
   1131	/* halt sre */
   1132	val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
   1133	qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
   1134
   1135	/* halt epg */
   1136	qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
   1137
   1138	/* halt timers */
   1139	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
   1140	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
   1141	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
   1142	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
   1143	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
   1144	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
   1145
   1146	/* halt pegs */
   1147	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
   1148	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
   1149	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
   1150	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
   1151	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
   1152	msleep(20);
   1153
   1154	/* big hammer */
   1155	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
   1156		/* don't reset CAM block on reset */
   1157		qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
   1158	else
   1159		qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
   1160	qla82xx_rom_unlock(ha);
   1161
   1162	/* Read the signature value from the flash.
   1163	 * Offset 0: Contains the signature (0xcafecafe)
   1164	 * Offset 4: Offset and number of addr/value pairs
   1165	 * that are present in the CRB initialize sequence
   1166	 */
   1167	n = 0;
   1168	if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
   1169	    qla82xx_rom_fast_read(ha, 4, &n) != 0) {
   1170		ql_log(ql_log_fatal, vha, 0x006e,
   1171		    "Error Reading crb_init area: n: %08x.\n", n);
   1172		return -1;
   1173	}
   1174
   1175	/* Offset in flash = lower 16 bits
   1176	 * Number of entries = upper 16 bits
   1177	 */
   1178	offset = n & 0xffffU;
   1179	n = (n >> 16) & 0xffffU;
   1180
   1181	/* number of addr/value pairs should not exceed 1024 entries */
   1182	if (n  >= 1024) {
   1183		ql_log(ql_log_fatal, vha, 0x0071,
   1184		    "Card flash not initialized:n=0x%x.\n", n);
   1185		return -1;
   1186	}
   1187
   1188	ql_log(ql_log_info, vha, 0x0072,
   1189	    "%d CRB init values found in ROM.\n", n);
   1190
   1191	buf = kmalloc_array(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
   1192	if (buf == NULL) {
   1193		ql_log(ql_log_fatal, vha, 0x010c,
   1194		    "Unable to allocate memory.\n");
   1195		return -ENOMEM;
   1196	}
   1197
   1198	for (i = 0; i < n; i++) {
   1199		if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
   1200		    qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
   1201			kfree(buf);
   1202			return -1;
   1203		}
   1204
   1205		buf[i].addr = addr;
   1206		buf[i].data = val;
   1207	}
   1208
   1209	for (i = 0; i < n; i++) {
   1210		/* Translate internal CRB initialization
   1211		 * address to PCI bus address
   1212		 */
   1213		off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
   1214		    QLA82XX_PCI_CRBSPACE;
   1215		/* Not all CRB addr/value pairs are to be written;
   1216		 * some of them are skipped
   1217		 */
   1218
   1219		/* skipping cold reboot MAGIC */
   1220		if (off == QLA82XX_CAM_RAM(0x1fc))
   1221			continue;
   1222
   1223		/* do not reset PCI */
   1224		if (off == (ROMUSB_GLB + 0xbc))
   1225			continue;
   1226
   1227		/* skip core clock, so that firmware can increase the clock */
   1228		if (off == (ROMUSB_GLB + 0xc8))
   1229			continue;
   1230
   1231		/* skip the function enable register */
   1232		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
   1233			continue;
   1234
   1235		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
   1236			continue;
   1237
   1238		if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
   1239			continue;
   1240
   1241		if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
   1242			continue;
   1243
   1244		if (off == ADDR_ERROR) {
   1245			ql_log(ql_log_fatal, vha, 0x0116,
   1246			    "Unknown addr: 0x%08lx.\n", buf[i].addr);
   1247			continue;
   1248		}
   1249
   1250		qla82xx_wr_32(ha, off, buf[i].data);
   1251
   1252		/* ISP requires a much bigger delay to settle down,
   1253		 * else crb_window returns 0xffffffff
   1254		 */
   1255		if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
   1256			msleep(1000);
   1257
   1258		/* ISP requires a millisecond delay between
   1259		 * successive CRB register updates
   1260		 */
   1261		msleep(1);
   1262	}
   1263
   1264	kfree(buf);
   1265
   1266	/* Resetting the data and instruction cache */
   1267	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
   1268	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
   1269	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
   1270
   1271	/* Clear all protocol processing engines */
   1272	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
   1273	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
   1274	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
   1275	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
   1276	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
   1277	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
   1278	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
   1279	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
   1280	return 0;
   1281}
   1282
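       /*
        * Write up to 8 bytes of QDR/DDR memory through the MIU test agent using a
        * read-modify-write of the enclosing 16-byte aligned chunk(s).  Accesses
        * that fail the DDR bound check fall back to qla82xx_pci_mem_write_direct().
        */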
   1283static int
   1284qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
   1285		u64 off, void *data, int size)
   1286{
   1287	int i, j, ret = 0, loop, sz[2], off0;
   1288	int scale, shift_amount, startword;
   1289	uint32_t temp;
   1290	uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
   1291
   1292	/*
   1293	 * If not MN, go check for MS or invalid.
   1294	 */
   1295	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
   1296		mem_crb = QLA82XX_CRB_QDR_NET;
   1297	else {
   1298		mem_crb = QLA82XX_CRB_DDR_NET;
   1299		if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
   1300			return qla82xx_pci_mem_write_direct(ha,
   1301			    off, data, size);
   1302	}
   1303
   1304	off0 = off & 0x7;
   1305	sz[0] = (size < (8 - off0)) ? size : (8 - off0);
   1306	sz[1] = size - sz[0];
   1307
   1308	off8 = off & 0xfffffff0;
   1309	loop = (((off & 0xf) + size - 1) >> 4) + 1;
   1310	shift_amount = 4;
   1311	scale = 2;
   1312	startword = (off & 0xf)/8;
   1313
   1314	for (i = 0; i < loop; i++) {
   1315		if (qla82xx_pci_mem_read_2M(ha, off8 +
   1316		    (i << shift_amount), &word[i * scale], 8))
   1317			return -1;
   1318	}
   1319
   1320	switch (size) {
   1321	case 1:
   1322		tmpw = *((uint8_t *)data);
   1323		break;
   1324	case 2:
   1325		tmpw = *((uint16_t *)data);
   1326		break;
   1327	case 4:
   1328		tmpw = *((uint32_t *)data);
   1329		break;
   1330	case 8:
   1331	default:
   1332		tmpw = *((uint64_t *)data);
   1333		break;
   1334	}
   1335
   1336	if (sz[0] == 8) {
   1337		word[startword] = tmpw;
   1338	} else {
   1339		word[startword] &=
   1340			~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
   1341		word[startword] |= tmpw << (off0 * 8);
   1342	}
   1343	if (sz[1] != 0) {
   1344		word[startword+1] &= ~(~0ULL << (sz[1] * 8));
   1345		word[startword+1] |= tmpw >> (sz[0] * 8);
   1346	}
   1347
   1348	for (i = 0; i < loop; i++) {
   1349		temp = off8 + (i << shift_amount);
   1350		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
   1351		temp = 0;
   1352		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
   1353		temp = word[i * scale] & 0xffffffff;
   1354		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
   1355		temp = (word[i * scale] >> 32) & 0xffffffff;
   1356		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
   1357		temp = word[i*scale + 1] & 0xffffffff;
   1358		qla82xx_wr_32(ha, mem_crb +
   1359		    MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
   1360		temp = (word[i*scale + 1] >> 32) & 0xffffffff;
   1361		qla82xx_wr_32(ha, mem_crb +
   1362		    MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
   1363
   1364		temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
   1365		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
   1366		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
   1367		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
   1368
   1369		for (j = 0; j < MAX_CTL_CHECK; j++) {
   1370			temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
   1371			if ((temp & MIU_TA_CTL_BUSY) == 0)
   1372				break;
   1373		}
   1374
   1375		if (j >= MAX_CTL_CHECK) {
   1376			if (printk_ratelimit())
   1377				dev_err(&ha->pdev->dev,
   1378				    "failed to write through agent.\n");
   1379			ret = -1;
   1380			break;
   1381		}
   1382	}
   1383
   1384	return ret;
   1385}
   1386
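       /*
        * Copy the bootloader image from flash (flt_region_bootload) into ISP
        * memory at BOOTLD_START, 8 bytes at a time, then write the peg-0 and
        * global SW-reset registers to hand control to the bootloader.
        */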
   1387static int
   1388qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
   1389{
   1390	int  i;
   1391	long size = 0;
   1392	long flashaddr = ha->flt_region_bootload << 2;
   1393	long memaddr = BOOTLD_START;
   1394	u64 data;
   1395	u32 high, low;
   1396
   1397	size = (IMAGE_START - BOOTLD_START) / 8;
   1398
   1399	for (i = 0; i < size; i++) {
   1400		if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
   1401		    (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
   1402			return -1;
   1403		}
   1404		data = ((u64)high << 32) | low;
   1405		qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
   1406		flashaddr += 8;
   1407		memaddr += 8;
   1408
   1409		if (i % 0x1000 == 0)
   1410			msleep(1);
   1411	}
   1412	udelay(100);
   1413	read_lock(&ha->hw_lock);
   1414	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
   1415	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
   1416	read_unlock(&ha->hw_lock);
   1417	return 0;
   1418}
   1419
   1420int
   1421qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
   1422		u64 off, void *data, int size)
   1423{
   1424	int i, j = 0, k, start, end, loop, sz[2], off0[2];
   1425	int	      shift_amount;
   1426	uint32_t      temp;
   1427	uint64_t      off8, val, mem_crb, word[2] = {0, 0};
   1428
   1429	/*
   1430	 * If not MN, go check for MS or invalid.
   1431	 */
   1432
   1433	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
   1434		mem_crb = QLA82XX_CRB_QDR_NET;
   1435	else {
   1436		mem_crb = QLA82XX_CRB_DDR_NET;
   1437		if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
   1438			return qla82xx_pci_mem_read_direct(ha,
   1439			    off, data, size);
   1440	}
   1441
   1442	off8 = off & 0xfffffff0;
   1443	off0[0] = off & 0xf;
   1444	sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
   1445	shift_amount = 4;
   1446	loop = ((off0[0] + size - 1) >> shift_amount) + 1;
   1447	off0[1] = 0;
   1448	sz[1] = size - sz[0];
   1449
   1450	for (i = 0; i < loop; i++) {
   1451		temp = off8 + (i << shift_amount);
   1452		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
   1453		temp = 0;
   1454		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
   1455		temp = MIU_TA_CTL_ENABLE;
   1456		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
   1457		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
   1458		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
   1459
   1460		for (j = 0; j < MAX_CTL_CHECK; j++) {
   1461			temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
   1462			if ((temp & MIU_TA_CTL_BUSY) == 0)
   1463				break;
   1464		}
   1465
   1466		if (j >= MAX_CTL_CHECK) {
   1467			if (printk_ratelimit())
   1468				dev_err(&ha->pdev->dev,
   1469				    "failed to read through agent.\n");
   1470			break;
   1471		}
   1472
   1473		start = off0[i] >> 2;
   1474		end   = (off0[i] + sz[i] - 1) >> 2;
   1475		for (k = start; k <= end; k++) {
   1476			temp = qla82xx_rd_32(ha,
   1477					mem_crb + MIU_TEST_AGT_RDDATA(k));
   1478			word[i] |= ((uint64_t)temp << (32 * (k & 1)));
   1479		}
   1480	}
   1481
   1482	if (j >= MAX_CTL_CHECK)
   1483		return -1;
   1484
   1485	if ((off0[0] & 7) == 0) {
   1486		val = word[0];
   1487	} else {
   1488		val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
   1489			((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
   1490	}
   1491
   1492	switch (size) {
   1493	case 1:
   1494		*(uint8_t  *)data = val;
   1495		break;
   1496	case 2:
   1497		*(uint16_t *)data = val;
   1498		break;
   1499	case 4:
   1500		*(uint32_t *)data = val;
   1501		break;
   1502	case 8:
   1503		*(uint64_t *)data = val;
   1504		break;
   1505	}
   1506	return 0;
   1507}
   1508
   1509
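       /*
        * Unified ROM image (URI) helpers: walk the directory at the start of the
        * firmware blob to find the table descriptor for a section, then use its
        * findex/entry_size to locate the data descriptors for the bootloader and
        * firmware images.
        */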
   1510static struct qla82xx_uri_table_desc *
   1511qla82xx_get_table_desc(const u8 *unirom, int section)
   1512{
   1513	uint32_t i;
   1514	struct qla82xx_uri_table_desc *directory =
   1515		(struct qla82xx_uri_table_desc *)&unirom[0];
   1516	uint32_t offset;
   1517	uint32_t tab_type;
   1518	uint32_t entries = le32_to_cpu(directory->num_entries);
   1519
   1520	for (i = 0; i < entries; i++) {
   1521		offset = le32_to_cpu(directory->findex) +
   1522		    (i * le32_to_cpu(directory->entry_size));
   1523		tab_type = get_unaligned_le32((u32 *)&unirom[offset] + 8);
   1524
   1525		if (tab_type == section)
   1526			return (struct qla82xx_uri_table_desc *)&unirom[offset];
   1527	}
   1528
   1529	return NULL;
   1530}
   1531
   1532static struct qla82xx_uri_data_desc *
   1533qla82xx_get_data_desc(struct qla_hw_data *ha,
   1534	u32 section, u32 idx_offset)
   1535{
   1536	const u8 *unirom = ha->hablob->fw->data;
   1537	int idx = get_unaligned_le32((u32 *)&unirom[ha->file_prd_off] +
   1538				     idx_offset);
   1539	struct qla82xx_uri_table_desc *tab_desc = NULL;
   1540	uint32_t offset;
   1541
   1542	tab_desc = qla82xx_get_table_desc(unirom, section);
   1543	if (!tab_desc)
   1544		return NULL;
   1545
   1546	offset = le32_to_cpu(tab_desc->findex) +
   1547	    (le32_to_cpu(tab_desc->entry_size) * idx);
   1548
   1549	return (struct qla82xx_uri_data_desc *)&unirom[offset];
   1550}
   1551
   1552static u8 *
   1553qla82xx_get_bootld_offset(struct qla_hw_data *ha)
   1554{
   1555	u32 offset = BOOTLD_START;
   1556	struct qla82xx_uri_data_desc *uri_desc = NULL;
   1557
   1558	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
   1559		uri_desc = qla82xx_get_data_desc(ha,
   1560		    QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
   1561		if (uri_desc)
   1562			offset = le32_to_cpu(uri_desc->findex);
   1563	}
   1564
   1565	return (u8 *)&ha->hablob->fw->data[offset];
   1566}
   1567
   1568static u32 qla82xx_get_fw_size(struct qla_hw_data *ha)
   1569{
   1570	struct qla82xx_uri_data_desc *uri_desc = NULL;
   1571
   1572	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
   1573		uri_desc =  qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
   1574		    QLA82XX_URI_FIRMWARE_IDX_OFF);
   1575		if (uri_desc)
   1576			return le32_to_cpu(uri_desc->size);
   1577	}
   1578
   1579	return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]);
   1580}
   1581
   1582static u8 *
   1583qla82xx_get_fw_offs(struct qla_hw_data *ha)
   1584{
   1585	u32 offset = IMAGE_START;
   1586	struct qla82xx_uri_data_desc *uri_desc = NULL;
   1587
   1588	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
   1589		uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
   1590			QLA82XX_URI_FIRMWARE_IDX_OFF);
   1591		if (uri_desc)
   1592			offset = le32_to_cpu(uri_desc->findex);
   1593	}
   1594
   1595	return (u8 *)&ha->hablob->fw->data[offset];
   1596}
   1597
   1598/* PCI related functions */
   1599int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
   1600{
   1601	unsigned long val = 0;
   1602	u32 control;
   1603
   1604	switch (region) {
   1605	case 0:
   1606		val = 0;
   1607		break;
   1608	case 1:
   1609		pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
   1610		val = control + QLA82XX_MSIX_TBL_SPACE;
   1611		break;
   1612	}
   1613	return val;
   1614}
   1615
   1616
   1617int
   1618qla82xx_iospace_config(struct qla_hw_data *ha)
   1619{
   1620	uint32_t len = 0;
   1621
   1622	if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
   1623		ql_log_pci(ql_log_fatal, ha->pdev, 0x000c,
   1624		    "Failed to reserve selected regions.\n");
   1625		goto iospace_error_exit;
   1626	}
   1627
   1628	/* Use MMIO operations for all accesses. */
   1629	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
   1630		ql_log_pci(ql_log_fatal, ha->pdev, 0x000d,
   1631		    "Region #0 not an MMIO resource, aborting.\n");
   1632		goto iospace_error_exit;
   1633	}
   1634
   1635	len = pci_resource_len(ha->pdev, 0);
   1636	ha->nx_pcibase = ioremap(pci_resource_start(ha->pdev, 0), len);
   1637	if (!ha->nx_pcibase) {
   1638		ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
   1639		    "Cannot remap pcibase MMIO, aborting.\n");
   1640		goto iospace_error_exit;
   1641	}
   1642
   1643	/* Mapping of IO base pointer */
   1644	if (IS_QLA8044(ha)) {
   1645		ha->iobase = ha->nx_pcibase;
   1646	} else if (IS_QLA82XX(ha)) {
   1647		ha->iobase = ha->nx_pcibase + 0xbc000 + (ha->pdev->devfn << 11);
   1648	}
   1649
   1650	if (!ql2xdbwr) {
   1651		ha->nxdb_wr_ptr = ioremap((pci_resource_start(ha->pdev, 4) +
   1652		    (ha->pdev->devfn << 12)), 4);
   1653		if (!ha->nxdb_wr_ptr) {
   1654			ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
   1655			    "Cannot remap MMIO, aborting.\n");
   1656			goto iospace_error_exit;
   1657		}
   1658
   1659		/* Mapping of IO base pointer,
   1660		 * door bell read and write pointer
   1661		 */
   1662		ha->nxdb_rd_ptr = ha->nx_pcibase + (512 * 1024) +
   1663		    (ha->pdev->devfn * 8);
   1664	} else {
   1665		ha->nxdb_wr_ptr = (void __iomem *)(ha->pdev->devfn == 6 ?
   1666			QLA82XX_CAMRAM_DB1 :
   1667			QLA82XX_CAMRAM_DB2);
   1668	}
   1669
   1670	ha->max_req_queues = ha->max_rsp_queues = 1;
   1671	ha->msix_count = ha->max_rsp_queues + 1;
   1672	ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
   1673	    "nx_pci_base=%p iobase=%p "
   1674	    "max_req_queues=%d msix_count=%d.\n",
   1675	    ha->nx_pcibase, ha->iobase,
   1676	    ha->max_req_queues, ha->msix_count);
   1677	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
   1678	    "nx_pci_base=%p iobase=%p "
   1679	    "max_req_queues=%d msix_count=%d.\n",
   1680	    ha->nx_pcibase, ha->iobase,
   1681	    ha->max_req_queues, ha->msix_count);
   1682	return 0;
   1683
   1684iospace_error_exit:
   1685	return -ENOMEM;
   1686}
   1687
   1688/* GS related functions */
   1689
   1690/* Initialization related functions */
   1691
   1692/**
   1693 * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
   1694 * @vha: HA context
   1695 *
   1696 * Returns 0 on success.
   1697 */
   1698int
   1699qla82xx_pci_config(scsi_qla_host_t *vha)
   1700{
   1701	struct qla_hw_data *ha = vha->hw;
   1702	int ret;
   1703
   1704	pci_set_master(ha->pdev);
   1705	ret = pci_set_mwi(ha->pdev);
   1706	ha->chip_revision = ha->pdev->revision;
   1707	ql_dbg(ql_dbg_init, vha, 0x0043,
   1708	    "Chip revision:%d; pci_set_mwi() returned %d.\n",
   1709	    ha->chip_revision, ret);
   1710	return 0;
   1711}
   1712
   1713/**
   1714 * qla82xx_reset_chip() - Disable host interrupts in preparation for an ISP82xx chip reset.
   1715 * @vha: HA context
   1716 *
   1717 * Returns 0 on success.
   1718 */
   1719int
   1720qla82xx_reset_chip(scsi_qla_host_t *vha)
   1721{
   1722	struct qla_hw_data *ha = vha->hw;
   1723
   1724	ha->isp_ops->disable_intrs(ha);
   1725
   1726	return QLA_SUCCESS;
   1727}
   1728
   1729void qla82xx_config_rings(struct scsi_qla_host *vha)
   1730{
   1731	struct qla_hw_data *ha = vha->hw;
   1732	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
   1733	struct init_cb_81xx *icb;
   1734	struct req_que *req = ha->req_q_map[0];
   1735	struct rsp_que *rsp = ha->rsp_q_map[0];
   1736
   1737	/* Setup ring parameters in initialization control block. */
   1738	icb = (struct init_cb_81xx *)ha->init_cb;
   1739	icb->request_q_outpointer = cpu_to_le16(0);
   1740	icb->response_q_inpointer = cpu_to_le16(0);
   1741	icb->request_q_length = cpu_to_le16(req->length);
   1742	icb->response_q_length = cpu_to_le16(rsp->length);
   1743	put_unaligned_le64(req->dma, &icb->request_q_address);
   1744	put_unaligned_le64(rsp->dma, &icb->response_q_address);
   1745
   1746	wrt_reg_dword(&reg->req_q_out[0], 0);
   1747	wrt_reg_dword(&reg->rsp_q_in[0], 0);
   1748	wrt_reg_dword(&reg->rsp_q_out[0], 0);
   1749}
   1750
   1751static int
   1752qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
   1753{
   1754	u64 *ptr64;
   1755	u32 i, flashaddr, size;
   1756	__le64 data;
   1757
   1758	size = (IMAGE_START - BOOTLD_START) / 8;
   1759
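	/* Stage 1: copy the bootloader image into adapter memory,
	 * eight bytes per qla82xx_pci_mem_write_2M() call.
	 */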
   1760	ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
   1761	flashaddr = BOOTLD_START;
   1762
   1763	for (i = 0; i < size; i++) {
   1764		data = cpu_to_le64(ptr64[i]);
   1765		if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
   1766			return -EIO;
   1767		flashaddr += 8;
   1768	}
   1769
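	/* Stage 2: copy the firmware image itself, starting at FLASH_ADDR_START. */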
   1770	flashaddr = FLASH_ADDR_START;
   1771	size = qla82xx_get_fw_size(ha) / 8;
   1772	ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
   1773
   1774	for (i = 0; i < size; i++) {
   1775		data = cpu_to_le64(ptr64[i]);
   1776
   1777		if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
   1778			return -EIO;
   1779		flashaddr += 8;
   1780	}
   1781	udelay(100);
   1782
   1783	/* Write a magic value to CAMRAM register
   1784	 * at a specified offset to indicate
   1785	 * that all data is written and
   1786	 * ready for firmware to initialize.
   1787	 */
   1788	qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);
   1789
   1790	read_lock(&ha->hw_lock);
   1791	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
   1792	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
   1793	read_unlock(&ha->hw_lock);
   1794	return 0;
   1795}
   1796
   1797static int
   1798qla82xx_set_product_offset(struct qla_hw_data *ha)
   1799{
   1800	struct qla82xx_uri_table_desc *ptab_desc = NULL;
   1801	const uint8_t *unirom = ha->hablob->fw->data;
   1802	uint32_t i;
   1803	uint32_t entries;
   1804	uint32_t flags, file_chiprev, offset;
   1805	uint8_t chiprev = ha->chip_revision;
   1806	/* Hardcoding mn_present flag for P3P */
   1807	int mn_present = 0;
   1808	uint32_t flagbit;
   1809
   1810	ptab_desc = qla82xx_get_table_desc(unirom,
   1811		 QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
   1812	if (!ptab_desc)
   1813		return -1;
   1814
   1815	entries = le32_to_cpu(ptab_desc->num_entries);
   1816
   1817	for (i = 0; i < entries; i++) {
   1818		offset = le32_to_cpu(ptab_desc->findex) +
   1819			(i * le32_to_cpu(ptab_desc->entry_size));
   1820		flags = le32_to_cpu(*((__le32 *)&unirom[offset] +
   1821			QLA82XX_URI_FLAGS_OFF));
   1822		file_chiprev = le32_to_cpu(*((__le32 *)&unirom[offset] +
   1823			QLA82XX_URI_CHIP_REV_OFF));
   1824
   1825		flagbit = mn_present ? 1 : 2;
   1826
   1827		if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
   1828			ha->file_prd_off = offset;
   1829			return 0;
   1830		}
   1831	}
   1832	return -1;
   1833}
   1834
   1835static int
   1836qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
   1837{
   1838	uint32_t val;
   1839	uint32_t min_size;
   1840	struct qla_hw_data *ha = vha->hw;
   1841	const struct firmware *fw = ha->hablob->fw;
   1842
   1843	ha->fw_type = fw_type;
   1844
   1845	if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
   1846		if (qla82xx_set_product_offset(ha))
   1847			return -EINVAL;
   1848
   1849		min_size = QLA82XX_URI_FW_MIN_SIZE;
   1850	} else {
   1851		val = get_unaligned_le32(&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
   1852		if (val != QLA82XX_BDINFO_MAGIC)
   1853			return -EINVAL;
   1854
   1855		min_size = QLA82XX_FW_MIN_SIZE;
   1856	}
   1857
   1858	if (fw->size < min_size)
   1859		return -EINVAL;
   1860	return 0;
   1861}
   1862
   1863static int
   1864qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
   1865{
   1866	u32 val = 0;
   1867	int retries = 60;
   1868	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
   1869
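	/* Poll the command-peg state for up to ~30 seconds (60 x 500 ms). */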
   1870	do {
   1871		read_lock(&ha->hw_lock);
   1872		val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
   1873		read_unlock(&ha->hw_lock);
   1874
   1875		switch (val) {
   1876		case PHAN_INITIALIZE_COMPLETE:
   1877		case PHAN_INITIALIZE_ACK:
   1878			return QLA_SUCCESS;
   1879		case PHAN_INITIALIZE_FAILED:
   1880			break;
   1881		default:
   1882			break;
   1883		}
   1884		ql_log(ql_log_info, vha, 0x00a8,
   1885		    "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n",
   1886		    val, retries);
   1887
   1888		msleep(500);
   1889
   1890	} while (--retries);
   1891
   1892	ql_log(ql_log_fatal, vha, 0x00a9,
   1893	    "Cmd Peg initialization failed: 0x%x.\n", val);
   1894
   1895	val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
   1896	read_lock(&ha->hw_lock);
   1897	qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
   1898	read_unlock(&ha->hw_lock);
   1899	return QLA_FUNCTION_FAILED;
   1900}
   1901
   1902static int
   1903qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
   1904{
   1905	u32 val = 0;
   1906	int retries = 60;
   1907	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
   1908
   1909	do {
   1910		read_lock(&ha->hw_lock);
   1911		val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
   1912		read_unlock(&ha->hw_lock);
   1913
   1914		switch (val) {
   1915		case PHAN_INITIALIZE_COMPLETE:
   1916		case PHAN_INITIALIZE_ACK:
   1917			return QLA_SUCCESS;
   1918		case PHAN_INITIALIZE_FAILED:
   1919			break;
   1920		default:
   1921			break;
   1922		}
   1923		ql_log(ql_log_info, vha, 0x00ab,
   1924		    "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n",
   1925		    val, retries);
   1926
   1927		msleep(500);
   1928
   1929	} while (--retries);
   1930
   1931	ql_log(ql_log_fatal, vha, 0x00ac,
   1932	    "Rcv Peg initialization failed: 0x%x.\n", val);
   1933	read_lock(&ha->hw_lock);
   1934	qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
   1935	read_unlock(&ha->hw_lock);
   1936	return QLA_FUNCTION_FAILED;
   1937}
   1938
   1939/* ISR related functions */
   1940static struct qla82xx_legacy_intr_set legacy_intr[] =
   1941	QLA82XX_LEGACY_INTR_CONFIG;
   1942
   1943/*
   1944 * qla82xx_mbx_completion() - Process mailbox command completions.
   1945 * @vha: SCSI driver HA context
   1946 * @mb0: Mailbox0 register
   1947 */
   1948void
   1949qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
   1950{
   1951	uint16_t	cnt;
   1952	__le16 __iomem *wptr;
   1953	struct qla_hw_data *ha = vha->hw;
   1954	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
   1955
   1956	wptr = &reg->mailbox_out[1];
   1957
   1958	/* Load return mailbox registers. */
   1959	ha->flags.mbox_int = 1;
   1960	ha->mailbox_out[0] = mb0;
   1961
   1962	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
   1963		ha->mailbox_out[cnt] = rd_reg_word(wptr);
   1964		wptr++;
   1965	}
   1966
   1967	if (!ha->mcp)
   1968		ql_dbg(ql_dbg_async, vha, 0x5053,
   1969		    "MBX pointer ERROR.\n");
   1970}
   1971
   1972/**
   1973 * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
   1974 * @irq: interrupt number
   1975 * @dev_id: SCSI driver HA context
   1976 *
   1977 * Called by system whenever the host adapter generates an interrupt.
   1978 *
   1979 * Returns handled flag.
   1980 */
   1981irqreturn_t
   1982qla82xx_intr_handler(int irq, void *dev_id)
   1983{
   1984	scsi_qla_host_t	*vha;
   1985	struct qla_hw_data *ha;
   1986	struct rsp_que *rsp;
   1987	struct device_reg_82xx __iomem *reg;
   1988	int status = 0, status1 = 0;
   1989	unsigned long	flags;
   1990	unsigned long	iter;
   1991	uint32_t	stat = 0;
   1992	uint16_t	mb[8];
   1993
   1994	rsp = (struct rsp_que *) dev_id;
   1995	if (!rsp) {
   1996		ql_log(ql_log_info, NULL, 0xb053,
   1997		    "%s: NULL response queue pointer.\n", __func__);
   1998		return IRQ_NONE;
   1999	}
   2000	ha = rsp->hw;
   2001
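	/* For legacy INTx, verify the interrupt was actually raised for
	 * this function before claiming it.
	 */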
   2002	if (!ha->flags.msi_enabled) {
   2003		status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
   2004		if (!(status & ha->nx_legacy_intr.int_vec_bit))
   2005			return IRQ_NONE;
   2006
   2007		status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
   2008		if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
   2009			return IRQ_NONE;
   2010	}
   2011
   2012	/* clear the interrupt */
   2013	qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
   2014
   2015	/* read twice to ensure write is flushed */
   2016	qla82xx_rd_32(ha, ISR_INT_VECTOR);
   2017	qla82xx_rd_32(ha, ISR_INT_VECTOR);
   2018
   2019	reg = &ha->iobase->isp82;
   2020
   2021	spin_lock_irqsave(&ha->hardware_lock, flags);
   2022	vha = pci_get_drvdata(ha->pdev);
   2023	for (iter = 1; iter--; ) {
   2024
   2025		if (rd_reg_dword(&reg->host_int)) {
   2026			stat = rd_reg_dword(&reg->host_status);
   2027
   2028			switch (stat & 0xff) {
   2029			case 0x1:
   2030			case 0x2:
   2031			case 0x10:
   2032			case 0x11:
   2033				qla82xx_mbx_completion(vha, MSW(stat));
   2034				status |= MBX_INTERRUPT;
   2035				break;
   2036			case 0x12:
   2037				mb[0] = MSW(stat);
   2038				mb[1] = rd_reg_word(&reg->mailbox_out[1]);
   2039				mb[2] = rd_reg_word(&reg->mailbox_out[2]);
   2040				mb[3] = rd_reg_word(&reg->mailbox_out[3]);
   2041				qla2x00_async_event(vha, rsp, mb);
   2042				break;
   2043			case 0x13:
   2044				qla24xx_process_response_queue(vha, rsp);
   2045				break;
   2046			default:
   2047				ql_dbg(ql_dbg_async, vha, 0x5054,
   2048				    "Unrecognized interrupt type (%d).\n",
   2049				    stat & 0xff);
   2050				break;
   2051			}
   2052		}
   2053		wrt_reg_dword(&reg->host_int, 0);
   2054	}
   2055
   2056	qla2x00_handle_mbx_completion(ha, status);
   2057	spin_unlock_irqrestore(&ha->hardware_lock, flags);
   2058
   2059	if (!ha->flags.msi_enabled)
   2060		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
   2061
   2062	return IRQ_HANDLED;
   2063}
   2064
   2065irqreturn_t
   2066qla82xx_msix_default(int irq, void *dev_id)
   2067{
   2068	scsi_qla_host_t	*vha;
   2069	struct qla_hw_data *ha;
   2070	struct rsp_que *rsp;
   2071	struct device_reg_82xx __iomem *reg;
   2072	int status = 0;
   2073	unsigned long flags;
   2074	uint32_t stat = 0;
   2075	uint32_t host_int = 0;
   2076	uint16_t mb[8];
   2077
   2078	rsp = (struct rsp_que *) dev_id;
   2079	if (!rsp) {
   2080		printk(KERN_INFO
   2081			"%s(): NULL response queue pointer.\n", __func__);
   2082		return IRQ_NONE;
   2083	}
   2084	ha = rsp->hw;
   2085
   2086	reg = &ha->iobase->isp82;
   2087
   2088	spin_lock_irqsave(&ha->hardware_lock, flags);
   2089	vha = pci_get_drvdata(ha->pdev);
   2090	do {
   2091		host_int = rd_reg_dword(&reg->host_int);
   2092		if (qla2x00_check_reg32_for_disconnect(vha, host_int))
   2093			break;
   2094		if (host_int) {
   2095			stat = rd_reg_dword(&reg->host_status);
   2096
   2097			switch (stat & 0xff) {
   2098			case 0x1:
   2099			case 0x2:
   2100			case 0x10:
   2101			case 0x11:
   2102				qla82xx_mbx_completion(vha, MSW(stat));
   2103				status |= MBX_INTERRUPT;
   2104				break;
   2105			case 0x12:
   2106				mb[0] = MSW(stat);
   2107				mb[1] = rd_reg_word(&reg->mailbox_out[1]);
   2108				mb[2] = rd_reg_word(&reg->mailbox_out[2]);
   2109				mb[3] = rd_reg_word(&reg->mailbox_out[3]);
   2110				qla2x00_async_event(vha, rsp, mb);
   2111				break;
   2112			case 0x13:
   2113				qla24xx_process_response_queue(vha, rsp);
   2114				break;
   2115			default:
   2116				ql_dbg(ql_dbg_async, vha, 0x5041,
   2117				    "Unrecognized interrupt type (%d).\n",
   2118				    stat & 0xff);
   2119				break;
   2120			}
   2121		}
   2122		wrt_reg_dword(&reg->host_int, 0);
   2123	} while (0);
   2124
   2125	qla2x00_handle_mbx_completion(ha, status);
   2126	spin_unlock_irqrestore(&ha->hardware_lock, flags);
   2127
   2128	return IRQ_HANDLED;
   2129}
   2130
   2131irqreturn_t
   2132qla82xx_msix_rsp_q(int irq, void *dev_id)
   2133{
   2134	scsi_qla_host_t	*vha;
   2135	struct qla_hw_data *ha;
   2136	struct rsp_que *rsp;
   2137	struct device_reg_82xx __iomem *reg;
   2138	unsigned long flags;
   2139	uint32_t host_int = 0;
   2140
   2141	rsp = (struct rsp_que *) dev_id;
   2142	if (!rsp) {
   2143		printk(KERN_INFO
   2144			"%s(): NULL response queue pointer.\n", __func__);
   2145		return IRQ_NONE;
   2146	}
   2147
   2148	ha = rsp->hw;
   2149	reg = &ha->iobase->isp82;
   2150	spin_lock_irqsave(&ha->hardware_lock, flags);
   2151	vha = pci_get_drvdata(ha->pdev);
   2152	host_int = rd_reg_dword(&reg->host_int);
   2153	if (qla2x00_check_reg32_for_disconnect(vha, host_int))
   2154		goto out;
   2155	qla24xx_process_response_queue(vha, rsp);
   2156	wrt_reg_dword(&reg->host_int, 0);
   2157out:
   2158	spin_unlock_irqrestore(&ha->hardware_lock, flags);
   2159	return IRQ_HANDLED;
   2160}
   2161
   2162void
   2163qla82xx_poll(int irq, void *dev_id)
   2164{
   2165	scsi_qla_host_t	*vha;
   2166	struct qla_hw_data *ha;
   2167	struct rsp_que *rsp;
   2168	struct device_reg_82xx __iomem *reg;
   2169	uint32_t stat;
   2170	uint32_t host_int = 0;
   2171	uint16_t mb[8];
   2172	unsigned long flags;
   2173
   2174	rsp = (struct rsp_que *) dev_id;
   2175	if (!rsp) {
   2176		printk(KERN_INFO
   2177			"%s(): NULL response queue pointer.\n", __func__);
   2178		return;
   2179	}
   2180	ha = rsp->hw;
   2181
   2182	reg = &ha->iobase->isp82;
   2183	spin_lock_irqsave(&ha->hardware_lock, flags);
   2184	vha = pci_get_drvdata(ha->pdev);
   2185
   2186	host_int = rd_reg_dword(&reg->host_int);
   2187	if (qla2x00_check_reg32_for_disconnect(vha, host_int))
   2188		goto out;
   2189	if (host_int) {
   2190		stat = rd_reg_dword(&reg->host_status);
   2191		switch (stat & 0xff) {
   2192		case 0x1:
   2193		case 0x2:
   2194		case 0x10:
   2195		case 0x11:
   2196			qla82xx_mbx_completion(vha, MSW(stat));
   2197			break;
   2198		case 0x12:
   2199			mb[0] = MSW(stat);
   2200			mb[1] = rd_reg_word(&reg->mailbox_out[1]);
   2201			mb[2] = rd_reg_word(&reg->mailbox_out[2]);
   2202			mb[3] = rd_reg_word(&reg->mailbox_out[3]);
   2203			qla2x00_async_event(vha, rsp, mb);
   2204			break;
   2205		case 0x13:
   2206			qla24xx_process_response_queue(vha, rsp);
   2207			break;
   2208		default:
   2209			ql_dbg(ql_dbg_p3p, vha, 0xb013,
   2210			    "Unrecognized interrupt type (%d).\n",
   2211			    stat & 0xff);
   2212			break;
   2213		}
   2214		wrt_reg_dword(&reg->host_int, 0);
   2215	}
   2216out:
   2217	spin_unlock_irqrestore(&ha->hardware_lock, flags);
   2218}
   2219
   2220void
   2221qla82xx_enable_intrs(struct qla_hw_data *ha)
   2222{
   2223	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
   2224
   2225	qla82xx_mbx_intr_enable(vha);
   2226	spin_lock_irq(&ha->hardware_lock);
   2227	if (IS_QLA8044(ha))
   2228		qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 0);
   2229	else
   2230		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
   2231	spin_unlock_irq(&ha->hardware_lock);
   2232	ha->interrupts_on = 1;
   2233}
   2234
   2235void
   2236qla82xx_disable_intrs(struct qla_hw_data *ha)
   2237{
   2238	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
   2239
   2240	if (ha->interrupts_on)
   2241		qla82xx_mbx_intr_disable(vha);
   2242
   2243	spin_lock_irq(&ha->hardware_lock);
   2244	if (IS_QLA8044(ha))
   2245		qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1);
   2246	else
   2247		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
   2248	spin_unlock_irq(&ha->hardware_lock);
   2249	ha->interrupts_on = 0;
   2250}
   2251
   2252void qla82xx_init_flags(struct qla_hw_data *ha)
   2253{
   2254	struct qla82xx_legacy_intr_set *nx_legacy_intr;
   2255
   2256	/* ISP 8021 initializations */
   2257	rwlock_init(&ha->hw_lock);
   2258	ha->qdr_sn_window = -1;
   2259	ha->ddr_mn_window = -1;
   2260	ha->curr_window = 255;
   2261	ha->portnum = PCI_FUNC(ha->pdev->devfn);
   2262	nx_legacy_intr = &legacy_intr[ha->portnum];
   2263	ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
   2264	ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
   2265	ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
   2266	ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
   2267}
   2268
   2269static inline void
   2270qla82xx_set_idc_version(scsi_qla_host_t *vha)
   2271{
   2272	int idc_ver;
   2273	uint32_t drv_active;
   2274	struct qla_hw_data *ha = vha->hw;
   2275
   2276	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
   2277	if (drv_active == (QLA82XX_DRV_ACTIVE << (ha->portnum * 4))) {
   2278		qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
   2279		    QLA82XX_IDC_VERSION);
   2280		ql_log(ql_log_info, vha, 0xb082,
   2281		    "IDC version updated to %d\n", QLA82XX_IDC_VERSION);
   2282	} else {
   2283		idc_ver = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_IDC_VERSION);
   2284		if (idc_ver != QLA82XX_IDC_VERSION)
   2285			ql_log(ql_log_info, vha, 0xb083,
   2286			    "qla2xxx driver IDC version %d is not compatible "
   2287			    "with IDC version %d of the other drivers\n",
   2288			    QLA82XX_IDC_VERSION, idc_ver);
   2289	}
   2290}
   2291
   2292inline void
   2293qla82xx_set_drv_active(scsi_qla_host_t *vha)
   2294{
   2295	uint32_t drv_active;
   2296	struct qla_hw_data *ha = vha->hw;
   2297
   2298	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
   2299
   2300	/* If reset value is all FF's, initialize DRV_ACTIVE */
   2301	if (drv_active == 0xffffffff) {
   2302		qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
   2303			QLA82XX_DRV_NOT_ACTIVE);
   2304		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
   2305	}
   2306	drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
   2307	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
   2308}
   2309
   2310inline void
   2311qla82xx_clear_drv_active(struct qla_hw_data *ha)
   2312{
   2313	uint32_t drv_active;
   2314
   2315	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
   2316	drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
   2317	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
   2318}
   2319
   2320static inline int
   2321qla82xx_need_reset(struct qla_hw_data *ha)
   2322{
   2323	uint32_t drv_state;
   2324	int rval;
   2325
   2326	if (ha->flags.nic_core_reset_owner)
   2327		return 1;
   2328	else {
   2329		drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
   2330		rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
   2331		return rval;
   2332	}
   2333}
   2334
   2335static inline void
   2336qla82xx_set_rst_ready(struct qla_hw_data *ha)
   2337{
   2338	uint32_t drv_state;
   2339	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
   2340
   2341	drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
   2342
   2343	/* If reset value is all FF's, initialize DRV_STATE */
   2344	if (drv_state == 0xffffffff) {
   2345		qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY);
   2346		drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
   2347	}
   2348	drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
   2349	ql_dbg(ql_dbg_init, vha, 0x00bb,
   2350	    "drv_state = 0x%08x.\n", drv_state);
   2351	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
   2352}
   2353
   2354static inline void
   2355qla82xx_clear_rst_ready(struct qla_hw_data *ha)
   2356{
   2357	uint32_t drv_state;
   2358
   2359	drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
   2360	drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
   2361	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
   2362}
   2363
   2364static inline void
   2365qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
   2366{
   2367	uint32_t qsnt_state;
   2368
   2369	qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
   2370	qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
   2371	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
   2372}
   2373
   2374void
   2375qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha)
   2376{
   2377	struct qla_hw_data *ha = vha->hw;
   2378	uint32_t qsnt_state;
   2379
   2380	qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
   2381	qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
   2382	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
   2383}
   2384
   2385static int
   2386qla82xx_load_fw(scsi_qla_host_t *vha)
   2387{
   2388	int rst;
   2389	struct fw_blob *blob;
   2390	struct qla_hw_data *ha = vha->hw;
   2391
   2392	if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
   2393		ql_log(ql_log_fatal, vha, 0x009f,
   2394		    "Error during CRB initialization.\n");
   2395		return QLA_FUNCTION_FAILED;
   2396	}
   2397	udelay(500);
   2398
   2399	/* Bring QM and CAMRAM out of reset */
   2400	rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
   2401	rst &= ~((1 << 28) | (1 << 24));
   2402	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
   2403
   2404	/*
   2405	 * FW Load priority:
   2406	 * 1) Operational firmware residing in flash.
   2407	 * 2) Firmware via request-firmware interface (.bin file).
   2408	 */
   2409	if (ql2xfwloadbin == 2)
   2410		goto try_blob_fw;
   2411
   2412	ql_log(ql_log_info, vha, 0x00a0,
   2413	    "Attempting to load firmware from flash.\n");
   2414
   2415	if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
   2416		ql_log(ql_log_info, vha, 0x00a1,
   2417		    "Firmware loaded successfully from flash.\n");
   2418		return QLA_SUCCESS;
   2419	} else {
   2420		ql_log(ql_log_warn, vha, 0x0108,
   2421		    "Firmware load from flash failed.\n");
   2422	}
   2423
   2424try_blob_fw:
   2425	ql_log(ql_log_info, vha, 0x00a2,
   2426	    "Attempting to load firmware from blob.\n");
   2427
   2428	/* Load firmware blob. */
   2429	blob = ha->hablob = qla2x00_request_firmware(vha);
   2430	if (!blob) {
   2431		ql_log(ql_log_fatal, vha, 0x00a3,
   2432		    "Firmware image not present.\n");
   2433		goto fw_load_failed;
   2434	}
   2435
   2436	/* Validating firmware blob */
   2437	if (qla82xx_validate_firmware_blob(vha,
   2438		QLA82XX_FLASH_ROMIMAGE)) {
   2439		/* Fallback to URI format */
   2440		if (qla82xx_validate_firmware_blob(vha,
   2441			QLA82XX_UNIFIED_ROMIMAGE)) {
   2442			ql_log(ql_log_fatal, vha, 0x00a4,
   2443			    "No valid firmware image found.\n");
   2444			return QLA_FUNCTION_FAILED;
   2445		}
   2446	}
   2447
   2448	if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
   2449		ql_log(ql_log_info, vha, 0x00a5,
   2450		    "Firmware loaded successfully from binary blob.\n");
   2451		return QLA_SUCCESS;
   2452	}
   2453
   2454	ql_log(ql_log_fatal, vha, 0x00a6,
   2455	       "Firmware load failed for binary blob.\n");
   2456	blob->fw = NULL;
   2457	blob = NULL;
   2458
   2459fw_load_failed:
   2460	return QLA_FUNCTION_FAILED;
   2461}
   2462
   2463int
   2464qla82xx_start_firmware(scsi_qla_host_t *vha)
   2465{
   2466	uint16_t      lnk;
   2467	struct qla_hw_data *ha = vha->hw;
   2468
   2469	/* scrub dma mask expansion register */
   2470	qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);
   2471
   2472	/* Put both the PEG CMD and RCV PEG to default state
   2473	 * of 0 before resetting the hardware
   2474	 */
   2475	qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
   2476	qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
   2477
   2478	/* Overwrite stale initialization register values */
   2479	qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
   2480	qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
   2481
   2482	if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
   2483		ql_log(ql_log_fatal, vha, 0x00a7,
   2484		    "Error trying to start fw.\n");
   2485		return QLA_FUNCTION_FAILED;
   2486	}
   2487
   2488	/* Handshake with the card before we register the devices. */
   2489	if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
   2490		ql_log(ql_log_fatal, vha, 0x00aa,
   2491		    "Error during card handshake.\n");
   2492		return QLA_FUNCTION_FAILED;
   2493	}
   2494
   2495	/* Negotiated Link width */
   2496	pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
   2497	ha->link_width = (lnk >> 4) & 0x3f;
   2498
   2499	/* Synchronize with Receive peg */
   2500	return qla82xx_check_rcvpeg_state(ha);
   2501}
   2502
   2503static __le32 *
   2504qla82xx_read_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr,
   2505	uint32_t length)
   2506{
   2507	uint32_t i;
   2508	uint32_t val;
   2509	struct qla_hw_data *ha = vha->hw;
   2510
   2511	/* Dword reads from flash. */
   2512	for (i = 0; i < length/4; i++, faddr += 4) {
   2513		if (qla82xx_rom_fast_read(ha, faddr, &val)) {
   2514			ql_log(ql_log_warn, vha, 0x0106,
   2515			    "ROM fast read failed.\n");
   2516			goto done_read;
   2517		}
   2518		dwptr[i] = cpu_to_le32(val);
   2519	}
   2520done_read:
   2521	return dwptr;
   2522}
   2523
   2524static int
   2525qla82xx_unprotect_flash(struct qla_hw_data *ha)
   2526{
   2527	int ret;
   2528	uint32_t val;
   2529	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
   2530
   2531	ret = ql82xx_rom_lock_d(ha);
   2532	if (ret < 0) {
   2533		ql_log(ql_log_warn, vha, 0xb014,
   2534		    "ROM Lock failed.\n");
   2535		return ret;
   2536	}
   2537
   2538	ret = qla82xx_read_status_reg(ha, &val);
   2539	if (ret < 0)
   2540		goto done_unprotect;
   2541
   2542	val &= ~(BLOCK_PROTECT_BITS << 2);
   2543	ret = qla82xx_write_status_reg(ha, val);
   2544	if (ret < 0) {
   2545		val |= (BLOCK_PROTECT_BITS << 2);
   2546		qla82xx_write_status_reg(ha, val);
   2547	}
   2548
   2549	if (qla82xx_write_disable_flash(ha) != 0)
   2550		ql_log(ql_log_warn, vha, 0xb015,
   2551		    "Write disable failed.\n");
   2552
   2553done_unprotect:
   2554	qla82xx_rom_unlock(ha);
   2555	return ret;
   2556}
   2557
   2558static int
   2559qla82xx_protect_flash(struct qla_hw_data *ha)
   2560{
   2561	int ret;
   2562	uint32_t val;
   2563	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
   2564
   2565	ret = ql82xx_rom_lock_d(ha);
   2566	if (ret < 0) {
   2567		ql_log(ql_log_warn, vha, 0xb016,
   2568		    "ROM Lock failed.\n");
   2569		return ret;
   2570	}
   2571
   2572	ret = qla82xx_read_status_reg(ha, &val);
   2573	if (ret < 0)
   2574		goto done_protect;
   2575
   2576	val |= (BLOCK_PROTECT_BITS << 2);
   2577	/* LOCK all sectors */
   2578	ret = qla82xx_write_status_reg(ha, val);
   2579	if (ret < 0)
   2580		ql_log(ql_log_warn, vha, 0xb017,
   2581		    "Write status register failed.\n");
   2582
   2583	if (qla82xx_write_disable_flash(ha) != 0)
   2584		ql_log(ql_log_warn, vha, 0xb018,
   2585		    "Write disable failed.\n");
   2586done_protect:
   2587	qla82xx_rom_unlock(ha);
   2588	return ret;
   2589}
   2590
   2591static int
   2592qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
   2593{
   2594	int ret = 0;
   2595	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
   2596
   2597	ret = ql82xx_rom_lock_d(ha);
   2598	if (ret < 0) {
   2599		ql_log(ql_log_warn, vha, 0xb019,
   2600		    "ROM Lock failed.\n");
   2601		return ret;
   2602	}
   2603
   2604	qla82xx_flash_set_write_enable(ha);
   2605	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
   2606	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
   2607	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
   2608
   2609	if (qla82xx_wait_rom_done(ha)) {
   2610		ql_log(ql_log_warn, vha, 0xb01a,
   2611		    "Error waiting for rom done.\n");
   2612		ret = -1;
   2613		goto done;
   2614	}
   2615	ret = qla82xx_flash_wait_write_finish(ha);
   2616done:
   2617	qla82xx_rom_unlock(ha);
   2618	return ret;
   2619}
   2620
   2621/*
   2622 * Address and length are in bytes.
   2623 */
   2624void *
   2625qla82xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
   2626	uint32_t offset, uint32_t length)
   2627{
   2628	scsi_block_requests(vha->host);
   2629	qla82xx_read_flash_data(vha, buf, offset, length);
   2630	scsi_unblock_requests(vha->host);
   2631	return buf;
   2632}
   2633
   2634static int
   2635qla82xx_write_flash_data(struct scsi_qla_host *vha, __le32 *dwptr,
   2636	uint32_t faddr, uint32_t dwords)
   2637{
   2638	int ret;
   2639	uint32_t liter;
   2640	uint32_t rest_addr;
   2641	dma_addr_t optrom_dma;
   2642	void *optrom = NULL;
   2643	int page_mode = 0;
   2644	struct qla_hw_data *ha = vha->hw;
   2645
   2646	ret = -1;
   2647
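	/* Note: page_mode is hard-coded to 0 above, so the burst-write
	 * path below is never taken and each dword is written individually.
	 */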
   2648	/* Prepare burst-capable write on supported ISPs. */
   2649	if (page_mode && !(faddr & 0xfff) &&
   2650	    dwords > OPTROM_BURST_DWORDS) {
   2651		optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
   2652		    &optrom_dma, GFP_KERNEL);
   2653		if (!optrom) {
   2654			ql_log(ql_log_warn, vha, 0xb01b,
   2655			    "Unable to allocate memory "
   2656			    "for optrom burst write (%x KB).\n",
   2657			    OPTROM_BURST_SIZE / 1024);
   2658		}
   2659	}
   2660
   2661	rest_addr = ha->fdt_block_size - 1;
   2662
   2663	ret = qla82xx_unprotect_flash(ha);
   2664	if (ret) {
   2665		ql_log(ql_log_warn, vha, 0xb01c,
   2666		    "Unable to unprotect flash for update.\n");
   2667		goto write_done;
   2668	}
   2669
   2670	for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
   2671		/* Are we at the beginning of a sector? */
   2672		if ((faddr & rest_addr) == 0) {
   2673
   2674			ret = qla82xx_erase_sector(ha, faddr);
   2675			if (ret) {
   2676				ql_log(ql_log_warn, vha, 0xb01d,
   2677				    "Unable to erase sector: address=%x.\n",
   2678				    faddr);
   2679				break;
   2680			}
   2681		}
   2682
   2683		/* Go with burst-write. */
   2684		if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
   2685			/* Copy data to DMA'ble buffer. */
   2686			memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
   2687
   2688			ret = qla2x00_load_ram(vha, optrom_dma,
   2689			    (ha->flash_data_off | faddr),
   2690			    OPTROM_BURST_DWORDS);
   2691			if (ret != QLA_SUCCESS) {
   2692				ql_log(ql_log_warn, vha, 0xb01e,
   2693				    "Unable to burst-write optrom segment "
   2694				    "(%x/%x/%llx).\n", ret,
   2695				    (ha->flash_data_off | faddr),
   2696				    (unsigned long long)optrom_dma);
   2697				ql_log(ql_log_warn, vha, 0xb01f,
   2698				    "Reverting to slow-write.\n");
   2699
   2700				dma_free_coherent(&ha->pdev->dev,
   2701				    OPTROM_BURST_SIZE, optrom, optrom_dma);
   2702				optrom = NULL;
   2703			} else {
   2704				liter += OPTROM_BURST_DWORDS - 1;
   2705				faddr += OPTROM_BURST_DWORDS - 1;
   2706				dwptr += OPTROM_BURST_DWORDS - 1;
   2707				continue;
   2708			}
   2709		}
   2710
   2711		ret = qla82xx_write_flash_dword(ha, faddr,
   2712						le32_to_cpu(*dwptr));
   2713		if (ret) {
   2714			ql_dbg(ql_dbg_p3p, vha, 0xb020,
   2715			    "Unable to program flash address=%x data=%x.\n",
   2716			    faddr, *dwptr);
   2717			break;
   2718		}
   2719	}
   2720
   2721	ret = qla82xx_protect_flash(ha);
   2722	if (ret)
   2723		ql_log(ql_log_warn, vha, 0xb021,
   2724		    "Unable to protect flash after update.\n");
   2725write_done:
   2726	if (optrom)
   2727		dma_free_coherent(&ha->pdev->dev,
   2728		    OPTROM_BURST_SIZE, optrom, optrom_dma);
   2729	return ret;
   2730}
   2731
   2732int
   2733qla82xx_write_optrom_data(struct scsi_qla_host *vha, void *buf,
   2734	uint32_t offset, uint32_t length)
   2735{
   2736	int rval;
   2737
   2738	/* Suspend HBA. */
   2739	scsi_block_requests(vha->host);
   2740	rval = qla82xx_write_flash_data(vha, buf, offset, length >> 2);
   2741	scsi_unblock_requests(vha->host);
   2742
   2743	/* Convert ISP82xx return code to a generic driver status. */
   2744	if (rval)
   2745		rval = QLA_FUNCTION_FAILED;
   2746	else
   2747		rval = QLA_SUCCESS;
   2748	return rval;
   2749}
   2750
   2751void
   2752qla82xx_start_iocbs(scsi_qla_host_t *vha)
   2753{
   2754	struct qla_hw_data *ha = vha->hw;
   2755	struct req_que *req = ha->req_q_map[0];
   2756	uint32_t dbval;
   2757
   2758	/* Adjust ring index. */
   2759	req->ring_index++;
   2760	if (req->ring_index == req->length) {
   2761		req->ring_index = 0;
   2762		req->ring_ptr = req->ring;
   2763	} else
   2764		req->ring_ptr++;
   2765
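	/* Doorbell value layout: 0x04 = request-queue update, PCI function
	 * number at bit 5, queue id at bit 8, new ring index at bit 16.
	 */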
   2766	dbval = 0x04 | (ha->portnum << 5);
   2767
   2768	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
   2769	if (ql2xdbwr)
   2770		qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval);
   2771	else {
   2772		wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
   2773		wmb();
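		/* Re-issue the doorbell write until the adapter's read-back
		 * pointer reflects the new value.
		 */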
   2774		while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
   2775			wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
   2776			wmb();
   2777		}
   2778	}
   2779}
   2780
   2781static void
   2782qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
   2783{
   2784	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
   2785	uint32_t lock_owner = 0;
   2786
   2787	if (qla82xx_rom_lock(ha)) {
   2788		lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
   2789		/* Someone else is holding the lock. */
   2790		ql_log(ql_log_info, vha, 0xb022,
   2791		    "Resetting rom_lock, Lock Owner %u.\n", lock_owner);
   2792	}
   2793	/*
   2794	 * Either we got the lock, or someone
   2795	 * else died while holding it.
   2796	 * In either case, unlock.
   2797	 */
   2798	qla82xx_rom_unlock(ha);
   2799}
   2800
   2801/*
   2802 * qla82xx_device_bootstrap
   2803 *    Initialize device, set DEV_READY, start fw
   2804 *
   2805 * Note:
   2806 *      IDC lock must be held upon entry
   2807 *
   2808 * Return:
   2809 *    Success : 0
   2810 *    Failed  : 1
   2811 */
   2812static int
   2813qla82xx_device_bootstrap(scsi_qla_host_t *vha)
   2814{
   2815	int rval = QLA_SUCCESS;
   2816	int i;
   2817	uint32_t old_count, count;
   2818	struct qla_hw_data *ha = vha->hw;
   2819	int need_reset = 0;
   2820
   2821	need_reset = qla82xx_need_reset(ha);
   2822
   2823	if (need_reset) {
   2824		/* We are trying to perform a recovery here. */
   2825		if (ha->flags.isp82xx_fw_hung)
   2826			qla82xx_rom_lock_recovery(ha);
   2827	} else  {
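		/* No reset pending: watch the PEG alive counter for up to
		 * ~2 seconds (10 x 200 ms) to see if firmware is already running.
		 */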
   2828		old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
   2829		for (i = 0; i < 10; i++) {
   2830			msleep(200);
   2831			count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
   2832			if (count != old_count) {
   2833				rval = QLA_SUCCESS;
   2834				goto dev_ready;
   2835			}
   2836		}
   2837		qla82xx_rom_lock_recovery(ha);
   2838	}
   2839
   2840	/* set to DEV_INITIALIZING */
   2841	ql_log(ql_log_info, vha, 0x009e,
   2842	    "HW State: INITIALIZING.\n");
   2843	qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
   2844
   2845	qla82xx_idc_unlock(ha);
   2846	rval = qla82xx_start_firmware(vha);
   2847	qla82xx_idc_lock(ha);
   2848
   2849	if (rval != QLA_SUCCESS) {
   2850		ql_log(ql_log_fatal, vha, 0x00ad,
   2851		    "HW State: FAILED.\n");
   2852		qla82xx_clear_drv_active(ha);
   2853		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED);
   2854		return rval;
   2855	}
   2856
   2857dev_ready:
   2858	ql_log(ql_log_info, vha, 0x00ae,
   2859	    "HW State: READY.\n");
   2860	qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY);
   2861
   2862	return QLA_SUCCESS;
   2863}
   2864
   2865/*
   2866 * qla82xx_need_qsnt_handler
   2867 *    Code to start quiescence sequence
   2868 *
   2869 * Note:
   2870 *      IDC lock must be held upon entry
   2871 *
   2872 * Return: void
   2873 */
   2874
   2875static void
   2876qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
   2877{
   2878	struct qla_hw_data *ha = vha->hw;
   2879	uint32_t dev_state, drv_state, drv_active;
   2880	unsigned long reset_timeout;
   2881
   2882	if (vha->flags.online) {
   2883		/* Block any further I/O and wait for pending commands to complete. */
   2884		qla2x00_quiesce_io(vha);
   2885	}
   2886
   2887	/* Set the quiescence ready bit */
   2888	qla82xx_set_qsnt_ready(ha);
   2889
   2890	/* Wait up to 30 seconds for the other functions to ack. */
   2891	reset_timeout = jiffies + (30 * HZ);
   2892
   2893	drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
   2894	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
   2895	/* A function writes 2 when it acks quiescence; shift drv_active by one bit to match. */
   2896	drv_active = drv_active << 0x01;
   2897
   2898	while (drv_state != drv_active) {
   2899
   2900		if (time_after_eq(jiffies, reset_timeout)) {
   2901			/* Quiescence timed out: the other functions didn't
   2902			 * ack, so restore the state to DEV_READY.
   2903			 */
   2904			ql_log(ql_log_info, vha, 0xb023,
   2905			    "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
   2906			    "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
   2907			    drv_active, drv_state);
   2908			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
   2909			    QLA8XXX_DEV_READY);
   2910			ql_log(ql_log_info, vha, 0xb025,
   2911			    "HW State: DEV_READY.\n");
   2912			qla82xx_idc_unlock(ha);
   2913			qla2x00_perform_loop_resync(vha);
   2914			qla82xx_idc_lock(ha);
   2915
   2916			qla82xx_clear_qsnt_ready(vha);
   2917			return;
   2918		}
   2919
   2920		qla82xx_idc_unlock(ha);
   2921		msleep(1000);
   2922		qla82xx_idc_lock(ha);
   2923
   2924		drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
   2925		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
   2926		drv_active = drv_active << 0x01;
   2927	}
   2928	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
   2929	/* Everyone acked, so set the state to DEV_QUIESCENT. */
   2930	if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
   2931		ql_log(ql_log_info, vha, 0xb026,
   2932		    "HW State: DEV_QUIESCENT.\n");
   2933		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_QUIESCENT);
   2934	}
   2935}
   2936
   2937/*
   2938 * qla82xx_wait_for_state_change
   2939 *    Wait for device state to change from given current state
   2940 *
   2941 * Note:
   2942 *     IDC lock must not be held upon entry
   2943 *
   2944 * Return:
   2945 *    Changed device state.
   2946 */
   2947uint32_t
   2948qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
   2949{
   2950	struct qla_hw_data *ha = vha->hw;
   2951	uint32_t dev_state;
   2952
   2953	do {
   2954		msleep(1000);
   2955		qla82xx_idc_lock(ha);
   2956		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
   2957		qla82xx_idc_unlock(ha);
   2958	} while (dev_state == curr_state);
   2959
   2960	return dev_state;
   2961}
   2962
   2963void
   2964qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
   2965{
   2966	struct qla_hw_data *ha = vha->hw;
   2967
   2968	/* Disable the board */
   2969	ql_log(ql_log_fatal, vha, 0x00b8,
   2970	    "Disabling the board.\n");
   2971
   2972	if (IS_QLA82XX(ha)) {
   2973		qla82xx_clear_drv_active(ha);
   2974		qla82xx_idc_unlock(ha);
   2975	} else if (IS_QLA8044(ha)) {
   2976		qla8044_clear_drv_active(ha);
   2977		qla8044_idc_unlock(ha);
   2978	}
   2979
   2980	/* Set DEV_FAILED flag to disable timer */
   2981	vha->device_flags |= DFLG_DEV_FAILED;
   2982	qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
   2983	qla2x00_mark_all_devices_lost(vha);
   2984	vha->flags.online = 0;
   2985	vha->flags.init_done = 0;
   2986}
   2987
   2988/*
   2989 * qla82xx_need_reset_handler
   2990 *    Code to start reset sequence
   2991 *
   2992 * Note:
   2993 *      IDC lock must be held upon entry
   2994 *
   2995 * Return:
   2996 *    Success : 0
   2997 *    Failed  : 1
   2998 */
   2999static void
   3000qla82xx_need_reset_handler(scsi_qla_host_t *vha)
   3001{
   3002	uint32_t dev_state, drv_state, drv_active;
   3003	uint32_t active_mask = 0;
   3004	unsigned long reset_timeout;
   3005	struct qla_hw_data *ha = vha->hw;
   3006	struct req_que *req = ha->req_q_map[0];
   3007
   3008	if (vha->flags.online) {
   3009		qla82xx_idc_unlock(ha);
   3010		qla2x00_abort_isp_cleanup(vha);
   3011		ha->isp_ops->get_flash_version(vha, req->ring);
   3012		ha->isp_ops->nvram_config(vha);
   3013		qla82xx_idc_lock(ha);
   3014	}
   3015
   3016	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
   3017	if (!ha->flags.nic_core_reset_owner) {
   3018		ql_dbg(ql_dbg_p3p, vha, 0xb028,
   3019		    "reset_acknowledged by 0x%x\n", ha->portnum);
   3020		qla82xx_set_rst_ready(ha);
   3021	} else {
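		/* This function owns the reset: mask off our own drv_active
		 * bits so we only wait on the other functions' acks.
		 */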
   3022		active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
   3023		drv_active &= active_mask;
   3024		ql_dbg(ql_dbg_p3p, vha, 0xb029,
   3025		    "active_mask: 0x%08x\n", active_mask);
   3026	}
   3027
   3028	/* wait for 10 seconds for reset ack from all functions */
   3029	reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
   3030
   3031	drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
   3032	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
   3033	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
   3034
   3035	ql_dbg(ql_dbg_p3p, vha, 0xb02a,
   3036	    "drv_state: 0x%08x, drv_active: 0x%08x, "
   3037	    "dev_state: 0x%08x, active_mask: 0x%08x\n",
   3038	    drv_state, drv_active, dev_state, active_mask);
   3039
   3040	while (drv_state != drv_active &&
   3041	    dev_state != QLA8XXX_DEV_INITIALIZING) {
   3042		if (time_after_eq(jiffies, reset_timeout)) {
   3043			ql_log(ql_log_warn, vha, 0x00b5,
   3044			    "Reset timeout.\n");
   3045			break;
   3046		}
   3047		qla82xx_idc_unlock(ha);
   3048		msleep(1000);
   3049		qla82xx_idc_lock(ha);
   3050		drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
   3051		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
   3052		if (ha->flags.nic_core_reset_owner)
   3053			drv_active &= active_mask;
   3054		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
   3055	}
   3056
   3057	ql_dbg(ql_dbg_p3p, vha, 0xb02b,
   3058	    "drv_state: 0x%08x, drv_active: 0x%08x, "
   3059	    "dev_state: 0x%08x, active_mask: 0x%08x\n",
   3060	    drv_state, drv_active, dev_state, active_mask);
   3061
   3062	ql_log(ql_log_info, vha, 0x00b6,
   3063	    "Device state is 0x%x = %s.\n",
   3064	    dev_state, qdev_state(dev_state));
   3065
   3066	/* Force to DEV_COLD unless someone else is starting a reset */
   3067	if (dev_state != QLA8XXX_DEV_INITIALIZING &&
   3068	    dev_state != QLA8XXX_DEV_COLD) {
   3069		ql_log(ql_log_info, vha, 0x00b7,
   3070		    "HW State: COLD/RE-INIT.\n");
   3071		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
   3072		qla82xx_set_rst_ready(ha);
   3073		if (ql2xmdenable) {
   3074			if (qla82xx_md_collect(vha))
   3075				ql_log(ql_log_warn, vha, 0xb02c,
   3076				    "Minidump not collected.\n");
   3077		} else
   3078			ql_log(ql_log_warn, vha, 0xb04f,
   3079			    "Minidump disabled.\n");
   3080	}
   3081}
   3082
   3083int
   3084qla82xx_check_md_needed(scsi_qla_host_t *vha)
   3085{
   3086	struct qla_hw_data *ha = vha->hw;
   3087	uint16_t fw_major_version, fw_minor_version, fw_subminor_version;
   3088	int rval = QLA_SUCCESS;
   3089
   3090	fw_major_version = ha->fw_major_version;
   3091	fw_minor_version = ha->fw_minor_version;
   3092	fw_subminor_version = ha->fw_subminor_version;
   3093
   3094	rval = qla2x00_get_fw_version(vha);
   3095	if (rval != QLA_SUCCESS)
   3096		return rval;
   3097
   3098	if (ql2xmdenable) {
   3099		if (!ha->fw_dumped) {
   3100			if ((fw_major_version != ha->fw_major_version ||
   3101			    fw_minor_version != ha->fw_minor_version ||
   3102			    fw_subminor_version != ha->fw_subminor_version) ||
   3103			    (ha->prev_minidump_failed)) {
   3104				ql_dbg(ql_dbg_p3p, vha, 0xb02d,
   3105				    "Firmware version differs. Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n",
   3106				    fw_major_version, fw_minor_version,
   3107				    fw_subminor_version,
   3108				    ha->fw_major_version,
   3109				    ha->fw_minor_version,
   3110				    ha->fw_subminor_version,
   3111				    ha->prev_minidump_failed);
   3112				/* Release MiniDump resources */
   3113				qla82xx_md_free(vha);
   3114				/* Allocate MiniDump resources */
   3115				qla82xx_md_prep(vha);
   3116			}
   3117		} else
   3118			ql_log(ql_log_info, vha, 0xb02e,
   3119			    "Firmware dump available to retrieve\n");
   3120	}
   3121	return rval;
   3122}
   3123
   3124
   3125static int
   3126qla82xx_check_fw_alive(scsi_qla_host_t *vha)
   3127{
   3128	uint32_t fw_heartbeat_counter;
   3129	int status = 0;
   3130
   3131	fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
   3132		QLA82XX_PEG_ALIVE_COUNTER);
   3133	/* all 0xff, assume AER/EEH in progress, ignore */
   3134	if (fw_heartbeat_counter == 0xffffffff) {
   3135		ql_dbg(ql_dbg_timer, vha, 0x6003,
   3136		    "FW heartbeat counter is 0xffffffff, "
   3137		    "returning status=%d.\n", status);
   3138		return status;
   3139	}
   3140	if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
   3141		vha->seconds_since_last_heartbeat++;
   3142		/* FW not alive after 2 seconds */
   3143		if (vha->seconds_since_last_heartbeat == 2) {
   3144			vha->seconds_since_last_heartbeat = 0;
   3145			status = 1;
   3146		}
   3147	} else
   3148		vha->seconds_since_last_heartbeat = 0;
   3149	vha->fw_heartbeat_counter = fw_heartbeat_counter;
   3150	if (status)
   3151		ql_dbg(ql_dbg_timer, vha, 0x6004,
   3152		    "Returning status=%d.\n", status);
   3153	return status;
   3154}
   3155
   3156/*
   3157 * qla82xx_device_state_handler
   3158 *	Main state handler
   3159 *
   3160 * Note:
   3161 *      IDC lock must be held upon entry
   3162 *
   3163 * Return:
   3164 *    Success : 0
   3165 *    Failed  : 1
   3166 */
   3167int
   3168qla82xx_device_state_handler(scsi_qla_host_t *vha)
   3169{
   3170	uint32_t dev_state;
   3171	uint32_t old_dev_state;
   3172	int rval = QLA_SUCCESS;
   3173	unsigned long dev_init_timeout;
   3174	struct qla_hw_data *ha = vha->hw;
   3175	int loopcount = 0;
   3176
   3177	qla82xx_idc_lock(ha);
   3178	if (!vha->flags.init_done) {
   3179		qla82xx_set_drv_active(vha);
   3180		qla82xx_set_idc_version(vha);
   3181	}
   3182
   3183	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
   3184	old_dev_state = dev_state;
   3185	ql_log(ql_log_info, vha, 0x009b,
   3186	    "Device state is 0x%x = %s.\n",
   3187	    dev_state, qdev_state(dev_state));
   3188
   3189	/* wait for 30 seconds for device to go ready */
   3190	dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
   3191
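	/* IDC state machine: keep acting on DEV_STATE until the device
	 * reaches READY or FAILED, or the init timeout expires.
	 */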
   3192	while (1) {
   3193
   3194		if (time_after_eq(jiffies, dev_init_timeout)) {
   3195			ql_log(ql_log_fatal, vha, 0x009c,
   3196			    "Device init failed.\n");
   3197			rval = QLA_FUNCTION_FAILED;
   3198			break;
   3199		}
   3200		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
   3201		if (old_dev_state != dev_state) {
   3202			loopcount = 0;
   3203			old_dev_state = dev_state;
   3204		}
   3205		if (loopcount < 5) {
   3206			ql_log(ql_log_info, vha, 0x009d,
   3207			    "Device state is 0x%x = %s.\n",
   3208			    dev_state, qdev_state(dev_state));
   3209		}
   3210
   3211		switch (dev_state) {
   3212		case QLA8XXX_DEV_READY:
   3213			ha->flags.nic_core_reset_owner = 0;
   3214			goto rel_lock;
   3215		case QLA8XXX_DEV_COLD:
   3216			rval = qla82xx_device_bootstrap(vha);
   3217			break;
   3218		case QLA8XXX_DEV_INITIALIZING:
   3219			qla82xx_idc_unlock(ha);
   3220			msleep(1000);
   3221			qla82xx_idc_lock(ha);
   3222			break;
   3223		case QLA8XXX_DEV_NEED_RESET:
   3224			if (!ql2xdontresethba)
   3225				qla82xx_need_reset_handler(vha);
   3226			else {
   3227				qla82xx_idc_unlock(ha);
   3228				msleep(1000);
   3229				qla82xx_idc_lock(ha);
   3230			}
   3231			dev_init_timeout = jiffies +
   3232			    (ha->fcoe_dev_init_timeout * HZ);
   3233			break;
   3234		case QLA8XXX_DEV_NEED_QUIESCENT:
   3235			qla82xx_need_qsnt_handler(vha);
   3236			/* Reset timeout value after quiescence handler */
   3237			dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout
   3238							 * HZ);
   3239			break;
   3240		case QLA8XXX_DEV_QUIESCENT:
   3241			/* The owner exits here; the others wait for the state
   3242			 * to change.
   3243			 */
   3244			if (ha->flags.quiesce_owner)
   3245				goto rel_lock;
   3246
   3247			qla82xx_idc_unlock(ha);
   3248			msleep(1000);
   3249			qla82xx_idc_lock(ha);
   3250
   3251			/* Reset timeout value after quiescence handler */
   3252			dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout
   3253							 * HZ);
   3254			break;
   3255		case QLA8XXX_DEV_FAILED:
   3256			qla8xxx_dev_failed_handler(vha);
   3257			rval = QLA_FUNCTION_FAILED;
   3258			goto exit;
   3259		default:
   3260			qla82xx_idc_unlock(ha);
   3261			msleep(1000);
   3262			qla82xx_idc_lock(ha);
   3263		}
   3264		loopcount++;
   3265	}
   3266rel_lock:
   3267	qla82xx_idc_unlock(ha);
   3268exit:
   3269	return rval;
   3270}
   3271
   3272static int qla82xx_check_temp(scsi_qla_host_t *vha)
   3273{
   3274	uint32_t temp, temp_state, temp_val;
   3275	struct qla_hw_data *ha = vha->hw;
   3276
   3277	temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
   3278	temp_state = qla82xx_get_temp_state(temp);
   3279	temp_val = qla82xx_get_temp_val(temp);
   3280
   3281	if (temp_state == QLA82XX_TEMP_PANIC) {
   3282		ql_log(ql_log_warn, vha, 0x600e,
   3283		    "Device temperature %d degrees C exceeds "
   3284		    "maximum allowed. Hardware has been shut down.\n",
   3285		    temp_val);
   3286		return 1;
   3287	} else if (temp_state == QLA82XX_TEMP_WARN) {
   3288		ql_log(ql_log_warn, vha, 0x600f,
   3289		    "Device temperature %d degrees C exceeds "
   3290		    "operating range. Immediate action needed.\n",
   3291		    temp_val);
   3292	}
   3293	return 0;
   3294}
   3295
   3296int qla82xx_read_temperature(scsi_qla_host_t *vha)
   3297{
   3298	uint32_t temp;
   3299
   3300	temp = qla82xx_rd_32(vha->hw, CRB_TEMP_STATE);
   3301	return qla82xx_get_temp_val(temp);
   3302}
   3303
   3304void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
   3305{
   3306	struct qla_hw_data *ha = vha->hw;
   3307
   3308	if (ha->flags.mbox_busy) {
   3309		ha->flags.mbox_int = 1;
   3310		ha->flags.mbox_busy = 0;
   3311		ql_log(ql_log_warn, vha, 0x6010,
   3312		    "Doing premature completion of mbx command.\n");
   3313		if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
   3314			complete(&ha->mbx_intr_comp);
   3315	}
   3316}
   3317
   3318void qla82xx_watchdog(scsi_qla_host_t *vha)
   3319{
   3320	uint32_t dev_state, halt_status;
   3321	struct qla_hw_data *ha = vha->hw;
   3322
   3323	/* don't poll if reset is going on */
   3324	if (!ha->flags.nic_core_reset_hdlr_active) {
   3325		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
   3326		if (qla82xx_check_temp(vha)) {
   3327			set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
   3328			ha->flags.isp82xx_fw_hung = 1;
   3329			qla82xx_clear_pending_mbx(vha);
   3330		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
   3331		    !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
   3332			ql_log(ql_log_warn, vha, 0x6001,
   3333			    "Adapter reset needed.\n");
   3334			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
   3335		} else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
   3336			!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
   3337			ql_log(ql_log_warn, vha, 0x6002,
   3338			    "Quiescent needed.\n");
   3339			set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
   3340		} else if (dev_state == QLA8XXX_DEV_FAILED &&
   3341			!test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) &&
   3342			vha->flags.online == 1) {
   3343			ql_log(ql_log_warn, vha, 0xb055,
   3344			    "Adapter state is failed. Offlining.\n");
   3345			set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
   3346			ha->flags.isp82xx_fw_hung = 1;
   3347			qla82xx_clear_pending_mbx(vha);
   3348		} else {
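			/* No state change requested: check the firmware
			 * heartbeat and, if it stalls, dump the peg registers
			 * and schedule recovery.
			 */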
   3349			if (qla82xx_check_fw_alive(vha)) {
   3350				ql_dbg(ql_dbg_timer, vha, 0x6011,
   3351				    "disabling pause transmit on port 0 & 1.\n");
   3352				qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
   3353				    CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
   3354				halt_status = qla82xx_rd_32(ha,
   3355				    QLA82XX_PEG_HALT_STATUS1);
   3356				ql_log(ql_log_info, vha, 0x6005,
   3357				    "dumping hw/fw registers:\n "
   3358				    " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n "
   3359				    " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n "
   3360				    " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n "
   3361				    " PEG_NET_4_PC: 0x%x.\n", halt_status,
   3362				    qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
   3363				    qla82xx_rd_32(ha,
   3364					    QLA82XX_CRB_PEG_NET_0 + 0x3c),
   3365				    qla82xx_rd_32(ha,
   3366					    QLA82XX_CRB_PEG_NET_1 + 0x3c),
   3367				    qla82xx_rd_32(ha,
   3368					    QLA82XX_CRB_PEG_NET_2 + 0x3c),
   3369				    qla82xx_rd_32(ha,
   3370					    QLA82XX_CRB_PEG_NET_3 + 0x3c),
   3371				    qla82xx_rd_32(ha,
   3372					    QLA82XX_CRB_PEG_NET_4 + 0x3c));
   3373				if (((halt_status & 0x1fffff00) >> 8) == 0x67)
   3374					ql_log(ql_log_warn, vha, 0xb052,
   3375					    "Firmware aborted with "
   3376					    "error code 0x00006700. Device is "
   3377					    "being reset.\n");
   3378				if (halt_status & HALT_STATUS_UNRECOVERABLE) {
   3379					set_bit(ISP_UNRECOVERABLE,
   3380					    &vha->dpc_flags);
   3381				} else {
   3382					ql_log(ql_log_info, vha, 0x6006,
   3383					    "Detect abort needed.\n");
   3384					set_bit(ISP_ABORT_NEEDED,
   3385					    &vha->dpc_flags);
   3386				}
   3387				ha->flags.isp82xx_fw_hung = 1;
   3388				ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
   3389				qla82xx_clear_pending_mbx(vha);
   3390			}
   3391		}
   3392	}
   3393}
   3394
   3395int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
   3396{
   3397	int rval = -1;
   3398	struct qla_hw_data *ha = vha->hw;
   3399
   3400	if (IS_QLA82XX(ha))
   3401		rval = qla82xx_device_state_handler(vha);
   3402	else if (IS_QLA8044(ha)) {
   3403		qla8044_idc_lock(ha);
   3404		/* Decide the reset ownership */
   3405		qla83xx_reset_ownership(vha);
   3406		qla8044_idc_unlock(ha);
   3407		rval = qla8044_device_state_handler(vha);
   3408	}
   3409	return rval;
   3410}
   3411
   3412void
   3413qla82xx_set_reset_owner(scsi_qla_host_t *vha)
   3414{
   3415	struct qla_hw_data *ha = vha->hw;
   3416	uint32_t dev_state = 0;
   3417
   3418	if (IS_QLA82XX(ha))
   3419		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
   3420	else if (IS_QLA8044(ha))
   3421		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
   3422
   3423	if (dev_state == QLA8XXX_DEV_READY) {
   3424		ql_log(ql_log_info, vha, 0xb02f,
   3425		    "HW State: NEED RESET\n");
   3426		if (IS_QLA82XX(ha)) {
   3427			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
   3428			    QLA8XXX_DEV_NEED_RESET);
   3429			ha->flags.nic_core_reset_owner = 1;
   3430			ql_dbg(ql_dbg_p3p, vha, 0xb030,
   3431			    "reset_owner is 0x%x\n", ha->portnum);
   3432		} else if (IS_QLA8044(ha))
   3433			qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
   3434			    QLA8XXX_DEV_NEED_RESET);
   3435	} else
   3436		ql_log(ql_log_info, vha, 0xb031,
   3437		    "Device state is 0x%x = %s.\n",
   3438		    dev_state, qdev_state(dev_state));
   3439}
   3440
   3441/*
   3442 *  qla82xx_abort_isp
    3443 *      Resets the ISP and aborts all outstanding commands.
    3444 *
    3445 * Input:
    3446 *      vha          = adapter block pointer.
   3447 *
   3448 * Returns:
   3449 *      0 = success
   3450 */
   3451int
   3452qla82xx_abort_isp(scsi_qla_host_t *vha)
   3453{
   3454	int rval = -1;
   3455	struct qla_hw_data *ha = vha->hw;
   3456
   3457	if (vha->device_flags & DFLG_DEV_FAILED) {
   3458		ql_log(ql_log_warn, vha, 0x8024,
   3459		    "Device in failed state, exiting.\n");
   3460		return QLA_SUCCESS;
   3461	}
   3462	ha->flags.nic_core_reset_hdlr_active = 1;
   3463
   3464	qla82xx_idc_lock(ha);
   3465	qla82xx_set_reset_owner(vha);
   3466	qla82xx_idc_unlock(ha);
   3467
   3468	if (IS_QLA82XX(ha))
   3469		rval = qla82xx_device_state_handler(vha);
   3470	else if (IS_QLA8044(ha)) {
   3471		qla8044_idc_lock(ha);
   3472		/* Decide the reset ownership */
   3473		qla83xx_reset_ownership(vha);
   3474		qla8044_idc_unlock(ha);
   3475		rval = qla8044_device_state_handler(vha);
   3476	}
   3477
   3478	qla82xx_idc_lock(ha);
   3479	qla82xx_clear_rst_ready(ha);
   3480	qla82xx_idc_unlock(ha);
   3481
   3482	if (rval == QLA_SUCCESS) {
   3483		ha->flags.isp82xx_fw_hung = 0;
   3484		ha->flags.nic_core_reset_hdlr_active = 0;
   3485		qla82xx_restart_isp(vha);
   3486	}
   3487
   3488	if (rval) {
   3489		vha->flags.online = 1;
   3490		if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
   3491			if (ha->isp_abort_cnt == 0) {
   3492				ql_log(ql_log_warn, vha, 0x8027,
    3493				    "ISP error recovery failed - board "
   3494				    "disabled.\n");
   3495				/*
   3496				 * The next call disables the board
   3497				 * completely.
   3498				 */
   3499				ha->isp_ops->reset_adapter(vha);
   3500				vha->flags.online = 0;
   3501				clear_bit(ISP_ABORT_RETRY,
   3502				    &vha->dpc_flags);
   3503				rval = QLA_SUCCESS;
   3504			} else { /* schedule another ISP abort */
   3505				ha->isp_abort_cnt--;
   3506				ql_log(ql_log_warn, vha, 0x8036,
   3507				    "ISP abort - retry remaining %d.\n",
   3508				    ha->isp_abort_cnt);
   3509				rval = QLA_FUNCTION_FAILED;
   3510			}
   3511		} else {
   3512			ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
   3513			ql_dbg(ql_dbg_taskm, vha, 0x8029,
   3514			    "ISP error recovery - retrying (%d) more times.\n",
   3515			    ha->isp_abort_cnt);
   3516			set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
   3517			rval = QLA_FUNCTION_FAILED;
   3518		}
   3519	}
   3520	return rval;
   3521}
   3522
   3523/*
   3524 *  qla82xx_fcoe_ctx_reset
    3525 *      Performs a quick reset and aborts all outstanding commands.
    3526 *      This only performs an FCoE context reset and avoids a full-blown
    3527 *      chip reset.
    3528 *
    3529 * Input:
    3530 *      vha = adapter block pointer.
   3532 *
   3533 * Returns:
   3534 *      0 = success
   3535 */
   3536int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
   3537{
   3538	int rval = QLA_FUNCTION_FAILED;
   3539
   3540	if (vha->flags.online) {
    3541		/* Abort all outstanding commands so they can be requeued later */
   3542		qla2x00_abort_isp_cleanup(vha);
   3543	}
   3544
   3545	/* Stop currently executing firmware.
    3546	 * This will destroy the existing FCoE context at the F/W end.
   3547	 */
   3548	qla2x00_try_to_stop_firmware(vha);
   3549
   3550	/* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
   3551	rval = qla82xx_restart_isp(vha);
   3552
   3553	return rval;
   3554}
   3555
   3556/*
   3557 * qla2x00_wait_for_fcoe_ctx_reset
    3558 *    Wait until the FCoE context is reset.
   3559 *
   3560 * Note:
   3561 *    Does context switching here.
   3562 *    Release SPIN_LOCK (if any) before calling this routine.
   3563 *
   3564 * Return:
   3565 *    Success (fcoe_ctx reset is done) : 0
    3566 *    Failed  (fcoe_ctx reset not completed within max loop timeout): 1
   3567 */
   3568int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
   3569{
   3570	int status = QLA_FUNCTION_FAILED;
   3571	unsigned long wait_reset;
   3572
   3573	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
   3574	while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
   3575	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
   3576	    && time_before(jiffies, wait_reset)) {
   3577
   3578		set_current_state(TASK_UNINTERRUPTIBLE);
   3579		schedule_timeout(HZ);
   3580
   3581		if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
   3582		    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
   3583			status = QLA_SUCCESS;
   3584			break;
   3585		}
   3586	}
   3587	ql_dbg(ql_dbg_p3p, vha, 0xb027,
   3588	       "%s: status=%d.\n", __func__, status);
   3589
   3590	return status;
   3591}
   3592
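/*
 * qla82xx_chip_reset_cleanup
 *     Re-check whether the firmware is still alive and, if so, abort
 *     all outstanding commands via mailbox and wait for pending
 *     commands to complete, so the subsequent chip reset starts from a
 *     quiesced state.
 */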
   3593void
   3594qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
   3595{
   3596	int i, fw_state = 0;
   3597	unsigned long flags;
   3598	struct qla_hw_data *ha = vha->hw;
   3599
    3600	/* Check if 82XX firmware is alive or not.
    3601	 * We may have arrived here from NEED_RESET
    3602	 * detection only.
    3603	 */
   3604	if (!ha->flags.isp82xx_fw_hung) {
   3605		for (i = 0; i < 2; i++) {
   3606			msleep(1000);
   3607			if (IS_QLA82XX(ha))
   3608				fw_state = qla82xx_check_fw_alive(vha);
   3609			else if (IS_QLA8044(ha))
   3610				fw_state = qla8044_check_fw_alive(vha);
   3611			if (fw_state) {
   3612				ha->flags.isp82xx_fw_hung = 1;
   3613				qla82xx_clear_pending_mbx(vha);
   3614				break;
   3615			}
   3616		}
   3617	}
   3618	ql_dbg(ql_dbg_init, vha, 0x00b0,
   3619	    "Entered %s fw_hung=%d.\n",
   3620	    __func__, ha->flags.isp82xx_fw_hung);
   3621
   3622	/* Abort all commands gracefully if fw NOT hung */
   3623	if (!ha->flags.isp82xx_fw_hung) {
   3624		int cnt, que;
   3625		srb_t *sp;
   3626		struct req_que *req;
   3627
   3628		spin_lock_irqsave(&ha->hardware_lock, flags);
   3629		for (que = 0; que < ha->max_req_queues; que++) {
   3630			req = ha->req_q_map[que];
   3631			if (!req)
   3632				continue;
   3633			for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
   3634				sp = req->outstanding_cmds[cnt];
   3635				if (sp) {
   3636					if ((!sp->u.scmd.crc_ctx ||
   3637					    (sp->flags &
   3638						SRB_FCP_CMND_DMA_VALID)) &&
   3639						!ha->flags.isp82xx_fw_hung) {
   3640						spin_unlock_irqrestore(
   3641						    &ha->hardware_lock, flags);
   3642						if (ha->isp_ops->abort_command(sp)) {
   3643							ql_log(ql_log_info, vha,
   3644							    0x00b1,
   3645							    "mbx abort failed.\n");
   3646						} else {
   3647							ql_log(ql_log_info, vha,
   3648							    0x00b2,
   3649							    "mbx abort success.\n");
   3650						}
   3651						spin_lock_irqsave(&ha->hardware_lock, flags);
   3652					}
   3653				}
   3654			}
   3655		}
   3656		spin_unlock_irqrestore(&ha->hardware_lock, flags);
   3657
   3658		/* Wait for pending cmds (physical and virtual) to complete */
   3659		if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
   3660		    WAIT_HOST) == QLA_SUCCESS) {
   3661			ql_dbg(ql_dbg_init, vha, 0x00b3,
    3662			    "Done waiting for "
    3663			    "pending commands.\n");
   3664		} else {
   3665			WARN_ON_ONCE(true);
   3666		}
   3667	}
   3668}
   3669
   3670/* Minidump related functions */
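/*
 * Process a CNTRL template entry: execute the encoded CRB opcode
 * sequence (write, read/modify/write, AND/OR, poll, and saved-state
 * read/write/modify operations), advancing crb_addr by the entry's
 * stride each iteration.  Fails if a poll operation times out.
 */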
   3671static int
   3672qla82xx_minidump_process_control(scsi_qla_host_t *vha,
   3673	qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
   3674{
   3675	struct qla_hw_data *ha = vha->hw;
   3676	struct qla82xx_md_entry_crb *crb_entry;
   3677	uint32_t read_value, opcode, poll_time;
   3678	uint32_t addr, index, crb_addr;
   3679	unsigned long wtime;
   3680	struct qla82xx_md_template_hdr *tmplt_hdr;
   3681	uint32_t rval = QLA_SUCCESS;
   3682	int i;
   3683
   3684	tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
   3685	crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr;
   3686	crb_addr = crb_entry->addr;
   3687
   3688	for (i = 0; i < crb_entry->op_count; i++) {
   3689		opcode = crb_entry->crb_ctrl.opcode;
   3690		if (opcode & QLA82XX_DBG_OPCODE_WR) {
   3691			qla82xx_md_rw_32(ha, crb_addr,
   3692			    crb_entry->value_1, 1);
   3693			opcode &= ~QLA82XX_DBG_OPCODE_WR;
   3694		}
   3695
   3696		if (opcode & QLA82XX_DBG_OPCODE_RW) {
   3697			read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
   3698			qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
   3699			opcode &= ~QLA82XX_DBG_OPCODE_RW;
   3700		}
   3701
   3702		if (opcode & QLA82XX_DBG_OPCODE_AND) {
   3703			read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
   3704			read_value &= crb_entry->value_2;
   3705			opcode &= ~QLA82XX_DBG_OPCODE_AND;
   3706			if (opcode & QLA82XX_DBG_OPCODE_OR) {
   3707				read_value |= crb_entry->value_3;
   3708				opcode &= ~QLA82XX_DBG_OPCODE_OR;
   3709			}
   3710			qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
   3711		}
   3712
   3713		if (opcode & QLA82XX_DBG_OPCODE_OR) {
   3714			read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
   3715			read_value |= crb_entry->value_3;
   3716			qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
   3717			opcode &= ~QLA82XX_DBG_OPCODE_OR;
   3718		}
   3719
   3720		if (opcode & QLA82XX_DBG_OPCODE_POLL) {
   3721			poll_time = crb_entry->crb_strd.poll_timeout;
   3722			wtime = jiffies + poll_time;
   3723			read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
   3724
   3725			do {
   3726				if ((read_value & crb_entry->value_2)
   3727				    == crb_entry->value_1)
   3728					break;
   3729				else if (time_after_eq(jiffies, wtime)) {
   3730					/* capturing dump failed */
   3731					rval = QLA_FUNCTION_FAILED;
   3732					break;
   3733				} else
   3734					read_value = qla82xx_md_rw_32(ha,
   3735					    crb_addr, 0, 0);
   3736			} while (1);
   3737			opcode &= ~QLA82XX_DBG_OPCODE_POLL;
   3738		}
   3739
   3740		if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
   3741			if (crb_entry->crb_strd.state_index_a) {
   3742				index = crb_entry->crb_strd.state_index_a;
   3743				addr = tmplt_hdr->saved_state_array[index];
   3744			} else
   3745				addr = crb_addr;
   3746
   3747			read_value = qla82xx_md_rw_32(ha, addr, 0, 0);
   3748			index = crb_entry->crb_ctrl.state_index_v;
   3749			tmplt_hdr->saved_state_array[index] = read_value;
   3750			opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
   3751		}
   3752
   3753		if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
   3754			if (crb_entry->crb_strd.state_index_a) {
   3755				index = crb_entry->crb_strd.state_index_a;
   3756				addr = tmplt_hdr->saved_state_array[index];
   3757			} else
   3758				addr = crb_addr;
   3759
   3760			if (crb_entry->crb_ctrl.state_index_v) {
   3761				index = crb_entry->crb_ctrl.state_index_v;
   3762				read_value =
   3763				    tmplt_hdr->saved_state_array[index];
   3764			} else
   3765				read_value = crb_entry->value_1;
   3766
   3767			qla82xx_md_rw_32(ha, addr, read_value, 1);
   3768			opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
   3769		}
   3770
   3771		if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
   3772			index = crb_entry->crb_ctrl.state_index_v;
   3773			read_value = tmplt_hdr->saved_state_array[index];
   3774			read_value <<= crb_entry->crb_ctrl.shl;
   3775			read_value >>= crb_entry->crb_ctrl.shr;
   3776			if (crb_entry->value_2)
   3777				read_value &= crb_entry->value_2;
   3778			read_value |= crb_entry->value_3;
   3779			read_value += crb_entry->value_1;
   3780			tmplt_hdr->saved_state_array[index] = read_value;
   3781			opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
   3782		}
   3783		crb_addr += crb_entry->crb_strd.addr_stride;
   3784	}
   3785	return rval;
   3786}
   3787
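/* Process an RDOCM entry: dump on-chip memory by reading directly
 * through the PCI BAR at the given address stride.
 */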
   3788static void
   3789qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
   3790	qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
   3791{
   3792	struct qla_hw_data *ha = vha->hw;
   3793	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
   3794	struct qla82xx_md_entry_rdocm *ocm_hdr;
   3795	__le32 *data_ptr = *d_ptr;
   3796
   3797	ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr;
   3798	r_addr = ocm_hdr->read_addr;
   3799	r_stride = ocm_hdr->read_addr_stride;
   3800	loop_cnt = ocm_hdr->op_count;
   3801
   3802	for (i = 0; i < loop_cnt; i++) {
   3803		r_value = rd_reg_dword(r_addr + ha->nx_pcibase);
   3804		*data_ptr++ = cpu_to_le32(r_value);
   3805		r_addr += r_stride;
   3806	}
   3807	*d_ptr = data_ptr;
   3808}
   3809
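/* Process an RDMUX entry: for each select value, program the mux
 * select register and capture the (select, data) pair from read_addr.
 */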
   3810static void
   3811qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
   3812	qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
   3813{
   3814	struct qla_hw_data *ha = vha->hw;
   3815	uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
   3816	struct qla82xx_md_entry_mux *mux_hdr;
   3817	__le32 *data_ptr = *d_ptr;
   3818
   3819	mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr;
   3820	r_addr = mux_hdr->read_addr;
   3821	s_addr = mux_hdr->select_addr;
   3822	s_stride = mux_hdr->select_value_stride;
   3823	s_value = mux_hdr->select_value;
   3824	loop_cnt = mux_hdr->op_count;
   3825
   3826	for (i = 0; i < loop_cnt; i++) {
   3827		qla82xx_md_rw_32(ha, s_addr, s_value, 1);
   3828		r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
   3829		*data_ptr++ = cpu_to_le32(s_value);
   3830		*data_ptr++ = cpu_to_le32(r_value);
   3831		s_value += s_stride;
   3832	}
   3833	*d_ptr = data_ptr;
   3834}
   3835
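/* Process an RDCRB entry: dump a range of CRB registers, storing
 * (address, value) pairs at the configured address stride.
 */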
   3836static void
   3837qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
   3838	qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
   3839{
   3840	struct qla_hw_data *ha = vha->hw;
   3841	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
   3842	struct qla82xx_md_entry_crb *crb_hdr;
   3843	__le32 *data_ptr = *d_ptr;
   3844
   3845	crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr;
   3846	r_addr = crb_hdr->addr;
   3847	r_stride = crb_hdr->crb_strd.addr_stride;
   3848	loop_cnt = crb_hdr->op_count;
   3849
   3850	for (i = 0; i < loop_cnt; i++) {
   3851		r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
   3852		*data_ptr++ = cpu_to_le32(r_addr);
   3853		*data_ptr++ = cpu_to_le32(r_value);
   3854		r_addr += r_stride;
   3855	}
   3856	*d_ptr = data_ptr;
   3857}
   3858
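/*
 * Process an L2 cache entry (L2DTG/L2ITG/L2DAT/L2INS): for each tag,
 * program the tag register, optionally kick the control register and
 * poll until it goes idle, then read out the cache line words.
 * Fails if the control poll times out.
 */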
   3859static int
   3860qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
   3861	qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
   3862{
   3863	struct qla_hw_data *ha = vha->hw;
   3864	uint32_t addr, r_addr, c_addr, t_r_addr;
   3865	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
   3866	unsigned long p_wait, w_time, p_mask;
   3867	uint32_t c_value_w, c_value_r;
   3868	struct qla82xx_md_entry_cache *cache_hdr;
   3869	int rval = QLA_FUNCTION_FAILED;
   3870	__le32 *data_ptr = *d_ptr;
   3871
   3872	cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
   3873	loop_count = cache_hdr->op_count;
   3874	r_addr = cache_hdr->read_addr;
   3875	c_addr = cache_hdr->control_addr;
   3876	c_value_w = cache_hdr->cache_ctrl.write_value;
   3877
   3878	t_r_addr = cache_hdr->tag_reg_addr;
   3879	t_value = cache_hdr->addr_ctrl.init_tag_value;
   3880	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
   3881	p_wait = cache_hdr->cache_ctrl.poll_wait;
   3882	p_mask = cache_hdr->cache_ctrl.poll_mask;
   3883
   3884	for (i = 0; i < loop_count; i++) {
   3885		qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
   3886		if (c_value_w)
   3887			qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
   3888
   3889		if (p_mask) {
   3890			w_time = jiffies + p_wait;
   3891			do {
   3892				c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0);
   3893				if ((c_value_r & p_mask) == 0)
   3894					break;
   3895				else if (time_after_eq(jiffies, w_time)) {
   3896					/* capturing dump failed */
   3897					ql_dbg(ql_dbg_p3p, vha, 0xb032,
   3898					    "c_value_r: 0x%x, poll_mask: 0x%lx, "
   3899					    "w_time: 0x%lx\n",
   3900					    c_value_r, p_mask, w_time);
   3901					return rval;
   3902				}
   3903			} while (1);
   3904		}
   3905
   3906		addr = r_addr;
   3907		for (k = 0; k < r_cnt; k++) {
   3908			r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
   3909			*data_ptr++ = cpu_to_le32(r_value);
   3910			addr += cache_hdr->read_ctrl.read_addr_stride;
   3911		}
   3912		t_value += cache_hdr->addr_ctrl.tag_value_stride;
   3913	}
   3914	*d_ptr = data_ptr;
   3915	return QLA_SUCCESS;
   3916}
   3917
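/* Process an L1 cache entry (L1DAT/L1INS): like the L2 dump, but the
 * control write needs no completion polling.
 */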
   3918static void
   3919qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
   3920	qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
   3921{
   3922	struct qla_hw_data *ha = vha->hw;
   3923	uint32_t addr, r_addr, c_addr, t_r_addr;
   3924	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
   3925	uint32_t c_value_w;
   3926	struct qla82xx_md_entry_cache *cache_hdr;
   3927	__le32 *data_ptr = *d_ptr;
   3928
   3929	cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
   3930	loop_count = cache_hdr->op_count;
   3931	r_addr = cache_hdr->read_addr;
   3932	c_addr = cache_hdr->control_addr;
   3933	c_value_w = cache_hdr->cache_ctrl.write_value;
   3934
   3935	t_r_addr = cache_hdr->tag_reg_addr;
   3936	t_value = cache_hdr->addr_ctrl.init_tag_value;
   3937	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
   3938
   3939	for (i = 0; i < loop_count; i++) {
   3940		qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
   3941		qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
   3942		addr = r_addr;
   3943		for (k = 0; k < r_cnt; k++) {
   3944			r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
   3945			*data_ptr++ = cpu_to_le32(r_value);
   3946			addr += cache_hdr->read_ctrl.read_addr_stride;
   3947		}
   3948		t_value += cache_hdr->addr_ctrl.tag_value_stride;
   3949	}
   3950	*d_ptr = data_ptr;
   3951}
   3952
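/* Process a QUEUE entry: for each queue id, program the queue select
 * register and read out the per-queue registers at the given stride.
 */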
   3953static void
   3954qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
   3955	qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
   3956{
   3957	struct qla_hw_data *ha = vha->hw;
   3958	uint32_t s_addr, r_addr;
   3959	uint32_t r_stride, r_value, r_cnt, qid = 0;
   3960	uint32_t i, k, loop_cnt;
   3961	struct qla82xx_md_entry_queue *q_hdr;
   3962	__le32 *data_ptr = *d_ptr;
   3963
   3964	q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr;
   3965	s_addr = q_hdr->select_addr;
   3966	r_cnt = q_hdr->rd_strd.read_addr_cnt;
   3967	r_stride = q_hdr->rd_strd.read_addr_stride;
   3968	loop_cnt = q_hdr->op_count;
   3969
   3970	for (i = 0; i < loop_cnt; i++) {
   3971		qla82xx_md_rw_32(ha, s_addr, qid, 1);
   3972		r_addr = q_hdr->read_addr;
   3973		for (k = 0; k < r_cnt; k++) {
   3974			r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
   3975			*data_ptr++ = cpu_to_le32(r_value);
   3976			r_addr += r_stride;
   3977		}
   3978		qid += q_hdr->q_strd.queue_id_stride;
   3979	}
   3980	*d_ptr = data_ptr;
   3981}
   3982
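/* Process an RDROM/BOARD entry: read flash contents a dword at a time
 * through the direct ROM window (64K window select plus offset read).
 */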
   3983static void
   3984qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
   3985	qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
   3986{
   3987	struct qla_hw_data *ha = vha->hw;
   3988	uint32_t r_addr, r_value;
   3989	uint32_t i, loop_cnt;
   3990	struct qla82xx_md_entry_rdrom *rom_hdr;
   3991	__le32 *data_ptr = *d_ptr;
   3992
   3993	rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr;
   3994	r_addr = rom_hdr->read_addr;
   3995	loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
   3996
   3997	for (i = 0; i < loop_cnt; i++) {
   3998		qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
   3999		    (r_addr & 0xFFFF0000), 1);
   4000		r_value = qla82xx_md_rw_32(ha,
   4001		    MD_DIRECT_ROM_READ_BASE +
   4002		    (r_addr & 0x0000FFFF), 0, 0);
   4003		*data_ptr++ = cpu_to_le32(r_value);
   4004		r_addr += sizeof(uint32_t);
   4005	}
   4006	*d_ptr = data_ptr;
   4007}
   4008
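/*
 * Process an RDMEM entry: dump a memory region 16 bytes at a time
 * through the MIU test agent (program the address, start the agent,
 * poll for completion, then read the four data registers).  The region
 * must be 16-byte aligned and a multiple of 16 bytes long.
 */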
   4009static int
   4010qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
   4011	qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
   4012{
   4013	struct qla_hw_data *ha = vha->hw;
   4014	uint32_t r_addr, r_value, r_data;
   4015	uint32_t i, j, loop_cnt;
   4016	struct qla82xx_md_entry_rdmem *m_hdr;
   4017	unsigned long flags;
   4018	int rval = QLA_FUNCTION_FAILED;
   4019	__le32 *data_ptr = *d_ptr;
   4020
   4021	m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr;
   4022	r_addr = m_hdr->read_addr;
   4023	loop_cnt = m_hdr->read_data_size/16;
   4024
   4025	if (r_addr & 0xf) {
   4026		ql_log(ql_log_warn, vha, 0xb033,
    4027		    "Read addr 0x%x not 16-byte aligned\n", r_addr);
   4028		return rval;
   4029	}
   4030
   4031	if (m_hdr->read_data_size % 16) {
   4032		ql_log(ql_log_warn, vha, 0xb034,
    4033		    "Read data[0x%x] not a multiple of 16 bytes\n",
   4034		    m_hdr->read_data_size);
   4035		return rval;
   4036	}
   4037
   4038	ql_dbg(ql_dbg_p3p, vha, 0xb035,
   4039	    "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
   4040	    __func__, r_addr, m_hdr->read_data_size, loop_cnt);
   4041
   4042	write_lock_irqsave(&ha->hw_lock, flags);
   4043	for (i = 0; i < loop_cnt; i++) {
   4044		qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
   4045		r_value = 0;
   4046		qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
   4047		r_value = MIU_TA_CTL_ENABLE;
   4048		qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
   4049		r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
   4050		qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
   4051
   4052		for (j = 0; j < MAX_CTL_CHECK; j++) {
   4053			r_value = qla82xx_md_rw_32(ha,
   4054			    MD_MIU_TEST_AGT_CTRL, 0, 0);
   4055			if ((r_value & MIU_TA_CTL_BUSY) == 0)
   4056				break;
   4057		}
   4058
   4059		if (j >= MAX_CTL_CHECK) {
   4060			printk_ratelimited(KERN_ERR
   4061			    "failed to read through agent\n");
   4062			write_unlock_irqrestore(&ha->hw_lock, flags);
   4063			return rval;
   4064		}
   4065
   4066		for (j = 0; j < 4; j++) {
   4067			r_data = qla82xx_md_rw_32(ha,
   4068			    MD_MIU_TEST_AGT_RDDATA[j], 0, 0);
   4069			*data_ptr++ = cpu_to_le32(r_data);
   4070		}
   4071		r_addr += 16;
   4072	}
   4073	write_unlock_irqrestore(&ha->hw_lock, flags);
   4074	*d_ptr = data_ptr;
   4075	return QLA_SUCCESS;
   4076}
   4077
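/*
 * Sum the minidump template as 32-bit words into a 64-bit accumulator,
 * fold the carries back in and return the one's complement; a return
 * value of zero means the template checksum is valid.
 */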
   4078int
   4079qla82xx_validate_template_chksum(scsi_qla_host_t *vha)
   4080{
   4081	struct qla_hw_data *ha = vha->hw;
   4082	uint64_t chksum = 0;
   4083	uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr;
   4084	int count = ha->md_template_size/sizeof(uint32_t);
   4085
   4086	while (count-- > 0)
   4087		chksum += *d_ptr++;
   4088	while (chksum >> 32)
   4089		chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32);
   4090	return ~chksum;
   4091}
   4092
   4093static void
   4094qla82xx_mark_entry_skipped(scsi_qla_host_t *vha,
   4095	qla82xx_md_entry_hdr_t *entry_hdr, int index)
   4096{
   4097	entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
   4098	ql_dbg(ql_dbg_p3p, vha, 0xb036,
   4099	    "Skipping entry[%d]: "
   4100	    "ETYPE[0x%x]-ELEVEL[0x%x]\n",
   4101	    index, entry_hdr->entry_type,
   4102	    entry_hdr->d_ctrl.entry_capture_mask);
   4103}
   4104
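/*
 * qla82xx_md_collect
 *     Capture the firmware minidump: validate the template, then walk
 *     its entry headers and dispatch each entry to the matching
 *     handler above, accumulating data into ha->md_dump.  The capture
 *     fails if the collected size does not match the size computed at
 *     allocation time.
 */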
   4105int
   4106qla82xx_md_collect(scsi_qla_host_t *vha)
   4107{
   4108	struct qla_hw_data *ha = vha->hw;
   4109	int no_entry_hdr = 0;
   4110	qla82xx_md_entry_hdr_t *entry_hdr;
   4111	struct qla82xx_md_template_hdr *tmplt_hdr;
   4112	__le32 *data_ptr;
   4113	uint32_t total_data_size = 0, f_capture_mask, data_collected = 0;
   4114	int i = 0, rval = QLA_FUNCTION_FAILED;
   4115
   4116	tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
   4117	data_ptr = ha->md_dump;
   4118
   4119	if (ha->fw_dumped) {
   4120		ql_log(ql_log_warn, vha, 0xb037,
   4121		    "Firmware has been previously dumped (%p) "
   4122		    "-- ignoring request.\n", ha->fw_dump);
   4123		goto md_failed;
   4124	}
   4125
   4126	ha->fw_dumped = false;
   4127
   4128	if (!ha->md_tmplt_hdr || !ha->md_dump) {
   4129		ql_log(ql_log_warn, vha, 0xb038,
   4130		    "Memory not allocated for minidump capture\n");
   4131		goto md_failed;
   4132	}
   4133
   4134	if (ha->flags.isp82xx_no_md_cap) {
   4135		ql_log(ql_log_warn, vha, 0xb054,
   4136		    "Forced reset from application, "
    4137		    "ignoring minidump capture\n");
   4138		ha->flags.isp82xx_no_md_cap = 0;
   4139		goto md_failed;
   4140	}
   4141
   4142	if (qla82xx_validate_template_chksum(vha)) {
   4143		ql_log(ql_log_info, vha, 0xb039,
   4144		    "Template checksum validation error\n");
   4145		goto md_failed;
   4146	}
   4147
   4148	no_entry_hdr = tmplt_hdr->num_of_entries;
   4149	ql_dbg(ql_dbg_p3p, vha, 0xb03a,
   4150	    "No of entry headers in Template: 0x%x\n", no_entry_hdr);
   4151
   4152	ql_dbg(ql_dbg_p3p, vha, 0xb03b,
   4153	    "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
   4154
   4155	f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
   4156
   4157	/* Validate whether required debug level is set */
   4158	if ((f_capture_mask & 0x3) != 0x3) {
   4159		ql_log(ql_log_warn, vha, 0xb03c,
   4160		    "Minimum required capture mask[0x%x] level not set\n",
   4161		    f_capture_mask);
   4162		goto md_failed;
   4163	}
   4164	tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
   4165
   4166	tmplt_hdr->driver_info[0] = vha->host_no;
   4167	tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) |
   4168	    (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) |
   4169	    QLA_DRIVER_BETA_VER;
   4170
   4171	total_data_size = ha->md_dump_size;
   4172
   4173	ql_dbg(ql_dbg_p3p, vha, 0xb03d,
   4174	    "Total minidump data_size 0x%x to be captured\n", total_data_size);
   4175
   4176	/* Check whether template obtained is valid */
   4177	if (tmplt_hdr->entry_type != QLA82XX_TLHDR) {
   4178		ql_log(ql_log_warn, vha, 0xb04e,
   4179		    "Bad template header entry type: 0x%x obtained\n",
   4180		    tmplt_hdr->entry_type);
   4181		goto md_failed;
   4182	}
   4183
   4184	entry_hdr = (qla82xx_md_entry_hdr_t *)
   4185	    (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
   4186
   4187	/* Walk through the entry headers */
   4188	for (i = 0; i < no_entry_hdr; i++) {
   4189
   4190		if (data_collected > total_data_size) {
   4191			ql_log(ql_log_warn, vha, 0xb03e,
    4192			    "MiniDump data collected [0x%x] exceeds dump buffer size\n",
   4193			    data_collected);
   4194			goto md_failed;
   4195		}
   4196
   4197		if (!(entry_hdr->d_ctrl.entry_capture_mask &
   4198		    ql2xmdcapmask)) {
   4199			entry_hdr->d_ctrl.driver_flags |=
   4200			    QLA82XX_DBG_SKIPPED_FLAG;
   4201			ql_dbg(ql_dbg_p3p, vha, 0xb03f,
   4202			    "Skipping entry[%d]: "
   4203			    "ETYPE[0x%x]-ELEVEL[0x%x]\n",
   4204			    i, entry_hdr->entry_type,
   4205			    entry_hdr->d_ctrl.entry_capture_mask);
   4206			goto skip_nxt_entry;
   4207		}
   4208
   4209		ql_dbg(ql_dbg_p3p, vha, 0xb040,
   4210		    "[%s]: data ptr[%d]: %p, entry_hdr: %p\n"
   4211		    "entry_type: 0x%x, capture_mask: 0x%x\n",
   4212		    __func__, i, data_ptr, entry_hdr,
   4213		    entry_hdr->entry_type,
   4214		    entry_hdr->d_ctrl.entry_capture_mask);
   4215
   4216		ql_dbg(ql_dbg_p3p, vha, 0xb041,
   4217		    "Data collected: [0x%x], Dump size left:[0x%x]\n",
   4218		    data_collected, (ha->md_dump_size - data_collected));
   4219
   4220		/* Decode the entry type and take
   4221		 * required action to capture debug data */
   4222		switch (entry_hdr->entry_type) {
   4223		case QLA82XX_RDEND:
   4224			qla82xx_mark_entry_skipped(vha, entry_hdr, i);
   4225			break;
   4226		case QLA82XX_CNTRL:
   4227			rval = qla82xx_minidump_process_control(vha,
   4228			    entry_hdr, &data_ptr);
   4229			if (rval != QLA_SUCCESS) {
   4230				qla82xx_mark_entry_skipped(vha, entry_hdr, i);
   4231				goto md_failed;
   4232			}
   4233			break;
   4234		case QLA82XX_RDCRB:
   4235			qla82xx_minidump_process_rdcrb(vha,
   4236			    entry_hdr, &data_ptr);
   4237			break;
   4238		case QLA82XX_RDMEM:
   4239			rval = qla82xx_minidump_process_rdmem(vha,
   4240			    entry_hdr, &data_ptr);
   4241			if (rval != QLA_SUCCESS) {
   4242				qla82xx_mark_entry_skipped(vha, entry_hdr, i);
   4243				goto md_failed;
   4244			}
   4245			break;
   4246		case QLA82XX_BOARD:
   4247		case QLA82XX_RDROM:
   4248			qla82xx_minidump_process_rdrom(vha,
   4249			    entry_hdr, &data_ptr);
   4250			break;
   4251		case QLA82XX_L2DTG:
   4252		case QLA82XX_L2ITG:
   4253		case QLA82XX_L2DAT:
   4254		case QLA82XX_L2INS:
   4255			rval = qla82xx_minidump_process_l2tag(vha,
   4256			    entry_hdr, &data_ptr);
   4257			if (rval != QLA_SUCCESS) {
   4258				qla82xx_mark_entry_skipped(vha, entry_hdr, i);
   4259				goto md_failed;
   4260			}
   4261			break;
   4262		case QLA82XX_L1DAT:
   4263		case QLA82XX_L1INS:
   4264			qla82xx_minidump_process_l1cache(vha,
   4265			    entry_hdr, &data_ptr);
   4266			break;
   4267		case QLA82XX_RDOCM:
   4268			qla82xx_minidump_process_rdocm(vha,
   4269			    entry_hdr, &data_ptr);
   4270			break;
   4271		case QLA82XX_RDMUX:
   4272			qla82xx_minidump_process_rdmux(vha,
   4273			    entry_hdr, &data_ptr);
   4274			break;
   4275		case QLA82XX_QUEUE:
   4276			qla82xx_minidump_process_queue(vha,
   4277			    entry_hdr, &data_ptr);
   4278			break;
   4279		case QLA82XX_RDNOP:
   4280		default:
   4281			qla82xx_mark_entry_skipped(vha, entry_hdr, i);
   4282			break;
   4283		}
   4284
   4285		ql_dbg(ql_dbg_p3p, vha, 0xb042,
   4286		    "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr);
   4287
   4288		data_collected = (uint8_t *)data_ptr -
   4289		    (uint8_t *)ha->md_dump;
   4290skip_nxt_entry:
   4291		entry_hdr = (qla82xx_md_entry_hdr_t *)
   4292		    (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
   4293	}
   4294
   4295	if (data_collected != total_data_size) {
   4296		ql_dbg(ql_dbg_p3p, vha, 0xb043,
    4297		    "MiniDump data mismatch: Data collected: [0x%x], "
    4298		    "total_data_size: [0x%x]\n",
   4299		    data_collected, total_data_size);
   4300		goto md_failed;
   4301	}
   4302
   4303	ql_log(ql_log_info, vha, 0xb044,
   4304	    "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
   4305	    vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
   4306	ha->fw_dumped = true;
   4307	qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
   4308
   4309md_failed:
   4310	return rval;
   4311}
   4312
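/*
 * qla82xx_md_alloc
 *     Size the minidump buffer from the capture mask (falling back to
 *     the firmware's default mask if ql2xmdcapmask is out of range)
 *     and vmalloc it.  Returns 0 on success, 1 on failure.
 */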
   4313int
   4314qla82xx_md_alloc(scsi_qla_host_t *vha)
   4315{
   4316	struct qla_hw_data *ha = vha->hw;
   4317	int i, k;
   4318	struct qla82xx_md_template_hdr *tmplt_hdr;
   4319
   4320	tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
   4321
   4322	if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) {
   4323		ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF;
   4324		ql_log(ql_log_info, vha, 0xb045,
   4325		    "Forcing driver capture mask to firmware default capture mask: 0x%x.\n",
   4326		    ql2xmdcapmask);
   4327	}
   4328
   4329	for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) {
   4330		if (i & ql2xmdcapmask)
   4331			ha->md_dump_size += tmplt_hdr->capture_size_array[k];
   4332	}
   4333
   4334	if (ha->md_dump) {
   4335		ql_log(ql_log_warn, vha, 0xb046,
   4336		    "Firmware dump previously allocated.\n");
   4337		return 1;
   4338	}
   4339
   4340	ha->md_dump = vmalloc(ha->md_dump_size);
   4341	if (ha->md_dump == NULL) {
   4342		ql_log(ql_log_warn, vha, 0xb047,
   4343		    "Unable to allocate memory for Minidump size "
   4344		    "(0x%x).\n", ha->md_dump_size);
   4345		return 1;
   4346	}
   4347	return 0;
   4348}
   4349
   4350void
   4351qla82xx_md_free(scsi_qla_host_t *vha)
   4352{
   4353	struct qla_hw_data *ha = vha->hw;
   4354
   4355	/* Release the template header allocated */
   4356	if (ha->md_tmplt_hdr) {
   4357		ql_log(ql_log_info, vha, 0xb048,
   4358		    "Free MiniDump template: %p, size (%d KB)\n",
   4359		    ha->md_tmplt_hdr, ha->md_template_size / 1024);
   4360		dma_free_coherent(&ha->pdev->dev, ha->md_template_size,
   4361		    ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
   4362		ha->md_tmplt_hdr = NULL;
   4363	}
   4364
   4365	/* Release the template data buffer allocated */
   4366	if (ha->md_dump) {
   4367		ql_log(ql_log_info, vha, 0xb049,
   4368		    "Free MiniDump memory: %p, size (%d KB)\n",
   4369		    ha->md_dump, ha->md_dump_size / 1024);
   4370		vfree(ha->md_dump);
   4371		ha->md_dump_size = 0;
   4372		ha->md_dump = NULL;
   4373	}
   4374}
   4375
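/*
 * qla82xx_md_prep
 *     Fetch the minidump template from the firmware and allocate the
 *     dump buffer; on allocation failure, release the template again.
 */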
   4376void
   4377qla82xx_md_prep(scsi_qla_host_t *vha)
   4378{
   4379	struct qla_hw_data *ha = vha->hw;
   4380	int rval;
   4381
   4382	/* Get Minidump template size */
   4383	rval = qla82xx_md_get_template_size(vha);
   4384	if (rval == QLA_SUCCESS) {
   4385		ql_log(ql_log_info, vha, 0xb04a,
   4386		    "MiniDump Template size obtained (%d KB)\n",
   4387		    ha->md_template_size / 1024);
   4388
   4389		/* Get Minidump template */
   4390		if (IS_QLA8044(ha))
   4391			rval = qla8044_md_get_template(vha);
   4392		else
   4393			rval = qla82xx_md_get_template(vha);
   4394
   4395		if (rval == QLA_SUCCESS) {
   4396			ql_dbg(ql_dbg_p3p, vha, 0xb04b,
   4397			    "MiniDump Template obtained\n");
   4398
   4399			/* Allocate memory for minidump */
   4400			rval = qla82xx_md_alloc(vha);
   4401			if (rval == QLA_SUCCESS)
   4402				ql_log(ql_log_info, vha, 0xb04c,
   4403				    "MiniDump memory allocated (%d KB)\n",
   4404				    ha->md_dump_size / 1024);
   4405			else {
   4406				ql_log(ql_log_info, vha, 0xb04d,
   4407				    "Free MiniDump template: %p, size: (%d KB)\n",
   4408				    ha->md_tmplt_hdr,
   4409				    ha->md_template_size / 1024);
   4410				dma_free_coherent(&ha->pdev->dev,
   4411				    ha->md_template_size,
   4412				    ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
   4413				ha->md_tmplt_hdr = NULL;
   4414			}
   4415
   4416		}
   4417	}
   4418}
   4419
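/* Enable LED beaconing via the set-LED-config mailbox command,
 * serialized under the IDC lock.
 */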
   4420int
   4421qla82xx_beacon_on(struct scsi_qla_host *vha)
   4422{
   4423
   4424	int rval;
   4425	struct qla_hw_data *ha = vha->hw;
   4426
   4427	qla82xx_idc_lock(ha);
   4428	rval = qla82xx_mbx_beacon_ctl(vha, 1);
   4429
   4430	if (rval) {
   4431		ql_log(ql_log_warn, vha, 0xb050,
   4432		    "mbx set led config failed in %s\n", __func__);
   4433		goto exit;
   4434	}
   4435	ha->beacon_blink_led = 1;
   4436exit:
   4437	qla82xx_idc_unlock(ha);
   4438	return rval;
   4439}
   4440
   4441int
   4442qla82xx_beacon_off(struct scsi_qla_host *vha)
   4443{
   4444
   4445	int rval;
   4446	struct qla_hw_data *ha = vha->hw;
   4447
   4448	qla82xx_idc_lock(ha);
   4449	rval = qla82xx_mbx_beacon_ctl(vha, 0);
   4450
   4451	if (rval) {
   4452		ql_log(ql_log_warn, vha, 0xb051,
   4453		    "mbx set led config failed in %s\n", __func__);
   4454		goto exit;
   4455	}
   4456	ha->beacon_blink_led = 0;
   4457exit:
   4458	qla82xx_idc_unlock(ha);
   4459	return rval;
   4460}
   4461
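/*
 * qla82xx_fw_dump
 *     Trigger a firmware dump by forcing a chip reset: block the SCSI
 *     host, mark the reset as application-forced (which skips minidump
 *     capture), claim reset ownership and wait for the chip reset to
 *     complete.
 */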
   4462void
   4463qla82xx_fw_dump(scsi_qla_host_t *vha)
   4464{
   4465	struct qla_hw_data *ha = vha->hw;
   4466
   4467	if (!ha->allow_cna_fw_dump)
   4468		return;
   4469
   4470	scsi_block_requests(vha->host);
   4471	ha->flags.isp82xx_no_md_cap = 1;
   4472	qla82xx_idc_lock(ha);
   4473	qla82xx_set_reset_owner(vha);
   4474	qla82xx_idc_unlock(ha);
   4475	qla2x00_wait_for_chip_reset(vha);
   4476	scsi_unblock_requests(vha->host);
   4477}