cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qlcnic_minidump.c (35324B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * QLogic qlcnic NIC Driver
      4 * Copyright (c) 2009-2013 QLogic Corporation
      5 */
      6
      7#include <net/ip.h>
      8
      9#include "qlcnic.h"
     10#include "qlcnic_hdr.h"
     11#include "qlcnic_83xx_hw.h"
     12#include "qlcnic_hw.h"
     13
     14#define QLC_83XX_MINIDUMP_FLASH		0x520000
     15#define QLC_83XX_OCM_INDEX			3
     16#define QLC_83XX_PCI_INDEX			0
     17#define QLC_83XX_DMA_ENGINE_INDEX		8
     18
     19static const u32 qlcnic_ms_read_data[] = {
     20	0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
     21};
     22
     23#define QLCNIC_DUMP_WCRB	BIT_0
     24#define QLCNIC_DUMP_RWCRB	BIT_1
     25#define QLCNIC_DUMP_ANDCRB	BIT_2
     26#define QLCNIC_DUMP_ORCRB	BIT_3
     27#define QLCNIC_DUMP_POLLCRB	BIT_4
     28#define QLCNIC_DUMP_RD_SAVE	BIT_5
     29#define QLCNIC_DUMP_WRT_SAVED	BIT_6
     30#define QLCNIC_DUMP_MOD_SAVE_ST	BIT_7
     31#define QLCNIC_DUMP_SKIP	BIT_7
     32
     33#define QLCNIC_DUMP_MASK_MAX	0xff
     34
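        /* Descriptor written into adapter (MS) memory to program one PEX DMA
         * transfer: source address in adapter memory, destination DMA buffer
         * address on the host and the transfer size.
         */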
     35struct qlcnic_pex_dma_descriptor {
     36	u32	read_data_size;
     37	u32	dma_desc_cmd;
     38	u32	src_addr_low;
     39	u32	src_addr_high;
     40	u32	dma_bus_addr_low;
     41	u32	dma_bus_addr_high;
     42	u32	rsvd[6];
     43} __packed;
     44
     45struct qlcnic_common_entry_hdr {
     46	u32     type;
     47	u32     offset;
     48	u32     cap_size;
     49#if defined(__LITTLE_ENDIAN)
     50	u8      mask;
     51	u8      rsvd[2];
     52	u8      flags;
     53#else
     54	u8      flags;
     55	u8      rsvd[2];
     56	u8      mask;
     57#endif
     58} __packed;
     59
     60struct __crb {
     61	u32	addr;
     62#if defined(__LITTLE_ENDIAN)
     63	u8	stride;
     64	u8	rsvd1[3];
     65#else
     66	u8	rsvd1[3];
     67	u8	stride;
     68#endif
     69	u32	data_size;
     70	u32	no_ops;
     71	u32	rsvd2[4];
     72} __packed;
     73
     74struct __ctrl {
     75	u32	addr;
     76#if defined(__LITTLE_ENDIAN)
     77	u8	stride;
     78	u8	index_a;
     79	u16	timeout;
     80#else
     81	u16	timeout;
     82	u8	index_a;
     83	u8	stride;
     84#endif
     85	u32	data_size;
     86	u32	no_ops;
     87#if defined(__LITTLE_ENDIAN)
     88	u8	opcode;
     89	u8	index_v;
     90	u8	shl_val;
     91	u8	shr_val;
     92#else
     93	u8	shr_val;
     94	u8	shl_val;
     95	u8	index_v;
     96	u8	opcode;
     97#endif
     98	u32	val1;
     99	u32	val2;
    100	u32	val3;
    101} __packed;
    102
    103struct __cache {
    104	u32	addr;
    105#if defined(__LITTLE_ENDIAN)
    106	u16	stride;
    107	u16	init_tag_val;
    108#else
    109	u16	init_tag_val;
    110	u16	stride;
    111#endif
    112	u32	size;
    113	u32	no_ops;
    114	u32	ctrl_addr;
    115	u32	ctrl_val;
    116	u32	read_addr;
    117#if defined(__LITTLE_ENDIAN)
    118	u8	read_addr_stride;
    119	u8	read_addr_num;
    120	u8	rsvd1[2];
    121#else
    122	u8	rsvd1[2];
    123	u8	read_addr_num;
    124	u8	read_addr_stride;
    125#endif
    126} __packed;
    127
    128struct __ocm {
    129	u8	rsvd[8];
    130	u32	size;
    131	u32	no_ops;
    132	u8	rsvd1[8];
    133	u32	read_addr;
    134	u32	read_addr_stride;
    135} __packed;
    136
    137struct __mem {
    138	u32	desc_card_addr;
    139	u32	dma_desc_cmd;
    140	u32	start_dma_cmd;
    141	u32	rsvd[3];
    142	u32	addr;
    143	u32	size;
    144} __packed;
    145
    146struct __mux {
    147	u32	addr;
    148	u8	rsvd[4];
    149	u32	size;
    150	u32	no_ops;
    151	u32	val;
    152	u32	val_stride;
    153	u32	read_addr;
    154	u8	rsvd2[4];
    155} __packed;
    156
    157struct __queue {
    158	u32	sel_addr;
    159#if defined(__LITTLE_ENDIAN)
    160	u16	stride;
    161	u8	rsvd[2];
    162#else
    163	u8	rsvd[2];
    164	u16	stride;
    165#endif
    166	u32	size;
    167	u32	no_ops;
    168	u8	rsvd2[8];
    169	u32	read_addr;
    170#if defined(__LITTLE_ENDIAN)
    171	u8	read_addr_stride;
    172	u8	read_addr_cnt;
    173	u8	rsvd3[2];
    174#else
    175	u8	rsvd3[2];
    176	u8	read_addr_cnt;
    177	u8	read_addr_stride;
    178#endif
    179} __packed;
    180
    181struct __pollrd {
    182	u32	sel_addr;
    183	u32	read_addr;
    184	u32	sel_val;
    185#if defined(__LITTLE_ENDIAN)
    186	u16	sel_val_stride;
    187	u16	no_ops;
    188#else
    189	u16	no_ops;
    190	u16	sel_val_stride;
    191#endif
    192	u32	poll_wait;
    193	u32	poll_mask;
    194	u32	data_size;
    195	u8	rsvd[4];
    196} __packed;
    197
    198struct __mux2 {
    199	u32	sel_addr1;
    200	u32	sel_addr2;
    201	u32	sel_val1;
    202	u32	sel_val2;
    203	u32	no_ops;
    204	u32	sel_val_mask;
    205	u32	read_addr;
    206#if defined(__LITTLE_ENDIAN)
    207	u8	sel_val_stride;
    208	u8	data_size;
    209	u8	rsvd[2];
    210#else
    211	u8	rsvd[2];
    212	u8	data_size;
    213	u8	sel_val_stride;
    214#endif
    215} __packed;
    216
    217struct __pollrdmwr {
    218	u32	addr1;
    219	u32	addr2;
    220	u32	val1;
    221	u32	val2;
    222	u32	poll_wait;
    223	u32	poll_mask;
    224	u32	mod_mask;
    225	u32	data_size;
    226} __packed;
    227
    228struct qlcnic_dump_entry {
    229	struct qlcnic_common_entry_hdr hdr;
    230	union {
    231		struct __crb		crb;
    232		struct __cache		cache;
    233		struct __ocm		ocm;
    234		struct __mem		mem;
    235		struct __mux		mux;
    236		struct __queue		que;
    237		struct __ctrl		ctrl;
    238		struct __pollrdmwr	pollrdmwr;
    239		struct __mux2		mux2;
    240		struct __pollrd		pollrd;
    241	} region;
    242} __packed;
    243
    244enum qlcnic_minidump_opcode {
    245	QLCNIC_DUMP_NOP		= 0,
    246	QLCNIC_DUMP_READ_CRB	= 1,
    247	QLCNIC_DUMP_READ_MUX	= 2,
    248	QLCNIC_DUMP_QUEUE	= 3,
    249	QLCNIC_DUMP_BRD_CONFIG	= 4,
    250	QLCNIC_DUMP_READ_OCM	= 6,
    251	QLCNIC_DUMP_PEG_REG	= 7,
    252	QLCNIC_DUMP_L1_DTAG	= 8,
    253	QLCNIC_DUMP_L1_ITAG	= 9,
    254	QLCNIC_DUMP_L1_DATA	= 11,
    255	QLCNIC_DUMP_L1_INST	= 12,
    256	QLCNIC_DUMP_L2_DTAG	= 21,
    257	QLCNIC_DUMP_L2_ITAG	= 22,
    258	QLCNIC_DUMP_L2_DATA	= 23,
    259	QLCNIC_DUMP_L2_INST	= 24,
    260	QLCNIC_DUMP_POLL_RD	= 35,
    261	QLCNIC_READ_MUX2	= 36,
    262	QLCNIC_READ_POLLRDMWR	= 37,
    263	QLCNIC_DUMP_READ_ROM	= 71,
    264	QLCNIC_DUMP_READ_MEM	= 72,
    265	QLCNIC_DUMP_READ_CTRL	= 98,
    266	QLCNIC_DUMP_TLHDR	= 99,
    267	QLCNIC_DUMP_RDEND	= 255
    268};
    269
    270inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
    271{
    272	struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
    273
    274	return hdr->saved_state[index];
    275}
    276
    277inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
    278					u32 value)
    279{
    280	struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
    281
    282	hdr->saved_state[index] = value;
    283}
    284
    285void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
    286{
    287	struct qlcnic_82xx_dump_template_hdr *hdr;
    288
    289	hdr = fw_dump->tmpl_hdr;
    290	fw_dump->tmpl_hdr_size = hdr->size;
    291	fw_dump->version = hdr->version;
    292	fw_dump->num_entries = hdr->num_entries;
    293	fw_dump->offset = hdr->offset;
    294
    295	hdr->drv_cap_mask = hdr->cap_mask;
    296	fw_dump->cap_mask = hdr->cap_mask;
    297
    298	fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
    299}
    300
    301inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
    302{
    303	struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
    304
    305	return hdr->cap_sizes[index];
    306}
    307
    308void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
    309{
    310	struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
    311
    312	hdr->sys_info[idx] = value;
    313}
    314
    315void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
    316{
    317	struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
    318
    319	hdr->drv_cap_mask = mask;
    320}
    321
    322inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
    323{
    324	struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
    325
    326	return hdr->saved_state[index];
    327}
    328
    329inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
    330					u32 value)
    331{
    332	struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
    333
    334	hdr->saved_state[index] = value;
    335}
    336
    337#define QLCNIC_TEMPLATE_VERSION (0x20001)
    338
    339void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
    340{
    341	struct qlcnic_83xx_dump_template_hdr *hdr;
    342
    343	hdr = fw_dump->tmpl_hdr;
    344	fw_dump->tmpl_hdr_size = hdr->size;
    345	fw_dump->version = hdr->version;
    346	fw_dump->num_entries = hdr->num_entries;
    347	fw_dump->offset = hdr->offset;
    348
    349	hdr->drv_cap_mask = hdr->cap_mask;
    350	fw_dump->cap_mask = hdr->cap_mask;
    351
    352	fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
    353			       QLCNIC_TEMPLATE_VERSION;
    354}
    355
    356inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
    357{
    358	struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
    359
    360	return hdr->cap_sizes[index];
    361}
    362
    363void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
    364{
    365	struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
    366
    367	hdr->sys_info[idx] = value;
    368}
    369
    370void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
    371{
    372	struct qlcnic_83xx_dump_template_hdr *hdr;
    373
    374	hdr = tmpl_hdr;
    375	hdr->drv_cap_mask = mask;
    376}
    377
    378struct qlcnic_dump_operations {
    379	enum qlcnic_minidump_opcode opcode;
    380	u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
    381		       __le32 *);
    382};
    383
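        /* Capture a CRB register block: read crb->no_ops registers starting
         * at crb->addr (advancing by crb->stride) and store each
         * address/value pair in the dump buffer.
         */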
    384static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
    385			   struct qlcnic_dump_entry *entry, __le32 *buffer)
    386{
    387	int i;
    388	u32 addr, data;
    389	struct __crb *crb = &entry->region.crb;
    390
    391	addr = crb->addr;
    392
    393	for (i = 0; i < crb->no_ops; i++) {
    394		data = qlcnic_ind_rd(adapter, addr);
    395		*buffer++ = cpu_to_le32(addr);
    396		*buffer++ = cpu_to_le32(data);
    397		addr += crb->stride;
    398	}
    399	return crb->no_ops * 2 * sizeof(u32);
    400}
    401
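        /* Execute a CTRL template entry: every bit set in ctr->opcode selects
         * an operation (write, read/write-back, AND, OR, poll, or
         * save/restore through the template's saved state) applied to
         * ctr->no_ops registers starting at ctr->addr.
         */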
    402static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
    403			    struct qlcnic_dump_entry *entry, __le32 *buffer)
    404{
    405	void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
    406	struct __ctrl *ctr = &entry->region.ctrl;
    407	int i, k, timeout = 0;
    408	u32 addr, data, temp;
    409	u8 no_ops;
    410
    411	addr = ctr->addr;
    412	no_ops = ctr->no_ops;
    413
    414	for (i = 0; i < no_ops; i++) {
    415		k = 0;
    416		for (k = 0; k < 8; k++) {
    417			if (!(ctr->opcode & (1 << k)))
    418				continue;
    419			switch (1 << k) {
    420			case QLCNIC_DUMP_WCRB:
    421				qlcnic_ind_wr(adapter, addr, ctr->val1);
    422				break;
    423			case QLCNIC_DUMP_RWCRB:
    424				data = qlcnic_ind_rd(adapter, addr);
    425				qlcnic_ind_wr(adapter, addr, data);
    426				break;
    427			case QLCNIC_DUMP_ANDCRB:
    428				data = qlcnic_ind_rd(adapter, addr);
    429				qlcnic_ind_wr(adapter, addr,
    430					      (data & ctr->val2));
    431				break;
    432			case QLCNIC_DUMP_ORCRB:
    433				data = qlcnic_ind_rd(adapter, addr);
    434				qlcnic_ind_wr(adapter, addr,
    435					      (data | ctr->val3));
    436				break;
    437			case QLCNIC_DUMP_POLLCRB:
    438				while (timeout <= ctr->timeout) {
    439					data = qlcnic_ind_rd(adapter, addr);
    440					if ((data & ctr->val2) == ctr->val1)
    441						break;
    442					usleep_range(1000, 2000);
    443					timeout++;
    444				}
    445				if (timeout > ctr->timeout) {
    446					dev_info(&adapter->pdev->dev,
    447					"Timed out, aborting poll CRB\n");
    448					return -EINVAL;
    449				}
    450				break;
    451			case QLCNIC_DUMP_RD_SAVE:
    452				temp = ctr->index_a;
    453				if (temp)
    454					addr = qlcnic_get_saved_state(adapter,
    455								      hdr,
    456								      temp);
    457				data = qlcnic_ind_rd(adapter, addr);
    458				qlcnic_set_saved_state(adapter, hdr,
    459						       ctr->index_v, data);
    460				break;
    461			case QLCNIC_DUMP_WRT_SAVED:
    462				temp = ctr->index_v;
    463				if (temp)
    464					data = qlcnic_get_saved_state(adapter,
    465								      hdr,
    466								      temp);
    467				else
    468					data = ctr->val1;
    469
    470				temp = ctr->index_a;
    471				if (temp)
    472					addr = qlcnic_get_saved_state(adapter,
    473								      hdr,
    474								      temp);
    475				qlcnic_ind_wr(adapter, addr, data);
    476				break;
    477			case QLCNIC_DUMP_MOD_SAVE_ST:
    478				data = qlcnic_get_saved_state(adapter, hdr,
    479							      ctr->index_v);
    480				data <<= ctr->shl_val;
    481				data >>= ctr->shr_val;
    482				if (ctr->val2)
    483					data &= ctr->val2;
    484				data |= ctr->val3;
    485				data += ctr->val1;
    486				qlcnic_set_saved_state(adapter, hdr,
    487						       ctr->index_v, data);
    488				break;
    489			default:
    490				dev_info(&adapter->pdev->dev,
    491					 "Unknown opcode\n");
    492				break;
    493			}
    494		}
    495		addr += ctr->stride;
    496	}
    497	return 0;
    498}
    499
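        /* Capture a MUX entry: write an incrementing select value to
         * mux->addr and record each select value together with the data read
         * back from mux->read_addr.
         */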
    500static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
    501			   struct qlcnic_dump_entry *entry, __le32 *buffer)
    502{
    503	int loop;
    504	u32 val, data = 0;
    505	struct __mux *mux = &entry->region.mux;
    506
    507	val = mux->val;
    508	for (loop = 0; loop < mux->no_ops; loop++) {
    509		qlcnic_ind_wr(adapter, mux->addr, val);
    510		data = qlcnic_ind_rd(adapter, mux->read_addr);
    511		*buffer++ = cpu_to_le32(val);
    512		*buffer++ = cpu_to_le32(data);
    513		val += mux->val_stride;
    514	}
    515	return 2 * mux->no_ops * sizeof(u32);
    516}
    517
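        /* Capture on-chip queue state: select each queue id through
         * que->sel_addr, then read que->read_addr_cnt words starting at
         * que->read_addr.
         */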
    518static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
    519			   struct qlcnic_dump_entry *entry, __le32 *buffer)
    520{
    521	int i, loop;
    522	u32 cnt, addr, data, que_id = 0;
    523	struct __queue *que = &entry->region.que;
    524
    525	addr = que->read_addr;
    526	cnt = que->read_addr_cnt;
    527
    528	for (loop = 0; loop < que->no_ops; loop++) {
    529		qlcnic_ind_wr(adapter, que->sel_addr, que_id);
    530		addr = que->read_addr;
    531		for (i = 0; i < cnt; i++) {
    532			data = qlcnic_ind_rd(adapter, addr);
    533			*buffer++ = cpu_to_le32(data);
    534			addr += que->read_addr_stride;
    535		}
    536		que_id += que->stride;
    537	}
    538	return que->no_ops * cnt * sizeof(u32);
    539}
    540
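        /* Capture on-chip memory (OCM) by reading it directly through BAR0. */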
    541static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
    542			   struct qlcnic_dump_entry *entry, __le32 *buffer)
    543{
    544	int i;
    545	u32 data;
    546	void __iomem *addr;
    547	struct __ocm *ocm = &entry->region.ocm;
    548
    549	addr = adapter->ahw->pci_base0 + ocm->read_addr;
    550	for (i = 0; i < ocm->no_ops; i++) {
    551		data = readl(addr);
    552		*buffer++ = cpu_to_le32(data);
    553		addr += ocm->read_addr_stride;
    554	}
    555	return ocm->no_ops * sizeof(u32);
    556}
    557
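        /* Read board configuration/ROM contents through the flash window
         * registers after acquiring the flash semaphore (82xx path).
         */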
    558static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
    559			   struct qlcnic_dump_entry *entry, __le32 *buffer)
    560{
    561	int i, count = 0;
    562	u32 fl_addr, size, val, lck_val, addr;
    563	struct __mem *rom = &entry->region.mem;
    564
    565	fl_addr = rom->addr;
    566	size = rom->size / 4;
    567lock_try:
    568	lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
    569	if (!lck_val && count < MAX_CTL_CHECK) {
    570		usleep_range(10000, 11000);
    571		count++;
    572		goto lock_try;
    573	}
    574	QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
    575			    adapter->ahw->pci_func);
    576	for (i = 0; i < size; i++) {
    577		addr = fl_addr & 0xFFFF0000;
    578		qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
    579		addr = LSW(fl_addr) + FLASH_ROM_DATA;
    580		val = qlcnic_ind_rd(adapter, addr);
    581		fl_addr += 4;
    582		*buffer++ = cpu_to_le32(val);
    583	}
    584	QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
    585	return rom->size;
    586}
    587
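        /* Capture L1 cache tag/data arrays: for each tag value write it to
         * l1->addr, trigger the operation through l1->ctrl_addr and read back
         * l1->read_addr_num words from l1->read_addr.
         */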
    588static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
    589				struct qlcnic_dump_entry *entry, __le32 *buffer)
    590{
    591	int i;
    592	u32 cnt, val, data, addr;
    593	struct __cache *l1 = &entry->region.cache;
    594
    595	val = l1->init_tag_val;
    596
    597	for (i = 0; i < l1->no_ops; i++) {
    598		qlcnic_ind_wr(adapter, l1->addr, val);
    599		qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
    600		addr = l1->read_addr;
    601		cnt = l1->read_addr_num;
    602		while (cnt) {
    603			data = qlcnic_ind_rd(adapter, addr);
    604			*buffer++ = cpu_to_le32(data);
    605			addr += l1->read_addr_stride;
    606			cnt--;
    607		}
    608		val += l1->stride;
    609	}
    610	return l1->no_ops * l1->read_addr_num * sizeof(u32);
    611}
    612
    613static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
    614				struct qlcnic_dump_entry *entry, __le32 *buffer)
    615{
    616	int i;
    617	u32 cnt, val, data, addr;
    618	u8 poll_mask, poll_to, time_out = 0;
    619	struct __cache *l2 = &entry->region.cache;
    620
    621	val = l2->init_tag_val;
    622	poll_mask = LSB(MSW(l2->ctrl_val));
    623	poll_to = MSB(MSW(l2->ctrl_val));
    624
    625	for (i = 0; i < l2->no_ops; i++) {
    626		qlcnic_ind_wr(adapter, l2->addr, val);
    627		if (LSW(l2->ctrl_val))
    628			qlcnic_ind_wr(adapter, l2->ctrl_addr,
    629				      LSW(l2->ctrl_val));
    630		if (!poll_mask)
    631			goto skip_poll;
    632		do {
    633			data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
    634			if (!(data & poll_mask))
    635				break;
    636			usleep_range(1000, 2000);
    637			time_out++;
    638		} while (time_out <= poll_to);
    639
    640		if (time_out > poll_to) {
    641			dev_err(&adapter->pdev->dev,
    642				"Timeout exceeded in %s, aborting dump\n",
    643				__func__);
    644			return -EINVAL;
    645		}
    646skip_poll:
    647		addr = l2->read_addr;
    648		cnt = l2->read_addr_num;
    649		while (cnt) {
    650			data = qlcnic_ind_rd(adapter, addr);
    651			*buffer++ = cpu_to_le32(data);
    652			addr += l2->read_addr_stride;
    653			cnt--;
    654		}
    655		val += l2->stride;
    656	}
    657	return l2->no_ops * l2->read_addr_num * sizeof(u32);
    658}
    659
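        /* Read adapter memory through the MS test agent registers, 16 bytes
         * at a time; both the address and the size must be 16-byte aligned.
         */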
    660static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
    661					 struct __mem *mem, __le32 *buffer,
    662					 int *ret)
    663{
    664	u32 addr, data, test;
    665	int i, reg_read;
    666
    667	reg_read = mem->size;
    668	addr = mem->addr;
    669	/* check for data size of multiple of 16 and 16 byte alignment */
     670	if ((addr & 0xf) || (reg_read % 16)) {
    671		dev_info(&adapter->pdev->dev,
    672			 "Unaligned memory addr:0x%x size:0x%x\n",
    673			 addr, reg_read);
    674		*ret = -EINVAL;
    675		return 0;
    676	}
    677
    678	mutex_lock(&adapter->ahw->mem_lock);
    679
    680	while (reg_read != 0) {
    681		qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
    682		qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
    683		qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
    684
    685		for (i = 0; i < MAX_CTL_CHECK; i++) {
    686			test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
    687			if (!(test & TA_CTL_BUSY))
    688				break;
    689		}
    690		if (i == MAX_CTL_CHECK) {
    691			if (printk_ratelimit()) {
    692				dev_err(&adapter->pdev->dev,
    693					"failed to read through agent\n");
    694				*ret = -EIO;
    695				goto out;
    696			}
    697		}
    698		for (i = 0; i < 4; i++) {
    699			data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
    700			*buffer++ = cpu_to_le32(data);
    701		}
    702		addr += 16;
    703		reg_read -= 16;
    704		ret += 16;
    705		cond_resched();
    706	}
    707out:
    708	mutex_unlock(&adapter->ahw->mem_lock);
    709	return mem->size;
    710}
    711
    712/* DMA register base address */
    713#define QLC_DMA_REG_BASE_ADDR(dma_no)	(0x77320000 + (dma_no * 0x10000))
    714
    715/* DMA register offsets w.r.t base address */
    716#define QLC_DMA_CMD_BUFF_ADDR_LOW	0
    717#define QLC_DMA_CMD_BUFF_ADDR_HI	4
    718#define QLC_DMA_CMD_STATUS_CTRL		8
    719
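        /* Start the PEX DMA transfer described by the descriptor previously
         * written to MS memory, then poll the status register until the busy
         * bit clears.
         */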
    720static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
    721				struct __mem *mem)
    722{
    723	struct device *dev = &adapter->pdev->dev;
    724	u32 dma_no, dma_base_addr, temp_addr;
    725	int i, ret, dma_sts;
    726	void *tmpl_hdr;
    727
    728	tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
    729	dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
    730					QLC_83XX_DMA_ENGINE_INDEX);
    731	dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
    732
    733	temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
    734	ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
    735	if (ret)
    736		return ret;
    737
    738	temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
    739	ret = qlcnic_ind_wr(adapter, temp_addr, 0);
    740	if (ret)
    741		return ret;
    742
    743	temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
    744	ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
    745	if (ret)
    746		return ret;
    747
    748	/* Wait for DMA to complete */
    749	temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
    750	for (i = 0; i < 400; i++) {
    751		dma_sts = qlcnic_ind_rd(adapter, temp_addr);
    752
    753		if (dma_sts & BIT_1)
    754			usleep_range(250, 500);
    755		else
    756			break;
    757	}
    758
    759	if (i >= 400) {
     760		dev_info(dev, "PEX DMA operation timed out\n");
    761		ret = -EIO;
    762	}
    763
    764	return ret;
    765}
    766
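        /* Read adapter memory with the PEX DMA engine: build a descriptor,
         * write it to MS memory, let the engine DMA the data into the
         * preallocated host buffer and copy it out, in chunks of at most
         * QLC_PEX_DMA_READ_SIZE bytes.
         */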
    767static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
    768				     struct __mem *mem,
    769				     __le32 *buffer, int *ret)
    770{
    771	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
    772	u32 temp, dma_base_addr, size = 0, read_size = 0;
    773	struct qlcnic_pex_dma_descriptor *dma_descr;
    774	struct device *dev = &adapter->pdev->dev;
    775	dma_addr_t dma_phys_addr;
    776	void *dma_buffer;
    777	void *tmpl_hdr;
    778
    779	tmpl_hdr = fw_dump->tmpl_hdr;
    780
    781	/* Check if DMA engine is available */
    782	temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
    783				      QLC_83XX_DMA_ENGINE_INDEX);
    784	dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
    785	temp = qlcnic_ind_rd(adapter,
    786			     dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
    787
    788	if (!(temp & BIT_31)) {
    789		dev_info(dev, "%s: DMA engine is not available\n", __func__);
    790		*ret = -EIO;
    791		return 0;
    792	}
    793
    794	/* Create DMA descriptor */
    795	dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
    796			    GFP_KERNEL);
    797	if (!dma_descr) {
    798		*ret = -ENOMEM;
    799		return 0;
    800	}
    801
    802	/* dma_desc_cmd  0:15  = 0
    803	 * dma_desc_cmd 16:19  = mem->dma_desc_cmd 0:3
    804	 * dma_desc_cmd 20:23  = pci function number
    805	 * dma_desc_cmd 24:31  = mem->dma_desc_cmd 8:15
    806	 */
    807	dma_phys_addr = fw_dump->phys_addr;
    808	dma_buffer = fw_dump->dma_buffer;
    809	temp = 0;
    810	temp = mem->dma_desc_cmd & 0xff0f;
    811	temp |= (adapter->ahw->pci_func & 0xf) << 4;
    812	dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
    813	dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
    814	dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
    815	dma_descr->src_addr_high = 0;
    816
    817	/* Collect memory dump using multiple DMA operations if required */
    818	while (read_size < mem->size) {
    819		if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
    820			size = QLC_PEX_DMA_READ_SIZE;
    821		else
    822			size = mem->size - read_size;
    823
    824		dma_descr->src_addr_low = mem->addr + read_size;
    825		dma_descr->read_data_size = size;
    826
     827		/* Write DMA descriptor to MS memory */
    828		temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
    829		*ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
    830					      (u32 *)dma_descr, temp);
    831		if (*ret) {
    832			dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
    833				 mem->desc_card_addr);
    834			goto free_dma_descr;
    835		}
    836
    837		*ret = qlcnic_start_pex_dma(adapter, mem);
    838		if (*ret) {
    839			dev_info(dev, "Failed to start PEX DMA operation\n");
    840			goto free_dma_descr;
    841		}
    842
    843		memcpy(buffer, dma_buffer, size);
    844		buffer += size / 4;
    845		read_size += size;
    846	}
    847
    848free_dma_descr:
    849	kfree(dma_descr);
    850
    851	return read_size;
    852}
    853
    854static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
    855			      struct qlcnic_dump_entry *entry, __le32 *buffer)
    856{
    857	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
    858	struct device *dev = &adapter->pdev->dev;
    859	struct __mem *mem = &entry->region.mem;
    860	u32 data_size;
    861	int ret = 0;
    862
    863	if (fw_dump->use_pex_dma) {
    864		data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
    865						      &ret);
    866		if (ret)
    867			dev_info(dev,
    868				 "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
    869				 entry->hdr.mask);
    870		else
    871			return data_size;
    872	}
    873
    874	data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
    875	if (ret) {
    876		dev_info(dev,
    877			 "Failed to read memory dump using test agent method: mask[0x%x]\n",
    878			 entry->hdr.mask);
    879		return 0;
    880	} else {
    881		return data_size;
    882	}
    883}
    884
    885static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
    886			   struct qlcnic_dump_entry *entry, __le32 *buffer)
    887{
    888	entry->hdr.flags |= QLCNIC_DUMP_SKIP;
    889	return 0;
    890}
    891
    892static int qlcnic_valid_dump_entry(struct device *dev,
    893				   struct qlcnic_dump_entry *entry, u32 size)
    894{
    895	int ret = 1;
    896	if (size != entry->hdr.cap_size) {
    897		dev_err(dev,
     898			"Invalid entry, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
    899			entry->hdr.type, entry->hdr.mask, size,
    900			entry->hdr.cap_size);
    901		ret = 0;
    902	}
    903	return ret;
    904}
    905
    906static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
    907				 struct qlcnic_dump_entry *entry,
    908				 __le32 *buffer)
    909{
    910	struct __pollrdmwr *poll = &entry->region.pollrdmwr;
    911	u32 data, wait_count, poll_wait, temp;
    912
    913	poll_wait = poll->poll_wait;
    914
    915	qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
    916	wait_count = 0;
    917
    918	while (wait_count < poll_wait) {
    919		data = qlcnic_ind_rd(adapter, poll->addr1);
    920		if ((data & poll->poll_mask) != 0)
    921			break;
    922		wait_count++;
    923	}
    924
    925	if (wait_count == poll_wait) {
    926		dev_err(&adapter->pdev->dev,
    927			"Timeout exceeded in %s, aborting dump\n",
    928			__func__);
    929		return 0;
    930	}
    931
    932	data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
    933	qlcnic_ind_wr(adapter, poll->addr2, data);
    934	qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
    935	wait_count = 0;
    936
    937	while (wait_count < poll_wait) {
    938		temp = qlcnic_ind_rd(adapter, poll->addr1);
    939		if ((temp & poll->poll_mask) != 0)
    940			break;
    941		wait_count++;
    942	}
    943
    944	*buffer++ = cpu_to_le32(poll->addr2);
    945	*buffer++ = cpu_to_le32(data);
    946
    947	return 2 * sizeof(u32);
    948
    949}
    950
    951static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
    952			      struct qlcnic_dump_entry *entry, __le32 *buffer)
    953{
    954	struct __pollrd *pollrd = &entry->region.pollrd;
    955	u32 data, wait_count, poll_wait, sel_val;
    956	int i;
    957
    958	poll_wait = pollrd->poll_wait;
    959	sel_val = pollrd->sel_val;
    960
    961	for (i = 0; i < pollrd->no_ops; i++) {
    962		qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
    963		wait_count = 0;
    964		while (wait_count < poll_wait) {
    965			data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
    966			if ((data & pollrd->poll_mask) != 0)
    967				break;
    968			wait_count++;
    969		}
    970
    971		if (wait_count == poll_wait) {
    972			dev_err(&adapter->pdev->dev,
    973				"Timeout exceeded in %s, aborting dump\n",
    974				__func__);
    975			return 0;
    976		}
    977
    978		data = qlcnic_ind_rd(adapter, pollrd->read_addr);
    979		*buffer++ = cpu_to_le32(sel_val);
    980		*buffer++ = cpu_to_le32(data);
    981		sel_val += pollrd->sel_val_stride;
    982	}
    983	return pollrd->no_ops * (2 * sizeof(u32));
    984}
    985
    986static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
    987			    struct qlcnic_dump_entry *entry, __le32 *buffer)
    988{
    989	struct __mux2 *mux2 = &entry->region.mux2;
    990	u32 data;
    991	u32 t_sel_val, sel_val1, sel_val2;
    992	int i;
    993
    994	sel_val1 = mux2->sel_val1;
    995	sel_val2 = mux2->sel_val2;
    996
    997	for (i = 0; i < mux2->no_ops; i++) {
    998		qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
    999		t_sel_val = sel_val1 & mux2->sel_val_mask;
   1000		qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
   1001		data = qlcnic_ind_rd(adapter, mux2->read_addr);
   1002		*buffer++ = cpu_to_le32(t_sel_val);
   1003		*buffer++ = cpu_to_le32(data);
   1004		qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
   1005		t_sel_val = sel_val2 & mux2->sel_val_mask;
   1006		qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
   1007		data = qlcnic_ind_rd(adapter, mux2->read_addr);
   1008		*buffer++ = cpu_to_le32(t_sel_val);
   1009		*buffer++ = cpu_to_le32(data);
   1010		sel_val1 += mux2->sel_val_stride;
   1011		sel_val2 += mux2->sel_val_stride;
   1012	}
   1013
   1014	return mux2->no_ops * (4 * sizeof(u32));
   1015}
   1016
   1017static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
   1018				struct qlcnic_dump_entry *entry, __le32 *buffer)
   1019{
   1020	u32 fl_addr, size;
   1021	struct __mem *rom = &entry->region.mem;
   1022
   1023	fl_addr = rom->addr;
   1024	size = rom->size / 4;
   1025
   1026	if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
   1027					       (u8 *)buffer, size))
   1028		return rom->size;
   1029
   1030	return 0;
   1031}
   1032
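        /* Dispatch tables mapping template entry opcodes to capture handlers
         * for 82xx and 83xx adapters.
         */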
   1033static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
   1034	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
   1035	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
   1036	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
   1037	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
   1038	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
   1039	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
   1040	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
   1041	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
   1042	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
   1043	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
   1044	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
   1045	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
   1046	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
   1047	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
   1048	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
   1049	{QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
   1050	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
   1051	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
   1052	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
   1053	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
   1054};
   1055
   1056static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
   1057	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
   1058	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
   1059	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
   1060	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
   1061	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
   1062	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
   1063	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
   1064	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
   1065	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
   1066	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
   1067	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
   1068	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
   1069	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
   1070	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
   1071	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
   1072	{QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
   1073	{QLCNIC_READ_MUX2, qlcnic_read_mux2},
   1074	{QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
   1075	{QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
   1076	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
   1077	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
   1078	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
   1079	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
   1080};
   1081
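        /* Fold a 32-bit one's-complement style sum over the template; the
         * result is zero when the embedded checksum is valid.
         */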
   1082static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
   1083{
   1084	uint64_t sum = 0;
   1085	int count = temp_size / sizeof(uint32_t);
   1086	while (count-- > 0)
   1087		sum += *temp_buffer++;
   1088	while (sum >> 32)
   1089		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
   1090	return ~sum;
   1091}
   1092
   1093static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
   1094					     u8 *buffer, u32 size)
   1095{
   1096	int ret = 0;
   1097
   1098	if (qlcnic_82xx_check(adapter))
   1099		return -EIO;
   1100
   1101	if (qlcnic_83xx_lock_flash(adapter))
   1102		return -EIO;
   1103
   1104	ret = qlcnic_83xx_lockless_flash_read32(adapter,
   1105						QLC_83XX_MINIDUMP_FLASH,
   1106						buffer, size / sizeof(u32));
   1107
   1108	qlcnic_83xx_unlock_flash(adapter);
   1109
   1110	return ret;
   1111}
   1112
   1113static int
   1114qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
   1115				       struct qlcnic_cmd_args *cmd)
   1116{
   1117	struct qlcnic_83xx_dump_template_hdr tmp_hdr;
   1118	u32 size = sizeof(tmp_hdr) / sizeof(u32);
   1119	int ret = 0;
   1120
   1121	if (qlcnic_82xx_check(adapter))
   1122		return -EIO;
   1123
   1124	if (qlcnic_83xx_lock_flash(adapter))
   1125		return -EIO;
   1126
   1127	ret = qlcnic_83xx_lockless_flash_read32(adapter,
   1128						QLC_83XX_MINIDUMP_FLASH,
   1129						(u8 *)&tmp_hdr, size);
   1130
   1131	qlcnic_83xx_unlock_flash(adapter);
   1132
   1133	cmd->rsp.arg[2] = tmp_hdr.size;
   1134	cmd->rsp.arg[3] = tmp_hdr.version;
   1135
   1136	return ret;
   1137}
   1138
   1139static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
   1140					    u32 *version, u32 *temp_size,
   1141					    u8 *use_flash_temp)
   1142{
   1143	int err = 0;
   1144	struct qlcnic_cmd_args cmd;
   1145
   1146	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
   1147		return -ENOMEM;
   1148
   1149	err = qlcnic_issue_cmd(adapter, &cmd);
   1150	if (err != QLCNIC_RCODE_SUCCESS) {
   1151		if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
   1152			qlcnic_free_mbx_args(&cmd);
   1153			return -EIO;
   1154		}
   1155		*use_flash_temp = 1;
   1156	}
   1157
   1158	*temp_size = cmd.rsp.arg[2];
   1159	*version = cmd.rsp.arg[3];
   1160	qlcnic_free_mbx_args(&cmd);
   1161
   1162	if (!(*temp_size))
   1163		return -EIO;
   1164
   1165	return 0;
   1166}
   1167
   1168static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
   1169					     u32 *buffer, u32 temp_size)
   1170{
   1171	int err = 0, i;
   1172	void *tmp_addr;
   1173	__le32 *tmp_buf;
   1174	struct qlcnic_cmd_args cmd;
   1175	dma_addr_t tmp_addr_t = 0;
   1176
   1177	tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
   1178				      &tmp_addr_t, GFP_KERNEL);
   1179	if (!tmp_addr)
   1180		return -ENOMEM;
   1181
   1182	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
   1183		err = -ENOMEM;
   1184		goto free_mem;
   1185	}
   1186
   1187	cmd.req.arg[1] = LSD(tmp_addr_t);
   1188	cmd.req.arg[2] = MSD(tmp_addr_t);
   1189	cmd.req.arg[3] = temp_size;
   1190	err = qlcnic_issue_cmd(adapter, &cmd);
   1191
   1192	tmp_buf = tmp_addr;
   1193	if (err == QLCNIC_RCODE_SUCCESS) {
   1194		for (i = 0; i < temp_size / sizeof(u32); i++)
   1195			*buffer++ = __le32_to_cpu(*tmp_buf++);
   1196	}
   1197
   1198	qlcnic_free_mbx_args(&cmd);
   1199
   1200free_mem:
   1201	dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
   1202
   1203	return err;
   1204}
   1205
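        /* Fetch and validate the minidump template: query its size, retrieve
         * it over the mailbox (falling back to flash), verify the checksum,
         * cache the header fields and, if supported, allocate the PEX DMA
         * buffer.
         */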
   1206int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
   1207{
   1208	struct qlcnic_hardware_context *ahw;
   1209	struct qlcnic_fw_dump *fw_dump;
   1210	u32 version, csum, *tmp_buf;
   1211	u8 use_flash_temp = 0;
   1212	u32 temp_size = 0;
   1213	void *temp_buffer;
   1214	int err;
   1215
   1216	ahw = adapter->ahw;
   1217	fw_dump = &ahw->fw_dump;
   1218	err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
   1219					       &use_flash_temp);
   1220	if (err) {
   1221		dev_err(&adapter->pdev->dev,
   1222			"Can't get template size %d\n", err);
   1223		return -EIO;
   1224	}
   1225
   1226	fw_dump->tmpl_hdr = vzalloc(temp_size);
   1227	if (!fw_dump->tmpl_hdr)
   1228		return -ENOMEM;
   1229
   1230	tmp_buf = (u32 *)fw_dump->tmpl_hdr;
   1231	if (use_flash_temp)
   1232		goto flash_temp;
   1233
   1234	err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);
   1235
   1236	if (err) {
   1237flash_temp:
   1238		err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
   1239							temp_size);
   1240
   1241		if (err) {
   1242			dev_err(&adapter->pdev->dev,
   1243				"Failed to get minidump template header %d\n",
   1244				err);
   1245			vfree(fw_dump->tmpl_hdr);
   1246			fw_dump->tmpl_hdr = NULL;
   1247			return -EIO;
   1248		}
   1249	}
   1250
   1251	csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);
   1252
   1253	if (csum) {
   1254		dev_err(&adapter->pdev->dev,
   1255			"Template header checksum validation failed\n");
   1256		vfree(fw_dump->tmpl_hdr);
   1257		fw_dump->tmpl_hdr = NULL;
   1258		return -EIO;
   1259	}
   1260
   1261	qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
   1262
   1263	if (fw_dump->use_pex_dma) {
   1264		fw_dump->dma_buffer = NULL;
   1265		temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
   1266						 QLC_PEX_DMA_READ_SIZE,
   1267						 &fw_dump->phys_addr,
   1268						 GFP_KERNEL);
   1269		if (!temp_buffer)
   1270			fw_dump->use_pex_dma = false;
   1271		else
   1272			fw_dump->dma_buffer = temp_buffer;
   1273	}
   1274
   1275
   1276	dev_info(&adapter->pdev->dev,
   1277		 "Default minidump capture mask 0x%x\n",
   1278		 fw_dump->cap_mask);
   1279
   1280	qlcnic_enable_fw_dump_state(adapter);
   1281
   1282	return 0;
   1283}
   1284
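        /* Capture a firmware minidump: walk the template entries, skip those
         * not selected by the capture mask, dispatch each remaining entry to
         * its handler, validate the captured size and finally send a udev
         * event announcing the dump.
         */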
   1285int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
   1286{
   1287	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
   1288	const struct qlcnic_dump_operations *fw_dump_ops;
   1289	struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
   1290	u32 entry_offset, dump, no_entries, buf_offset = 0;
   1291	int i, k, ops_cnt, ops_index, dump_size = 0;
   1292	struct device *dev = &adapter->pdev->dev;
   1293	struct qlcnic_hardware_context *ahw;
   1294	struct qlcnic_dump_entry *entry;
   1295	void *tmpl_hdr;
   1296	u32 ocm_window;
   1297	__le32 *buffer;
   1298	char mesg[64];
   1299	char *msg[] = {mesg, NULL};
   1300
   1301	ahw = adapter->ahw;
   1302	tmpl_hdr = fw_dump->tmpl_hdr;
   1303
   1304	/* Return if we don't have firmware dump template header */
   1305	if (!tmpl_hdr)
   1306		return -EIO;
   1307
   1308	if (!qlcnic_check_fw_dump_state(adapter)) {
   1309		dev_info(&adapter->pdev->dev, "Dump not enabled\n");
   1310		return -EIO;
   1311	}
   1312
   1313	if (fw_dump->clr) {
   1314		dev_info(&adapter->pdev->dev,
   1315			 "Previous dump not cleared, not capturing dump\n");
   1316		return -EIO;
   1317	}
   1318
   1319	netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
   1320	/* Calculate the size for dump data area only */
   1321	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
   1322		if (i & fw_dump->cap_mask)
   1323			dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);
   1324
   1325	if (!dump_size)
   1326		return -EIO;
   1327
   1328	fw_dump->data = vzalloc(dump_size);
   1329	if (!fw_dump->data)
   1330		return -ENOMEM;
   1331
   1332	buffer = fw_dump->data;
   1333	fw_dump->size = dump_size;
   1334	no_entries = fw_dump->num_entries;
   1335	entry_offset = fw_dump->offset;
   1336	qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
   1337	qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
   1338
   1339	if (qlcnic_82xx_check(adapter)) {
   1340		ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
   1341		fw_dump_ops = qlcnic_fw_dump_ops;
   1342	} else {
   1343		hdr_83xx = tmpl_hdr;
   1344		ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
   1345		fw_dump_ops = qlcnic_83xx_fw_dump_ops;
   1346		ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
   1347		hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
   1348		hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
   1349	}
   1350
   1351	for (i = 0; i < no_entries; i++) {
   1352		entry = tmpl_hdr + entry_offset;
   1353		if (!(entry->hdr.mask & fw_dump->cap_mask)) {
   1354			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
   1355			entry_offset += entry->hdr.offset;
   1356			continue;
   1357		}
   1358
   1359		/* Find the handler for this entry */
   1360		ops_index = 0;
   1361		while (ops_index < ops_cnt) {
   1362			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
   1363				break;
   1364			ops_index++;
   1365		}
   1366
   1367		if (ops_index == ops_cnt) {
   1368			dev_info(dev, "Skipping unknown entry opcode %d\n",
   1369				 entry->hdr.type);
   1370			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
   1371			entry_offset += entry->hdr.offset;
   1372			continue;
   1373		}
   1374
   1375		/* Collect dump for this entry */
   1376		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
   1377		if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
   1378			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
   1379			entry_offset += entry->hdr.offset;
   1380			continue;
   1381		}
   1382
   1383		buf_offset += entry->hdr.cap_size;
   1384		entry_offset += entry->hdr.offset;
   1385		buffer = fw_dump->data + buf_offset;
   1386		cond_resched();
   1387	}
   1388
   1389	fw_dump->clr = 1;
   1390	snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
   1391	netdev_info(adapter->netdev,
   1392		    "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
   1393		    fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
   1394		    fw_dump->tmpl_hdr);
   1395	/* Send a udev event to notify availability of FW dump */
   1396	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
   1397
   1398	return 0;
   1399}
   1400
   1401static inline bool
   1402qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
   1403{
    1404	/* For special adapters (with the 0x8830 device ID), where the iSCSI
    1405	 * firmware dump needs to be captured as part of the regular firmware
    1406	 * dump collection process, the firmware exports its capability through
    1407	 * the capability registers
   1408	 */
   1409	return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
   1410		(adapter->ahw->extra_capability[0] &
   1411		 QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
   1412}
   1413
   1414void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
   1415{
   1416	u32 prev_version, current_version;
   1417	struct qlcnic_hardware_context *ahw = adapter->ahw;
   1418	struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
   1419	struct pci_dev *pdev = adapter->pdev;
   1420	bool extended = false;
   1421	int ret;
   1422
   1423	prev_version = adapter->fw_version;
   1424	current_version = qlcnic_83xx_get_fw_version(adapter);
   1425
   1426	if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
   1427		vfree(fw_dump->tmpl_hdr);
   1428		fw_dump->tmpl_hdr = NULL;
   1429
   1430		if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
   1431			extended = !qlcnic_83xx_extend_md_capab(adapter);
   1432
   1433		ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
   1434		if (ret)
   1435			return;
   1436
   1437		dev_info(&pdev->dev, "Supports FW dump capability\n");
   1438
   1439		/* Once we have minidump template with extended iSCSI dump
   1440		 * capability, update the minidump capture mask to 0x1f as
   1441		 * per FW requirement
   1442		 */
   1443		if (extended) {
   1444			struct qlcnic_83xx_dump_template_hdr *hdr;
   1445
   1446			hdr = fw_dump->tmpl_hdr;
   1447			if (!hdr)
   1448				return;
   1449			hdr->drv_cap_mask = 0x1f;
   1450			fw_dump->cap_mask = 0x1f;
   1451			dev_info(&pdev->dev,
   1452				 "Extended iSCSI dump capability and updated capture mask to 0x1f\n");
   1453		}
   1454	}
   1455}