cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vxge-config.c (136459B)


      1/******************************************************************************
      2 * This software may be used and distributed according to the terms of
      3 * the GNU General Public License (GPL), incorporated herein by reference.
      4 * Drivers based on or derived from this code fall under the GPL and must
      5 * retain the authorship, copyright and license notice.  This file is not
      6 * a complete program and may only be used when the entire operating
      7 * system is licensed under the GPL.
      8 * See the file COPYING in this distribution for more information.
      9 *
     10 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
     11 *                Virtualized Server Adapter.
     12 * Copyright(c) 2002-2010 Exar Corp.
     13 ******************************************************************************/
     14#include <linux/vmalloc.h>
     15#include <linux/etherdevice.h>
     16#include <linux/io-64-nonatomic-lo-hi.h>
     17#include <linux/pci.h>
     18#include <linux/slab.h>
     19
     20#include "vxge-traffic.h"
     21#include "vxge-config.h"
     22#include "vxge-main.h"
     23
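/* Read one vpath statistic word at 'offset' via __vxge_hw_vpath_stats_access()
 * and return from the calling function on error; 'vpath', 'status' and 'val64'
 * are expected to exist in the caller's scope.
 */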
     24#define VXGE_HW_VPATH_STATS_PIO_READ(offset) {				\
     25	status = __vxge_hw_vpath_stats_access(vpath,			\
     26					      VXGE_HW_STATS_OP_READ,	\
     27					      offset,			\
     28					      &val64);			\
     29	if (status != VXGE_HW_OK)					\
     30		return status;						\
     31}
     32
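/* vxge_hw_vpath_set_zero_rx_frm_len - Clear the RTS maximum Rx frame length
 * field in rxmac_vcfg0 for this vpath; the trailing readq flushes the posted
 * write.
 */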
     33static void
     34vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
     35{
     36	u64 val64;
     37
     38	val64 = readq(&vp_reg->rxmac_vcfg0);
     39	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
     40	writeq(val64, &vp_reg->rxmac_vcfg0);
     41	val64 = readq(&vp_reg->rxmac_vcfg0);
     42}
     43
     44/*
     45 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
     46 */
     47int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
     48{
     49	struct vxge_hw_vpath_reg __iomem *vp_reg;
     50	struct __vxge_hw_virtualpath *vpath;
     51	u64 val64, rxd_count, rxd_spat;
     52	int count = 0, total_count = 0;
     53
     54	vpath = &hldev->virtual_paths[vp_id];
     55	vp_reg = vpath->vp_reg;
     56
     57	vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
     58
     59	/* Check that the ring controller for this vpath has enough free RxDs
     60	 * to send frames to the host.  This is done by reading the
     61	 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
     62	 * RXD_SPAT value for the vpath.
     63	 */
     64	val64 = readq(&vp_reg->prc_cfg6);
     65	rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
     66	/* Use a factor of 2 when comparing rxd_count against rxd_spat for some
     67	 * leg room.
     68	 */
     69	rxd_spat *= 2;
     70
     71	do {
     72		mdelay(1);
     73
     74		rxd_count = readq(&vp_reg->prc_rxd_doorbell);
     75
     76		/* Check that the ring controller for this vpath does
     77		 * not have any frame in its pipeline.
     78		 */
     79		val64 = readq(&vp_reg->frm_in_progress_cnt);
     80		if ((rxd_count <= rxd_spat) || (val64 > 0))
     81			count = 0;
     82		else
     83			count++;
     84		total_count++;
     85	} while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
     86			(total_count < VXGE_HW_MAX_POLLING_COUNT));
     87
     88	if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
     89		printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
     90			__func__);
     91
     92	return total_count;
     93}
     94
     95/* vxge_hw_device_wait_receive_idle - This function waits until all frames
     96 * stored in the frame buffer for each vpath assigned to the given
     97 * function (hldev) have been sent to the host.
     98 */
     99void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
    100{
    101	int i, total_count = 0;
    102
    103	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
    104		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
    105			continue;
    106
    107		total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
    108		if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
    109			break;
    110	}
    111}
    112
    113/*
    114 * __vxge_hw_device_register_poll
     115 * Poll the given register for up to the specified amount of time.
     116 * Returns VXGE_HW_OK once the masked bits read zero, VXGE_HW_FAIL on timeout.
    117 */
    118static enum vxge_hw_status
    119__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
    120{
    121	u64 val64;
    122	u32 i = 0;
    123
    124	udelay(10);
    125
    126	do {
    127		val64 = readq(reg);
    128		if (!(val64 & mask))
    129			return VXGE_HW_OK;
    130		udelay(100);
    131	} while (++i <= 9);
    132
    133	i = 0;
    134	do {
    135		val64 = readq(reg);
    136		if (!(val64 & mask))
    137			return VXGE_HW_OK;
    138		mdelay(1);
    139	} while (++i <= max_millis);
    140
    141	return VXGE_HW_FAIL;
    142}
    143
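/* __vxge_hw_pio_mem_write64 - Write a 64-bit value as two ordered 32-bit PIO
 * writes separated by write barriers, then poll until the hardware clears the
 * given mask or the timeout expires.
 */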
    144static inline enum vxge_hw_status
    145__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
    146			  u64 mask, u32 max_millis)
    147{
    148	__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
    149	wmb();
    150	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
    151	wmb();
    152
    153	return __vxge_hw_device_register_poll(addr, mask, max_millis);
    154}
    155
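/* vxge_hw_vpath_fw_api - Issue a firmware API call through the RTS access
 * steering registers: write data0/data1, program the control word with the
 * action, memo, offset and strobe, then poll (with retries) until the strobe
 * clears and the status bit reports success.
 */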
    156static enum vxge_hw_status
    157vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
    158		     u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
    159		     u64 *steer_ctrl)
    160{
    161	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
    162	enum vxge_hw_status status;
    163	u64 val64;
    164	u32 retry = 0, max_retry = 3;
    165
    166	spin_lock(&vpath->lock);
    167	if (!vpath->vp_open) {
    168		spin_unlock(&vpath->lock);
    169		max_retry = 100;
    170	}
    171
    172	writeq(*data0, &vp_reg->rts_access_steer_data0);
    173	writeq(*data1, &vp_reg->rts_access_steer_data1);
    174	wmb();
    175
    176	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
    177		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
    178		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
    179		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
    180		*steer_ctrl;
    181
    182	status = __vxge_hw_pio_mem_write64(val64,
    183					   &vp_reg->rts_access_steer_ctrl,
    184					   VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
    185					   VXGE_HW_DEF_DEVICE_POLL_MILLIS);
    186
    187	/* The __vxge_hw_device_register_poll can udelay for a significant
     188	 * amount of time, blocking other processes from the CPU.  If it delays
     189	 * for ~5secs, an NMI error can occur.  A way around this is to give up
     190	 * the processor via msleep, but this is not allowed while under lock.
     191	 * So, only allow it to sleep for ~4secs if open.  Otherwise, delay for
     192	 * 1sec and sleep for 10ms until the firmware operation has completed
     193	 * or timed out.
    194	 */
    195	while ((status != VXGE_HW_OK) && retry++ < max_retry) {
    196		if (!vpath->vp_open)
    197			msleep(20);
    198		status = __vxge_hw_device_register_poll(
    199					&vp_reg->rts_access_steer_ctrl,
    200					VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
    201					VXGE_HW_DEF_DEVICE_POLL_MILLIS);
    202	}
    203
    204	if (status != VXGE_HW_OK)
    205		goto out;
    206
    207	val64 = readq(&vp_reg->rts_access_steer_ctrl);
    208	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
    209		*data0 = readq(&vp_reg->rts_access_steer_data0);
    210		*data1 = readq(&vp_reg->rts_access_steer_data1);
    211		*steer_ctrl = val64;
    212	} else
    213		status = VXGE_HW_FAIL;
    214
    215out:
    216	if (vpath->vp_open)
    217		spin_unlock(&vpath->lock);
    218	return status;
    219}
    220
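/* vxge_hw_upgrade_read_version - Read the firmware major, minor and build
 * numbers through the FW upgrade API.
 */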
    221enum vxge_hw_status
    222vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
    223			     u32 *minor, u32 *build)
    224{
    225	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
    226	struct __vxge_hw_virtualpath *vpath;
    227	enum vxge_hw_status status;
    228
    229	vpath = &hldev->virtual_paths[hldev->first_vp_id];
    230
    231	status = vxge_hw_vpath_fw_api(vpath,
    232				      VXGE_HW_FW_UPGRADE_ACTION,
    233				      VXGE_HW_FW_UPGRADE_MEMO,
    234				      VXGE_HW_FW_UPGRADE_OFFSET_READ,
    235				      &data0, &data1, &steer_ctrl);
    236	if (status != VXGE_HW_OK)
    237		return status;
    238
    239	*major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
    240	*minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
    241	*build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
    242
    243	return status;
    244}
    245
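/* vxge_hw_flash_fw - Commit a previously transferred firmware image to flash
 * via the FW upgrade API and check the returned action code.
 */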
    246enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
    247{
    248	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
    249	struct __vxge_hw_virtualpath *vpath;
    250	enum vxge_hw_status status;
    251	u32 ret;
    252
    253	vpath = &hldev->virtual_paths[hldev->first_vp_id];
    254
    255	status = vxge_hw_vpath_fw_api(vpath,
    256				      VXGE_HW_FW_UPGRADE_ACTION,
    257				      VXGE_HW_FW_UPGRADE_MEMO,
    258				      VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
    259				      &data0, &data1, &steer_ctrl);
    260	if (status != VXGE_HW_OK) {
    261		vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
    262		goto exit;
    263	}
    264
    265	ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
    266	if (ret != 1) {
    267		vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
    268				__func__, ret);
    269		status = VXGE_HW_FAIL;
    270	}
    271
    272exit:
    273	return status;
    274}
    275
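/* vxge_update_fw_image - Stream a firmware image to the adapter: issue the
 * upgrade start command, transfer the image 16 bytes at a time and handle the
 * skip/done/error codes the firmware returns for each block.
 */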
    276enum vxge_hw_status
    277vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
    278{
    279	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
    280	struct __vxge_hw_virtualpath *vpath;
    281	enum vxge_hw_status status;
    282	int ret_code, sec_code;
    283
    284	vpath = &hldev->virtual_paths[hldev->first_vp_id];
    285
    286	/* send upgrade start command */
    287	status = vxge_hw_vpath_fw_api(vpath,
    288				      VXGE_HW_FW_UPGRADE_ACTION,
    289				      VXGE_HW_FW_UPGRADE_MEMO,
    290				      VXGE_HW_FW_UPGRADE_OFFSET_START,
    291				      &data0, &data1, &steer_ctrl);
    292	if (status != VXGE_HW_OK) {
    293		vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
    294				__func__);
    295		return status;
    296	}
    297
    298	/* Transfer fw image to adapter 16 bytes at a time */
    299	for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
    300		steer_ctrl = 0;
    301
     302		/* The next 128 bits of fwdata to be loaded onto the adapter */
    303		data0 = *((u64 *)fwdata);
    304		data1 = *((u64 *)fwdata + 1);
    305
    306		status = vxge_hw_vpath_fw_api(vpath,
    307					      VXGE_HW_FW_UPGRADE_ACTION,
    308					      VXGE_HW_FW_UPGRADE_MEMO,
    309					      VXGE_HW_FW_UPGRADE_OFFSET_SEND,
    310					      &data0, &data1, &steer_ctrl);
    311		if (status != VXGE_HW_OK) {
    312			vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
    313					__func__);
    314			goto out;
    315		}
    316
    317		ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
    318		switch (ret_code) {
    319		case VXGE_HW_FW_UPGRADE_OK:
    320			/* All OK, send next 16 bytes. */
    321			break;
    322		case VXGE_FW_UPGRADE_BYTES2SKIP:
    323			/* skip bytes in the stream */
    324			fwdata += (data0 >> 8) & 0xFFFFFFFF;
    325			break;
    326		case VXGE_HW_FW_UPGRADE_DONE:
    327			goto out;
    328		case VXGE_HW_FW_UPGRADE_ERR:
    329			sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
    330			switch (sec_code) {
    331			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
    332			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
    333				printk(KERN_ERR
    334				       "corrupted data from .ncf file\n");
    335				break;
    336			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
    337			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
    338			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
    339			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
    340			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
    341				printk(KERN_ERR "invalid .ncf file\n");
    342				break;
    343			case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
    344				printk(KERN_ERR "buffer overflow\n");
    345				break;
    346			case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
    347				printk(KERN_ERR "failed to flash the image\n");
    348				break;
    349			case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
    350				printk(KERN_ERR
    351				       "generic error. Unknown error type\n");
    352				break;
    353			default:
    354				printk(KERN_ERR "Unknown error of type %d\n",
    355				       sec_code);
    356				break;
    357			}
    358			status = VXGE_HW_FAIL;
    359			goto out;
    360		default:
    361			printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
    362			status = VXGE_HW_FAIL;
    363			goto out;
    364		}
    365		/* point to next 16 bytes */
    366		fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
    367	}
    368out:
    369	return status;
    370}
    371
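/* vxge_hw_vpath_eprom_img_ver_get - Read the validity flag, index, type and
 * revision of each expansion ROM image on the adapter.
 */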
    372enum vxge_hw_status
    373vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
    374				struct eprom_image *img)
    375{
    376	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
    377	struct __vxge_hw_virtualpath *vpath;
    378	enum vxge_hw_status status;
    379	int i;
    380
    381	vpath = &hldev->virtual_paths[hldev->first_vp_id];
    382
    383	for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
    384		data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
    385		data1 = steer_ctrl = 0;
    386
    387		status = vxge_hw_vpath_fw_api(vpath,
    388			VXGE_HW_FW_API_GET_EPROM_REV,
    389			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
    390			0, &data0, &data1, &steer_ctrl);
    391		if (status != VXGE_HW_OK)
    392			break;
    393
    394		img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
    395		img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
    396		img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
    397		img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
    398	}
    399
    400	return status;
    401}
    402
    403/*
    404 * __vxge_hw_channel_free - Free memory allocated for channel
     405 * This function frees the channel structure and the various arrays
     406 * allocated within it
    407 */
    408static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
    409{
    410	kfree(channel->work_arr);
    411	kfree(channel->free_arr);
    412	kfree(channel->reserve_arr);
    413	kfree(channel->orig_arr);
    414	kfree(channel);
    415}
    416
    417/*
    418 * __vxge_hw_channel_initialize - Initialize a channel
    419 * This function initializes a channel by properly setting the
    420 * various references
    421 */
    422static enum vxge_hw_status
    423__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
    424{
    425	u32 i;
    426	struct __vxge_hw_virtualpath *vpath;
    427
    428	vpath = channel->vph->vpath;
    429
    430	if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
    431		for (i = 0; i < channel->length; i++)
    432			channel->orig_arr[i] = channel->reserve_arr[i];
    433	}
    434
    435	switch (channel->type) {
    436	case VXGE_HW_CHANNEL_TYPE_FIFO:
    437		vpath->fifoh = (struct __vxge_hw_fifo *)channel;
    438		channel->stats = &((struct __vxge_hw_fifo *)
    439				channel)->stats->common_stats;
    440		break;
    441	case VXGE_HW_CHANNEL_TYPE_RING:
    442		vpath->ringh = (struct __vxge_hw_ring *)channel;
    443		channel->stats = &((struct __vxge_hw_ring *)
    444				channel)->stats->common_stats;
    445		break;
    446	default:
    447		break;
    448	}
    449
    450	return VXGE_HW_OK;
    451}
    452
    453/*
    454 * __vxge_hw_channel_reset - Resets a channel
    455 * This function resets a channel by properly setting the various references
    456 */
    457static enum vxge_hw_status
    458__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
    459{
    460	u32 i;
    461
    462	for (i = 0; i < channel->length; i++) {
    463		if (channel->reserve_arr != NULL)
    464			channel->reserve_arr[i] = channel->orig_arr[i];
    465		if (channel->free_arr != NULL)
    466			channel->free_arr[i] = NULL;
    467		if (channel->work_arr != NULL)
    468			channel->work_arr[i] = NULL;
    469	}
    470	channel->free_ptr = channel->length;
    471	channel->reserve_ptr = channel->length;
    472	channel->reserve_top = 0;
    473	channel->post_index = 0;
    474	channel->compl_index = 0;
    475
    476	return VXGE_HW_OK;
    477}
    478
    479/*
    480 * __vxge_hw_device_pci_e_init
    481 * Initialize certain PCI/PCI-X configuration registers
    482 * with recommended values. Save config space for future hw resets.
    483 */
    484static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
    485{
    486	u16 cmd = 0;
    487
     488	/* Set the PErr Response bit and SERR in the PCI command register. */
    489	pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
    490	cmd |= 0x140;
    491	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
    492
    493	pci_save_state(hldev->pdev);
    494}
    495
    496/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
    497 * in progress
     498 * This routine polls until the vpath reset-in-progress register reads zero
    499 */
    500static enum vxge_hw_status
    501__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
    502{
    503	enum vxge_hw_status status;
    504	status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
    505			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
    506			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
    507	return status;
    508}
    509
    510/*
     511 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
     512 * Set the swapper bits appropriately for the legacy section.
    513 */
    514static enum vxge_hw_status
    515__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
    516{
    517	u64 val64;
    518	enum vxge_hw_status status = VXGE_HW_OK;
    519
    520	val64 = readq(&legacy_reg->toc_swapper_fb);
    521
    522	wmb();
    523
    524	switch (val64) {
    525	case VXGE_HW_SWAPPER_INITIAL_VALUE:
    526		return status;
    527
    528	case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
    529		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
    530			&legacy_reg->pifm_rd_swap_en);
    531		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
    532			&legacy_reg->pifm_rd_flip_en);
    533		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
    534			&legacy_reg->pifm_wr_swap_en);
    535		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
    536			&legacy_reg->pifm_wr_flip_en);
    537		break;
    538
    539	case VXGE_HW_SWAPPER_BYTE_SWAPPED:
    540		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
    541			&legacy_reg->pifm_rd_swap_en);
    542		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
    543			&legacy_reg->pifm_wr_swap_en);
    544		break;
    545
    546	case VXGE_HW_SWAPPER_BIT_FLIPPED:
    547		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
    548			&legacy_reg->pifm_rd_flip_en);
    549		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
    550			&legacy_reg->pifm_wr_flip_en);
    551		break;
    552	}
    553
    554	wmb();
    555
    556	val64 = readq(&legacy_reg->toc_swapper_fb);
    557
    558	if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
    559		status = VXGE_HW_ERR_SWAPPER_CTRL;
    560
    561	return status;
    562}
    563
    564/*
    565 * __vxge_hw_device_toc_get
    566 * This routine sets the swapper and reads the toc pointer and returns the
    567 * memory mapped address of the toc
    568 */
    569static struct vxge_hw_toc_reg __iomem *
    570__vxge_hw_device_toc_get(void __iomem *bar0)
    571{
    572	u64 val64;
    573	struct vxge_hw_toc_reg __iomem *toc = NULL;
    574	enum vxge_hw_status status;
    575
    576	struct vxge_hw_legacy_reg __iomem *legacy_reg =
    577		(struct vxge_hw_legacy_reg __iomem *)bar0;
    578
    579	status = __vxge_hw_legacy_swapper_set(legacy_reg);
    580	if (status != VXGE_HW_OK)
    581		goto exit;
    582
    583	val64 =	readq(&legacy_reg->toc_first_pointer);
    584	toc = bar0 + val64;
    585exit:
    586	return toc;
    587}
    588
    589/*
    590 * __vxge_hw_device_reg_addr_get
    591 * This routine sets the swapper and reads the toc pointer and initializes the
     592 * register location pointers in the device object. It then waits for the
     593 * vpath reset-in-progress register to clear before returning.
    594 */
    595static enum vxge_hw_status
    596__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
    597{
    598	u64 val64;
    599	u32 i;
    600	enum vxge_hw_status status = VXGE_HW_OK;
    601
    602	hldev->legacy_reg = hldev->bar0;
    603
    604	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
    605	if (hldev->toc_reg  == NULL) {
    606		status = VXGE_HW_FAIL;
    607		goto exit;
    608	}
    609
    610	val64 = readq(&hldev->toc_reg->toc_common_pointer);
    611	hldev->common_reg = hldev->bar0 + val64;
    612
    613	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
    614	hldev->mrpcim_reg = hldev->bar0 + val64;
    615
    616	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
    617		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
    618		hldev->srpcim_reg[i] = hldev->bar0 + val64;
    619	}
    620
    621	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
    622		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
    623		hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
    624	}
    625
    626	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
    627		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
    628		hldev->vpath_reg[i] = hldev->bar0 + val64;
    629	}
    630
    631	val64 = readq(&hldev->toc_reg->toc_kdfc);
    632
    633	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
    634	case 0:
     635		hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64);
    636		break;
    637	default:
    638		break;
    639	}
    640
    641	status = __vxge_hw_device_vpath_reset_in_prog_check(
    642			(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
    643exit:
    644	return status;
    645}
    646
    647/*
    648 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
    649 * This routine returns the Access Rights of the driver
    650 */
    651static u32
    652__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
    653{
    654	u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
    655
    656	switch (host_type) {
    657	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
    658		if (func_id == 0) {
    659			access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
    660					VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
    661		}
    662		break;
    663	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
    664		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
    665				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
    666		break;
    667	case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
    668		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
    669				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
    670		break;
    671	case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
    672	case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
    673	case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
    674		break;
    675	case VXGE_HW_SR_VH_FUNCTION0:
    676	case VXGE_HW_VH_NORMAL_FUNCTION:
    677		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
    678		break;
    679	}
    680
    681	return access_rights;
    682}
    683/*
    684 * __vxge_hw_device_is_privilaged
     685 * This routine checks whether the device function is privileged or not
    686 */
    687
    688enum vxge_hw_status
    689__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
    690{
    691	if (__vxge_hw_device_access_rights_get(host_type,
    692		func_id) &
    693		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
    694		return VXGE_HW_OK;
    695	else
    696		return VXGE_HW_ERR_PRIVILEGED_OPERATION;
    697}
    698
    699/*
    700 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
    701 * Returns the function number of the vpath.
    702 */
    703static u32
    704__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
    705{
    706	u64 val64;
    707
    708	val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
    709
    710	return
    711	 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
    712}
    713
    714/*
    715 * __vxge_hw_device_host_info_get
    716 * This routine returns the host type assignments
    717 */
    718static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
    719{
    720	u64 val64;
    721	u32 i;
    722
    723	val64 = readq(&hldev->common_reg->host_type_assignments);
    724
    725	hldev->host_type =
    726	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
    727
    728	hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
    729
    730	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
    731		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
    732			continue;
    733
    734		hldev->func_id =
    735			__vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
    736
    737		hldev->access_rights = __vxge_hw_device_access_rights_get(
    738			hldev->host_type, hldev->func_id);
    739
    740		hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
    741		hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
    742
    743		hldev->first_vp_id = i;
    744		break;
    745	}
    746}
    747
    748/*
    749 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
    750 * link width and signalling rate.
    751 */
    752static enum vxge_hw_status
    753__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
    754{
    755	struct pci_dev *dev = hldev->pdev;
    756	u16 lnk;
    757
    758	/* Get the negotiated link width and speed from PCI config space */
    759	pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
    760
    761	if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
    762		return VXGE_HW_ERR_INVALID_PCI_INFO;
    763
    764	switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
    765	case PCIE_LNK_WIDTH_RESRV:
    766	case PCIE_LNK_X1:
    767	case PCIE_LNK_X2:
    768	case PCIE_LNK_X4:
    769	case PCIE_LNK_X8:
    770		break;
    771	default:
    772		return VXGE_HW_ERR_INVALID_PCI_INFO;
    773	}
    774
    775	return VXGE_HW_OK;
    776}
    777
    778/*
    779 * __vxge_hw_device_initialize
    780 * Initialize Titan-V hardware.
    781 */
    782static enum vxge_hw_status
    783__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
    784{
    785	enum vxge_hw_status status = VXGE_HW_OK;
    786
    787	if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
    788				hldev->func_id)) {
    789		/* Validate the pci-e link width and speed */
    790		status = __vxge_hw_verify_pci_e_info(hldev);
    791		if (status != VXGE_HW_OK)
    792			goto exit;
    793	}
    794
    795exit:
    796	return status;
    797}
    798
    799/*
    800 * __vxge_hw_vpath_fw_ver_get - Get the fw version
    801 * Returns FW Version
    802 */
    803static enum vxge_hw_status
    804__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
    805			   struct vxge_hw_device_hw_info *hw_info)
    806{
    807	struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
    808	struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
    809	struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
    810	struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
    811	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
    812	enum vxge_hw_status status;
    813
    814	status = vxge_hw_vpath_fw_api(vpath,
    815			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
    816			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
    817			0, &data0, &data1, &steer_ctrl);
    818	if (status != VXGE_HW_OK)
    819		goto exit;
    820
    821	fw_date->day =
    822	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
    823	fw_date->month =
    824	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
    825	fw_date->year =
    826	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
    827
    828	snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
    829		 fw_date->month, fw_date->day, fw_date->year);
    830
    831	fw_version->major =
    832	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
    833	fw_version->minor =
    834	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
    835	fw_version->build =
    836	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
    837
    838	snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
    839		 fw_version->major, fw_version->minor, fw_version->build);
    840
    841	flash_date->day =
    842	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
    843	flash_date->month =
    844	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
    845	flash_date->year =
    846	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
    847
    848	snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
    849		 flash_date->month, flash_date->day, flash_date->year);
    850
    851	flash_version->major =
    852	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
    853	flash_version->minor =
    854	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
    855	flash_version->build =
    856	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
    857
    858	snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
    859		 flash_version->major, flash_version->minor,
    860		 flash_version->build);
    861
    862exit:
    863	return status;
    864}
    865
    866/*
    867 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
    868 * part number and product description.
    869 */
    870static enum vxge_hw_status
    871__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
    872			      struct vxge_hw_device_hw_info *hw_info)
    873{
    874	__be64 *serial_number = (void *)hw_info->serial_number;
    875	__be64 *product_desc = (void *)hw_info->product_desc;
    876	__be64 *part_number = (void *)hw_info->part_number;
    877	enum vxge_hw_status status;
    878	u64 data0, data1 = 0, steer_ctrl = 0;
    879	u32 i, j = 0;
    880
    881	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
    882
    883	status = vxge_hw_vpath_fw_api(vpath,
    884			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
    885			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
    886			0, &data0, &data1, &steer_ctrl);
    887	if (status != VXGE_HW_OK)
    888		return status;
    889
    890	serial_number[0] = cpu_to_be64(data0);
    891	serial_number[1] = cpu_to_be64(data1);
    892
    893	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
    894	data1 = steer_ctrl = 0;
    895
    896	status = vxge_hw_vpath_fw_api(vpath,
    897			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
    898			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
    899			0, &data0, &data1, &steer_ctrl);
    900	if (status != VXGE_HW_OK)
    901		return status;
    902
    903	part_number[0] = cpu_to_be64(data0);
    904	part_number[1] = cpu_to_be64(data1);
    905
    906	for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
    907	     i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
    908		data0 = i;
    909		data1 = steer_ctrl = 0;
    910
    911		status = vxge_hw_vpath_fw_api(vpath,
    912			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
    913			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
    914			0, &data0, &data1, &steer_ctrl);
    915		if (status != VXGE_HW_OK)
    916			return status;
    917
    918		product_desc[j++] = cpu_to_be64(data0);
    919		product_desc[j++] = cpu_to_be64(data1);
    920	}
    921
    922	return status;
    923}
    924
    925/*
    926 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
    927 * Returns pci function mode
    928 */
    929static enum vxge_hw_status
    930__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
    931				  struct vxge_hw_device_hw_info *hw_info)
    932{
    933	u64 data0, data1 = 0, steer_ctrl = 0;
    934	enum vxge_hw_status status;
    935
    936	data0 = 0;
    937
    938	status = vxge_hw_vpath_fw_api(vpath,
    939			VXGE_HW_FW_API_GET_FUNC_MODE,
    940			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
    941			0, &data0, &data1, &steer_ctrl);
    942	if (status != VXGE_HW_OK)
    943		return status;
    944
    945	hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
    946	return status;
    947}
    948
    949/*
    950 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
    951 *               from MAC address table.
    952 */
    953static enum vxge_hw_status
    954__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
    955			 u8 *macaddr, u8 *macaddr_mask)
    956{
    957	u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
    958	    data0 = 0, data1 = 0, steer_ctrl = 0;
    959	enum vxge_hw_status status;
    960	int i;
    961
    962	do {
    963		status = vxge_hw_vpath_fw_api(vpath, action,
    964			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
    965			0, &data0, &data1, &steer_ctrl);
    966		if (status != VXGE_HW_OK)
    967			goto exit;
    968
    969		data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
    970		data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
    971									data1);
    972
    973		for (i = ETH_ALEN; i > 0; i--) {
    974			macaddr[i - 1] = (u8) (data0 & 0xFF);
    975			data0 >>= 8;
    976
    977			macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
    978			data1 >>= 8;
    979		}
    980
    981		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
    982		data0 = 0, data1 = 0, steer_ctrl = 0;
    983
    984	} while (!is_valid_ether_addr(macaddr));
    985exit:
    986	return status;
    987}
    988
    989/**
    990 * vxge_hw_device_hw_info_get - Get the hw information
     991 * @bar0: Memory-mapped BAR0 of the adapter
     992 * @hw_info: Buffer in which the discovered hardware information is returned
    993 *
    994 * Returns the vpath mask that has the bits set for each vpath allocated
    995 * for the driver, FW version information, and the first mac address for
    996 * each vpath
    997 */
    998enum vxge_hw_status
    999vxge_hw_device_hw_info_get(void __iomem *bar0,
   1000			   struct vxge_hw_device_hw_info *hw_info)
   1001{
   1002	u32 i;
   1003	u64 val64;
   1004	struct vxge_hw_toc_reg __iomem *toc;
   1005	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
   1006	struct vxge_hw_common_reg __iomem *common_reg;
   1007	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
   1008	enum vxge_hw_status status;
   1009	struct __vxge_hw_virtualpath vpath;
   1010
   1011	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
   1012
   1013	toc = __vxge_hw_device_toc_get(bar0);
   1014	if (toc == NULL) {
   1015		status = VXGE_HW_ERR_CRITICAL;
   1016		goto exit;
   1017	}
   1018
   1019	val64 = readq(&toc->toc_common_pointer);
   1020	common_reg = bar0 + val64;
   1021
   1022	status = __vxge_hw_device_vpath_reset_in_prog_check(
   1023		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
   1024	if (status != VXGE_HW_OK)
   1025		goto exit;
   1026
   1027	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
   1028
   1029	val64 = readq(&common_reg->host_type_assignments);
   1030
   1031	hw_info->host_type =
   1032	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
   1033
   1034	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
   1035		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
   1036			continue;
   1037
   1038		val64 = readq(&toc->toc_vpmgmt_pointer[i]);
   1039
   1040		vpmgmt_reg = bar0 + val64;
   1041
   1042		hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
   1043		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
   1044			hw_info->func_id) &
   1045			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
   1046
   1047			val64 = readq(&toc->toc_mrpcim_pointer);
   1048
   1049			mrpcim_reg = bar0 + val64;
   1050
   1051			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
   1052			wmb();
   1053		}
   1054
   1055		val64 = readq(&toc->toc_vpath_pointer[i]);
   1056
   1057		spin_lock_init(&vpath.lock);
   1058		vpath.vp_reg = bar0 + val64;
   1059		vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
   1060
   1061		status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
   1062		if (status != VXGE_HW_OK)
   1063			goto exit;
   1064
   1065		status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
   1066		if (status != VXGE_HW_OK)
   1067			goto exit;
   1068
   1069		status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
   1070		if (status != VXGE_HW_OK)
   1071			goto exit;
   1072
   1073		break;
   1074	}
   1075
   1076	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
   1077		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
   1078			continue;
   1079
   1080		val64 = readq(&toc->toc_vpath_pointer[i]);
   1081		vpath.vp_reg = bar0 + val64;
   1082		vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
   1083
   1084		status =  __vxge_hw_vpath_addr_get(&vpath,
   1085				hw_info->mac_addrs[i],
   1086				hw_info->mac_addr_masks[i]);
   1087		if (status != VXGE_HW_OK)
   1088			goto exit;
   1089	}
   1090exit:
   1091	return status;
   1092}
   1093
   1094/*
   1095 * __vxge_hw_blockpool_destroy - Deallocates the block pool
   1096 */
   1097static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
   1098{
   1099	struct __vxge_hw_device *hldev;
   1100	struct list_head *p, *n;
   1101
   1102	if (!blockpool)
   1103		return;
   1104
   1105	hldev = blockpool->hldev;
   1106
   1107	list_for_each_safe(p, n, &blockpool->free_block_list) {
   1108		dma_unmap_single(&hldev->pdev->dev,
   1109				 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
   1110				 ((struct __vxge_hw_blockpool_entry *)p)->length,
   1111				 DMA_BIDIRECTIONAL);
   1112
   1113		vxge_os_dma_free(hldev->pdev,
   1114			((struct __vxge_hw_blockpool_entry *)p)->memblock,
   1115			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
   1116
   1117		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
   1118		kfree(p);
   1119		blockpool->pool_size--;
   1120	}
   1121
   1122	list_for_each_safe(p, n, &blockpool->free_entry_list) {
   1123		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
   1124		kfree(p);
   1125	}
   1126
   1127	return;
   1128}
   1129
   1130/*
   1131 * __vxge_hw_blockpool_create - Create block pool
   1132 */
   1133static enum vxge_hw_status
   1134__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
   1135			   struct __vxge_hw_blockpool *blockpool,
   1136			   u32 pool_size,
   1137			   u32 pool_max)
   1138{
   1139	u32 i;
   1140	struct __vxge_hw_blockpool_entry *entry = NULL;
   1141	void *memblock;
   1142	dma_addr_t dma_addr;
   1143	struct pci_dev *dma_handle;
   1144	struct pci_dev *acc_handle;
   1145	enum vxge_hw_status status = VXGE_HW_OK;
   1146
   1147	if (blockpool == NULL) {
   1148		status = VXGE_HW_FAIL;
   1149		goto blockpool_create_exit;
   1150	}
   1151
   1152	blockpool->hldev = hldev;
   1153	blockpool->block_size = VXGE_HW_BLOCK_SIZE;
   1154	blockpool->pool_size = 0;
   1155	blockpool->pool_max = pool_max;
   1156	blockpool->req_out = 0;
   1157
   1158	INIT_LIST_HEAD(&blockpool->free_block_list);
   1159	INIT_LIST_HEAD(&blockpool->free_entry_list);
   1160
   1161	for (i = 0; i < pool_size + pool_max; i++) {
   1162		entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
   1163				GFP_KERNEL);
   1164		if (entry == NULL) {
   1165			__vxge_hw_blockpool_destroy(blockpool);
   1166			status = VXGE_HW_ERR_OUT_OF_MEMORY;
   1167			goto blockpool_create_exit;
   1168		}
   1169		list_add(&entry->item, &blockpool->free_entry_list);
   1170	}
   1171
   1172	for (i = 0; i < pool_size; i++) {
   1173		memblock = vxge_os_dma_malloc(
   1174				hldev->pdev,
   1175				VXGE_HW_BLOCK_SIZE,
   1176				&dma_handle,
   1177				&acc_handle);
   1178		if (memblock == NULL) {
   1179			__vxge_hw_blockpool_destroy(blockpool);
   1180			status = VXGE_HW_ERR_OUT_OF_MEMORY;
   1181			goto blockpool_create_exit;
   1182		}
   1183
   1184		dma_addr = dma_map_single(&hldev->pdev->dev, memblock,
   1185					  VXGE_HW_BLOCK_SIZE,
   1186					  DMA_BIDIRECTIONAL);
   1187		if (unlikely(dma_mapping_error(&hldev->pdev->dev, dma_addr))) {
   1188			vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
   1189			__vxge_hw_blockpool_destroy(blockpool);
   1190			status = VXGE_HW_ERR_OUT_OF_MEMORY;
   1191			goto blockpool_create_exit;
   1192		}
   1193
   1194		if (!list_empty(&blockpool->free_entry_list))
   1195			entry = (struct __vxge_hw_blockpool_entry *)
   1196				list_first_entry(&blockpool->free_entry_list,
   1197					struct __vxge_hw_blockpool_entry,
   1198					item);
   1199
   1200		if (entry == NULL)
   1201			entry =
   1202			    kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
   1203					GFP_KERNEL);
   1204		if (entry != NULL) {
   1205			list_del(&entry->item);
   1206			entry->length = VXGE_HW_BLOCK_SIZE;
   1207			entry->memblock = memblock;
   1208			entry->dma_addr = dma_addr;
   1209			entry->acc_handle = acc_handle;
   1210			entry->dma_handle = dma_handle;
   1211			list_add(&entry->item,
   1212					  &blockpool->free_block_list);
   1213			blockpool->pool_size++;
   1214		} else {
   1215			__vxge_hw_blockpool_destroy(blockpool);
   1216			status = VXGE_HW_ERR_OUT_OF_MEMORY;
   1217			goto blockpool_create_exit;
   1218		}
   1219	}
   1220
   1221blockpool_create_exit:
   1222	return status;
   1223}
   1224
   1225/*
   1226 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
   1227 * Check the fifo configuration
   1228 */
   1229static enum vxge_hw_status
   1230__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
   1231{
   1232	if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
   1233	    (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
   1234		return VXGE_HW_BADCFG_FIFO_BLOCKS;
   1235
   1236	return VXGE_HW_OK;
   1237}
   1238
   1239/*
   1240 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
   1241 * Check the vpath configuration
   1242 */
   1243static enum vxge_hw_status
   1244__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
   1245{
   1246	enum vxge_hw_status status;
   1247
   1248	if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
   1249	    (vp_config->min_bandwidth >	VXGE_HW_VPATH_BANDWIDTH_MAX))
   1250		return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
   1251
   1252	status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
   1253	if (status != VXGE_HW_OK)
   1254		return status;
   1255
   1256	if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
   1257		((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
   1258		(vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
   1259		return VXGE_HW_BADCFG_VPATH_MTU;
   1260
   1261	if ((vp_config->rpa_strip_vlan_tag !=
   1262		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
   1263		(vp_config->rpa_strip_vlan_tag !=
   1264		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
   1265		(vp_config->rpa_strip_vlan_tag !=
   1266		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
   1267		return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
   1268
   1269	return VXGE_HW_OK;
   1270}
   1271
   1272/*
   1273 * __vxge_hw_device_config_check - Check device configuration.
   1274 * Check the device configuration
   1275 */
   1276static enum vxge_hw_status
   1277__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
   1278{
   1279	u32 i;
   1280	enum vxge_hw_status status;
   1281
   1282	if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
   1283	    (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
   1284	    (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
   1285	    (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
   1286		return VXGE_HW_BADCFG_INTR_MODE;
   1287
   1288	if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
   1289	    (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
   1290		return VXGE_HW_BADCFG_RTS_MAC_EN;
   1291
   1292	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
   1293		status = __vxge_hw_device_vpath_config_check(
   1294				&new_config->vp_config[i]);
   1295		if (status != VXGE_HW_OK)
   1296			return status;
   1297	}
   1298
   1299	return VXGE_HW_OK;
   1300}
   1301
   1302/*
   1303 * vxge_hw_device_initialize - Initialize Titan device.
    1304 * Initialize the Titan device. The driver cooperates with the OS to find a
    1305 * new Titan device and to locate its PCI and memory spaces.
    1306 *
    1307 * This routine allocates sizeof(struct __vxge_hw_device) bytes for the HW
    1308 * device object, performs the Titan hardware initialization and returns the
    1309 * device handle via @devh.
   1310 */
   1311enum vxge_hw_status
   1312vxge_hw_device_initialize(
   1313	struct __vxge_hw_device **devh,
   1314	struct vxge_hw_device_attr *attr,
   1315	struct vxge_hw_device_config *device_config)
   1316{
   1317	u32 i;
   1318	u32 nblocks = 0;
   1319	struct __vxge_hw_device *hldev = NULL;
   1320	enum vxge_hw_status status = VXGE_HW_OK;
   1321
   1322	status = __vxge_hw_device_config_check(device_config);
   1323	if (status != VXGE_HW_OK)
   1324		goto exit;
   1325
   1326	hldev = vzalloc(sizeof(struct __vxge_hw_device));
   1327	if (hldev == NULL) {
   1328		status = VXGE_HW_ERR_OUT_OF_MEMORY;
   1329		goto exit;
   1330	}
   1331
   1332	hldev->magic = VXGE_HW_DEVICE_MAGIC;
   1333
   1334	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
   1335
   1336	/* apply config */
   1337	memcpy(&hldev->config, device_config,
   1338		sizeof(struct vxge_hw_device_config));
   1339
   1340	hldev->bar0 = attr->bar0;
   1341	hldev->pdev = attr->pdev;
   1342
   1343	hldev->uld_callbacks = attr->uld_callbacks;
   1344
   1345	__vxge_hw_device_pci_e_init(hldev);
   1346
   1347	status = __vxge_hw_device_reg_addr_get(hldev);
   1348	if (status != VXGE_HW_OK) {
   1349		vfree(hldev);
   1350		goto exit;
   1351	}
   1352
   1353	__vxge_hw_device_host_info_get(hldev);
   1354
   1355	/* Incrementing for stats blocks */
   1356	nblocks++;
   1357
   1358	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
   1359		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
   1360			continue;
   1361
   1362		if (device_config->vp_config[i].ring.enable ==
   1363			VXGE_HW_RING_ENABLE)
   1364			nblocks += device_config->vp_config[i].ring.ring_blocks;
   1365
   1366		if (device_config->vp_config[i].fifo.enable ==
   1367			VXGE_HW_FIFO_ENABLE)
   1368			nblocks += device_config->vp_config[i].fifo.fifo_blocks;
   1369		nblocks++;
   1370	}
   1371
   1372	if (__vxge_hw_blockpool_create(hldev,
   1373		&hldev->block_pool,
   1374		device_config->dma_blockpool_initial + nblocks,
   1375		device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
   1376
   1377		vxge_hw_device_terminate(hldev);
   1378		status = VXGE_HW_ERR_OUT_OF_MEMORY;
   1379		goto exit;
   1380	}
   1381
   1382	status = __vxge_hw_device_initialize(hldev);
   1383	if (status != VXGE_HW_OK) {
   1384		vxge_hw_device_terminate(hldev);
   1385		goto exit;
   1386	}
   1387
   1388	*devh = hldev;
   1389exit:
   1390	return status;
   1391}
   1392
   1393/*
   1394 * vxge_hw_device_terminate - Terminate Titan device.
   1395 * Terminate HW device.
   1396 */
   1397void
   1398vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
   1399{
   1400	vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
   1401
   1402	hldev->magic = VXGE_HW_DEVICE_DEAD;
   1403	__vxge_hw_blockpool_destroy(&hldev->block_pool);
   1404	vfree(hldev);
   1405}
   1406
   1407/*
   1408 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
   1409 *                           and offset and perform an operation
   1410 */
   1411static enum vxge_hw_status
   1412__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
   1413			     u32 operation, u32 offset, u64 *stat)
   1414{
   1415	u64 val64;
   1416	enum vxge_hw_status status = VXGE_HW_OK;
   1417	struct vxge_hw_vpath_reg __iomem *vp_reg;
   1418
   1419	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
   1420		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
   1421		goto vpath_stats_access_exit;
   1422	}
   1423
   1424	vp_reg = vpath->vp_reg;
   1425
   1426	val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
   1427		 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
   1428		 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
   1429
   1430	status = __vxge_hw_pio_mem_write64(val64,
   1431				&vp_reg->xmac_stats_access_cmd,
   1432				VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
   1433				vpath->hldev->config.device_poll_millis);
   1434	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
   1435		*stat = readq(&vp_reg->xmac_stats_access_data);
   1436	else
   1437		*stat = 0;
   1438
   1439vpath_stats_access_exit:
   1440	return status;
   1441}
   1442
   1443/*
   1444 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
   1445 */
   1446static enum vxge_hw_status
   1447__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
   1448			struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
   1449{
   1450	u64 *val64;
   1451	int i;
   1452	u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
   1453	enum vxge_hw_status status = VXGE_HW_OK;
   1454
   1455	val64 = (u64 *)vpath_tx_stats;
   1456
   1457	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
   1458		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
   1459		goto exit;
   1460	}
   1461
   1462	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
   1463		status = __vxge_hw_vpath_stats_access(vpath,
   1464					VXGE_HW_STATS_OP_READ,
   1465					offset, val64);
   1466		if (status != VXGE_HW_OK)
   1467			goto exit;
   1468		offset++;
   1469		val64++;
   1470	}
   1471exit:
   1472	return status;
   1473}
   1474
   1475/*
   1476 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
   1477 */
   1478static enum vxge_hw_status
   1479__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
   1480			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
   1481{
   1482	u64 *val64;
   1483	enum vxge_hw_status status = VXGE_HW_OK;
   1484	int i;
   1485	u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
   1486	val64 = (u64 *) vpath_rx_stats;
   1487
   1488	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
   1489		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
   1490		goto exit;
   1491	}
   1492	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
   1493		status = __vxge_hw_vpath_stats_access(vpath,
   1494					VXGE_HW_STATS_OP_READ,
   1495					offset >> 3, val64);
   1496		if (status != VXGE_HW_OK)
   1497			goto exit;
   1498
   1499		offset += 8;
   1500		val64++;
   1501	}
   1502exit:
   1503	return status;
   1504}
   1505
   1506/*
   1507 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
   1508 */
   1509static enum vxge_hw_status
   1510__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
   1511			  struct vxge_hw_vpath_stats_hw_info *hw_stats)
   1512{
   1513	u64 val64;
   1514	enum vxge_hw_status status = VXGE_HW_OK;
   1515	struct vxge_hw_vpath_reg __iomem *vp_reg;
   1516
   1517	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
   1518		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
   1519		goto exit;
   1520	}
   1521	vp_reg = vpath->vp_reg;
   1522
   1523	val64 = readq(&vp_reg->vpath_debug_stats0);
   1524	hw_stats->ini_num_mwr_sent =
   1525		(u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
   1526
   1527	val64 = readq(&vp_reg->vpath_debug_stats1);
   1528	hw_stats->ini_num_mrd_sent =
   1529		(u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
   1530
   1531	val64 = readq(&vp_reg->vpath_debug_stats2);
   1532	hw_stats->ini_num_cpl_rcvd =
   1533		(u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
   1534
   1535	val64 = readq(&vp_reg->vpath_debug_stats3);
   1536	hw_stats->ini_num_mwr_byte_sent =
   1537		VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
   1538
   1539	val64 = readq(&vp_reg->vpath_debug_stats4);
   1540	hw_stats->ini_num_cpl_byte_rcvd =
   1541		VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
   1542
   1543	val64 = readq(&vp_reg->vpath_debug_stats5);
   1544	hw_stats->wrcrdtarb_xoff =
   1545		(u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
   1546
   1547	val64 = readq(&vp_reg->vpath_debug_stats6);
   1548	hw_stats->rdcrdtarb_xoff =
   1549		(u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
   1550
   1551	val64 = readq(&vp_reg->vpath_genstats_count01);
   1552	hw_stats->vpath_genstats_count0 =
   1553	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
   1554		val64);
   1555
   1556	val64 = readq(&vp_reg->vpath_genstats_count01);
   1557	hw_stats->vpath_genstats_count1 =
   1558	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
   1559		val64);
   1560
   1561	val64 = readq(&vp_reg->vpath_genstats_count23);
   1562	hw_stats->vpath_genstats_count2 =
   1563	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
   1564		val64);
   1565
    1566	val64 = readq(&vp_reg->vpath_genstats_count23);
   1567	hw_stats->vpath_genstats_count3 =
   1568	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
   1569		val64);
   1570
   1571	val64 = readq(&vp_reg->vpath_genstats_count4);
   1572	hw_stats->vpath_genstats_count4 =
   1573	(u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
   1574		val64);
   1575
   1576	val64 = readq(&vp_reg->vpath_genstats_count5);
   1577	hw_stats->vpath_genstats_count5 =
   1578	(u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
   1579		val64);
   1580
   1581	status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
   1582	if (status != VXGE_HW_OK)
   1583		goto exit;
   1584
   1585	status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
   1586	if (status != VXGE_HW_OK)
   1587		goto exit;
   1588
   1589	VXGE_HW_VPATH_STATS_PIO_READ(
   1590		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
   1591
   1592	hw_stats->prog_event_vnum0 =
   1593			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
   1594
   1595	hw_stats->prog_event_vnum1 =
   1596			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
   1597
   1598	VXGE_HW_VPATH_STATS_PIO_READ(
   1599		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
   1600
   1601	hw_stats->prog_event_vnum2 =
   1602			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
   1603
   1604	hw_stats->prog_event_vnum3 =
   1605			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
   1606
   1607	val64 = readq(&vp_reg->rx_multi_cast_stats);
   1608	hw_stats->rx_multi_cast_frame_discard =
   1609		(u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
   1610
   1611	val64 = readq(&vp_reg->rx_frm_transferred);
   1612	hw_stats->rx_frm_transferred =
   1613		(u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
   1614
   1615	val64 = readq(&vp_reg->rxd_returned);
   1616	hw_stats->rxd_returned =
   1617		(u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
   1618
   1619	val64 = readq(&vp_reg->dbg_stats_rx_mpa);
   1620	hw_stats->rx_mpa_len_fail_frms =
   1621		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
   1622	hw_stats->rx_mpa_mrk_fail_frms =
   1623		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
   1624	hw_stats->rx_mpa_crc_fail_frms =
   1625		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
   1626
   1627	val64 = readq(&vp_reg->dbg_stats_rx_fau);
   1628	hw_stats->rx_permitted_frms =
   1629		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
   1630	hw_stats->rx_vp_reset_discarded_frms =
   1631	(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
   1632	hw_stats->rx_wol_frms =
   1633		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
   1634
   1635	val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
   1636	hw_stats->tx_vp_reset_discarded_frms =
   1637	(u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
   1638		val64);
   1639exit:
   1640	return status;
   1641}
   1642
   1643/*
   1644 * vxge_hw_device_stats_get - Get the device hw statistics.
   1645 * Returns the vpath h/w stats for the device.
   1646 */
   1647enum vxge_hw_status
   1648vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
   1649			struct vxge_hw_device_stats_hw_info *hw_stats)
   1650{
   1651	u32 i;
   1652	enum vxge_hw_status status = VXGE_HW_OK;
   1653
   1654	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
   1655		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
   1656			(hldev->virtual_paths[i].vp_open ==
   1657				VXGE_HW_VP_NOT_OPEN))
   1658			continue;
   1659
   1660		memcpy(hldev->virtual_paths[i].hw_stats_sav,
   1661				hldev->virtual_paths[i].hw_stats,
   1662				sizeof(struct vxge_hw_vpath_stats_hw_info));
   1663
   1664		status = __vxge_hw_vpath_stats_get(
   1665			&hldev->virtual_paths[i],
   1666			hldev->virtual_paths[i].hw_stats);
   1667	}
   1668
   1669	memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
   1670			sizeof(struct vxge_hw_device_stats_hw_info));
   1671
   1672	return status;
   1673}
   1674
   1675/*
   1676 * vxge_hw_driver_stats_get - Get the device sw statistics.
   1677 * Returns the vpath s/w stats for the device.
   1678 */
   1679enum vxge_hw_status vxge_hw_driver_stats_get(
   1680			struct __vxge_hw_device *hldev,
   1681			struct vxge_hw_device_stats_sw_info *sw_stats)
   1682{
   1683	memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
   1684		sizeof(struct vxge_hw_device_stats_sw_info));
   1685
   1686	return VXGE_HW_OK;
   1687}
   1688
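/*
 * Usage sketch (illustrative only, assumes an opened
 * struct __vxge_hw_device *hldev):
 *
 *	struct vxge_hw_device_stats_hw_info hw_info;
 *	struct vxge_hw_device_stats_sw_info sw_info;
 *
 *	if (vxge_hw_device_stats_get(hldev, &hw_info) == VXGE_HW_OK &&
 *	    vxge_hw_driver_stats_get(hldev, &sw_info) == VXGE_HW_OK) {
 *		... consume hw_info and sw_info ...
 *	}
 */
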
   1689/*
   1690 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
   1691 *                           and offset and perform an operation
   1692 * Get the statistics from the given location and offset.
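 *
 * The access goes through the xmac_stats_sys_cmd / xmac_stats_sys_data
 * register pair: the operation, location and offset are written together
 * with the STROBE bit, __vxge_hw_pio_mem_write64() then polls the strobe
 * for completion, and for a READ the result is fetched from
 * xmac_stats_sys_data.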
   1693 */
   1694enum vxge_hw_status
   1695vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
   1696			    u32 operation, u32 location, u32 offset, u64 *stat)
   1697{
   1698	u64 val64;
   1699	enum vxge_hw_status status = VXGE_HW_OK;
   1700
   1701	status = __vxge_hw_device_is_privilaged(hldev->host_type,
   1702			hldev->func_id);
   1703	if (status != VXGE_HW_OK)
   1704		goto exit;
   1705
   1706	val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
   1707		VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
   1708		VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
   1709		VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
   1710
   1711	status = __vxge_hw_pio_mem_write64(val64,
   1712				&hldev->mrpcim_reg->xmac_stats_sys_cmd,
   1713				VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
   1714				hldev->config.device_poll_millis);
   1715
   1716	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
   1717		*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
   1718	else
   1719		*stat = 0;
   1720exit:
   1721	return status;
   1722}
   1723
   1724/*
   1725 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
    1726	 * Reads the aggregate port statistics from the adapter, one 64-bit counter at a time
   1727 */
   1728static enum vxge_hw_status
   1729vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
   1730				   struct vxge_hw_xmac_aggr_stats *aggr_stats)
   1731{
   1732	u64 *val64;
   1733	int i;
   1734	u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
   1735	enum vxge_hw_status status = VXGE_HW_OK;
   1736
   1737	val64 = (u64 *)aggr_stats;
   1738
   1739	status = __vxge_hw_device_is_privilaged(hldev->host_type,
   1740			hldev->func_id);
   1741	if (status != VXGE_HW_OK)
   1742		goto exit;
   1743
   1744	for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
   1745		status = vxge_hw_mrpcim_stats_access(hldev,
   1746					VXGE_HW_STATS_OP_READ,
   1747					VXGE_HW_STATS_LOC_AGGR,
   1748					((offset + (104 * port)) >> 3), val64);
   1749		if (status != VXGE_HW_OK)
   1750			goto exit;
   1751
   1752		offset += 8;
   1753		val64++;
   1754	}
   1755exit:
   1756	return status;
   1757}
   1758
   1759/*
   1760 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
    1761	 * Reads the per-port XMAC statistics from the adapter, one 64-bit counter at a time
   1762 */
   1763static enum vxge_hw_status
   1764vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
   1765				   struct vxge_hw_xmac_port_stats *port_stats)
   1766{
   1767	u64 *val64;
   1768	enum vxge_hw_status status = VXGE_HW_OK;
   1769	int i;
   1770	u32 offset = 0x0;
   1771	val64 = (u64 *) port_stats;
   1772
   1773	status = __vxge_hw_device_is_privilaged(hldev->host_type,
   1774			hldev->func_id);
   1775	if (status != VXGE_HW_OK)
   1776		goto exit;
   1777
   1778	for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
   1779		status = vxge_hw_mrpcim_stats_access(hldev,
   1780					VXGE_HW_STATS_OP_READ,
   1781					VXGE_HW_STATS_LOC_AGGR,
   1782					((offset + (608 * port)) >> 3), val64);
   1783		if (status != VXGE_HW_OK)
   1784			goto exit;
   1785
   1786		offset += 8;
   1787		val64++;
   1788	}
   1789
   1790exit:
   1791	return status;
   1792}
   1793
   1794/*
   1795 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
    1796	 * Collects the aggregate-port, per-port and per-vpath XMAC statistics
   1797 */
   1798enum vxge_hw_status
   1799vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
   1800			      struct vxge_hw_xmac_stats *xmac_stats)
   1801{
   1802	enum vxge_hw_status status = VXGE_HW_OK;
   1803	u32 i;
   1804
   1805	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
   1806					0, &xmac_stats->aggr_stats[0]);
   1807	if (status != VXGE_HW_OK)
   1808		goto exit;
   1809
   1810	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
   1811				1, &xmac_stats->aggr_stats[1]);
   1812	if (status != VXGE_HW_OK)
   1813		goto exit;
   1814
   1815	for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
   1816
   1817		status = vxge_hw_device_xmac_port_stats_get(hldev,
   1818					i, &xmac_stats->port_stats[i]);
   1819		if (status != VXGE_HW_OK)
   1820			goto exit;
   1821	}
   1822
   1823	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
   1824
   1825		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
   1826			continue;
   1827
   1828		status = __vxge_hw_vpath_xmac_tx_stats_get(
   1829					&hldev->virtual_paths[i],
   1830					&xmac_stats->vpath_tx_stats[i]);
   1831		if (status != VXGE_HW_OK)
   1832			goto exit;
   1833
   1834		status = __vxge_hw_vpath_xmac_rx_stats_get(
   1835					&hldev->virtual_paths[i],
   1836					&xmac_stats->vpath_rx_stats[i]);
   1837		if (status != VXGE_HW_OK)
   1838			goto exit;
   1839	}
   1840exit:
   1841	return status;
   1842}
   1843
   1844/*
    1845	 * vxge_hw_device_debug_set - Set the debug module mask and level
   1846 * This routine is used to dynamically change the debug output
   1847 */
   1848void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
   1849			      enum vxge_debug_level level, u32 mask)
   1850{
   1851	if (hldev == NULL)
   1852		return;
   1853
   1854#if defined(VXGE_DEBUG_TRACE_MASK) || \
   1855	defined(VXGE_DEBUG_ERR_MASK)
   1856	hldev->debug_module_mask = mask;
   1857	hldev->debug_level = level;
   1858#endif
   1859
   1860#if defined(VXGE_DEBUG_ERR_MASK)
   1861	hldev->level_err = level & VXGE_ERR;
   1862#endif
   1863
   1864#if defined(VXGE_DEBUG_TRACE_MASK)
   1865	hldev->level_trace = level & VXGE_TRACE;
   1866#endif
   1867}
   1868
   1869/*
   1870 * vxge_hw_device_error_level_get - Get the error level
   1871 * This routine returns the current error level set
   1872 */
   1873u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
   1874{
   1875#if defined(VXGE_DEBUG_ERR_MASK)
   1876	if (hldev == NULL)
   1877		return VXGE_ERR;
   1878	else
   1879		return hldev->level_err;
   1880#else
   1881	return 0;
   1882#endif
   1883}
   1884
   1885/*
   1886 * vxge_hw_device_trace_level_get - Get the trace level
   1887 * This routine returns the current trace level set
   1888 */
   1889u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
   1890{
   1891#if defined(VXGE_DEBUG_TRACE_MASK)
   1892	if (hldev == NULL)
   1893		return VXGE_TRACE;
   1894	else
   1895		return hldev->level_trace;
   1896#else
   1897	return 0;
   1898#endif
   1899}
   1900
   1901/*
    1902	 * vxge_hw_device_getpause_data - Pause frame generation and reception.
   1903 * Returns the Pause frame generation and reception capability of the NIC.
   1904 */
   1905enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
   1906						 u32 port, u32 *tx, u32 *rx)
   1907{
   1908	u64 val64;
   1909	enum vxge_hw_status status = VXGE_HW_OK;
   1910
   1911	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
   1912		status = VXGE_HW_ERR_INVALID_DEVICE;
   1913		goto exit;
   1914	}
   1915
   1916	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
   1917		status = VXGE_HW_ERR_INVALID_PORT;
   1918		goto exit;
   1919	}
   1920
   1921	if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
   1922		status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
   1923		goto exit;
   1924	}
   1925
   1926	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
   1927	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
   1928		*tx = 1;
   1929	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
   1930		*rx = 1;
   1931exit:
   1932	return status;
   1933}
   1934
   1935/*
    1936	 * vxge_hw_device_setpause_data - Set/reset pause frame generation.
   1937 * It can be used to set or reset Pause frame generation or reception
   1938 * support of the NIC.
   1939 */
   1940enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
   1941						 u32 port, u32 tx, u32 rx)
   1942{
   1943	u64 val64;
   1944	enum vxge_hw_status status = VXGE_HW_OK;
   1945
   1946	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
   1947		status = VXGE_HW_ERR_INVALID_DEVICE;
   1948		goto exit;
   1949	}
   1950
   1951	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
   1952		status = VXGE_HW_ERR_INVALID_PORT;
   1953		goto exit;
   1954	}
   1955
   1956	status = __vxge_hw_device_is_privilaged(hldev->host_type,
   1957			hldev->func_id);
   1958	if (status != VXGE_HW_OK)
   1959		goto exit;
   1960
   1961	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
   1962	if (tx)
   1963		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
   1964	else
   1965		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
   1966	if (rx)
   1967		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
   1968	else
   1969		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
   1970
   1971	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
   1972exit:
   1973	return status;
   1974}
   1975
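/*
 * vxge_hw_device_link_width_get - Get the negotiated PCIe link width
 * The negotiated link width is the bits 9:4 field of the PCIe Link Status
 * register, hence the mask and the shift by 4 below.
 */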
   1976u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
   1977{
   1978	struct pci_dev *dev = hldev->pdev;
   1979	u16 lnk;
   1980
   1981	pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
   1982	return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
   1983}
   1984
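/*
 * Each RxD block carries two 64-bit bookkeeping words at fixed offsets:
 * the index of the memblock that owns it (VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)
 * and the DMA address of the next RxD block in the chain
 * (VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET).  The helpers below read and
 * write those words.
 */
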
   1985/*
   1986 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
   1987 * This function returns the index of memory block
   1988 */
   1989static inline u32
   1990__vxge_hw_ring_block_memblock_idx(u8 *block)
   1991{
   1992	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
   1993}
   1994
   1995/*
   1996 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
   1997 * This function sets index to a memory block
   1998 */
   1999static inline void
   2000__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
   2001{
   2002	*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
   2003}
   2004
   2005/*
   2006 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
   2007 * in RxD block
    2008	 * Stores the DMA address of the next RxD block at this block's next-pointer offset
   2009 */
   2010static inline void
   2011__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
   2012{
   2013	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
   2014}
   2015
   2016/*
   2017 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
   2018 *             first block
   2019 * Returns the dma address of the first RxD block
   2020 */
   2021static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
   2022{
   2023	struct vxge_hw_mempool_dma *dma_object;
   2024
   2025	dma_object = ring->mempool->memblocks_dma_arr;
   2026	vxge_assert(dma_object != NULL);
   2027
   2028	return dma_object->addr;
   2029}
   2030
   2031/*
   2032 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
   2033 * This function returns the dma address of a given item
   2034 */
   2035static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
   2036					       void *item)
   2037{
   2038	u32 memblock_idx;
   2039	void *memblock;
   2040	struct vxge_hw_mempool_dma *memblock_dma_object;
   2041	ptrdiff_t dma_item_offset;
   2042
   2043	/* get owner memblock index */
   2044	memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
   2045
   2046	/* get owner memblock by memblock index */
   2047	memblock = mempoolh->memblocks_arr[memblock_idx];
   2048
   2049	/* get memblock DMA object by memblock index */
   2050	memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
   2051
   2052	/* calculate offset in the memblock of this item */
   2053	dma_item_offset = (u8 *)item - (u8 *)memblock;
   2054
   2055	return memblock_dma_object->addr + dma_item_offset;
   2056}
   2057
   2058/*
   2059 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
    2060	 * This function links the 'from' RxD block to the 'to' RxD block by DMA address
   2061 */
   2062static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
   2063					 struct __vxge_hw_ring *ring, u32 from,
   2064					 u32 to)
   2065{
    2066	u8 *to_item, *from_item;
   2067	dma_addr_t to_dma;
   2068
   2069	/* get "from" RxD block */
   2070	from_item = mempoolh->items_arr[from];
   2071	vxge_assert(from_item);
   2072
   2073	/* get "to" RxD block */
   2074	to_item = mempoolh->items_arr[to];
   2075	vxge_assert(to_item);
   2076
   2077	/* return address of the beginning of previous RxD block */
   2078	to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
   2079
   2080	/* set next pointer for this RxD block to point on
   2081	 * previous item's DMA start address */
   2082	__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
   2083}
   2084
   2085/*
   2086 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
   2087 * block callback
    2088	 * This function is the callback passed to __vxge_hw_mempool_create to create
    2089	 * the memory pool for RxD blocks
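 *
 * Besides formatting the RxDs inside one block, it chains the blocks
 * together: each block's next-block pointer is set to the DMA address of
 * the following block, and the last block points back to the first, so the
 * hardware sees the RxD blocks as a circular list.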
   2090 */
   2091static void
   2092__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
   2093				  u32 memblock_index,
   2094				  struct vxge_hw_mempool_dma *dma_object,
   2095				  u32 index, u32 is_last)
   2096{
   2097	u32 i;
   2098	void *item = mempoolh->items_arr[index];
   2099	struct __vxge_hw_ring *ring =
   2100		(struct __vxge_hw_ring *)mempoolh->userdata;
   2101
   2102	/* format rxds array */
   2103	for (i = 0; i < ring->rxds_per_block; i++) {
   2104		void *rxdblock_priv;
   2105		void *uld_priv;
   2106		struct vxge_hw_ring_rxd_1 *rxdp;
   2107
   2108		u32 reserve_index = ring->channel.reserve_ptr -
   2109				(index * ring->rxds_per_block + i + 1);
   2110		u32 memblock_item_idx;
   2111
   2112		ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
   2113						i * ring->rxd_size;
   2114
   2115		/* Note: memblock_item_idx is index of the item within
   2116		 *       the memblock. For instance, in case of three RxD-blocks
   2117		 *       per memblock this value can be 0, 1 or 2. */
   2118		rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
   2119					memblock_index, item,
   2120					&memblock_item_idx);
   2121
   2122		rxdp = ring->channel.reserve_arr[reserve_index];
   2123
   2124		uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
   2125
   2126		/* pre-format Host_Control */
   2127		rxdp->host_control = (u64)(size_t)uld_priv;
   2128	}
   2129
   2130	__vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
   2131
   2132	if (is_last) {
   2133		/* link last one with first one */
   2134		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
   2135	}
   2136
   2137	if (index > 0) {
   2138		/* link this RxD block with previous one */
   2139		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
   2140	}
   2141}
   2142
   2143/*
    2144	 * vxge_hw_ring_replenish - Initial replenish of RxDs
   2145 * This function replenishes the RxDs from reserve array to work array
   2146 */
   2147static enum vxge_hw_status
   2148vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
   2149{
   2150	void *rxd;
   2151	struct __vxge_hw_channel *channel;
   2152	enum vxge_hw_status status = VXGE_HW_OK;
   2153
   2154	channel = &ring->channel;
   2155
   2156	while (vxge_hw_channel_dtr_count(channel) > 0) {
   2157
   2158		status = vxge_hw_ring_rxd_reserve(ring, &rxd);
   2159
   2160		vxge_assert(status == VXGE_HW_OK);
   2161
   2162		if (ring->rxd_init) {
   2163			status = ring->rxd_init(rxd, channel->userdata);
   2164			if (status != VXGE_HW_OK) {
   2165				vxge_hw_ring_rxd_free(ring, rxd);
   2166				goto exit;
   2167			}
   2168		}
   2169
   2170		vxge_hw_ring_rxd_post(ring, rxd);
   2171	}
   2172	status = VXGE_HW_OK;
   2173exit:
   2174	return status;
   2175}
   2176
   2177/*
   2178 * __vxge_hw_channel_allocate - Allocate memory for channel
   2179 * This function allocates required memory for the channel and various arrays
   2180 * in the channel
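 * (four parallel arrays of 'length' descriptor pointers each: work_arr,
 * free_arr, reserve_arr and orig_arr).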
   2181 */
   2182static struct __vxge_hw_channel *
   2183__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
   2184			   enum __vxge_hw_channel_type type,
   2185			   u32 length, u32 per_dtr_space,
   2186			   void *userdata)
   2187{
   2188	struct __vxge_hw_channel *channel;
   2189	struct __vxge_hw_device *hldev;
   2190	int size = 0;
   2191	u32 vp_id;
   2192
   2193	hldev = vph->vpath->hldev;
   2194	vp_id = vph->vpath->vp_id;
   2195
   2196	switch (type) {
   2197	case VXGE_HW_CHANNEL_TYPE_FIFO:
   2198		size = sizeof(struct __vxge_hw_fifo);
   2199		break;
   2200	case VXGE_HW_CHANNEL_TYPE_RING:
   2201		size = sizeof(struct __vxge_hw_ring);
   2202		break;
   2203	default:
   2204		break;
   2205	}
   2206
   2207	channel = kzalloc(size, GFP_KERNEL);
   2208	if (channel == NULL)
   2209		goto exit0;
   2210	INIT_LIST_HEAD(&channel->item);
   2211
   2212	channel->common_reg = hldev->common_reg;
   2213	channel->first_vp_id = hldev->first_vp_id;
   2214	channel->type = type;
   2215	channel->devh = hldev;
   2216	channel->vph = vph;
   2217	channel->userdata = userdata;
   2218	channel->per_dtr_space = per_dtr_space;
   2219	channel->length = length;
   2220	channel->vp_id = vp_id;
   2221
   2222	channel->work_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
   2223	if (channel->work_arr == NULL)
   2224		goto exit1;
   2225
   2226	channel->free_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
   2227	if (channel->free_arr == NULL)
   2228		goto exit1;
   2229	channel->free_ptr = length;
   2230
   2231	channel->reserve_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
   2232	if (channel->reserve_arr == NULL)
   2233		goto exit1;
   2234	channel->reserve_ptr = length;
   2235	channel->reserve_top = 0;
   2236
   2237	channel->orig_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
   2238	if (channel->orig_arr == NULL)
   2239		goto exit1;
   2240
   2241	return channel;
   2242exit1:
   2243	__vxge_hw_channel_free(channel);
   2244
   2245exit0:
   2246	return NULL;
   2247}
   2248
   2249/*
   2250 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
   2251 * Adds a block to block pool
   2252 */
   2253static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
   2254					void *block_addr,
   2255					u32 length,
   2256					struct pci_dev *dma_h,
   2257					struct pci_dev *acc_handle)
   2258{
   2259	struct __vxge_hw_blockpool *blockpool;
   2260	struct __vxge_hw_blockpool_entry *entry = NULL;
   2261	dma_addr_t dma_addr;
   2262
   2263	blockpool = &devh->block_pool;
   2264
   2265	if (block_addr == NULL) {
   2266		blockpool->req_out--;
   2267		goto exit;
   2268	}
   2269
   2270	dma_addr = dma_map_single(&devh->pdev->dev, block_addr, length,
   2271				  DMA_BIDIRECTIONAL);
   2272
   2273	if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_addr))) {
   2274		vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
   2275		blockpool->req_out--;
   2276		goto exit;
   2277	}
   2278
   2279	if (!list_empty(&blockpool->free_entry_list))
   2280		entry = (struct __vxge_hw_blockpool_entry *)
   2281			list_first_entry(&blockpool->free_entry_list,
   2282				struct __vxge_hw_blockpool_entry,
   2283				item);
   2284
   2285	if (entry == NULL)
   2286		entry =	vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
   2287	else
   2288		list_del(&entry->item);
   2289
   2290	if (entry) {
   2291		entry->length = length;
   2292		entry->memblock = block_addr;
   2293		entry->dma_addr = dma_addr;
   2294		entry->acc_handle = acc_handle;
   2295		entry->dma_handle = dma_h;
   2296		list_add(&entry->item, &blockpool->free_block_list);
   2297		blockpool->pool_size++;
   2298	}
   2299
   2300	blockpool->req_out--;
   2301
   2302exit:
   2303	return;
   2304}
   2305
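/*
 * vxge_os_dma_malloc_async - Allocate one pool block
 * Despite the name this helper is synchronous: it kmallocs a DMA-capable
 * block and immediately hands it to vxge_hw_blockpool_block_add(), which
 * maps it and adds it to the free block list.
 */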
   2306static inline void
   2307vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
   2308{
   2309	void *vaddr;
   2310
   2311	vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);
   2312	vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
   2313}
   2314
   2315/*
   2316 * __vxge_hw_blockpool_blocks_add - Request additional blocks
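 * If the pool, counting requests already outstanding, has fallen below
 * VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE, allocate VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE
 * more blocks of VXGE_HW_BLOCK_SIZE bytes each.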
   2317 */
   2318static
   2319void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
   2320{
   2321	u32 nreq = 0, i;
   2322
   2323	if ((blockpool->pool_size  +  blockpool->req_out) <
   2324		VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
   2325		nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
   2326		blockpool->req_out += nreq;
   2327	}
   2328
   2329	for (i = 0; i < nreq; i++)
   2330		vxge_os_dma_malloc_async(
   2331			(blockpool->hldev)->pdev,
   2332			blockpool->hldev, VXGE_HW_BLOCK_SIZE);
   2333}
   2334
   2335/*
   2336 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
   2337 * Allocates a block of memory of given size, either from block pool
   2338 * or by calling vxge_os_dma_malloc()
   2339 */
   2340static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
   2341					struct vxge_hw_mempool_dma *dma_object)
   2342{
   2343	struct __vxge_hw_blockpool_entry *entry = NULL;
   2344	struct __vxge_hw_blockpool  *blockpool;
   2345	void *memblock = NULL;
   2346
   2347	blockpool = &devh->block_pool;
   2348
   2349	if (size != blockpool->block_size) {
   2350
   2351		memblock = vxge_os_dma_malloc(devh->pdev, size,
   2352						&dma_object->handle,
   2353						&dma_object->acc_handle);
   2354
   2355		if (!memblock)
   2356			goto exit;
   2357
   2358		dma_object->addr = dma_map_single(&devh->pdev->dev, memblock,
   2359						  size, DMA_BIDIRECTIONAL);
   2360
   2361		if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_object->addr))) {
   2362			vxge_os_dma_free(devh->pdev, memblock,
   2363				&dma_object->acc_handle);
   2364			memblock = NULL;
   2365			goto exit;
   2366		}
   2367
   2368	} else {
   2369
   2370		if (!list_empty(&blockpool->free_block_list))
   2371			entry = (struct __vxge_hw_blockpool_entry *)
   2372				list_first_entry(&blockpool->free_block_list,
   2373					struct __vxge_hw_blockpool_entry,
   2374					item);
   2375
   2376		if (entry != NULL) {
   2377			list_del(&entry->item);
   2378			dma_object->addr = entry->dma_addr;
   2379			dma_object->handle = entry->dma_handle;
   2380			dma_object->acc_handle = entry->acc_handle;
   2381			memblock = entry->memblock;
   2382
   2383			list_add(&entry->item,
   2384				&blockpool->free_entry_list);
   2385			blockpool->pool_size--;
   2386		}
   2387
   2388		if (memblock != NULL)
   2389			__vxge_hw_blockpool_blocks_add(blockpool);
   2390	}
   2391exit:
   2392	return memblock;
   2393}
   2394
   2395/*
   2396 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
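 * Unmaps and frees blocks from the free block list until the pool size
 * drops below pool_max.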
   2397 */
   2398static void
   2399__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
   2400{
   2401	struct list_head *p, *n;
   2402
   2403	list_for_each_safe(p, n, &blockpool->free_block_list) {
   2404
   2405		if (blockpool->pool_size < blockpool->pool_max)
   2406			break;
   2407
   2408		dma_unmap_single(&(blockpool->hldev)->pdev->dev,
   2409				 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
   2410				 ((struct __vxge_hw_blockpool_entry *)p)->length,
   2411				 DMA_BIDIRECTIONAL);
   2412
   2413		vxge_os_dma_free(
   2414			(blockpool->hldev)->pdev,
   2415			((struct __vxge_hw_blockpool_entry *)p)->memblock,
   2416			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
   2417
   2418		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
   2419
   2420		list_add(p, &blockpool->free_entry_list);
   2421
   2422		blockpool->pool_size--;
   2423
   2424	}
   2425}
   2426
   2427/*
    2428	 * __vxge_hw_blockpool_free - Frees the memory allocated with
   2429 *				__vxge_hw_blockpool_malloc
   2430 */
   2431static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
   2432				     void *memblock, u32 size,
   2433				     struct vxge_hw_mempool_dma *dma_object)
   2434{
   2435	struct __vxge_hw_blockpool_entry *entry = NULL;
   2436	struct __vxge_hw_blockpool  *blockpool;
   2437	enum vxge_hw_status status = VXGE_HW_OK;
   2438
   2439	blockpool = &devh->block_pool;
   2440
   2441	if (size != blockpool->block_size) {
   2442		dma_unmap_single(&devh->pdev->dev, dma_object->addr, size,
   2443				 DMA_BIDIRECTIONAL);
   2444		vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
   2445	} else {
   2446
   2447		if (!list_empty(&blockpool->free_entry_list))
   2448			entry = (struct __vxge_hw_blockpool_entry *)
   2449				list_first_entry(&blockpool->free_entry_list,
   2450					struct __vxge_hw_blockpool_entry,
   2451					item);
   2452
   2453		if (entry == NULL)
   2454			entry =	vmalloc(sizeof(
   2455					struct __vxge_hw_blockpool_entry));
   2456		else
   2457			list_del(&entry->item);
   2458
   2459		if (entry != NULL) {
   2460			entry->length = size;
   2461			entry->memblock = memblock;
   2462			entry->dma_addr = dma_object->addr;
   2463			entry->acc_handle = dma_object->acc_handle;
   2464			entry->dma_handle = dma_object->handle;
   2465			list_add(&entry->item,
   2466					&blockpool->free_block_list);
   2467			blockpool->pool_size++;
   2468			status = VXGE_HW_OK;
   2469		} else
   2470			status = VXGE_HW_ERR_OUT_OF_MEMORY;
   2471
   2472		if (status == VXGE_HW_OK)
   2473			__vxge_hw_blockpool_blocks_remove(blockpool);
   2474	}
   2475}
   2476
   2477/*
    2478	 * __vxge_hw_mempool_destroy
   2479 */
   2480static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
   2481{
   2482	u32 i, j;
   2483	struct __vxge_hw_device *devh = mempool->devh;
   2484
   2485	for (i = 0; i < mempool->memblocks_allocated; i++) {
   2486		struct vxge_hw_mempool_dma *dma_object;
   2487
   2488		vxge_assert(mempool->memblocks_arr[i]);
   2489		vxge_assert(mempool->memblocks_dma_arr + i);
   2490
   2491		dma_object = mempool->memblocks_dma_arr + i;
   2492
   2493		for (j = 0; j < mempool->items_per_memblock; j++) {
   2494			u32 index = i * mempool->items_per_memblock + j;
   2495
    2496			/* to skip last partially filled (if any) memblock */
   2497			if (index >= mempool->items_current)
   2498				break;
   2499		}
   2500
   2501		vfree(mempool->memblocks_priv_arr[i]);
   2502
   2503		__vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
   2504				mempool->memblock_size, dma_object);
   2505	}
   2506
   2507	vfree(mempool->items_arr);
   2508	vfree(mempool->memblocks_dma_arr);
   2509	vfree(mempool->memblocks_priv_arr);
   2510	vfree(mempool->memblocks_arr);
   2511	vfree(mempool);
   2512}
   2513
   2514/*
   2515 * __vxge_hw_mempool_grow
   2516 * Will resize mempool up to %num_allocate value.
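 * For every new memblock this allocates the per-item private area, obtains
 * a DMA-capable block from the block pool, records the items in the items
 * hash array and invokes the caller's item_func_alloc callback for each
 * item created.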
   2517 */
   2518static enum vxge_hw_status
   2519__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
   2520		       u32 *num_allocated)
   2521{
   2522	u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
   2523	u32 n_items = mempool->items_per_memblock;
   2524	u32 start_block_idx = mempool->memblocks_allocated;
   2525	u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
   2526	enum vxge_hw_status status = VXGE_HW_OK;
   2527
   2528	*num_allocated = 0;
   2529
   2530	if (end_block_idx > mempool->memblocks_max) {
   2531		status = VXGE_HW_ERR_OUT_OF_MEMORY;
   2532		goto exit;
   2533	}
   2534
   2535	for (i = start_block_idx; i < end_block_idx; i++) {
   2536		u32 j;
   2537		u32 is_last = ((end_block_idx - 1) == i);
   2538		struct vxge_hw_mempool_dma *dma_object =
   2539			mempool->memblocks_dma_arr + i;
   2540		void *the_memblock;
   2541
   2542		/* allocate memblock's private part. Each DMA memblock
   2543		 * has a space allocated for item's private usage upon
   2544		 * mempool's user request. Each time mempool grows, it will
   2545		 * allocate new memblock and its private part at once.
   2546		 * This helps to minimize memory usage a lot. */
   2547		mempool->memblocks_priv_arr[i] =
   2548			vzalloc(array_size(mempool->items_priv_size, n_items));
   2549		if (mempool->memblocks_priv_arr[i] == NULL) {
   2550			status = VXGE_HW_ERR_OUT_OF_MEMORY;
   2551			goto exit;
   2552		}
   2553
   2554		/* allocate DMA-capable memblock */
   2555		mempool->memblocks_arr[i] =
   2556			__vxge_hw_blockpool_malloc(mempool->devh,
   2557				mempool->memblock_size, dma_object);
   2558		if (mempool->memblocks_arr[i] == NULL) {
   2559			vfree(mempool->memblocks_priv_arr[i]);
   2560			status = VXGE_HW_ERR_OUT_OF_MEMORY;
   2561			goto exit;
   2562		}
   2563
   2564		(*num_allocated)++;
   2565		mempool->memblocks_allocated++;
   2566
   2567		memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
   2568
   2569		the_memblock = mempool->memblocks_arr[i];
   2570
   2571		/* fill the items hash array */
   2572		for (j = 0; j < n_items; j++) {
   2573			u32 index = i * n_items + j;
   2574
   2575			if (first_time && index >= mempool->items_initial)
   2576				break;
   2577
   2578			mempool->items_arr[index] =
   2579				((char *)the_memblock + j*mempool->item_size);
   2580
   2581			/* let caller to do more job on each item */
   2582			if (mempool->item_func_alloc != NULL)
   2583				mempool->item_func_alloc(mempool, i,
   2584					dma_object, index, is_last);
   2585
   2586			mempool->items_current = index + 1;
   2587		}
   2588
   2589		if (first_time && mempool->items_current ==
   2590					mempool->items_initial)
   2591			break;
   2592	}
   2593exit:
   2594	return status;
   2595}
   2596
   2597/*
    2598	 * __vxge_hw_mempool_create
   2599 * This function will create memory pool object. Pool may grow but will
   2600 * never shrink. Pool consists of number of dynamically allocated blocks
   2601 * with size enough to hold %items_initial number of items. Memory is
   2602 * DMA-able but client must map/unmap before interoperating with the device.
   2603 */
   2604static struct vxge_hw_mempool *
   2605__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
   2606			 u32 memblock_size,
   2607			 u32 item_size,
   2608			 u32 items_priv_size,
   2609			 u32 items_initial,
   2610			 u32 items_max,
   2611			 const struct vxge_hw_mempool_cbs *mp_callback,
   2612			 void *userdata)
   2613{
   2614	enum vxge_hw_status status = VXGE_HW_OK;
   2615	u32 memblocks_to_allocate;
   2616	struct vxge_hw_mempool *mempool = NULL;
   2617	u32 allocated;
   2618
   2619	if (memblock_size < item_size) {
   2620		status = VXGE_HW_FAIL;
   2621		goto exit;
   2622	}
   2623
   2624	mempool = vzalloc(sizeof(struct vxge_hw_mempool));
   2625	if (mempool == NULL) {
   2626		status = VXGE_HW_ERR_OUT_OF_MEMORY;
   2627		goto exit;
   2628	}
   2629
   2630	mempool->devh			= devh;
   2631	mempool->memblock_size		= memblock_size;
   2632	mempool->items_max		= items_max;
   2633	mempool->items_initial		= items_initial;
   2634	mempool->item_size		= item_size;
   2635	mempool->items_priv_size	= items_priv_size;
   2636	mempool->item_func_alloc	= mp_callback->item_func_alloc;
   2637	mempool->userdata		= userdata;
   2638
   2639	mempool->memblocks_allocated = 0;
   2640
   2641	mempool->items_per_memblock = memblock_size / item_size;
   2642
   2643	mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
   2644					mempool->items_per_memblock;
   2645
   2646	/* allocate array of memblocks */
   2647	mempool->memblocks_arr =
   2648		vzalloc(array_size(sizeof(void *), mempool->memblocks_max));
   2649	if (mempool->memblocks_arr == NULL) {
   2650		__vxge_hw_mempool_destroy(mempool);
   2651		status = VXGE_HW_ERR_OUT_OF_MEMORY;
   2652		mempool = NULL;
   2653		goto exit;
   2654	}
   2655
   2656	/* allocate array of private parts of items per memblocks */
   2657	mempool->memblocks_priv_arr =
   2658		vzalloc(array_size(sizeof(void *), mempool->memblocks_max));
   2659	if (mempool->memblocks_priv_arr == NULL) {
   2660		__vxge_hw_mempool_destroy(mempool);
   2661		status = VXGE_HW_ERR_OUT_OF_MEMORY;
   2662		mempool = NULL;
   2663		goto exit;
   2664	}
   2665
   2666	/* allocate array of memblocks DMA objects */
   2667	mempool->memblocks_dma_arr =
   2668		vzalloc(array_size(sizeof(struct vxge_hw_mempool_dma),
   2669				   mempool->memblocks_max));
   2670	if (mempool->memblocks_dma_arr == NULL) {
   2671		__vxge_hw_mempool_destroy(mempool);
   2672		status = VXGE_HW_ERR_OUT_OF_MEMORY;
   2673		mempool = NULL;
   2674		goto exit;
   2675	}
   2676
   2677	/* allocate hash array of items */
   2678	mempool->items_arr = vzalloc(array_size(sizeof(void *),
   2679						mempool->items_max));
   2680	if (mempool->items_arr == NULL) {
   2681		__vxge_hw_mempool_destroy(mempool);
   2682		status = VXGE_HW_ERR_OUT_OF_MEMORY;
   2683		mempool = NULL;
   2684		goto exit;
   2685	}
   2686
   2687	/* calculate initial number of memblocks */
   2688	memblocks_to_allocate = (mempool->items_initial +
   2689				 mempool->items_per_memblock - 1) /
   2690						mempool->items_per_memblock;
   2691
   2692	/* pre-allocate the mempool */
   2693	status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
   2694					&allocated);
   2695	if (status != VXGE_HW_OK) {
   2696		__vxge_hw_mempool_destroy(mempool);
   2697		status = VXGE_HW_ERR_OUT_OF_MEMORY;
   2698		mempool = NULL;
   2699		goto exit;
   2700	}
   2701
   2702exit:
   2703	return mempool;
   2704}
   2705
   2706/*
    2707	 * __vxge_hw_ring_abort - Return posted RxDs to the driver
    2708	 * This function terminates all outstanding RxDs of the ring
   2709 */
   2710static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
   2711{
   2712	void *rxdh;
   2713	struct __vxge_hw_channel *channel;
   2714
   2715	channel = &ring->channel;
   2716
   2717	for (;;) {
   2718		vxge_hw_channel_dtr_try_complete(channel, &rxdh);
   2719
   2720		if (rxdh == NULL)
   2721			break;
   2722
   2723		vxge_hw_channel_dtr_complete(channel);
   2724
   2725		if (ring->rxd_term)
   2726			ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
   2727				channel->userdata);
   2728
   2729		vxge_hw_channel_dtr_free(channel, rxdh);
   2730	}
   2731
   2732	return VXGE_HW_OK;
   2733}
   2734
   2735/*
   2736 * __vxge_hw_ring_reset - Resets the ring
   2737 * This function resets the ring during vpath reset operation
   2738 */
   2739static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
   2740{
   2741	enum vxge_hw_status status = VXGE_HW_OK;
   2742	struct __vxge_hw_channel *channel;
   2743
   2744	channel = &ring->channel;
   2745
   2746	__vxge_hw_ring_abort(ring);
   2747
   2748	status = __vxge_hw_channel_reset(channel);
   2749
   2750	if (status != VXGE_HW_OK)
   2751		goto exit;
   2752
   2753	if (ring->rxd_init) {
   2754		status = vxge_hw_ring_replenish(ring);
   2755		if (status != VXGE_HW_OK)
   2756			goto exit;
   2757	}
   2758exit:
   2759	return status;
   2760}
   2761
   2762/*
   2763 * __vxge_hw_ring_delete - Removes the ring
    2764	 * This function frees up the memory pool and removes the ring
   2765 */
   2766static enum vxge_hw_status
   2767__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
   2768{
   2769	struct __vxge_hw_ring *ring = vp->vpath->ringh;
   2770
   2771	__vxge_hw_ring_abort(ring);
   2772
   2773	if (ring->mempool)
   2774		__vxge_hw_mempool_destroy(ring->mempool);
   2775
   2776	vp->vpath->ringh = NULL;
   2777	__vxge_hw_channel_free(&ring->channel);
   2778
   2779	return VXGE_HW_OK;
   2780}
   2781
   2782/*
   2783 * __vxge_hw_ring_create - Create a Ring
   2784 * This function creates Ring and initializes it.
   2785 */
   2786static enum vxge_hw_status
   2787__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
   2788		      struct vxge_hw_ring_attr *attr)
   2789{
   2790	enum vxge_hw_status status = VXGE_HW_OK;
   2791	struct __vxge_hw_ring *ring;
   2792	u32 ring_length;
   2793	struct vxge_hw_ring_config *config;
   2794	struct __vxge_hw_device *hldev;
   2795	u32 vp_id;
   2796	static const struct vxge_hw_mempool_cbs ring_mp_callback = {
   2797		.item_func_alloc = __vxge_hw_ring_mempool_item_alloc,
   2798	};
   2799
   2800	if ((vp == NULL) || (attr == NULL)) {
   2801		status = VXGE_HW_FAIL;
   2802		goto exit;
   2803	}
   2804
   2805	hldev = vp->vpath->hldev;
   2806	vp_id = vp->vpath->vp_id;
   2807
   2808	config = &hldev->config.vp_config[vp_id].ring;
   2809
   2810	ring_length = config->ring_blocks *
   2811			vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
   2812
   2813	ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
   2814						VXGE_HW_CHANNEL_TYPE_RING,
   2815						ring_length,
   2816						attr->per_rxd_space,
   2817						attr->userdata);
   2818	if (ring == NULL) {
   2819		status = VXGE_HW_ERR_OUT_OF_MEMORY;
   2820		goto exit;
   2821	}
   2822
   2823	vp->vpath->ringh = ring;
   2824	ring->vp_id = vp_id;
   2825	ring->vp_reg = vp->vpath->vp_reg;
   2826	ring->common_reg = hldev->common_reg;
   2827	ring->stats = &vp->vpath->sw_stats->ring_stats;
   2828	ring->config = config;
   2829	ring->callback = attr->callback;
   2830	ring->rxd_init = attr->rxd_init;
   2831	ring->rxd_term = attr->rxd_term;
   2832	ring->buffer_mode = config->buffer_mode;
   2833	ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
   2834	ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
   2835	ring->rxds_limit = config->rxds_limit;
   2836
   2837	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
   2838	ring->rxd_priv_size =
   2839		sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
   2840	ring->per_rxd_space = attr->per_rxd_space;
   2841
   2842	ring->rxd_priv_size =
   2843		((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
   2844		VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
   2845
   2846	/* how many RxDs can fit into one block. Depends on configured
   2847	 * buffer_mode. */
   2848	ring->rxds_per_block =
   2849		vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
   2850
   2851	/* calculate actual RxD block private size */
   2852	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
   2853	ring->mempool = __vxge_hw_mempool_create(hldev,
   2854				VXGE_HW_BLOCK_SIZE,
   2855				VXGE_HW_BLOCK_SIZE,
   2856				ring->rxdblock_priv_size,
   2857				ring->config->ring_blocks,
   2858				ring->config->ring_blocks,
   2859				&ring_mp_callback,
   2860				ring);
   2861	if (ring->mempool == NULL) {
   2862		__vxge_hw_ring_delete(vp);
   2863		return VXGE_HW_ERR_OUT_OF_MEMORY;
   2864	}
   2865
   2866	status = __vxge_hw_channel_initialize(&ring->channel);
   2867	if (status != VXGE_HW_OK) {
   2868		__vxge_hw_ring_delete(vp);
   2869		goto exit;
   2870	}
   2871
   2872	/* Note:
   2873	 * Specifying rxd_init callback means two things:
   2874	 * 1) rxds need to be initialized by driver at channel-open time;
   2875	 * 2) rxds need to be posted at channel-open time
   2876	 *    (that's what the initial_replenish() below does)
    2877	 * Currently we don't have a case where 1) is done without 2).
   2878	 */
   2879	if (ring->rxd_init) {
   2880		status = vxge_hw_ring_replenish(ring);
   2881		if (status != VXGE_HW_OK) {
   2882			__vxge_hw_ring_delete(vp);
   2883			goto exit;
   2884		}
   2885	}
   2886
   2887	/* initial replenish will increment the counter in its post() routine,
   2888	 * we have to reset it */
   2889	ring->stats->common_stats.usage_cnt = 0;
   2890exit:
   2891	return status;
   2892}
   2893
   2894/*
   2895 * vxge_hw_device_config_default_get - Initialize device config with defaults.
   2896 * Initialize Titan device config with default values.
   2897 */
   2898enum vxge_hw_status
   2899vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
   2900{
   2901	u32 i;
   2902
   2903	device_config->dma_blockpool_initial =
   2904					VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
   2905	device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
   2906	device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
   2907	device_config->rth_en = VXGE_HW_RTH_DEFAULT;
   2908	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
   2909	device_config->device_poll_millis =  VXGE_HW_DEF_DEVICE_POLL_MILLIS;
   2910	device_config->rts_mac_en =  VXGE_HW_RTS_MAC_DEFAULT;
   2911
   2912	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
   2913		device_config->vp_config[i].vp_id = i;
   2914
   2915		device_config->vp_config[i].min_bandwidth =
   2916				VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
   2917
   2918		device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
   2919
   2920		device_config->vp_config[i].ring.ring_blocks =
   2921				VXGE_HW_DEF_RING_BLOCKS;
   2922
   2923		device_config->vp_config[i].ring.buffer_mode =
   2924				VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
   2925
   2926		device_config->vp_config[i].ring.scatter_mode =
   2927				VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
   2928
   2929		device_config->vp_config[i].ring.rxds_limit =
   2930				VXGE_HW_DEF_RING_RXDS_LIMIT;
   2931
   2932		device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
   2933
   2934		device_config->vp_config[i].fifo.fifo_blocks =
   2935				VXGE_HW_MIN_FIFO_BLOCKS;
   2936
   2937		device_config->vp_config[i].fifo.max_frags =
   2938				VXGE_HW_MAX_FIFO_FRAGS;
   2939
   2940		device_config->vp_config[i].fifo.memblock_size =
   2941				VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
   2942
   2943		device_config->vp_config[i].fifo.alignment_size =
   2944				VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
   2945
   2946		device_config->vp_config[i].fifo.intr =
   2947				VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
   2948
   2949		device_config->vp_config[i].fifo.no_snoop_bits =
   2950				VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
   2951		device_config->vp_config[i].tti.intr_enable =
   2952				VXGE_HW_TIM_INTR_DEFAULT;
   2953
   2954		device_config->vp_config[i].tti.btimer_val =
   2955				VXGE_HW_USE_FLASH_DEFAULT;
   2956
   2957		device_config->vp_config[i].tti.timer_ac_en =
   2958				VXGE_HW_USE_FLASH_DEFAULT;
   2959
   2960		device_config->vp_config[i].tti.timer_ci_en =
   2961				VXGE_HW_USE_FLASH_DEFAULT;
   2962
   2963		device_config->vp_config[i].tti.timer_ri_en =
   2964				VXGE_HW_USE_FLASH_DEFAULT;
   2965
   2966		device_config->vp_config[i].tti.rtimer_val =
   2967				VXGE_HW_USE_FLASH_DEFAULT;
   2968
   2969		device_config->vp_config[i].tti.util_sel =
   2970				VXGE_HW_USE_FLASH_DEFAULT;
   2971
   2972		device_config->vp_config[i].tti.ltimer_val =
   2973				VXGE_HW_USE_FLASH_DEFAULT;
   2974
   2975		device_config->vp_config[i].tti.urange_a =
   2976				VXGE_HW_USE_FLASH_DEFAULT;
   2977
   2978		device_config->vp_config[i].tti.uec_a =
   2979				VXGE_HW_USE_FLASH_DEFAULT;
   2980
   2981		device_config->vp_config[i].tti.urange_b =
   2982				VXGE_HW_USE_FLASH_DEFAULT;
   2983
   2984		device_config->vp_config[i].tti.uec_b =
   2985				VXGE_HW_USE_FLASH_DEFAULT;
   2986
   2987		device_config->vp_config[i].tti.urange_c =
   2988				VXGE_HW_USE_FLASH_DEFAULT;
   2989
   2990		device_config->vp_config[i].tti.uec_c =
   2991				VXGE_HW_USE_FLASH_DEFAULT;
   2992
   2993		device_config->vp_config[i].tti.uec_d =
   2994				VXGE_HW_USE_FLASH_DEFAULT;
   2995
   2996		device_config->vp_config[i].rti.intr_enable =
   2997				VXGE_HW_TIM_INTR_DEFAULT;
   2998
   2999		device_config->vp_config[i].rti.btimer_val =
   3000				VXGE_HW_USE_FLASH_DEFAULT;
   3001
   3002		device_config->vp_config[i].rti.timer_ac_en =
   3003				VXGE_HW_USE_FLASH_DEFAULT;
   3004
   3005		device_config->vp_config[i].rti.timer_ci_en =
   3006				VXGE_HW_USE_FLASH_DEFAULT;
   3007
   3008		device_config->vp_config[i].rti.timer_ri_en =
   3009				VXGE_HW_USE_FLASH_DEFAULT;
   3010
   3011		device_config->vp_config[i].rti.rtimer_val =
   3012				VXGE_HW_USE_FLASH_DEFAULT;
   3013
   3014		device_config->vp_config[i].rti.util_sel =
   3015				VXGE_HW_USE_FLASH_DEFAULT;
   3016
   3017		device_config->vp_config[i].rti.ltimer_val =
   3018				VXGE_HW_USE_FLASH_DEFAULT;
   3019
   3020		device_config->vp_config[i].rti.urange_a =
   3021				VXGE_HW_USE_FLASH_DEFAULT;
   3022
   3023		device_config->vp_config[i].rti.uec_a =
   3024				VXGE_HW_USE_FLASH_DEFAULT;
   3025
   3026		device_config->vp_config[i].rti.urange_b =
   3027				VXGE_HW_USE_FLASH_DEFAULT;
   3028
   3029		device_config->vp_config[i].rti.uec_b =
   3030				VXGE_HW_USE_FLASH_DEFAULT;
   3031
   3032		device_config->vp_config[i].rti.urange_c =
   3033				VXGE_HW_USE_FLASH_DEFAULT;
   3034
   3035		device_config->vp_config[i].rti.uec_c =
   3036				VXGE_HW_USE_FLASH_DEFAULT;
   3037
   3038		device_config->vp_config[i].rti.uec_d =
   3039				VXGE_HW_USE_FLASH_DEFAULT;
   3040
   3041		device_config->vp_config[i].mtu =
   3042				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
   3043
   3044		device_config->vp_config[i].rpa_strip_vlan_tag =
   3045			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
   3046	}
   3047
   3048	return VXGE_HW_OK;
   3049}
   3050
   3051/*
   3052 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
   3053 * Set the swapper bits appropriately for the vpath.
   3054 */
   3055static enum vxge_hw_status
   3056__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
   3057{
   3058#ifndef __BIG_ENDIAN
   3059	u64 val64;
   3060
   3061	val64 = readq(&vpath_reg->vpath_general_cfg1);
   3062	wmb();
   3063	val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
   3064	writeq(val64, &vpath_reg->vpath_general_cfg1);
   3065	wmb();
   3066#endif
   3067	return VXGE_HW_OK;
   3068}
   3069
   3070/*
   3071 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
   3072 * Set the swapper bits appropriately for the vpath.
   3073 */
   3074static enum vxge_hw_status
   3075__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
   3076			   struct vxge_hw_vpath_reg __iomem *vpath_reg)
   3077{
   3078	u64 val64;
   3079
   3080	val64 = readq(&legacy_reg->pifm_wr_swap_en);
   3081
   3082	if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
   3083		val64 = readq(&vpath_reg->kdfcctl_cfg0);
   3084		wmb();
   3085
   3086		val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0	|
   3087			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1	|
   3088			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
   3089
   3090		writeq(val64, &vpath_reg->kdfcctl_cfg0);
   3091		wmb();
   3092	}
   3093
   3094	return VXGE_HW_OK;
   3095}
   3096
   3097/*
   3098 * vxge_hw_mgmt_reg_read - Read Titan register.
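 * The register space is selected by 'type'; 'index' picks the instance for
 * the srpcim, vpmgmt and vpath spaces, and 'offset' must fall within the
 * selected register block.  The mrpcim and srpcim spaces additionally
 * require the matching access right.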
   3099 */
   3100enum vxge_hw_status
   3101vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
   3102		      enum vxge_hw_mgmt_reg_type type,
   3103		      u32 index, u32 offset, u64 *value)
   3104{
   3105	enum vxge_hw_status status = VXGE_HW_OK;
   3106
   3107	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
   3108		status = VXGE_HW_ERR_INVALID_DEVICE;
   3109		goto exit;
   3110	}
   3111
   3112	switch (type) {
   3113	case vxge_hw_mgmt_reg_type_legacy:
   3114		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
   3115			status = VXGE_HW_ERR_INVALID_OFFSET;
   3116			break;
   3117		}
   3118		*value = readq((void __iomem *)hldev->legacy_reg + offset);
   3119		break;
   3120	case vxge_hw_mgmt_reg_type_toc:
   3121		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
   3122			status = VXGE_HW_ERR_INVALID_OFFSET;
   3123			break;
   3124		}
   3125		*value = readq((void __iomem *)hldev->toc_reg + offset);
   3126		break;
   3127	case vxge_hw_mgmt_reg_type_common:
   3128		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
   3129			status = VXGE_HW_ERR_INVALID_OFFSET;
   3130			break;
   3131		}
   3132		*value = readq((void __iomem *)hldev->common_reg + offset);
   3133		break;
   3134	case vxge_hw_mgmt_reg_type_mrpcim:
   3135		if (!(hldev->access_rights &
   3136			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
   3137			status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
   3138			break;
   3139		}
   3140		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
   3141			status = VXGE_HW_ERR_INVALID_OFFSET;
   3142			break;
   3143		}
   3144		*value = readq((void __iomem *)hldev->mrpcim_reg + offset);
   3145		break;
   3146	case vxge_hw_mgmt_reg_type_srpcim:
   3147		if (!(hldev->access_rights &
   3148			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
   3149			status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
   3150			break;
   3151		}
   3152		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
   3153			status = VXGE_HW_ERR_INVALID_INDEX;
   3154			break;
   3155		}
   3156		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
   3157			status = VXGE_HW_ERR_INVALID_OFFSET;
   3158			break;
   3159		}
   3160		*value = readq((void __iomem *)hldev->srpcim_reg[index] +
   3161				offset);
   3162		break;
   3163	case vxge_hw_mgmt_reg_type_vpmgmt:
   3164		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
   3165			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
   3166			status = VXGE_HW_ERR_INVALID_INDEX;
   3167			break;
   3168		}
   3169		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
   3170			status = VXGE_HW_ERR_INVALID_OFFSET;
   3171			break;
   3172		}
   3173		*value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
   3174				offset);
   3175		break;
   3176	case vxge_hw_mgmt_reg_type_vpath:
   3177		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
   3178			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
   3179			status = VXGE_HW_ERR_INVALID_INDEX;
   3180			break;
   3181		}
   3182		if (index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) {
   3183			status = VXGE_HW_ERR_INVALID_INDEX;
   3184			break;
   3185		}
   3186		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
   3187			status = VXGE_HW_ERR_INVALID_OFFSET;
   3188			break;
   3189		}
   3190		*value = readq((void __iomem *)hldev->vpath_reg[index] +
   3191				offset);
   3192		break;
   3193	default:
   3194		status = VXGE_HW_ERR_INVALID_TYPE;
   3195		break;
   3196	}
   3197
   3198exit:
   3199	return status;
   3200}
   3201
   3202/*
   3203 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
   3204 */
   3205enum vxge_hw_status
   3206vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
   3207{
   3208	struct vxge_hw_vpmgmt_reg       __iomem *vpmgmt_reg;
   3209	int i = 0, j = 0;
   3210
   3211	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
   3212		if (!((vpath_mask) & vxge_mBIT(i)))
   3213			continue;
   3214		vpmgmt_reg = hldev->vpmgmt_reg[i];
   3215		for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
   3216			if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
   3217			& VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
   3218				return VXGE_HW_FAIL;
   3219		}
   3220	}
   3221	return VXGE_HW_OK;
   3222}
   3223/*
    3224	 * vxge_hw_mgmt_reg_write - Write Titan register.
   3225 */
   3226enum vxge_hw_status
   3227vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
   3228		      enum vxge_hw_mgmt_reg_type type,
   3229		      u32 index, u32 offset, u64 value)
   3230{
   3231	enum vxge_hw_status status = VXGE_HW_OK;
   3232
   3233	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
   3234		status = VXGE_HW_ERR_INVALID_DEVICE;
   3235		goto exit;
   3236	}
   3237
   3238	switch (type) {
   3239	case vxge_hw_mgmt_reg_type_legacy:
   3240		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
   3241			status = VXGE_HW_ERR_INVALID_OFFSET;
   3242			break;
   3243		}
   3244		writeq(value, (void __iomem *)hldev->legacy_reg + offset);
   3245		break;
   3246	case vxge_hw_mgmt_reg_type_toc:
   3247		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
   3248			status = VXGE_HW_ERR_INVALID_OFFSET;
   3249			break;
   3250		}
   3251		writeq(value, (void __iomem *)hldev->toc_reg + offset);
   3252		break;
   3253	case vxge_hw_mgmt_reg_type_common:
   3254		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
   3255			status = VXGE_HW_ERR_INVALID_OFFSET;
   3256			break;
   3257		}
   3258		writeq(value, (void __iomem *)hldev->common_reg + offset);
   3259		break;
   3260	case vxge_hw_mgmt_reg_type_mrpcim:
   3261		if (!(hldev->access_rights &
   3262			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
   3263			status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
   3264			break;
   3265		}
   3266		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
   3267			status = VXGE_HW_ERR_INVALID_OFFSET;
   3268			break;
   3269		}
   3270		writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
   3271		break;
   3272	case vxge_hw_mgmt_reg_type_srpcim:
   3273		if (!(hldev->access_rights &
   3274			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
   3275			status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
   3276			break;
   3277		}
   3278		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
   3279			status = VXGE_HW_ERR_INVALID_INDEX;
   3280			break;
   3281		}
   3282		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
   3283			status = VXGE_HW_ERR_INVALID_OFFSET;
   3284			break;
   3285		}
   3286		writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
   3287			offset);
   3288
   3289		break;
   3290	case vxge_hw_mgmt_reg_type_vpmgmt:
   3291		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
   3292			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
   3293			status = VXGE_HW_ERR_INVALID_INDEX;
   3294			break;
   3295		}
   3296		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
   3297			status = VXGE_HW_ERR_INVALID_OFFSET;
   3298			break;
   3299		}
   3300		writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
   3301			offset);
   3302		break;
   3303	case vxge_hw_mgmt_reg_type_vpath:
   3304		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
   3305			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
   3306			status = VXGE_HW_ERR_INVALID_INDEX;
   3307			break;
   3308		}
   3309		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
   3310			status = VXGE_HW_ERR_INVALID_OFFSET;
   3311			break;
   3312		}
   3313		writeq(value, (void __iomem *)hldev->vpath_reg[index] +
   3314			offset);
   3315		break;
   3316	default:
   3317		status = VXGE_HW_ERR_INVALID_TYPE;
   3318		break;
   3319	}
   3320exit:
   3321	return status;
   3322}
   3323
   3324/*
    3325	 * __vxge_hw_fifo_abort - Return posted TxDs to the driver
    3326	 * This function terminates all outstanding TxDs of the fifo
   3327 */
   3328static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
   3329{
   3330	void *txdlh;
   3331
   3332	for (;;) {
   3333		vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
   3334
   3335		if (txdlh == NULL)
   3336			break;
   3337
   3338		vxge_hw_channel_dtr_complete(&fifo->channel);
   3339
   3340		if (fifo->txdl_term) {
   3341			fifo->txdl_term(txdlh,
   3342			VXGE_HW_TXDL_STATE_POSTED,
   3343			fifo->channel.userdata);
   3344		}
   3345
   3346		vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
   3347	}
   3348
   3349	return VXGE_HW_OK;
   3350}
   3351
   3352/*
   3353 * __vxge_hw_fifo_reset - Resets the fifo
   3354 * This function resets the fifo during vpath reset operation
   3355 */
   3356static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
   3357{
   3358	enum vxge_hw_status status = VXGE_HW_OK;
   3359
   3360	__vxge_hw_fifo_abort(fifo);
   3361	status = __vxge_hw_channel_reset(&fifo->channel);
   3362
   3363	return status;
   3364}
   3365
   3366/*
   3367 * __vxge_hw_fifo_delete - Removes the FIFO
    3368 * This function frees up the memory pool and removes the FIFO
   3369 */
   3370static enum vxge_hw_status
   3371__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
   3372{
   3373	struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
   3374
   3375	__vxge_hw_fifo_abort(fifo);
   3376
   3377	if (fifo->mempool)
   3378		__vxge_hw_mempool_destroy(fifo->mempool);
   3379
   3380	vp->vpath->fifoh = NULL;
   3381
   3382	__vxge_hw_channel_free(&fifo->channel);
   3383
   3384	return VXGE_HW_OK;
   3385}
   3386
   3387/*
    3388 * __vxge_hw_fifo_mempool_item_alloc - Allocate list blocks for the TxD
    3389 * list callback
    3390 * This function is the callback passed to __vxge_hw_mempool_create to create
    3391 * the memory pool for the TxD list
   3392 */
   3393static void
   3394__vxge_hw_fifo_mempool_item_alloc(
   3395	struct vxge_hw_mempool *mempoolh,
   3396	u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
   3397	u32 index, u32 is_last)
   3398{
   3399	u32 memblock_item_idx;
   3400	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
   3401	struct vxge_hw_fifo_txd *txdp =
   3402		(struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
   3403	struct __vxge_hw_fifo *fifo =
   3404			(struct __vxge_hw_fifo *)mempoolh->userdata;
   3405	void *memblock = mempoolh->memblocks_arr[memblock_index];
   3406
   3407	vxge_assert(txdp);
   3408
   3409	txdp->host_control = (u64) (size_t)
   3410	__vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
   3411					&memblock_item_idx);
   3412
   3413	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
   3414
   3415	vxge_assert(txdl_priv);
   3416
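	/* TxDLs fill the channel's reserve array from the top down:
	 * mempool item 'index' lands in slot (reserve_ptr - 1 - index).
	 */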
   3417	fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
   3418
   3419	/* pre-format HW's TxDL's private */
   3420	txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
   3421	txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
   3422	txdl_priv->dma_handle = dma_object->handle;
   3423	txdl_priv->memblock   = memblock;
   3424	txdl_priv->first_txdp = txdp;
   3425	txdl_priv->next_txdl_priv = NULL;
   3426	txdl_priv->alloc_frags = 0;
   3427}
   3428
   3429/*
   3430 * __vxge_hw_fifo_create - Create a FIFO
    3431 * This function creates a FIFO and initializes it.
   3432 */
   3433static enum vxge_hw_status
   3434__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
   3435		      struct vxge_hw_fifo_attr *attr)
   3436{
   3437	enum vxge_hw_status status = VXGE_HW_OK;
   3438	struct __vxge_hw_fifo *fifo;
   3439	struct vxge_hw_fifo_config *config;
   3440	u32 txdl_size, txdl_per_memblock;
   3441	struct vxge_hw_mempool_cbs fifo_mp_callback;
   3442	struct __vxge_hw_virtualpath *vpath;
   3443
   3444	if ((vp == NULL) || (attr == NULL)) {
   3445		status = VXGE_HW_ERR_INVALID_HANDLE;
   3446		goto exit;
   3447	}
   3448	vpath = vp->vpath;
   3449	config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
   3450
   3451	txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
   3452
   3453	txdl_per_memblock = config->memblock_size / txdl_size;
   3454
   3455	fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
   3456					VXGE_HW_CHANNEL_TYPE_FIFO,
   3457					config->fifo_blocks * txdl_per_memblock,
   3458					attr->per_txdl_space, attr->userdata);
   3459
   3460	if (fifo == NULL) {
   3461		status = VXGE_HW_ERR_OUT_OF_MEMORY;
   3462		goto exit;
   3463	}
   3464
   3465	vpath->fifoh = fifo;
   3466	fifo->nofl_db = vpath->nofl_db;
   3467
   3468	fifo->vp_id = vpath->vp_id;
   3469	fifo->vp_reg = vpath->vp_reg;
   3470	fifo->stats = &vpath->sw_stats->fifo_stats;
   3471
   3472	fifo->config = config;
   3473
   3474	/* apply "interrupts per txdl" attribute */
   3475	fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
   3476	fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
   3477	fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
   3478
   3479	if (fifo->config->intr)
   3480		fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
   3481
   3482	fifo->no_snoop_bits = config->no_snoop_bits;
   3483
   3484	/*
   3485	 * FIFO memory management strategy:
   3486	 *
   3487	 * TxDL split into three independent parts:
   3488	 *	- set of TxD's
   3489	 *	- TxD HW private part
   3490	 *	- driver private part
   3491	 *
    3492	 * Adaptive memory allocation is used, i.e. memory is allocated on
    3493	 * demand with a size that fits into one memory block.
   3494	 * One memory block may contain more than one TxDL.
   3495	 *
   3496	 * During "reserve" operations more memory can be allocated on demand
   3497	 * for example due to FIFO full condition.
   3498	 *
    3499	 * The pool of memblocks never shrinks except in the __vxge_hw_fifo_close
   3500	 * routine which will essentially stop the channel and free resources.
   3501	 */
   3502
   3503	/* TxDL common private size == TxDL private  +  driver private */
   3504	fifo->priv_size =
   3505		sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
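	/* round the per-TxDL private area up to a whole number of cache lines */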
   3506	fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
   3507			VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
   3508
   3509	fifo->per_txdl_space = attr->per_txdl_space;
   3510
    3511	/* store the TxDL geometry computed above */
   3512	fifo->txdl_size = txdl_size;
   3513	fifo->txdl_per_memblock = txdl_per_memblock;
   3514
   3515	fifo->txdl_term = attr->txdl_term;
   3516	fifo->callback = attr->callback;
   3517
   3518	if (fifo->txdl_per_memblock == 0) {
   3519		__vxge_hw_fifo_delete(vp);
   3520		status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
   3521		goto exit;
   3522	}
   3523
   3524	fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
   3525
   3526	fifo->mempool =
   3527		__vxge_hw_mempool_create(vpath->hldev,
   3528			fifo->config->memblock_size,
   3529			fifo->txdl_size,
   3530			fifo->priv_size,
   3531			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
   3532			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
   3533			&fifo_mp_callback,
   3534			fifo);
   3535
   3536	if (fifo->mempool == NULL) {
   3537		__vxge_hw_fifo_delete(vp);
   3538		status = VXGE_HW_ERR_OUT_OF_MEMORY;
   3539		goto exit;
   3540	}
   3541
   3542	status = __vxge_hw_channel_initialize(&fifo->channel);
   3543	if (status != VXGE_HW_OK) {
   3544		__vxge_hw_fifo_delete(vp);
   3545		goto exit;
   3546	}
   3547
   3548	vxge_assert(fifo->channel.reserve_ptr);
   3549exit:
   3550	return status;
   3551}
   3552
   3553/*
    3554 * __vxge_hw_vpath_pci_read - Read the content of a given address
   3555 *                          in pci config space.
   3556 * Read from the vpath pci config space.
   3557 */
   3558static enum vxge_hw_status
   3559__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
   3560			 u32 phy_func_0, u32 offset, u32 *val)
   3561{
   3562	u64 val64;
   3563	enum vxge_hw_status status = VXGE_HW_OK;
   3564	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
   3565
   3566	val64 =	VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
   3567
   3568	if (phy_func_0)
   3569		val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
   3570
   3571	writeq(val64, &vp_reg->pci_config_access_cfg1);
   3572	wmb();
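	/* issue the config-space access and poll until the adapter completes it */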
   3573	writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
   3574			&vp_reg->pci_config_access_cfg2);
   3575	wmb();
   3576
   3577	status = __vxge_hw_device_register_poll(
   3578			&vp_reg->pci_config_access_cfg2,
   3579			VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
   3580
   3581	if (status != VXGE_HW_OK)
   3582		goto exit;
   3583
   3584	val64 = readq(&vp_reg->pci_config_access_status);
   3585
   3586	if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
   3587		status = VXGE_HW_FAIL;
   3588		*val = 0;
   3589	} else
   3590		*val = (u32)vxge_bVALn(val64, 32, 32);
   3591exit:
   3592	return status;
   3593}
   3594
   3595/**
   3596 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
   3597 * @hldev: HW device.
    3598 * @on_off: TRUE to turn flickering on, FALSE to turn it off
   3599 *
   3600 * Flicker the link LED.
   3601 */
   3602enum vxge_hw_status
   3603vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
   3604{
   3605	struct __vxge_hw_virtualpath *vpath;
   3606	u64 data0, data1 = 0, steer_ctrl = 0;
   3607	enum vxge_hw_status status;
   3608
   3609	if (hldev == NULL) {
   3610		status = VXGE_HW_ERR_INVALID_DEVICE;
   3611		goto exit;
   3612	}
   3613
   3614	vpath = &hldev->virtual_paths[hldev->first_vp_id];
   3615
   3616	data0 = on_off;
   3617	status = vxge_hw_vpath_fw_api(vpath,
   3618			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
   3619			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
   3620			0, &data0, &data1, &steer_ctrl);
   3621exit:
   3622	return status;
   3623}
   3624
   3625/*
   3626 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
   3627 */
   3628enum vxge_hw_status
   3629__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
   3630			      u32 action, u32 rts_table, u32 offset,
   3631			      u64 *data0, u64 *data1)
   3632{
   3633	enum vxge_hw_status status;
   3634	u64 steer_ctrl = 0;
   3635
   3636	if (vp == NULL) {
   3637		status = VXGE_HW_ERR_INVALID_HANDLE;
   3638		goto exit;
   3639	}
   3640
   3641	if ((rts_table ==
   3642	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
   3643	    (rts_table ==
   3644	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
   3645	    (rts_table ==
   3646	     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
   3647	    (rts_table ==
   3648	     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
   3649		steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
   3650	}
   3651
   3652	status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
   3653				      data0, data1, &steer_ctrl);
   3654	if (status != VXGE_HW_OK)
   3655		goto exit;
   3656
   3657	if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
   3658	    (rts_table !=
   3659	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
   3660		*data1 = 0;
   3661exit:
   3662	return status;
   3663}
   3664
   3665/*
   3666 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
   3667 */
   3668enum vxge_hw_status
   3669__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
   3670			      u32 rts_table, u32 offset, u64 steer_data0,
   3671			      u64 steer_data1)
   3672{
   3673	u64 data0, data1 = 0, steer_ctrl = 0;
   3674	enum vxge_hw_status status;
   3675
   3676	if (vp == NULL) {
   3677		status = VXGE_HW_ERR_INVALID_HANDLE;
   3678		goto exit;
   3679	}
   3680
   3681	data0 = steer_data0;
   3682
   3683	if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
   3684	    (rts_table ==
   3685	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
   3686		data1 = steer_data1;
   3687
   3688	status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
   3689				      &data0, &data1, &steer_ctrl);
   3690exit:
   3691	return status;
   3692}
   3693
   3694/*
   3695 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
   3696 */
   3697enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
   3698			struct __vxge_hw_vpath_handle *vp,
   3699			enum vxge_hw_rth_algoritms algorithm,
   3700			struct vxge_hw_rth_hash_types *hash_type,
   3701			u16 bucket_size)
   3702{
   3703	u64 data0, data1;
   3704	enum vxge_hw_status status = VXGE_HW_OK;
   3705
   3706	if (vp == NULL) {
   3707		status = VXGE_HW_ERR_INVALID_HANDLE;
   3708		goto exit;
   3709	}
   3710
   3711	status = __vxge_hw_vpath_rts_table_get(vp,
   3712		     VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
   3713		     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
   3714			0, &data0, &data1);
   3715	if (status != VXGE_HW_OK)
   3716		goto exit;
   3717
   3718	data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
   3719			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
   3720
   3721	data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
   3722	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
   3723	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
   3724
   3725	if (hash_type->hash_type_tcpipv4_en)
   3726		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
   3727
   3728	if (hash_type->hash_type_ipv4_en)
   3729		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
   3730
   3731	if (hash_type->hash_type_tcpipv6_en)
   3732		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
   3733
   3734	if (hash_type->hash_type_ipv6_en)
   3735		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
   3736
   3737	if (hash_type->hash_type_tcpipv6ex_en)
   3738		data0 |=
   3739		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
   3740
   3741	if (hash_type->hash_type_ipv6ex_en)
   3742		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
   3743
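	/* toggle which of the two RTH tables is marked active */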
   3744	if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
   3745		data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
   3746	else
   3747		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
   3748
   3749	status = __vxge_hw_vpath_rts_table_set(vp,
   3750		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
   3751		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
   3752		0, data0, 0);
   3753exit:
   3754	return status;
   3755}
   3756
   3757static void
   3758vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
   3759				u16 flag, u8 *itable)
   3760{
   3761	switch (flag) {
   3762	case 1:
   3763		*data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
   3764			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
   3765			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
   3766			itable[j]);
   3767		fallthrough;
   3768	case 2:
   3769		*data0 |=
   3770			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
   3771			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
   3772			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
   3773			itable[j]);
   3774		fallthrough;
   3775	case 3:
   3776		*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
   3777			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
   3778			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
   3779			itable[j]);
   3780		fallthrough;
   3781	case 4:
   3782		*data1 |=
   3783			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
   3784			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
   3785			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
   3786			itable[j]);
   3787		return;
   3788	default:
   3789		return;
   3790	}
   3791}
   3792/*
   3793 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
   3794 */
   3795enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
   3796			struct __vxge_hw_vpath_handle **vpath_handles,
   3797			u32 vpath_count,
   3798			u8 *mtable,
   3799			u8 *itable,
   3800			u32 itable_size)
   3801{
   3802	u32 i, j, action, rts_table;
   3803	u64 data0;
   3804	u64 data1;
   3805	u32 max_entries;
   3806	enum vxge_hw_status status = VXGE_HW_OK;
   3807	struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
   3808
   3809	if (vp == NULL) {
   3810		status = VXGE_HW_ERR_INVALID_HANDLE;
   3811		goto exit;
   3812	}
   3813
   3814	max_entries = (((u32)1) << itable_size);
   3815
   3816	if (vp->vpath->hldev->config.rth_it_type
   3817				== VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
   3818		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
   3819		rts_table =
   3820			VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
   3821
   3822		for (j = 0; j < max_entries; j++) {
   3823
   3824			data1 = 0;
   3825
   3826			data0 =
   3827			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
   3828				itable[j]);
   3829
   3830			status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
   3831				action, rts_table, j, data0, data1);
   3832
   3833			if (status != VXGE_HW_OK)
   3834				goto exit;
   3835		}
   3836
   3837		for (j = 0; j < max_entries; j++) {
   3838
   3839			data1 = 0;
   3840
   3841			data0 =
   3842			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
   3843			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
   3844				itable[j]);
   3845
   3846			status = __vxge_hw_vpath_rts_table_set(
   3847				vpath_handles[mtable[itable[j]]], action,
   3848				rts_table, j, data0, data1);
   3849
   3850			if (status != VXGE_HW_OK)
   3851				goto exit;
   3852		}
   3853	} else {
   3854		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
   3855		rts_table =
   3856			VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
   3857		for (i = 0; i < vpath_count; i++) {
   3858
   3859			for (j = 0; j < max_entries;) {
   3860
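				/* pack up to four indirection-table entries owned by
				 * vpath 'i' into one data0/data1 steering write
				 */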
   3861				data0 = 0;
   3862				data1 = 0;
   3863
   3864				while (j < max_entries) {
   3865					if (mtable[itable[j]] != i) {
   3866						j++;
   3867						continue;
   3868					}
   3869					vxge_hw_rts_rth_data0_data1_get(j,
   3870						&data0, &data1, 1, itable);
   3871					j++;
   3872					break;
   3873				}
   3874
   3875				while (j < max_entries) {
   3876					if (mtable[itable[j]] != i) {
   3877						j++;
   3878						continue;
   3879					}
   3880					vxge_hw_rts_rth_data0_data1_get(j,
   3881						&data0, &data1, 2, itable);
   3882					j++;
   3883					break;
   3884				}
   3885
   3886				while (j < max_entries) {
   3887					if (mtable[itable[j]] != i) {
   3888						j++;
   3889						continue;
   3890					}
   3891					vxge_hw_rts_rth_data0_data1_get(j,
   3892						&data0, &data1, 3, itable);
   3893					j++;
   3894					break;
   3895				}
   3896
   3897				while (j < max_entries) {
   3898					if (mtable[itable[j]] != i) {
   3899						j++;
   3900						continue;
   3901					}
   3902					vxge_hw_rts_rth_data0_data1_get(j,
   3903						&data0, &data1, 4, itable);
   3904					j++;
   3905					break;
   3906				}
   3907
   3908				if (data0 != 0) {
   3909					status = __vxge_hw_vpath_rts_table_set(
   3910							vpath_handles[i],
   3911							action, rts_table,
   3912							0, data0, data1);
   3913
   3914					if (status != VXGE_HW_OK)
   3915						goto exit;
   3916				}
   3917			}
   3918		}
   3919	}
   3920exit:
   3921	return status;
   3922}
   3923
   3924/**
   3925 * vxge_hw_vpath_check_leak - Check for memory leak
   3926 * @ring: Handle to the ring object used for receive
   3927 *
    3928 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
    3929 * PRC_CFG6_VPn.RXD_SPAT, then a leak has occurred.
    3930 * Returns: VXGE_HW_FAIL if a leak has occurred.
   3931 *
   3932 */
   3933enum vxge_hw_status
   3934vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
   3935{
   3936	enum vxge_hw_status status = VXGE_HW_OK;
   3937	u64 rxd_new_count, rxd_spat;
   3938
   3939	if (ring == NULL)
   3940		return status;
   3941
   3942	rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
   3943	rxd_spat = readq(&ring->vp_reg->prc_cfg6);
   3944	rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
   3945
   3946	if (rxd_new_count >= rxd_spat)
   3947		status = VXGE_HW_FAIL;
   3948
   3949	return status;
   3950}
   3951
   3952/*
   3953 * __vxge_hw_vpath_mgmt_read
   3954 * This routine reads the vpath_mgmt registers
   3955 */
   3956static enum vxge_hw_status
   3957__vxge_hw_vpath_mgmt_read(
   3958	struct __vxge_hw_device *hldev,
   3959	struct __vxge_hw_virtualpath *vpath)
   3960{
   3961	u32 i, mtu = 0, max_pyld = 0;
   3962	u64 val64;
   3963
   3964	for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
   3965
   3966		val64 = readq(&vpath->vpmgmt_reg->
   3967				rxmac_cfg0_port_vpmgmt_clone[i]);
   3968		max_pyld =
   3969			(u32)
   3970			VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
   3971			(val64);
   3972		if (mtu < max_pyld)
   3973			mtu = max_pyld;
   3974	}
   3975
   3976	vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
   3977
   3978	val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
   3979
   3980	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
   3981		if (val64 & vxge_mBIT(i))
   3982			vpath->vsport_number = i;
   3983	}
   3984
   3985	val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
   3986
   3987	if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
   3988		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
   3989	else
   3990		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
   3991
   3992	return VXGE_HW_OK;
   3993}
   3994
   3995/*
    3996 * __vxge_hw_vpath_reset_check - Check if the vpath reset has completed
    3997 * This routine checks the vpath_rst_in_prog register to see if the
    3998 * adapter has completed the reset process for the vpath
   3999 */
   4000static enum vxge_hw_status
   4001__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
   4002{
   4003	enum vxge_hw_status status;
   4004
   4005	status = __vxge_hw_device_register_poll(
   4006			&vpath->hldev->common_reg->vpath_rst_in_prog,
   4007			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
   4008				1 << (16 - vpath->vp_id)),
   4009			vpath->hldev->config.device_poll_millis);
   4010
   4011	return status;
   4012}
   4013
   4014/*
   4015 * __vxge_hw_vpath_reset
   4016 * This routine resets the vpath on the device
   4017 */
   4018static enum vxge_hw_status
   4019__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
   4020{
   4021	u64 val64;
   4022
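	/* vpath vp_id corresponds to bit (16 - vp_id) of the reset field */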
   4023	val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
   4024
   4025	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
   4026				&hldev->common_reg->cmn_rsthdlr_cfg0);
   4027
   4028	return VXGE_HW_OK;
   4029}
   4030
   4031/*
   4032 * __vxge_hw_vpath_sw_reset
   4033 * This routine resets the vpath structures
   4034 */
   4035static enum vxge_hw_status
   4036__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
   4037{
   4038	enum vxge_hw_status status = VXGE_HW_OK;
   4039	struct __vxge_hw_virtualpath *vpath;
   4040
   4041	vpath = &hldev->virtual_paths[vp_id];
   4042
   4043	if (vpath->ringh) {
   4044		status = __vxge_hw_ring_reset(vpath->ringh);
   4045		if (status != VXGE_HW_OK)
   4046			goto exit;
   4047	}
   4048
   4049	if (vpath->fifoh)
   4050		status = __vxge_hw_fifo_reset(vpath->fifoh);
   4051exit:
   4052	return status;
   4053}
   4054
   4055/*
   4056 * __vxge_hw_vpath_prc_configure
    4057 * This routine configures the prc registers of the virtual path using the
    4058 * config passed
   4059 */
   4060static void
   4061__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
   4062{
   4063	u64 val64;
   4064	struct __vxge_hw_virtualpath *vpath;
   4065	struct vxge_hw_vp_config *vp_config;
   4066	struct vxge_hw_vpath_reg __iomem *vp_reg;
   4067
   4068	vpath = &hldev->virtual_paths[vp_id];
   4069	vp_reg = vpath->vp_reg;
   4070	vp_config = vpath->vp_config;
   4071
   4072	if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
   4073		return;
   4074
   4075	val64 = readq(&vp_reg->prc_cfg1);
   4076	val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
   4077	writeq(val64, &vp_reg->prc_cfg1);
   4078
   4079	val64 = readq(&vpath->vp_reg->prc_cfg6);
   4080	val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
   4081	writeq(val64, &vpath->vp_reg->prc_cfg6);
   4082
   4083	val64 = readq(&vp_reg->prc_cfg7);
   4084
   4085	if (vpath->vp_config->ring.scatter_mode !=
   4086		VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
   4087
   4088		val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
   4089
   4090		switch (vpath->vp_config->ring.scatter_mode) {
   4091		case VXGE_HW_RING_SCATTER_MODE_A:
   4092			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
   4093					VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
   4094			break;
   4095		case VXGE_HW_RING_SCATTER_MODE_B:
   4096			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
   4097					VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
   4098			break;
   4099		case VXGE_HW_RING_SCATTER_MODE_C:
   4100			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
   4101					VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
   4102			break;
   4103		}
   4104	}
   4105
   4106	writeq(val64, &vp_reg->prc_cfg7);
   4107
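	/* program the DMA address of the ring's first RxD block in 8-byte units */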
   4108	writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
   4109				__vxge_hw_ring_first_block_address_get(
   4110					vpath->ringh) >> 3), &vp_reg->prc_cfg5);
   4111
   4112	val64 = readq(&vp_reg->prc_cfg4);
   4113	val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
   4114	val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
   4115
   4116	val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
   4117			VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
   4118
   4119	if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
   4120		val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
   4121	else
   4122		val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
   4123
   4124	writeq(val64, &vp_reg->prc_cfg4);
   4125}
   4126
   4127/*
   4128 * __vxge_hw_vpath_kdfc_configure
    4129 * This routine configures the kdfc registers of the virtual path using the
   4130 * config passed
   4131 */
   4132static enum vxge_hw_status
   4133__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
   4134{
   4135	u64 val64;
   4136	u64 vpath_stride;
   4137	enum vxge_hw_status status = VXGE_HW_OK;
   4138	struct __vxge_hw_virtualpath *vpath;
   4139	struct vxge_hw_vpath_reg __iomem *vp_reg;
   4140
   4141	vpath = &hldev->virtual_paths[vp_id];
   4142	vp_reg = vpath->vp_reg;
   4143	status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
   4144
   4145	if (status != VXGE_HW_OK)
   4146		goto exit;
   4147
   4148	val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
   4149
   4150	vpath->max_kdfc_db =
   4151		(u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
   4152			val64+1)/2;
   4153
   4154	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
   4155
   4156		vpath->max_nofl_db = vpath->max_kdfc_db;
   4157
   4158		if (vpath->max_nofl_db <
   4159			((vpath->vp_config->fifo.memblock_size /
   4160			(vpath->vp_config->fifo.max_frags *
   4161			sizeof(struct vxge_hw_fifo_txd))) *
   4162			vpath->vp_config->fifo.fifo_blocks)) {
   4163
   4164			return VXGE_HW_BADCFG_FIFO_BLOCKS;
   4165		}
   4166		val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
   4167				(vpath->max_nofl_db*2)-1);
   4168	}
   4169
   4170	writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
   4171
   4172	writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
   4173		&vp_reg->kdfc_fifo_trpl_ctrl);
   4174
   4175	val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
   4176
   4177	val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
   4178		   VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
   4179
   4180	val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
   4181		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
   4182#ifndef __BIG_ENDIAN
   4183		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
   4184#endif
   4185		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
   4186
   4187	writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
   4188	writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
   4189	wmb();
   4190	vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
   4191
   4192	vpath->nofl_db =
   4193		(struct __vxge_hw_non_offload_db_wrapper __iomem *)
   4194		(hldev->kdfc + (vp_id *
   4195		VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
   4196					vpath_stride)));
   4197exit:
   4198	return status;
   4199}
   4200
   4201/*
   4202 * __vxge_hw_vpath_mac_configure
    4203 * This routine configures the mac of the virtual path using the config passed
   4204 */
   4205static enum vxge_hw_status
   4206__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
   4207{
   4208	u64 val64;
   4209	struct __vxge_hw_virtualpath *vpath;
   4210	struct vxge_hw_vp_config *vp_config;
   4211	struct vxge_hw_vpath_reg __iomem *vp_reg;
   4212
   4213	vpath = &hldev->virtual_paths[vp_id];
   4214	vp_reg = vpath->vp_reg;
   4215	vp_config = vpath->vp_config;
   4216
   4217	writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
   4218			vpath->vsport_number), &vp_reg->xmac_vsport_choice);
   4219
   4220	if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
   4221
   4222		val64 = readq(&vp_reg->xmac_rpa_vcfg);
   4223
   4224		if (vp_config->rpa_strip_vlan_tag !=
   4225			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
   4226			if (vp_config->rpa_strip_vlan_tag)
   4227				val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
   4228			else
   4229				val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
   4230		}
   4231
   4232		writeq(val64, &vp_reg->xmac_rpa_vcfg);
   4233		val64 = readq(&vp_reg->rxmac_vcfg0);
   4234
   4235		if (vp_config->mtu !=
   4236				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
   4237			val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
   4238			if ((vp_config->mtu  +
   4239				VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
   4240				val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
   4241					vp_config->mtu  +
   4242					VXGE_HW_MAC_HEADER_MAX_SIZE);
   4243			else
   4244				val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
   4245					vpath->max_mtu);
   4246		}
   4247
   4248		writeq(val64, &vp_reg->rxmac_vcfg0);
   4249
   4250		val64 = readq(&vp_reg->rxmac_vcfg1);
   4251
   4252		val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
   4253			VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
   4254
   4255		if (hldev->config.rth_it_type ==
   4256				VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
   4257			val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
   4258				0x2) |
   4259				VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
   4260		}
   4261
   4262		writeq(val64, &vp_reg->rxmac_vcfg1);
   4263	}
   4264	return VXGE_HW_OK;
   4265}
   4266
   4267/*
   4268 * __vxge_hw_vpath_tim_configure
    4269 * This routine configures the tim registers of the virtual path using the
    4270 * config passed
   4271 */
   4272static enum vxge_hw_status
   4273__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
   4274{
   4275	u64 val64;
   4276	struct __vxge_hw_virtualpath *vpath;
   4277	struct vxge_hw_vpath_reg __iomem *vp_reg;
   4278	struct vxge_hw_vp_config *config;
   4279
   4280	vpath = &hldev->virtual_paths[vp_id];
   4281	vp_reg = vpath->vp_reg;
   4282	config = vpath->vp_config;
   4283
   4284	writeq(0, &vp_reg->tim_dest_addr);
   4285	writeq(0, &vp_reg->tim_vpath_map);
   4286	writeq(0, &vp_reg->tim_bitmap);
   4287	writeq(0, &vp_reg->tim_remap);
   4288
   4289	if (config->ring.enable == VXGE_HW_RING_ENABLE)
   4290		writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
   4291			(vp_id * VXGE_HW_MAX_INTR_PER_VP) +
   4292			VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
   4293
   4294	val64 = readq(&vp_reg->tim_pci_cfg);
   4295	val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
   4296	writeq(val64, &vp_reg->tim_pci_cfg);
   4297
   4298	if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
   4299
   4300		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
   4301
   4302		if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
   4303			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
   4304				0x3ffffff);
   4305			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
   4306					config->tti.btimer_val);
   4307		}
   4308
   4309		val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
   4310
   4311		if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
   4312			if (config->tti.timer_ac_en)
   4313				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
   4314			else
   4315				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
   4316		}
   4317
   4318		if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
   4319			if (config->tti.timer_ci_en)
   4320				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
   4321			else
   4322				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
   4323		}
   4324
   4325		if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
   4326			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
   4327			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
   4328					config->tti.urange_a);
   4329		}
   4330
   4331		if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
   4332			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
   4333			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
   4334					config->tti.urange_b);
   4335		}
   4336
   4337		if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
   4338			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
   4339			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
   4340					config->tti.urange_c);
   4341		}
   4342
   4343		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
   4344		vpath->tim_tti_cfg1_saved = val64;
   4345
   4346		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
   4347
   4348		if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
   4349			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
   4350			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
   4351						config->tti.uec_a);
   4352		}
   4353
   4354		if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
   4355			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
   4356			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
   4357						config->tti.uec_b);
   4358		}
   4359
   4360		if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
   4361			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
   4362			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
   4363						config->tti.uec_c);
   4364		}
   4365
   4366		if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
   4367			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
   4368			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
   4369						config->tti.uec_d);
   4370		}
   4371
   4372		writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
   4373		val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
   4374
   4375		if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
   4376			if (config->tti.timer_ri_en)
   4377				val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
   4378			else
   4379				val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
   4380		}
   4381
   4382		if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
   4383			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
   4384					0x3ffffff);
   4385			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
   4386					config->tti.rtimer_val);
   4387		}
   4388
   4389		if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
   4390			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
   4391			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
   4392		}
   4393
   4394		if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
   4395			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
   4396					0x3ffffff);
   4397			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
   4398					config->tti.ltimer_val);
   4399		}
   4400
   4401		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
   4402		vpath->tim_tti_cfg3_saved = val64;
   4403	}
   4404
   4405	if (config->ring.enable == VXGE_HW_RING_ENABLE) {
   4406
   4407		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
   4408
   4409		if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
   4410			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
   4411					0x3ffffff);
   4412			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
   4413					config->rti.btimer_val);
   4414		}
   4415
   4416		val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
   4417
   4418		if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
   4419			if (config->rti.timer_ac_en)
   4420				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
   4421			else
   4422				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
   4423		}
   4424
   4425		if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
   4426			if (config->rti.timer_ci_en)
   4427				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
   4428			else
   4429				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
   4430		}
   4431
   4432		if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
   4433			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
   4434			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
   4435					config->rti.urange_a);
   4436		}
   4437
   4438		if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
   4439			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
   4440			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
   4441					config->rti.urange_b);
   4442		}
   4443
   4444		if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
   4445			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
   4446			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
   4447					config->rti.urange_c);
   4448		}
   4449
   4450		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
   4451		vpath->tim_rti_cfg1_saved = val64;
   4452
   4453		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
   4454
   4455		if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
   4456			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
   4457			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
   4458						config->rti.uec_a);
   4459		}
   4460
   4461		if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
   4462			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
   4463			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
   4464						config->rti.uec_b);
   4465		}
   4466
   4467		if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
   4468			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
   4469			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
   4470						config->rti.uec_c);
   4471		}
   4472
   4473		if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
   4474			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
   4475			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
   4476						config->rti.uec_d);
   4477		}
   4478
   4479		writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
   4480		val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
   4481
   4482		if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
   4483			if (config->rti.timer_ri_en)
   4484				val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
   4485			else
   4486				val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
   4487		}
   4488
   4489		if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
   4490			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
   4491					0x3ffffff);
   4492			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
   4493					config->rti.rtimer_val);
   4494		}
   4495
   4496		if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
   4497			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
   4498			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
   4499		}
   4500
   4501		if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
   4502			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
   4503					0x3ffffff);
   4504			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
   4505					config->rti.ltimer_val);
   4506		}
   4507
   4508		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
   4509		vpath->tim_rti_cfg3_saved = val64;
   4510	}
   4511
   4512	val64 = 0;
   4513	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
   4514	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
   4515	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
   4516	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
   4517	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
   4518	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
   4519
   4520	val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
   4521	val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
   4522	val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
   4523	writeq(val64, &vp_reg->tim_wrkld_clc);
   4524
   4525	return VXGE_HW_OK;
   4526}
   4527
   4528/*
   4529 * __vxge_hw_vpath_initialize
   4530 * This routine is the final phase of init which initializes the
   4531 * registers of the vpath using the configuration passed.
   4532 */
   4533static enum vxge_hw_status
   4534__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
   4535{
   4536	u64 val64;
   4537	u32 val32;
   4538	enum vxge_hw_status status = VXGE_HW_OK;
   4539	struct __vxge_hw_virtualpath *vpath;
   4540	struct vxge_hw_vpath_reg __iomem *vp_reg;
   4541
   4542	vpath = &hldev->virtual_paths[vp_id];
   4543
   4544	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
   4545		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
   4546		goto exit;
   4547	}
   4548	vp_reg = vpath->vp_reg;
   4549
   4550	status =  __vxge_hw_vpath_swapper_set(vpath->vp_reg);
   4551	if (status != VXGE_HW_OK)
   4552		goto exit;
   4553
   4554	status =  __vxge_hw_vpath_mac_configure(hldev, vp_id);
   4555	if (status != VXGE_HW_OK)
   4556		goto exit;
   4557
   4558	status =  __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
   4559	if (status != VXGE_HW_OK)
   4560		goto exit;
   4561
   4562	status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
   4563	if (status != VXGE_HW_OK)
   4564		goto exit;
   4565
   4566	val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
   4567
   4568	/* Get MRRS value from device control */
   4569	status  = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
   4570	if (status == VXGE_HW_OK) {
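		/* MRRS is encoded in bits 14:12 of the PCIe Device Control register */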
   4571		val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
   4572		val64 &=
   4573		    ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
   4574		val64 |=
   4575		    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
   4576
   4577		val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
   4578	}
   4579
   4580	val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
   4581	val64 |=
   4582	    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
   4583		    VXGE_HW_MAX_PAYLOAD_SIZE_512);
   4584
   4585	val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
   4586	writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
   4587
   4588exit:
   4589	return status;
   4590}
   4591
   4592/*
   4593 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
    4594 * This routine closes all channels it opened and frees up memory
   4595 */
   4596static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
   4597{
   4598	struct __vxge_hw_virtualpath *vpath;
   4599
   4600	vpath = &hldev->virtual_paths[vp_id];
   4601
   4602	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
   4603		goto exit;
   4604
   4605	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
   4606		vpath->hldev->tim_int_mask1, vpath->vp_id);
   4607	hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
   4608
   4609	/* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
   4610	 * work after the interface is brought down.
   4611	 */
   4612	spin_lock(&vpath->lock);
   4613	vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
   4614	spin_unlock(&vpath->lock);
   4615
   4616	vpath->vpmgmt_reg = NULL;
   4617	vpath->nofl_db = NULL;
   4618	vpath->max_mtu = 0;
   4619	vpath->vsport_number = 0;
   4620	vpath->max_kdfc_db = 0;
   4621	vpath->max_nofl_db = 0;
   4622	vpath->ringh = NULL;
   4623	vpath->fifoh = NULL;
   4624	memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
   4625	vpath->stats_block = NULL;
   4626	vpath->hw_stats = NULL;
   4627	vpath->hw_stats_sav = NULL;
   4628	vpath->sw_stats = NULL;
   4629
   4630exit:
   4631	return;
   4632}
   4633
   4634/*
   4635 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
   4636 * This routine is the initial phase of init which resets the vpath and
   4637 * initializes the software support structures.
   4638 */
   4639static enum vxge_hw_status
   4640__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
   4641			struct vxge_hw_vp_config *config)
   4642{
   4643	struct __vxge_hw_virtualpath *vpath;
   4644	enum vxge_hw_status status = VXGE_HW_OK;
   4645
   4646	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
   4647		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
   4648		goto exit;
   4649	}
   4650
   4651	vpath = &hldev->virtual_paths[vp_id];
   4652
   4653	spin_lock_init(&vpath->lock);
   4654	vpath->vp_id = vp_id;
   4655	vpath->vp_open = VXGE_HW_VP_OPEN;
   4656	vpath->hldev = hldev;
   4657	vpath->vp_config = config;
   4658	vpath->vp_reg = hldev->vpath_reg[vp_id];
   4659	vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
   4660
   4661	__vxge_hw_vpath_reset(hldev, vp_id);
   4662
   4663	status = __vxge_hw_vpath_reset_check(vpath);
   4664	if (status != VXGE_HW_OK) {
   4665		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
   4666		goto exit;
   4667	}
   4668
   4669	status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
   4670	if (status != VXGE_HW_OK) {
   4671		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
   4672		goto exit;
   4673	}
   4674
   4675	INIT_LIST_HEAD(&vpath->vpath_handles);
   4676
   4677	vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
   4678
   4679	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
   4680		hldev->tim_int_mask1, vp_id);
   4681
   4682	status = __vxge_hw_vpath_initialize(hldev, vp_id);
   4683	if (status != VXGE_HW_OK)
   4684		__vxge_hw_vp_terminate(hldev, vp_id);
   4685exit:
   4686	return status;
   4687}
   4688
   4689/*
   4690 * vxge_hw_vpath_mtu_set - Set MTU.
    4691 * Set a new MTU value. For example, to use jumbo frames:
   4692 * vxge_hw_vpath_mtu_set(my_device, 9600);
   4693 */
   4694enum vxge_hw_status
   4695vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
   4696{
   4697	u64 val64;
   4698	enum vxge_hw_status status = VXGE_HW_OK;
   4699	struct __vxge_hw_virtualpath *vpath;
   4700
   4701	if (vp == NULL) {
   4702		status = VXGE_HW_ERR_INVALID_HANDLE;
   4703		goto exit;
   4704	}
   4705	vpath = vp->vpath;
   4706
   4707	new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
   4708
   4709	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
   4710		status = VXGE_HW_ERR_INVALID_MTU_SIZE;
   4711
   4712	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
   4713
   4714	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
   4715	val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
   4716
   4717	writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
   4718
   4719	vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
   4720
   4721exit:
   4722	return status;
   4723}
   4724
   4725/*
    4726 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
    4727 * Enable the DMA vpath statistics. This function must be called to re-enable
    4728 * the adapter to update stats into host memory.
   4729 */
   4730static enum vxge_hw_status
   4731vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
   4732{
   4733	enum vxge_hw_status status = VXGE_HW_OK;
   4734	struct __vxge_hw_virtualpath *vpath;
   4735
   4736	vpath = vp->vpath;
   4737
   4738	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
   4739		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
   4740		goto exit;
   4741	}
   4742
   4743	memcpy(vpath->hw_stats_sav, vpath->hw_stats,
   4744			sizeof(struct vxge_hw_vpath_stats_hw_info));
   4745
   4746	status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
   4747exit:
   4748	return status;
   4749}
   4750
   4751/*
    4752 * __vxge_hw_blockpool_block_allocate - Allocate a block from the block pool
    4753 * This function allocates a block from the block pool or from the system
   4754 */
   4755static struct __vxge_hw_blockpool_entry *
   4756__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
   4757{
   4758	struct __vxge_hw_blockpool_entry *entry = NULL;
   4759	struct __vxge_hw_blockpool  *blockpool;
   4760
   4761	blockpool = &devh->block_pool;
   4762
   4763	if (size == blockpool->block_size) {
   4764
   4765		if (!list_empty(&blockpool->free_block_list))
   4766			entry = (struct __vxge_hw_blockpool_entry *)
   4767				list_first_entry(&blockpool->free_block_list,
   4768					struct __vxge_hw_blockpool_entry,
   4769					item);
   4770
   4771		if (entry != NULL) {
   4772			list_del(&entry->item);
   4773			blockpool->pool_size--;
   4774		}
   4775	}
   4776
   4777	if (entry != NULL)
   4778		__vxge_hw_blockpool_blocks_add(blockpool);
   4779
   4780	return entry;
   4781}
   4782
   4783/*
   4784 * vxge_hw_vpath_open - Open a virtual path on a given adapter
    4785 * This function is used to open access to a virtual path of an
    4786 * adapter for offload and GRO operations. This function returns
   4787 * synchronously.
   4788 */
   4789enum vxge_hw_status
   4790vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
   4791		   struct vxge_hw_vpath_attr *attr,
   4792		   struct __vxge_hw_vpath_handle **vpath_handle)
   4793{
   4794	struct __vxge_hw_virtualpath *vpath;
   4795	struct __vxge_hw_vpath_handle *vp;
   4796	enum vxge_hw_status status;
   4797
   4798	vpath = &hldev->virtual_paths[attr->vp_id];
   4799
   4800	if (vpath->vp_open == VXGE_HW_VP_OPEN) {
   4801		status = VXGE_HW_ERR_INVALID_STATE;
   4802		goto vpath_open_exit1;
   4803	}
   4804
   4805	status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
   4806			&hldev->config.vp_config[attr->vp_id]);
   4807	if (status != VXGE_HW_OK)
   4808		goto vpath_open_exit1;
   4809
   4810	vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
   4811	if (vp == NULL) {
   4812		status = VXGE_HW_ERR_OUT_OF_MEMORY;
   4813		goto vpath_open_exit2;
   4814	}
   4815
   4816	vp->vpath = vpath;
   4817
   4818	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
   4819		status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
   4820		if (status != VXGE_HW_OK)
   4821			goto vpath_open_exit6;
   4822	}
   4823
   4824	if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
   4825		status = __vxge_hw_ring_create(vp, &attr->ring_attr);
   4826		if (status != VXGE_HW_OK)
   4827			goto vpath_open_exit7;
   4828
   4829		__vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
   4830	}
   4831
   4832	vpath->fifoh->tx_intr_num =
   4833		(attr->vp_id * VXGE_HW_MAX_INTR_PER_VP)  +
   4834			VXGE_HW_VPATH_INTR_TX;
   4835
   4836	vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
   4837				VXGE_HW_BLOCK_SIZE);
   4838	if (vpath->stats_block == NULL) {
   4839		status = VXGE_HW_ERR_OUT_OF_MEMORY;
   4840		goto vpath_open_exit8;
   4841	}
   4842
   4843	vpath->hw_stats = vpath->stats_block->memblock;
   4844	memset(vpath->hw_stats, 0,
   4845		sizeof(struct vxge_hw_vpath_stats_hw_info));
   4846
   4847	hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
   4848						vpath->hw_stats;
   4849
   4850	vpath->hw_stats_sav =
   4851		&hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
   4852	memset(vpath->hw_stats_sav, 0,
   4853			sizeof(struct vxge_hw_vpath_stats_hw_info));
   4854
   4855	writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
   4856
   4857	status = vxge_hw_vpath_stats_enable(vp);
   4858	if (status != VXGE_HW_OK)
   4859		goto vpath_open_exit8;
   4860
   4861	list_add(&vp->item, &vpath->vpath_handles);
   4862
   4863	hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
   4864
   4865	*vpath_handle = vp;
   4866
   4867	attr->fifo_attr.userdata = vpath->fifoh;
   4868	attr->ring_attr.userdata = vpath->ringh;
   4869
   4870	return VXGE_HW_OK;
   4871
   4872vpath_open_exit8:
   4873	if (vpath->ringh != NULL)
   4874		__vxge_hw_ring_delete(vp);
   4875vpath_open_exit7:
   4876	if (vpath->fifoh != NULL)
   4877		__vxge_hw_fifo_delete(vp);
   4878vpath_open_exit6:
   4879	vfree(vp);
   4880vpath_open_exit2:
   4881	__vxge_hw_vp_terminate(hldev, attr->vp_id);
   4882vpath_open_exit1:
   4883
   4884	return status;
   4885}
   4886
   4887/**
    4888 * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell for a
    4889 * vpath
    4890 * @vp: Handle got from previous vpath open
    4891 *
    4892 * This function posts the initial count of receive descriptors to the
    4893 * vpath's PRC doorbell and adjusts the ring's replenish limit accordingly.
   4894 */
   4895void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
   4896{
   4897	struct __vxge_hw_virtualpath *vpath = vp->vpath;
   4898	struct __vxge_hw_ring *ring = vpath->ringh;
   4899	struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
   4900	u64 new_count, val64, val164;
   4901
   4902	if (vdev->titan1) {
   4903		new_count = readq(&vpath->vp_reg->rxdmem_size);
   4904		new_count &= 0x1fff;
   4905	} else
   4906		new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
   4907
   4908	val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
   4909
   4910	writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
   4911		&vpath->vp_reg->prc_rxd_doorbell);
   4912	readl(&vpath->vp_reg->prc_rxd_doorbell);
   4913
   4914	val164 /= 2;
   4915	val64 = readq(&vpath->vp_reg->prc_cfg6);
   4916	val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
   4917	val64 &= 0x1ff;
   4918
   4919	/*
   4920	 * Each RxD is of 4 qwords
   4921	 */
   4922	new_count -= (val64 + 1);
   4923	val64 = min(val164, new_count) / 4;
   4924
   4925	ring->rxds_limit = min(ring->rxds_limit, val64);
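	/* never let the replenish limit drop below four RxDs */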
   4926	if (ring->rxds_limit < 4)
   4927		ring->rxds_limit = 4;
   4928}
   4929
   4930/*
    4931 * __vxge_hw_blockpool_block_free - Frees a block from the block pool
   4932 * @devh: Hal device
   4933 * @entry: Entry of block to be freed
   4934 *
    4935 * This function frees a block back to the block pool
   4936 */
   4937static void
   4938__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
   4939			       struct __vxge_hw_blockpool_entry *entry)
   4940{
   4941	struct __vxge_hw_blockpool  *blockpool;
   4942
   4943	blockpool = &devh->block_pool;
   4944
   4945	if (entry->length == blockpool->block_size) {
   4946		list_add(&entry->item, &blockpool->free_block_list);
   4947		blockpool->pool_size++;
   4948	}
   4949
   4950	__vxge_hw_blockpool_blocks_remove(blockpool);
   4951}
   4952
   4953/*
    4954 * vxge_hw_vpath_close - Close the handle got from a previous vpath open
   4955 * This function is used to close access to virtual path opened
   4956 * earlier.
   4957 */
   4958enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
   4959{
   4960	struct __vxge_hw_virtualpath *vpath = NULL;
   4961	struct __vxge_hw_device *devh = NULL;
   4962	u32 vp_id = vp->vpath->vp_id;
   4963	u32 is_empty = TRUE;
   4964	enum vxge_hw_status status = VXGE_HW_OK;
   4965
   4966	vpath = vp->vpath;
   4967	devh = vpath->hldev;
   4968
   4969	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
   4970		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
   4971		goto vpath_close_exit;
   4972	}
   4973
   4974	list_del(&vp->item);
   4975
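	/* only the last remaining handle may close the vpath; otherwise
	 * put the handle back on the list and fail the close
	 */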
   4976	if (!list_empty(&vpath->vpath_handles)) {
   4977		list_add(&vp->item, &vpath->vpath_handles);
   4978		is_empty = FALSE;
   4979	}
   4980
   4981	if (!is_empty) {
   4982		status = VXGE_HW_FAIL;
   4983		goto vpath_close_exit;
   4984	}
   4985
   4986	devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
   4987
   4988	if (vpath->ringh != NULL)
   4989		__vxge_hw_ring_delete(vp);
   4990
   4991	if (vpath->fifoh != NULL)
   4992		__vxge_hw_fifo_delete(vp);
   4993
   4994	if (vpath->stats_block != NULL)
   4995		__vxge_hw_blockpool_block_free(devh, vpath->stats_block);
   4996
   4997	vfree(vp);
   4998
   4999	__vxge_hw_vp_terminate(devh, vp_id);
   5000
   5001vpath_close_exit:
   5002	return status;
   5003}
   5004
   5005/*
   5006 * vxge_hw_vpath_reset - Resets vpath
    5007 * This function is used to request a reset of the vpath
   5008 */
   5009enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
   5010{
   5011	enum vxge_hw_status status;
   5012	u32 vp_id;
   5013	struct __vxge_hw_virtualpath *vpath = vp->vpath;
   5014
   5015	vp_id = vpath->vp_id;
   5016
   5017	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
   5018		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
   5019		goto exit;
   5020	}
   5021
   5022	status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
   5023	if (status == VXGE_HW_OK)
   5024		vpath->sw_stats->soft_reset_cnt++;
   5025exit:
   5026	return status;
   5027}
   5028
   5029/*
   5030 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
    5031 * This function polls for the vpath reset completion and re-initializes
   5032 * the vpath.
   5033 */
   5034enum vxge_hw_status
   5035vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
   5036{
   5037	struct __vxge_hw_virtualpath *vpath = NULL;
   5038	enum vxge_hw_status status;
   5039	struct __vxge_hw_device *hldev;
   5040	u32 vp_id;
   5041
   5042	vp_id = vp->vpath->vp_id;
   5043	vpath = vp->vpath;
   5044	hldev = vpath->hldev;
   5045
   5046	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
   5047		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
   5048		goto exit;
   5049	}
   5050
   5051	status = __vxge_hw_vpath_reset_check(vpath);
   5052	if (status != VXGE_HW_OK)
   5053		goto exit;
   5054
   5055	status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
   5056	if (status != VXGE_HW_OK)
   5057		goto exit;
   5058
   5059	status = __vxge_hw_vpath_initialize(hldev, vp_id);
   5060	if (status != VXGE_HW_OK)
   5061		goto exit;
   5062
   5063	if (vpath->ringh != NULL)
   5064		__vxge_hw_vpath_prc_configure(hldev, vp_id);
   5065
   5066	memset(vpath->hw_stats, 0,
   5067		sizeof(struct vxge_hw_vpath_stats_hw_info));
   5068
   5069	memset(vpath->hw_stats_sav, 0,
   5070		sizeof(struct vxge_hw_vpath_stats_hw_info));
   5071
   5072	writeq(vpath->stats_block->dma_addr,
   5073		&vpath->vp_reg->stats_cfg);
   5074
   5075	status = vxge_hw_vpath_stats_enable(vp);
   5076
   5077exit:
   5078	return status;
   5079}
   5080
   5081/*
   5082 * vxge_hw_vpath_enable - Enable vpath.
    5083 * This routine clears the vpath reset, thereby enabling a vpath
   5084 * to start forwarding frames and generating interrupts.
   5085 */
   5086void
   5087vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
   5088{
   5089	struct __vxge_hw_device *hldev;
   5090	u64 val64;
   5091
   5092	hldev = vp->vpath->hldev;
   5093
   5094	val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
   5095		1 << (16 - vp->vpath->vp_id));
   5096
   5097	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
   5098		&hldev->common_reg->cmn_rsthdlr_cfg1);
   5099}