cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

octeon_device.c (39106B)


/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

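/* Terminology used below (informal note): an IQ is an instruction (input)
 * queue carrying host-to-Octeon commands and packets; an OQ/DROQ is a
 * (dispatch receive) output queue carrying Octeon-to-host packets.
 */
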
/** Default configuration
 *  for CN66XX OCTEON Models.
 */
static struct octeon_config default_cn66xx_conf = {
	.card_type                              = LIO_210SV,
	.card_name                              = LIO_210SV_NAME,

	/** IQ attributes */
	.iq					= {
		.max_iqs			= CN6XXX_CFG_IO_QUEUES,
		.pending_list_size		=
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type			= OCTEON_64BYTE_INSTR,
		.db_min				= CN6XXX_DB_MIN,
		.db_timeout			= CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq					= {
		.max_oqs			= CN6XXX_CFG_IO_QUEUES,
		.refill_threshold		= CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt			= CN6XXX_OQ_INTR_PKT,
		.oq_intr_time			= CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr			= CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports				= DEFAULT_NUM_NIC_PORTS_66XX,
	.num_def_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size			= CN6XXX_OQ_BUF_SIZE,

	/* For ethernet interface 0:  Port cfg Attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs			= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs			= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs			= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs			= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change the buf size even for jumbo
		 * frames: Octeon can send a jumbo frame in 4 consecutive
		 * descriptors.
		 */
		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,

		.base_queue			= BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id			= 0,
	},

	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs			= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs			= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs			= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs			= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change the buf size even for jumbo
		 * frames: Octeon can send a jumbo frame in 4 consecutive
		 * descriptors.
		 */
		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,

		.base_queue			= BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id			= 1,
	},

	/** Miscellaneous attributes */
	.misc					= {
		/* Host driver link query interval */
		.oct_link_query_interval	= 100,

		/* Octeon link query interval */
		.host_link_query_interval	= 500,

		.enable_sli_oq_bp		= 0,

		/* Control queue group */
		.ctrlq_grp			= 1,
	},
};

/** Default configuration
 *  for CN68XX OCTEON Model.
 */
static struct octeon_config default_cn68xx_conf = {
	.card_type                              = LIO_410NV,
	.card_name                              = LIO_410NV_NAME,

	/** IQ attributes */
	.iq					= {
		.max_iqs			= CN6XXX_CFG_IO_QUEUES,
		.pending_list_size		=
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type			= OCTEON_64BYTE_INSTR,
		.db_min				= CN6XXX_DB_MIN,
		.db_timeout			= CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq					= {
		.max_oqs			= CN6XXX_CFG_IO_QUEUES,
		.refill_threshold		= CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt			= CN6XXX_OQ_INTR_PKT,
		.oq_intr_time			= CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr			= CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports				= DEFAULT_NUM_NIC_PORTS_68XX,
	.num_def_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size			= CN6XXX_OQ_BUF_SIZE,

	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs			= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs			= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs			= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs			= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change the buf size even for jumbo
		 * frames: Octeon can send a jumbo frame in 4 consecutive
		 * descriptors.
		 */
		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,

		.base_queue			= BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id			= 0,
	},

	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs			= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs			= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs			= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs			= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change the buf size even for jumbo
		 * frames: Octeon can send a jumbo frame in 4 consecutive
		 * descriptors.
		 */
		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,

		.base_queue			= BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id			= 1,
	},

	.nic_if_cfg[2] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs			= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs			= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs			= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs			= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change the buf size even for jumbo
		 * frames: Octeon can send a jumbo frame in 4 consecutive
		 * descriptors.
		 */
		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,

		.base_queue			= BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id			= 2,
	},

	.nic_if_cfg[3] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs			= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs			= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs			= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs			= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change the buf size even for jumbo
		 * frames: Octeon can send a jumbo frame in 4 consecutive
		 * descriptors.
		 */
		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,

		.base_queue			= BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id			= 3,
	},

	/** Miscellaneous attributes */
	.misc					= {
		/* Host driver link query interval */
		.oct_link_query_interval	= 100,

		/* Octeon link query interval */
		.host_link_query_interval	= 500,

		.enable_sli_oq_bp		= 0,

		/* Control queue group */
		.ctrlq_grp			= 1,
	},
};

/** Default configuration
 *  for the CN68XX OCTEON model (210NV card).
 */
static struct octeon_config default_cn68xx_210nv_conf = {
	.card_type                              = LIO_210NV,
	.card_name                              = LIO_210NV_NAME,

	/** IQ attributes */
	.iq					= {
		.max_iqs			= CN6XXX_CFG_IO_QUEUES,
		.pending_list_size		=
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type			= OCTEON_64BYTE_INSTR,
		.db_min				= CN6XXX_DB_MIN,
		.db_timeout			= CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq					= {
		.max_oqs			= CN6XXX_CFG_IO_QUEUES,
		.refill_threshold		= CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt			= CN6XXX_OQ_INTR_PKT,
		.oq_intr_time			= CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr			= CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports			= DEFAULT_NUM_NIC_PORTS_68XX_210NV,
	.num_def_rx_descs		= CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs		= CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size		= CN6XXX_OQ_BUF_SIZE,

	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs			= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs			= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs			= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs			= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change the buf size even for jumbo
		 * frames: Octeon can send a jumbo frame in 4 consecutive
		 * descriptors.
		 */
		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,

		.base_queue			= BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id			= 0,
	},

	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs			= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs			= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs			= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs			= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs			= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs			= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change the buf size even for jumbo
		 * frames: Octeon can send a jumbo frame in 4 consecutive
		 * descriptors.
		 */
		.rx_buf_size			= CN6XXX_OQ_BUF_SIZE,

		.base_queue			= BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id			= 1,
	},

	/** Miscellaneous attributes */
	.misc					= {
		/* Host driver link query interval */
		.oct_link_query_interval	= 100,

		/* Octeon link query interval */
		.host_link_query_interval	= 500,

		.enable_sli_oq_bp		= 0,

		/* Control queue group */
		.ctrlq_grp			= 1,
	},
};

static struct octeon_config default_cn23xx_conf = {
	.card_type                              = LIO_23XX,
	.card_name                              = LIO_23XX_NAME,
	/** IQ attributes */
	.iq = {
		.max_iqs		= CN23XX_CFG_IO_QUEUES,
		.pending_list_size	= (CN23XX_DEFAULT_IQ_DESCRIPTORS *
					   CN23XX_CFG_IO_QUEUES),
		.instr_type		= OCTEON_64BYTE_INSTR,
		.db_min			= CN23XX_DB_MIN,
		.db_timeout		= CN23XX_DB_TIMEOUT,
		.iq_intr_pkt		= CN23XX_DEF_IQ_INTR_THRESHOLD,
	},

	/** OQ attributes */
	.oq = {
		.max_oqs		= CN23XX_CFG_IO_QUEUES,
		.pkts_per_intr		= CN23XX_OQ_PKTSPER_INTR,
		.refill_threshold	= CN23XX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt		= CN23XX_OQ_INTR_PKT,
		.oq_intr_time		= CN23XX_OQ_INTR_TIME,
	},

	.num_nic_ports				= DEFAULT_NUM_NIC_PORTS_23XX,
	.num_def_rx_descs			= CN23XX_DEFAULT_OQ_DESCRIPTORS,
	.num_def_tx_descs			= CN23XX_DEFAULT_IQ_DESCRIPTORS,
	.def_rx_buf_size			= CN23XX_OQ_BUF_SIZE,

	/* For ethernet interface 0:  Port cfg Attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs			= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs			= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs			= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs			= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs			= CN23XX_DEFAULT_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs			= CN23XX_DEFAULT_IQ_DESCRIPTORS,

		/* SKB size. We need not change the buf size even for jumbo
		 * frames: Octeon can send a jumbo frame in 4 consecutive
		 * descriptors.
		 */
		.rx_buf_size			= CN23XX_OQ_BUF_SIZE,

		.base_queue			= BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id			= 0,
	},

	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs			= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs			= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs			= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs			= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs			= CN23XX_DEFAULT_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs			= CN23XX_DEFAULT_IQ_DESCRIPTORS,

		/* SKB size. We need not change the buf size even for jumbo
		 * frames: Octeon can send a jumbo frame in 4 consecutive
		 * descriptors.
		 */
		.rx_buf_size			= CN23XX_OQ_BUF_SIZE,

		.base_queue			= BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id			= 1,
	},

	.misc					= {
		/* Host driver link query interval */
		.oct_link_query_interval	= 100,

		/* Octeon link query interval */
		.host_link_query_interval	= 500,

		.enable_sli_oq_bp		= 0,

		/* Control queue group */
		.ctrlq_grp			= 1,
	}
};

static struct octeon_config_ptr {
	u32 conf_type;
} oct_conf_info[MAX_OCTEON_DEVICES] = {
	{
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	},
};

static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
	"BEGIN", "PCI-ENABLE-DONE", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
	"IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
	"DROQ-INIT-DONE", "MBOX-SETUP-DONE", "MSIX-ALLOC-VECTOR-DONE",
	"INTR-SET-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
	"HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
	"INVALID"
};

static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
	"BASE", "NIC", "UNKNOWN"};

static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
static atomic_t adapter_refcounts[MAX_OCTEON_DEVICES];
static atomic_t adapter_fw_states[MAX_OCTEON_DEVICES];

static u32 octeon_device_count;
/* locks device array (i.e. octeon_device[]) */
static DEFINE_SPINLOCK(octeon_devices_lock);

static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];

static void oct_set_config_info(int oct_id, int conf_type)
{
	if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1))
		conf_type = OCTEON_CONFIG_TYPE_DEFAULT;
	oct_conf_info[oct_id].conf_type = conf_type;
}

void octeon_init_device_list(int conf_type)
{
	int i;

	memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
	for (i = 0; i < MAX_OCTEON_DEVICES; i++)
		oct_set_config_info(i, conf_type);
}

static void *__retrieve_octeon_config_info(struct octeon_device *oct,
					   u16 card_type)
{
	u32 oct_id = oct->octeon_id;
	void *ret = NULL;

	switch (oct_conf_info[oct_id].conf_type) {
	case OCTEON_CONFIG_TYPE_DEFAULT:
		if (oct->chip_id == OCTEON_CN66XX) {
			ret = &default_cn66xx_conf;
		} else if ((oct->chip_id == OCTEON_CN68XX) &&
			   (card_type == LIO_210NV)) {
			ret = &default_cn68xx_210nv_conf;
		} else if ((oct->chip_id == OCTEON_CN68XX) &&
			   (card_type == LIO_410NV)) {
			ret = &default_cn68xx_conf;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			ret = &default_cn23xx_conf;
		} else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
			ret = &default_cn23xx_conf;
		}
		break;
	default:
		break;
	}
	return ret;
}

static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
{
	switch (oct->chip_id) {
	case OCTEON_CN66XX:
	case OCTEON_CN68XX:
		return lio_validate_cn6xxx_config_info(oct, conf);
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		return 0;
	default:
		break;
	}

	return 1;
}

void *oct_get_config_info(struct octeon_device *oct, u16 card_type)
{
	void *conf = NULL;

	conf = __retrieve_octeon_config_info(oct, card_type);
	if (!conf)
		return NULL;

	if (__verify_octeon_config_info(oct, conf)) {
		dev_err(&oct->pci_dev->dev, "Configuration verification failed\n");
		return NULL;
	}

	return conf;
}

char *lio_get_state_string(atomic_t *state_ptr)
{
	s32 istate = (s32)atomic_read(state_ptr);

	if (istate > OCT_DEV_STATES || istate < 0)
		return oct_dev_state_str[OCT_DEV_STATE_INVALID];
	return oct_dev_state_str[istate];
}

static char *get_oct_app_string(u32 app_mode)
{
	if (app_mode <= CVM_DRV_APP_END)
		return oct_dev_app_str[app_mode - CVM_DRV_APP_START];
	return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
}

void octeon_free_device_mem(struct octeon_device *oct)
{
	int i;

	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (oct->io_qmask.oq & BIT_ULL(i))
			vfree(oct->droq[i]);
	}

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (oct->io_qmask.iq & BIT_ULL(i))
			vfree(oct->instr_queue[i]);
	}

	i = oct->octeon_id;
	vfree(oct);

	octeon_device[i] = NULL;
	octeon_device_count--;
}

static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
							u32 priv_size)
{
	struct octeon_device *oct;
	u8 *buf = NULL;
	u32 octdevsize = 0, configsize = 0, size;

	switch (pci_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		configsize = sizeof(struct octeon_cn6xxx);
		break;

	case OCTEON_CN23XX_PF_VID:
		configsize = sizeof(struct octeon_cn23xx_pf);
		break;
	case OCTEON_CN23XX_VF_VID:
		configsize = sizeof(struct octeon_cn23xx_vf);
		break;
	default:
		pr_err("%s: Unknown PCI Device: 0x%x\n",
		       __func__,
		       pci_id);
		return NULL;
	}

	if (configsize & 0x7)
		configsize += (8 - (configsize & 0x7));

	octdevsize = sizeof(struct octeon_device);
	if (octdevsize & 0x7)
		octdevsize += (8 - (octdevsize & 0x7));

	if (priv_size & 0x7)
		priv_size += (8 - (priv_size & 0x7));

	size = octdevsize + priv_size + configsize +
		(sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);

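	/* Single allocation holding, in order: the octeon_device struct, the
	 * caller's private area, the chip-specific config, and the dispatch
	 * table; each section was padded to 8-byte alignment above.
	 */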
	buf = vzalloc(size);
	if (!buf)
		return NULL;

	oct = (struct octeon_device *)buf;
	oct->priv = (void *)(buf + octdevsize);
	oct->chip = (void *)(buf + octdevsize + priv_size);
	oct->dispatch.dlist = (struct octeon_dispatch *)
		(buf + octdevsize + priv_size + configsize);

	return oct;
}

struct octeon_device *octeon_allocate_device(u32 pci_id,
					     u32 priv_size)
{
	u32 oct_idx = 0;
	struct octeon_device *oct = NULL;

	spin_lock(&octeon_devices_lock);

	for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
		if (!octeon_device[oct_idx])
			break;

	if (oct_idx < MAX_OCTEON_DEVICES) {
		oct = octeon_allocate_device_mem(pci_id, priv_size);
		if (oct) {
			octeon_device_count++;
			octeon_device[oct_idx] = oct;
		}
	}

	spin_unlock(&octeon_devices_lock);
	if (!oct)
		return NULL;

	spin_lock_init(&oct->pci_win_lock);
	spin_lock_init(&oct->mem_access_lock);

	oct->octeon_id = oct_idx;
	snprintf(oct->device_name, sizeof(oct->device_name),
		 "LiquidIO%d", (oct->octeon_id));

	return oct;
}
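
/* Typical probe-time usage (an illustrative sketch; octeon_device_priv is
 * the caller's private state, as in the lio_main/lio_vf_main modules):
 *
 *	oct = octeon_allocate_device(pdev->device,
 *				     sizeof(struct octeon_device_priv));
 */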

/** Register a device's bus location at initialization time.
 *  @param octeon_dev - pointer to the octeon device structure.
 *  @param bus        - PCIe bus #
 *  @param dev        - PCIe device #
 *  @param func       - PCIe function #
 *  @param is_pf      - TRUE for PF, FALSE for VF
 *  @return reference count of device's adapter
 */
int octeon_register_device(struct octeon_device *oct,
			   int bus, int dev, int func, int is_pf)
{
	int idx, refcount;

	oct->loc.bus = bus;
	oct->loc.dev = dev;
	oct->loc.func = func;

	oct->adapter_refcount = &adapter_refcounts[oct->octeon_id];
	atomic_set(oct->adapter_refcount, 0);

	/* Like the reference count, the f/w state is shared 'per-adapter' */
	oct->adapter_fw_state = &adapter_fw_states[oct->octeon_id];
	atomic_set(oct->adapter_fw_state, FW_NEEDS_TO_BE_LOADED);

	spin_lock(&octeon_devices_lock);
	for (idx = (int)oct->octeon_id - 1; idx >= 0; idx--) {
		if (!octeon_device[idx]) {
			dev_err(&oct->pci_dev->dev,
				"%s: Internal driver error, missing dev",
				__func__);
			spin_unlock(&octeon_devices_lock);
			atomic_inc(oct->adapter_refcount);
			return 1; /* here, refcount is guaranteed to be 1 */
		}
		/* If another device is at same bus/dev, use its refcounter
		 * (and f/w state variable).
		 */
		if ((octeon_device[idx]->loc.bus == bus) &&
		    (octeon_device[idx]->loc.dev == dev)) {
			oct->adapter_refcount =
				octeon_device[idx]->adapter_refcount;
			oct->adapter_fw_state =
				octeon_device[idx]->adapter_fw_state;
			break;
		}
	}
	spin_unlock(&octeon_devices_lock);

	atomic_inc(oct->adapter_refcount);
	refcount = atomic_read(oct->adapter_refcount);

	dev_dbg(&oct->pci_dev->dev, "%s: %02x:%02x:%d refcount %u", __func__,
		oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);

	return refcount;
}
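
/* Illustrative note: on a dual-PF adapter both functions sit at the same
 * bus/dev and differ only in the PCIe function number, so the second call
 * to octeon_register_device() finds the earlier device at that bus/dev and
 * adopts its refcount and firmware state, returning 2.
 */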

/** Deregister a device at de-initialization time.
 *  @param octeon_dev - pointer to the octeon device structure.
 *  @return reference count of device's adapter
 */
int octeon_deregister_device(struct octeon_device *oct)
{
	int refcount;

	atomic_dec(oct->adapter_refcount);
	refcount = atomic_read(oct->adapter_refcount);

	dev_dbg(&oct->pci_dev->dev, "%s: %02x:%02x:%d refcount %u", __func__,
		oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);

	return refcount;
}

int
octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs)
{
	struct octeon_ioq_vector *ioq_vector;
	int cpu_num;
	int size;
	int i;

	size = sizeof(struct octeon_ioq_vector) * num_ioqs;

	oct->ioq_vector = vzalloc(size);
	if (!oct->ioq_vector)
		return -1;
	for (i = 0; i < num_ioqs; i++) {
		ioq_vector		= &oct->ioq_vector[i];
		ioq_vector->oct_dev	= oct;
		ioq_vector->iq_index	= i;
		ioq_vector->droq_index	= i;
		ioq_vector->mbox	= oct->mbox[i];

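		/* Spread vector affinity round-robin across online CPUs. */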
		cpu_num = i % num_online_cpus();
		cpumask_set_cpu(cpu_num, &ioq_vector->affinity_mask);

		if (oct->chip_id == OCTEON_CN23XX_PF_VID)
			ioq_vector->ioq_num	= i + oct->sriov_info.pf_srn;
		else
			ioq_vector->ioq_num	= i;
	}

	return 0;
}

void
octeon_free_ioq_vector(struct octeon_device *oct)
{
	vfree(oct->ioq_vector);
}

/* this function is only for setting up the first queue */
int octeon_setup_instr_queues(struct octeon_device *oct)
{
	u32 num_descs = 0;
	u32 iq_no = 0;
	union oct_txpciq txpciq;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (OCTEON_CN6XXX(oct))
		num_descs =
			CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn6xxx));
	else if (OCTEON_CN23XX_PF(oct))
		num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_pf));
	else if (OCTEON_CN23XX_VF(oct))
		num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_vf));

	oct->num_iqs = 0;

	oct->instr_queue[0] = vzalloc_node(sizeof(*oct->instr_queue[0]),
				numa_node);
	if (!oct->instr_queue[0])
		oct->instr_queue[0] =
			vzalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[0])
		return 1;
	memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
	oct->instr_queue[0]->q_index = 0;
	oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
	oct->instr_queue[0]->ifidx = 0;
	txpciq.u64 = 0;
	txpciq.s.q_no = iq_no;
	txpciq.s.pkind = oct->pfvf_hsword.pkind;
	txpciq.s.use_qpg = 0;
	txpciq.s.qpg = 0;
	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		/* prevent memory leak */
		vfree(oct->instr_queue[0]);
		oct->instr_queue[0] = NULL;
		return 1;
	}

	oct->num_iqs++;
	return 0;
}
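
/* Note: only IQ 0 is created here; any further instruction queues are set
 * up later by the callers (e.g. the NIC modules) once the firmware
 * handshake has completed.
 */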

int octeon_setup_output_queues(struct octeon_device *oct)
{
	u32 num_descs = 0;
	u32 desc_size = 0;
	u32 oq_no = 0;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (OCTEON_CN6XXX(oct)) {
		num_descs =
			CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn6xxx));
		desc_size =
			CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn6xxx));
	} else if (OCTEON_CN23XX_PF(oct)) {
		num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_pf));
		desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_pf));
	} else if (OCTEON_CN23XX_VF(oct)) {
		num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_vf));
		desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_vf));
	}
	oct->num_oqs = 0;
	oct->droq[0] = vzalloc_node(sizeof(*oct->droq[0]), numa_node);
	if (!oct->droq[0])
		oct->droq[0] = vzalloc(sizeof(*oct->droq[0]));
	if (!oct->droq[0])
		return 1;

	if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL)) {
		vfree(oct->droq[oq_no]);
		oct->droq[oq_no] = NULL;
		return 1;
	}
	oct->num_oqs++;

	return 0;
}

int octeon_set_io_queues_off(struct octeon_device *oct)
{
	int loop = BUSY_READING_REG_VF_LOOP_COUNT;

	if (OCTEON_CN6XXX(oct)) {
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	} else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
		u32 q_no;

		/* IOQs will already be in reset.
		 * If RST bit is set, wait for quiet bit to be set.
		 * Once quiet bit is set, clear the RST bit.
		 */
		for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
			u64 reg_val = octeon_read_csr64(
				oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));

			while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
			       !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) &&
			       loop) {
				reg_val = octeon_read_csr64(
					oct,
					CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
				loop--;
			}
			if (!loop) {
				dev_err(&oct->pci_dev->dev,
					"clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
					q_no);
				return -1;
			}

			reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
			octeon_write_csr64(oct,
					   CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
					   reg_val);

			reg_val = octeon_read_csr64(
					oct,
					CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
			if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
				dev_err(&oct->pci_dev->dev,
					"unable to reset qno %u\n", q_no);
				return -1;
			}
		}
	}
	return 0;
}

void octeon_set_droq_pkt_op(struct octeon_device *oct,
			    u32 q_no,
			    u32 enable)
{
	u32 reg_val = 0;

	/* Enable (or disable) the output queue bit for this Octeon. */
	if (OCTEON_CN6XXX(oct)) {
		reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);

		if (enable)
			reg_val = reg_val | (1 << q_no);
		else
			reg_val = reg_val & (~(1 << q_no));

		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
	}
}
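
/* Only CN6XXX exposes this per-queue enable mask here; CN23XX PF/VF queue
 * enables appear to be handled in the chip-specific setup code instead.
 */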

int octeon_init_dispatch_list(struct octeon_device *oct)
{
	u32 i;

	oct->dispatch.count = 0;

	for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
		oct->dispatch.dlist[i].opcode = 0;
		INIT_LIST_HEAD(&oct->dispatch.dlist[i].list);
	}

	for (i = 0; i <= REQTYPE_LAST; i++)
		octeon_register_reqtype_free_fn(oct, i, NULL);

	spin_lock_init(&oct->dispatch.lock);

	return 0;
}

void octeon_delete_dispatch_list(struct octeon_device *oct)
{
	u32 i;
	struct list_head freelist, *temp, *tmp2;

	INIT_LIST_HEAD(&freelist);

	spin_lock_bh(&oct->dispatch.lock);

	for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
		struct list_head *dispatch;

		dispatch = &oct->dispatch.dlist[i].list;
		while (dispatch->next != dispatch) {
			temp = dispatch->next;
			list_move_tail(temp, &freelist);
		}

		oct->dispatch.dlist[i].opcode = 0;
	}

	oct->dispatch.count = 0;

	spin_unlock_bh(&oct->dispatch.lock);

	list_for_each_safe(temp, tmp2, &freelist) {
		list_del(temp);
		kfree(temp);
	}
}

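/* Dispatch lookup: the low bits of OPCODE_SUBCODE(opcode, subcode) pick a
 * direct-mapped slot; an exact match may sit in the slot itself or on the
 * slot's overflow list populated by octeon_register_dispatch_fn().
 */
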
octeon_dispatch_fn_t
octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
		    u16 subcode)
{
	u32 idx;
	struct list_head *dispatch;
	octeon_dispatch_fn_t fn = NULL;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (!(octeon_dev->dispatch.dlist[idx].opcode)) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn = octeon_dev->dispatch.dlist[idx].dispatch_fn;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn = ((struct octeon_dispatch *)
				      dispatch)->dispatch_fn;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);
	return fn;
}

/* octeon_register_dispatch_fn
 * Parameters:
 *   oct       - pointer to the octeon device.
 *   opcode    - opcode for which driver should call the registered function
 *   subcode   - subcode for which driver should call the registered function
 *   fn        - The function to call when a packet with "opcode" arrives in
 *		  octeon output queues.
 *   fn_arg    - The argument to be passed when calling function "fn".
 * Description:
 *   Registers a function and its argument to be called when a packet
 *   arrives in Octeon output queues with "opcode".
 * Returns:
 *   Success: 0
 *   Failure: 1
 * Locks:
 *   No locks are held.
 */
int
octeon_register_dispatch_fn(struct octeon_device *oct,
			    u16 opcode,
			    u16 subcode,
			    octeon_dispatch_fn_t fn, void *fn_arg)
{
	u32 idx;
	octeon_dispatch_fn_t pfn;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&oct->dispatch.lock);
	/* Add dispatch function to first level of lookup table */
	if (oct->dispatch.dlist[idx].opcode == 0) {
		oct->dispatch.dlist[idx].opcode = combined_opcode;
		oct->dispatch.dlist[idx].dispatch_fn = fn;
		oct->dispatch.dlist[idx].arg = fn_arg;
		oct->dispatch.count++;
		spin_unlock_bh(&oct->dispatch.lock);
		return 0;
	}

	spin_unlock_bh(&oct->dispatch.lock);

	/* Check if there was a function already registered for this
	 * opcode/subcode.
	 */
	pfn = octeon_get_dispatch(oct, opcode, subcode);
	if (!pfn) {
		struct octeon_dispatch *dispatch;

		dev_dbg(&oct->pci_dev->dev,
			"Adding opcode to dispatch list linked list\n");
		dispatch = kmalloc(sizeof(*dispatch), GFP_KERNEL);
		if (!dispatch)
			return 1;

		dispatch->opcode = combined_opcode;
		dispatch->dispatch_fn = fn;
		dispatch->arg = fn_arg;

		/* Add dispatch function to linked list of fn ptrs
		 * at the hashed index.
		 */
		spin_lock_bh(&oct->dispatch.lock);
		list_add(&dispatch->list, &oct->dispatch.dlist[idx].list);
		oct->dispatch.count++;
		spin_unlock_bh(&oct->dispatch.lock);

	} else {
		if (pfn == fn &&
		    octeon_get_dispatch_arg(oct, opcode, subcode) == fn_arg)
			return 0;

		dev_err(&oct->pci_dev->dev,
			"Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
			opcode, subcode);
		return 1;
	}

	return 0;
}
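
/* Example registration (a sketch mirroring how the NIC module wires up the
 * CORE_DRV_ACTIVE notification; treat the opcode names as illustrative):
 *
 *	octeon_register_dispatch_fn(oct, OPCODE_NIC,
 *				    OPCODE_NIC_CORE_DRV_ACTIVE,
 *				    octeon_core_drv_init, oct);
 */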

int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
{
	u32 i;
	char app_name[16];
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	struct octeon_core_setup *cs = NULL;
	u32 num_nic_ports = 0;

	if (OCTEON_CN6XXX(oct))
		num_nic_ports =
			CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn6xxx));
	else if (OCTEON_CN23XX_PF(oct))
		num_nic_ports =
			CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn23xx_pf));

	if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
		dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
			atomic_read(&oct->status));
		goto core_drv_init_err;
	}

	strncpy(app_name,
		get_oct_app_string(
		(u32)recv_pkt->rh.r_core_drv_init.app_mode),
		sizeof(app_name) - 1);
	oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
	if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
		oct->fw_info.max_nic_ports =
			(u32)recv_pkt->rh.r_core_drv_init.max_nic_ports;
		oct->fw_info.num_gmx_ports =
			(u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports;
	}

	if (oct->fw_info.max_nic_ports < num_nic_ports) {
		dev_err(&oct->pci_dev->dev,
			"Config has more ports than firmware allows (%d > %d).\n",
			num_nic_ports, oct->fw_info.max_nic_ports);
		goto core_drv_init_err;
	}
	oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
	oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
	oct->pfvf_hsword.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;

	oct->pfvf_hsword.pkind = recv_pkt->rh.r_core_drv_init.pkind;

	for (i = 0; i < oct->num_iqs; i++)
		oct->instr_queue[i]->txpciq.s.pkind = oct->pfvf_hsword.pkind;

	atomic_set(&oct->status, OCT_DEV_CORE_OK);

	cs = &core_setup[oct->octeon_id];

	if (recv_pkt->buffer_size[0] != (sizeof(*cs) + OCT_DROQ_INFO_SIZE)) {
		dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
			(u32)sizeof(*cs),
			recv_pkt->buffer_size[0]);
	}

	memcpy(cs, get_rbd(
	       recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE, sizeof(*cs));

	strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
	strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
		OCT_SERIAL_LEN);

	octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));

	oct->boardinfo.major = cs->board_rev_major;
	oct->boardinfo.minor = cs->board_rev_minor;

	dev_info(&oct->pci_dev->dev,
		 "Running %s (%llu Hz)\n",
		 app_name, CVM_CAST64(cs->corefreq));

core_drv_init_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}

int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
{
	if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&
	    (oct->io_qmask.iq & BIT_ULL(q_no)))
		return oct->instr_queue[q_no]->max_count;

	return -1;
}

int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
{
	if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&
	    (oct->io_qmask.oq & BIT_ULL(q_no)))
		return oct->droq[q_no]->max_count;
	return -1;
}

/* Returns the model-specific OCTEON configuration used for the
 * host/firmware handshake.
 */
struct octeon_config *octeon_get_conf(struct octeon_device *oct)
{
	struct octeon_config *default_oct_conf = NULL;

	/* check the OCTEON Device model & return the corresponding octeon
	 * configuration
	 */

	if (OCTEON_CN6XXX(oct)) {
		default_oct_conf =
			(struct octeon_config *)(CHIP_CONF(oct, cn6xxx));
	} else if (OCTEON_CN23XX_PF(oct)) {
		default_oct_conf = (struct octeon_config *)
			(CHIP_CONF(oct, cn23xx_pf));
	} else if (OCTEON_CN23XX_VF(oct)) {
		default_oct_conf = (struct octeon_config *)
			(CHIP_CONF(oct, cn23xx_vf));
	}
	return default_oct_conf;
}

/* scratch register address is same in all the OCT-II and CN70XX models */
#define CNXX_SLI_SCRATCH1   0x3C0

/* Get the octeon device pointer.
 *  @param octeon_id  - The id for which the octeon device pointer is required.
 *  @return Success: Octeon device pointer.
 *  @return Failure: NULL.
 */
struct octeon_device *lio_get_device(u32 octeon_id)
{
	if (octeon_id >= MAX_OCTEON_DEVICES)
		return NULL;
	else
		return octeon_device[octeon_id];
}

u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
{
	u64 val64;
	unsigned long flags;
	u32 addrhi;

	spin_lock_irqsave(&oct->pci_win_lock, flags);

	/* The windowed read happens when the LSB of the addr is written.
	 * So write MSB first
	 */
	addrhi = (addr >> 32);
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX) ||
	    (oct->chip_id == OCTEON_CN23XX_PF_VID))
		addrhi |= 0x00060000;
	writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);

	/* Read back to preserve ordering of writes */
	readl(oct->reg_list.pci_win_rd_addr_hi);

	writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
	readl(oct->reg_list.pci_win_rd_addr_lo);

	val64 = readq(oct->reg_list.pci_win_rd_data);

	spin_unlock_irqrestore(&oct->pci_win_lock, flags);

	return val64;
}

void lio_pci_writeq(struct octeon_device *oct,
		    u64 val,
		    u64 addr)
{
	unsigned long flags;

	spin_lock_irqsave(&oct->pci_win_lock, flags);

	writeq(addr, oct->reg_list.pci_win_wr_addr);

	/* The write happens when the LSB is written. So write MSB first. */
	writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
	/* Read the MSB to ensure ordering of writes. */
	readl(oct->reg_list.pci_win_wr_data_hi);

	writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);

	spin_unlock_irqrestore(&oct->pci_win_lock, flags);
}

int octeon_mem_access_ok(struct octeon_device *oct)
{
	u64 access_okay = 0;
	u64 lmc0_reset_ctl;

	/* Check to make sure a DDR interface is enabled */
	if (OCTEON_CN23XX_PF(oct)) {
		lmc0_reset_ctl = lio_pci_readq(oct, CN23XX_LMC0_RESET_CTL);
		access_okay =
			(lmc0_reset_ctl & CN23XX_LMC0_RESET_CTL_DDR3RST_MASK);
	} else {
		lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
		access_okay =
			(lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
	}

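	/* Inverted convention: 0 means DDR is out of reset and accessible. */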
	return access_okay ? 0 : 1;
}

int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
{
	int ret = 1;
	u32 ms;

	if (!timeout)
		return ret;

	for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
	     ms += HZ / 10) {
		ret = octeon_mem_access_ok(oct);

		/* wait 100 ms */
		if (ret)
			schedule_timeout_uninterruptible(HZ / 10);
	}

	return ret;
}

/* Get the octeon id assigned to the octeon device passed as argument.
 *  This function is exported to other modules.
 *  @param dev - octeon device pointer passed as a void *.
 *  @return octeon device id
 */
int lio_get_device_id(void *dev)
{
	struct octeon_device *octeon_dev = (struct octeon_device *)dev;
	u32 i;

	for (i = 0; i < MAX_OCTEON_DEVICES; i++)
		if (octeon_device[i] == octeon_dev)
			return octeon_dev->octeon_id;
	return -1;
}

void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
	u64 instr_cnt;
	u32 pkts_pend;
	struct octeon_device *oct = NULL;

	/* the whole thing needs to be atomic, ideally */
	if (droq) {
		pkts_pend = (u32)atomic_read(&droq->pkts_pending);
		writel(droq->pkt_count - pkts_pend, droq->pkts_sent_reg);
		droq->pkt_count = pkts_pend;
		oct = droq->oct_dev;
	}
	if (iq) {
		spin_lock_bh(&iq->lock);
		writel(iq->pkts_processed, iq->inst_cnt_reg);
		iq->pkt_in_done -= iq->pkts_processed;
		iq->pkts_processed = 0;
		/* this write needs to be flushed before we release the lock */
		spin_unlock_bh(&iq->lock);
		oct = iq->oct_dev;
	}
	/* Write RESEND. Writing RESEND in SLI_PKTX_CNTS should be enough
	 * to trigger tx interrupts as well, if they are pending.
	 */
	if (oct && (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))) {
		if (droq)
			writeq(CN23XX_INTR_RESEND, droq->pkts_sent_reg);
		/* we race with firmware here: read and write the IN_DONE_CNTS */
		else if (iq) {
			instr_cnt = readq(iq->inst_cnt_reg);
			writeq(((instr_cnt & 0xFFFFFFFF00000000ULL) |
				CN23XX_INTR_RESEND),
			       iq->inst_cnt_reg);
		}
	}
}