cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

adapter.h (17819B)


/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file should not be included directly.  Include t4vf_common.h instead.
 */

#ifndef __CXGB4VF_ADAPTER_H__
#define __CXGB4VF_ADAPTER_H__

#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>

#include "../cxgb4/t4_hw.h"

/*
 * Constants of the implementation.
 */
enum {
	MAX_NPORTS	= 1,		/* max # of "ports" */
	MAX_PORT_QSETS	= 8,		/* max # of Queue Sets / "port" */
	MAX_ETH_QSETS	= MAX_NPORTS*MAX_PORT_QSETS,

	/*
	 * MSI-X interrupt index usage.
	 */
	MSIX_FW		= 0,		/* MSI-X index for firmware Q */
	MSIX_IQFLINT	= 1,		/* MSI-X index base for Ingress Qs */
	MSIX_EXTRAS	= 1,
	MSIX_ENTRIES	= MAX_ETH_QSETS + MSIX_EXTRAS,

	/*
	 * The maximum number of Ingress and Egress Queues is determined by
	 * the maximum number of "Queue Sets" which we support plus any
	 * ancillary queues.  Each "Queue Set" requires one Ingress Queue
	 * for RX Packet Ingress Event notifications and two Egress Queues for
	 * a Free List and an Ethernet TX list.
	 */
	INGQ_EXTRAS	= 2,		/* firmware event queue and */
					/*   forwarded interrupts */
	MAX_INGQ	= MAX_ETH_QSETS+INGQ_EXTRAS,
	MAX_EGRQ	= MAX_ETH_QSETS*2,
};
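
/*
 * Worked out with the values above (MAX_NPORTS = 1, MAX_PORT_QSETS = 8):
 * MAX_ETH_QSETS = 8, MSIX_ENTRIES = 9 (one vector per Queue Set plus the
 * firmware event queue), MAX_INGQ = 10 (the Queue Sets plus the firmware
 * event queue and the forwarded-interrupt queue) and MAX_EGRQ = 16 (a Free
 * List and a TX Queue per Queue Set).
 */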

/*
 * Forward structure definition references.
 */
struct adapter;
struct sge_eth_rxq;
struct sge_rspq;

/*
 * Per-"port" information.  This is really per-Virtual Interface information
 * but the use of the "port" nomenclature makes it easier to go back and forth
 * between the PF and VF drivers ...
 */
struct port_info {
	struct adapter *adapter;	/* our adapter */
	u32 vlan_id;			/* vlan id for VST */
	u16 viid;			/* virtual interface ID */
	int xact_addr_filt;		/* index of our MAC address filter */
	u16 rss_size;			/* size of VI's RSS table slice */
	u8 pidx;			/* index into adapter port[] */
	s8 mdio_addr;
	u8 port_type;			/* firmware port type */
	u8 mod_type;			/* firmware module type */
	u8 port_id;			/* physical port ID */
	u8 nqsets;			/* # of "Queue Sets" */
	u8 first_qset;			/* index of first "Queue Set" */
	struct link_config link_cfg;	/* physical port configuration */
};

/*
 * Scatter Gather Engine resources for the "adapter".  Our ingress and egress
 * queues are organized into "Queue Sets" with one ingress and one egress
 * queue per Queue Set.  These Queue Sets are apportionable between the "ports"
 * (Virtual Interfaces).  One extra ingress queue is used to receive
 * asynchronous messages from the firmware.  Note that the "Queue IDs" that we
 * use here are really "Relative Queue IDs" which are returned as part of the
 * firmware command to allocate queues.  These queue IDs are relative to the
 * absolute Queue ID base of the section of the Queue ID space allocated to
 * the PF/VF.
 */

/*
 * SGE free-list queue state.
 */
struct rx_sw_desc;
struct sge_fl {
	unsigned int avail;		/* # of available RX buffers */
	unsigned int pend_cred;		/* new buffers since last FL DB ring */
	unsigned int cidx;		/* consumer index */
	unsigned int pidx;		/* producer index */
	unsigned long alloc_failed;	/* # of buffer allocation failures */
	unsigned long large_alloc_failed;
	unsigned long starving;		/* # of times FL was found starving */

	/*
	 * Write-once/infrequently fields.
	 * -------------------------------
	 */

	unsigned int cntxt_id;		/* SGE relative QID for the free list */
	unsigned int abs_id;		/* SGE absolute QID for the free list */
	unsigned int size;		/* capacity of free list */
	struct rx_sw_desc *sdesc;	/* address of SW RX descriptor ring */
	__be64 *desc;			/* address of HW RX descriptor ring */
	dma_addr_t addr;		/* PCI bus address of hardware ring */
	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
};

/*
 * An ingress packet gather list.
 */
struct pkt_gl {
	struct page_frag frags[MAX_SKB_FRAGS];
	void *va;			/* virtual address of first byte */
	unsigned int nfrags;		/* # of fragments */
	unsigned int tot_len;		/* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *,
			      const struct pkt_gl *);

/*
 * State for an SGE Response Queue.
 */
struct sge_rspq {
	struct napi_struct napi;	/* NAPI scheduling control */
	const __be64 *cur_desc;		/* current descriptor in queue */
	unsigned int cidx;		/* consumer index */
	u8 gen;				/* current generation bit */
	u8 next_intr_params;		/* holdoff params for next interrupt */
	int offset;			/* offset into current FL buffer */

	unsigned int unhandled_irqs;	/* bogus interrupts */

	/*
	 * Write-once/infrequently fields.
	 * -------------------------------
	 */

	u8 intr_params;			/* interrupt holdoff parameters */
	u8 pktcnt_idx;			/* interrupt packet threshold */
	u8 idx;				/* queue index within its group */
	u16 cntxt_id;			/* SGE rel QID for the response Q */
	u16 abs_id;			/* SGE abs QID for the response Q */
	__be64 *desc;			/* address of hardware response ring */
	dma_addr_t phys_addr;		/* PCI bus address of ring */
	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
	unsigned int iqe_len;		/* entry size */
	unsigned int size;		/* capacity of response Q */
	struct adapter *adapter;	/* our adapter */
	struct net_device *netdev;	/* associated net device */
	rspq_handler_t handler;		/* the handler for this response Q */
};

/*
 * Ethernet queue statistics
 */
struct sge_eth_stats {
	unsigned long pkts;		/* # of ethernet packets */
	unsigned long lro_pkts;		/* # of LRO super packets */
	unsigned long lro_merged;	/* # of wire packets merged by LRO */
	unsigned long rx_cso;		/* # of Rx checksum offloads */
	unsigned long vlan_ex;		/* # of Rx VLAN extractions */
	unsigned long rx_drops;		/* # of packets dropped due to no mem */
};

/*
 * State for an Ethernet Receive Queue.
 */
struct sge_eth_rxq {
	struct sge_rspq rspq;		/* Response Queue */
	struct sge_fl fl;		/* Free List */
	struct sge_eth_stats stats;	/* receive statistics */
};

/*
 * SGE Transmit Queue state.  This contains all of the resources associated
 * with the hardware status of a TX Queue which is a circular ring of hardware
 * TX Descriptors.  For convenience, it also contains a pointer to a parallel
 * "Software Descriptor" array but we don't know anything about it here other
 * than its type name.
 */
struct tx_desc {
	/*
	 * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the
	 * hardware: Sizes, Producer and Consumer indices, etc.
	 */
	__be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)];
};
struct tx_sw_desc;
struct sge_txq {
	unsigned int in_use;		/* # of in-use TX descriptors */
	unsigned int size;		/* # of descriptors */
	unsigned int cidx;		/* SW consumer index */
	unsigned int pidx;		/* producer index */
	unsigned long stops;		/* # of times queue has been stopped */
	unsigned long restarts;		/* # of queue restarts */

	/*
	 * Write-once/infrequently fields.
	 * -------------------------------
	 */

	unsigned int cntxt_id;		/* SGE relative QID for the TX Q */
	unsigned int abs_id;		/* SGE absolute QID for the TX Q */
	struct tx_desc *desc;		/* address of HW TX descriptor ring */
	struct tx_sw_desc *sdesc;	/* address of SW TX descriptor ring */
	struct sge_qstat *stat;		/* queue status entry */
	dma_addr_t phys_addr;		/* PCI bus address of hardware ring */
	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
};

/*
 * State for an Ethernet Transmit Queue.
 */
struct sge_eth_txq {
	struct sge_txq q;		/* SGE TX Queue */
	struct netdev_queue *txq;	/* associated netdev TX queue */
	unsigned long tso;		/* # of TSO requests */
	unsigned long tx_cso;		/* # of TX checksum offloads */
	unsigned long vlan_ins;		/* # of TX VLAN insertions */
	unsigned long mapping_err;	/* # of I/O MMU packet mapping errors */
};

/*
 * The complete set of Scatter/Gather Engine resources.
 */
struct sge {
	/*
	 * Our "Queue Sets" ...
	 */
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];

	/*
	 * Extra ingress queues for asynchronous firmware events and
	 * forwarded interrupts (when in MSI mode).
	 */
	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

	struct sge_rspq intrq ____cacheline_aligned_in_smp;
	spinlock_t intrq_lock;

	/*
	 * State for managing "starving Free Lists" -- Free Lists which have
	 * fallen below a certain threshold of buffers available to the
	 * hardware and attempts to refill them up to that threshold have
	 * failed.  We have a regular "slow tick" timer process which will
	 * make periodic attempts to refill these starving Free Lists ...
	 */
	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
	struct timer_list rx_timer;

	/*
	 * State for cleaning up completed TX descriptors.
	 */
	struct timer_list tx_timer;

	/*
	 * Write-once/infrequently fields.
	 * -------------------------------
	 */

	u16 max_ethqsets;		/* # of available Ethernet queue sets */
	u16 ethqsets;			/* # of active Ethernet queue sets */
	u16 ethtxq_rover;		/* Tx queue to clean up next */
	u16 timer_val[SGE_NTIMERS];	/* interrupt holdoff timer array */
	u8 counter_val[SGE_NCOUNTERS];	/* interrupt RX threshold array */

	/* Decoded Adapter Parameters.
	 */
	u32 fl_pg_order;		/* large page allocation size */
	u32 stat_len;			/* length of status page at ring end */
	u32 pktshift;			/* padding between CPL & packet data */
	u32 fl_align;			/* response queue message alignment */
	u32 fl_starve_thres;		/* Free List starvation threshold */

	/*
	 * Reverse maps from Absolute Queue IDs to associated queue pointers.
	 * The absolute Queue IDs are in a compact range which starts at a
	 * [potentially large] Base Queue ID.  We perform the reverse map by
	 * first converting the Absolute Queue ID into a Relative Queue ID by
	 * subtracting off the Base Queue ID and then use a Relative Queue ID
	 * indexed table to get the pointer to the corresponding software
	 * queue structure.
	 */
	unsigned int egr_base;
	unsigned int ingr_base;
	void *egr_map[MAX_EGRQ];
	struct sge_rspq *ingr_map[MAX_INGQ];
};

/*
 * Utility macros to convert Absolute- to Relative-Queue indices for Egress-
 * and Ingress-Queues.  The EQ_MAP() and IQ_MAP() macros, which provide
 * pointers to Egress- and Ingress-Queues, can be used as both L- and R-values.
 */
#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base))
#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base))

#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)])
#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)])
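
/*
 * Example (illustrative, with a hypothetical abs_qid holding an absolute
 * ingress Queue ID reported by the hardware): the reverse map yields the
 * software Response Queue structure; and since IQ_MAP()/EQ_MAP() expand to
 * array elements, they can also be assigned to (used as L-values).
 *
 *	struct sge *s = &adapter->sge;
 *	struct sge_rspq *rspq = IQ_MAP(s, abs_qid);
 */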

/*
 * Macro to iterate across Queue Sets ("rxq" is a historic misnomer).
 */
#define for_each_ethrxq(sge, iter) \
	for (iter = 0; iter < (sge)->ethqsets; iter++)
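
/*
 * Example (illustrative): walking the active Queue Sets of an adapter.
 *
 *	struct sge *s = &adapter->sge;
 *	int qs;
 *
 *	for_each_ethrxq(s, qs) {
 *		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
 *		struct sge_eth_txq *txq = &s->ethtxq[qs];
 *		...
 *	}
 */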

struct hash_mac_addr {
	struct list_head list;
	u8 addr[ETH_ALEN];
	unsigned int iface_mac;
};

struct mbox_list {
	struct list_head list;
};

/*
 * Per-"adapter" (Virtual Function) information.
 */
struct adapter {
	/* PCI resources */
	void __iomem *regs;
	void __iomem *bar2;
	struct pci_dev *pdev;
	struct device *pdev_dev;

	/* "adapter" resources */
	unsigned long registered_device_map;
	unsigned long open_device_map;
	unsigned long flags;
	struct adapter_params params;

	/* queue and interrupt resources */
	struct {
		unsigned short vec;
		char desc[22];
	} msix_info[MSIX_ENTRIES];
	struct sge sge;

	/* Linux network device resources */
	struct net_device *port[MAX_NPORTS];
	const char *name;
	unsigned int msg_enable;

	/* debugfs resources */
	struct dentry *debugfs_root;

	/* various locks */
	spinlock_t stats_lock;

	/* lock for mailbox cmd list */
	spinlock_t mbox_lock;
	struct mbox_list mlist;

	/* support for mailbox command/reply logging */
#define T4VF_OS_LOG_MBOX_CMDS 256
	struct mbox_cmd_log *mbox_log;

	/* list of MAC addresses in MPS Hash */
	struct list_head mac_hlist;
};

enum { /* adapter flags */
	CXGB4VF_FULL_INIT_DONE			= (1UL << 0),
	CXGB4VF_USING_MSI			= (1UL << 1),
	CXGB4VF_USING_MSIX			= (1UL << 2),
	CXGB4VF_QUEUES_BOUND			= (1UL << 3),
	CXGB4VF_ROOT_NO_RELAXED_ORDERING	= (1UL << 4),
	CXGB4VF_FW_OK				= (1UL << 5),
};
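
/*
 * Example (illustrative): the values above are single-bit flags kept in
 * adapter->flags, so they are set, cleared and tested with the usual
 * bitwise operators, e.g.
 *
 *	adapter->flags |= CXGB4VF_USING_MSIX;
 *	if (adapter->flags & CXGB4VF_FULL_INIT_DONE)
 *		...
 */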

/*
 * The following register read/write routine definitions are required by
 * the common code.
 */

/**
 * t4_read_reg - read a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 32-bit value of the given HW register.
 */
static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
{
	return readl(adapter->regs + reg_addr);
}

/**
 * t4_write_reg - write a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
	writel(val, adapter->regs + reg_addr);
}
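
/*
 * Example (illustrative; SOME_REG and SOME_BIT are placeholder names): a
 * read-modify-write sequence built from the 32-bit accessors above.
 *
 *	u32 v = t4_read_reg(adapter, SOME_REG);
 *	t4_write_reg(adapter, SOME_REG, v | SOME_BIT);
 */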

#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
#endif
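
/*
 * Note that these fallbacks are only used where the platform does not
 * provide native 64-bit MMIO accessors: they issue two 32-bit accesses
 * (low word first), so the combined 64-bit access is not atomic there.
 */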

/**
 * t4_read_reg64 - read a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 64-bit value of the given HW register.
 */
static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
{
	return readq(adapter->regs + reg_addr);
}

/**
 * t4_write_reg64 - write a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 64-bit value into the given HW register.
 */
static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
				  u64 val)
{
	writeq(val, adapter->regs + reg_addr);
}

/**
 * port_name - return the string name of a port
 * @adapter: the adapter
 * @pidx: the port index
 *
 * Return the string name of the selected port.
 */
static inline const char *port_name(struct adapter *adapter, int pidx)
{
	return adapter->port[pidx]->name;
}

/**
 * t4_os_set_hw_addr - store a port's MAC address in SW
 * @adapter: the adapter
 * @pidx: the port index
 * @hw_addr: the Ethernet address
 *
 * Store the Ethernet address of the given port in SW.  Called by the common
 * code when it retrieves a port's Ethernet address from EEPROM.
 */
static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
				     u8 hw_addr[])
{
	eth_hw_addr_set(adapter->port[pidx], hw_addr);
}

/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device.
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adapter: the adapter
 * @pidx: the port index
 *
 * Return the port_info structure for the selected port of the adapter.
 */
static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx)
{
	return netdev_priv(adapter->port[pidx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device.
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
	return netdev2pinfo(dev)->adapter;
}
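
/*
 * Example (illustrative): recovering driver state from the net_device the
 * network stack hands us, using the helpers above.  This works because the
 * port_info is the netdev's private data and carries a back-pointer to its
 * adapter.
 *
 *	struct port_info *pi = netdev2pinfo(dev);
 *	struct adapter *adapter = netdev2adap(dev);
 */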

/*
 * OS "Callback" function declarations.  These are functions that the OS code
 * is "contracted" to provide for the common code.
 */
void t4vf_os_link_changed(struct adapter *, int, int);
void t4vf_os_portmod_changed(struct adapter *, int);

/*
 * SGE function prototype declarations.
 */
int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool,
		       struct net_device *, int,
		       struct sge_fl *, rspq_handler_t);
int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
			   struct net_device *, struct netdev_queue *,
			   unsigned int);
void t4vf_free_sge_resources(struct adapter *);

netdev_tx_t t4vf_eth_xmit(struct sk_buff *, struct net_device *);
int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
		       const struct pkt_gl *);

irq_handler_t t4vf_intr_handler(struct adapter *);
irqreturn_t t4vf_sge_intr_msix(int, void *);

int t4vf_sge_init(struct adapter *);
void t4vf_sge_start(struct adapter *);
void t4vf_sge_stop(struct adapter *);

#endif /* __CXGB4VF_ADAPTER_H__ */