cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mlx4_ib.h (26487B)


/*
 * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_IB_H
#define MLX4_IB_H

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/idr.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cq.h>

#define MLX4_IB_DRV_NAME	"mlx4_ib"

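/* Prefix all pr_*() output with the driver name and the calling function. */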
#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt)	"<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__

#define mlx4_ib_warn(ibdev, format, arg...) \
	dev_warn((ibdev)->dev.parent, MLX4_IB_DRV_NAME ": " format, ## arg)

enum {
	MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
	MLX4_IB_MAX_HEADROOM	 = 2048
};

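/*
 * Spare send WQEs kept ahead of the producer: enough (1 << shift)-byte
 * WQEs to cover MLX4_IB_MAX_HEADROOM bytes of hardware prefetch, plus one.
 */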
#define MLX4_IB_SQ_HEADROOM(shift)	((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
#define MLX4_IB_SQ_MAX_SPARE		(MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))

/* module param to indicate if SM assigns the alias_GUID */
extern int mlx4_ib_sm_guid_assign;

#define MLX4_IB_UC_STEER_QPN_ALIGN 1
#define MLX4_IB_UC_MAX_NUM_QPS     256

enum hw_bar_type {
	HW_BAR_BF,
	HW_BAR_DB,
	HW_BAR_CLOCK,
	HW_BAR_COUNT
};

struct mlx4_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct mlx4_uar		uar;
	struct list_head	db_page_list;
	struct mutex		db_page_mutex;
	struct list_head	wqn_ranges_list;
	struct mutex		wqn_ranges_mutex; /* protect wqn_ranges_list */
};

struct mlx4_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

struct mlx4_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
};

struct mlx4_ib_cq_buf {
	struct mlx4_buf		buf;
	struct mlx4_mtt		mtt;
	int			entry_size;
};

struct mlx4_ib_cq_resize {
	struct mlx4_ib_cq_buf	buf;
	int			cqe;
};

struct mlx4_ib_cq {
	struct ib_cq		ibcq;
	struct mlx4_cq		mcq;
	struct mlx4_ib_cq_buf	buf;
	struct mlx4_ib_cq_resize *resize_buf;
	struct mlx4_db		db;
	spinlock_t		lock;
	struct mutex		resize_mutex;
	struct ib_umem	       *umem;
	struct ib_umem	       *resize_umem;
	int			create_flags;
	/* List of QPs that this CQ serves. */
	struct list_head		send_qp_list;
	struct list_head		recv_qp_list;
};

#define MLX4_MR_PAGES_ALIGN 0x40

struct mlx4_ib_mr {
	struct ib_mr		ibmr;
	__be64			*pages;
	dma_addr_t		page_map;
	u32			npages;
	u32			max_pages;
	struct mlx4_mr		mmr;
	struct ib_umem	       *umem;
	size_t			page_map_size;
};

struct mlx4_ib_mw {
	struct ib_mw		ibmw;
	struct mlx4_mw		mmw;
};

#define MAX_REGS_PER_FLOW 2

struct mlx4_flow_reg_id {
	u64 id;
	u64 mirror;
};

struct mlx4_ib_flow {
	struct ib_flow ibflow;
	/* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
	struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
};

struct mlx4_ib_wq {
	u64		       *wrid;
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
};

enum {
	MLX4_IB_QP_CREATE_ROCE_V2_GSI = IB_QP_CREATE_RESERVED_START
};

enum mlx4_ib_qp_flags {
	MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
	MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
	MLX4_IB_QP_SCATTER_FCS = IB_QP_CREATE_SCATTER_FCS,

	/* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
	MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
	MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
	MLX4_IB_SRIOV_SQP = 1 << 31,
};

struct mlx4_ib_gid_entry {
	struct list_head	list;
	union ib_gid		gid;
	int			added;
	u8			port;
};

enum mlx4_ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	MLX4_IB_QPT_SMI = IB_QPT_SMI,
	MLX4_IB_QPT_GSI = IB_QPT_GSI,

	MLX4_IB_QPT_RC = IB_QPT_RC,
	MLX4_IB_QPT_UC = IB_QPT_UC,
	MLX4_IB_QPT_UD = IB_QPT_UD,
	MLX4_IB_QPT_RAW_IPV6 = IB_QPT_RAW_IPV6,
	MLX4_IB_QPT_RAW_ETHERTYPE = IB_QPT_RAW_ETHERTYPE,
	MLX4_IB_QPT_RAW_PACKET = IB_QPT_RAW_PACKET,
	MLX4_IB_QPT_XRC_INI = IB_QPT_XRC_INI,
	MLX4_IB_QPT_XRC_TGT = IB_QPT_XRC_TGT,

	MLX4_IB_QPT_PROXY_SMI_OWNER	= 1 << 16,
	MLX4_IB_QPT_PROXY_SMI		= 1 << 17,
	MLX4_IB_QPT_PROXY_GSI		= 1 << 18,
	MLX4_IB_QPT_TUN_SMI_OWNER	= 1 << 19,
	MLX4_IB_QPT_TUN_SMI		= 1 << 20,
	MLX4_IB_QPT_TUN_GSI		= 1 << 21,
};

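/* Mask of all SR-IOV proxy and tunnel QP types defined above. */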
#define MLX4_IB_QPT_ANY_SRIOV	(MLX4_IB_QPT_PROXY_SMI_OWNER | \
	MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER | \
	MLX4_IB_QPT_TUN_SMI | MLX4_IB_QPT_TUN_GSI)

enum mlx4_ib_mad_ifc_flags {
	MLX4_MAD_IFC_IGNORE_MKEY	= 1,
	MLX4_MAD_IFC_IGNORE_BKEY	= 2,
	MLX4_MAD_IFC_IGNORE_KEYS	= (MLX4_MAD_IFC_IGNORE_MKEY |
					   MLX4_MAD_IFC_IGNORE_BKEY),
	MLX4_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX4_NUM_TUNNEL_BUFS		= 512,
	MLX4_NUM_WIRE_BUFS		= 2048,
};

struct mlx4_ib_tunnel_header {
	struct mlx4_av av;
	__be32 remote_qpn;
	__be32 qkey;
	__be16 vlan;
	u8 mac[6];
	__be16 pkey_index;
	u8 reserved[6];
};

struct mlx4_ib_buf {
	void *addr;
	dma_addr_t map;
};

struct mlx4_rcv_tunnel_hdr {
	__be32 flags_src_qp; /* flags[6:5] is defined for VLANs:
			      * 0x0 - no vlan was in the packet
			      * 0x01 - C-VLAN was in the packet */
	u8 g_ml_path; /* gid bit stands for ipv6/4 header in RoCE */
	u8 reserved;
	__be16 pkey_index;
	__be16 sl_vid;
	__be16 slid_mac_47_32;
	__be32 mac_31_0;
};

struct mlx4_ib_proxy_sqp_hdr {
	struct ib_grh grh;
	struct mlx4_rcv_tunnel_hdr tun;
}  __packed;

struct mlx4_roce_smac_vlan_info {
	u64 smac;
	int smac_index;
	int smac_port;
	u64 candidate_smac;
	int candidate_smac_index;
	int candidate_smac_port;
	u16 vid;
	int vlan_index;
	int vlan_port;
	u16 candidate_vid;
	int candidate_vlan_index;
	int candidate_vlan_port;
	int update_vid;
};

struct mlx4_wqn_range {
	int			base_wqn;
	int			size;
	int			refcount;
	bool			dirty;
	struct list_head	list;
};

struct mlx4_ib_rss {
	unsigned int		base_qpn_tbl_sz;
	u8			flags;
	u8			rss_key[MLX4_EN_RSS_KEY_SIZE];
};

enum {
	/*
	 * Largest possible UD header: send with GRH and immediate
	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
	 * tag.  (LRH would only use 8 bytes, so Ethernet is the
	 * biggest case)
	 */
	MLX4_IB_UD_HEADER_SIZE		= 82,
	MLX4_IB_LSO_HEADER_SPARE	= 128,
};

struct mlx4_ib_sqp {
	int pkey_index;
	u32 qkey;
	u32 send_psn;
	struct ib_ud_header ud_header;
	u8 header_buf[MLX4_IB_UD_HEADER_SIZE];
	struct ib_qp *roce_v2_gsi;
};

struct mlx4_ib_qp {
	union {
		struct ib_qp	ibqp;
		struct ib_wq	ibwq;
	};
	struct mlx4_qp		mqp;
	struct mlx4_buf		buf;

	struct mlx4_db		db;
	struct mlx4_ib_wq	rq;

	u32			doorbell_qpn;
	__be32			sq_signal_bits;
	unsigned		sq_next_wqe;
	int			sq_spare_wqes;
	struct mlx4_ib_wq	sq;

	enum mlx4_ib_qp_type	mlx4_ib_qp_type;
	struct ib_umem	       *umem;
	struct mlx4_mtt		mtt;
	int			buf_size;
	struct mutex		mutex;
	u16			xrcdn;
	u32			flags;
	u8			port;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
	u8			sq_no_prefetch;
	u8			state;
	int			mlx_type;
	u32			inl_recv_sz;
	struct list_head	gid_list;
	struct list_head	steering_rules;
	struct mlx4_ib_buf	*sqp_proxy_rcv;
	struct mlx4_roce_smac_vlan_info pri;
	struct mlx4_roce_smac_vlan_info alt;
	u64			reg_id;
	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct counter_index	*counter_index;
	struct mlx4_wqn_range	*wqn_range;
	/* Number of RSS QP parents that use this WQ */
	u32			rss_usecnt;
	union {
		struct mlx4_ib_rss *rss_ctx;
		struct mlx4_ib_sqp *sqp;
	};
};

struct mlx4_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx4_srq		msrq;
	struct mlx4_buf		buf;
	struct mlx4_db		db;
	u64		       *wrid;
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	struct mlx4_mtt		mtt;
	struct mutex		mutex;
};

struct mlx4_ib_ah {
	struct ib_ah		ibah;
	union mlx4_ext_av       av;
};

struct mlx4_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
};

/****************************************/
/* alias guid support */
/****************************************/
#define NUM_PORT_ALIAS_GUID		2
#define NUM_ALIAS_GUID_IN_REC		8
#define NUM_ALIAS_GUID_REC_IN_PORT	16
#define GUID_REC_SIZE			8
#define NUM_ALIAS_GUID_PER_PORT		128
#define MLX4_NOT_SET_GUID		(0x00LL)
#define MLX4_GUID_FOR_DELETE_VAL	(~(0x00LL))
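/*
 * Note: NUM_ALIAS_GUID_PER_PORT (128) equals NUM_ALIAS_GUID_IN_REC (8)
 * GUIDs per record times NUM_ALIAS_GUID_REC_IN_PORT (16) records.
 */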

enum mlx4_guid_alias_rec_status {
	MLX4_GUID_INFO_STATUS_IDLE,
	MLX4_GUID_INFO_STATUS_SET,
};

#define GUID_STATE_NEED_PORT_INIT 0x01

enum mlx4_guid_alias_rec_method {
	MLX4_GUID_INFO_RECORD_SET	= IB_MGMT_METHOD_SET,
	MLX4_GUID_INFO_RECORD_DELETE	= IB_SA_METHOD_DELETE,
};

struct mlx4_sriov_alias_guid_info_rec_det {
	u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
	ib_sa_comp_mask guid_indexes; /* indicates which of the 8 records are valid */
	enum mlx4_guid_alias_rec_status status; /* administrative status of the record */
	unsigned int guids_retry_schedule[NUM_ALIAS_GUID_IN_REC];
	u64 time_to_run;
};

struct mlx4_sriov_alias_guid_port_rec_det {
	struct mlx4_sriov_alias_guid_info_rec_det all_rec_per_port[NUM_ALIAS_GUID_REC_IN_PORT];
	struct workqueue_struct *wq;
	struct delayed_work alias_guid_work;
	u32 port;
	u32 state_flags;
	struct mlx4_sriov_alias_guid *parent;
	struct list_head cb_list;
};

struct mlx4_sriov_alias_guid {
	struct mlx4_sriov_alias_guid_port_rec_det ports_guid[MLX4_MAX_PORTS];
	spinlock_t ag_work_lock;
	struct ib_sa_client *sa_client;
};

struct mlx4_ib_demux_work {
	struct work_struct	work;
	struct mlx4_ib_dev     *dev;
	int			slave;
	int			do_init;
	u8			port;

};

struct mlx4_ib_tun_tx_buf {
	struct mlx4_ib_buf buf;
	struct ib_ah *ah;
};

struct mlx4_ib_demux_pv_qp {
	struct ib_qp *qp;
	enum ib_qp_type proxy_qpt;
	struct mlx4_ib_buf *ring;
	struct mlx4_ib_tun_tx_buf *tx_ring;
	spinlock_t tx_lock;
	unsigned tx_ix_head;
	unsigned tx_ix_tail;
};

enum mlx4_ib_demux_pv_state {
	DEMUX_PV_STATE_DOWN,
	DEMUX_PV_STATE_STARTING,
	DEMUX_PV_STATE_ACTIVE,
	DEMUX_PV_STATE_DOWNING,
};

struct mlx4_ib_demux_pv_ctx {
	int port;
	int slave;
	enum mlx4_ib_demux_pv_state state;
	int has_smi;
	struct ib_device *ib_dev;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct work_struct work;
	struct workqueue_struct *wq;
	struct workqueue_struct *wi_wq;
	struct mlx4_ib_demux_pv_qp qp[2];
};

struct mlx4_ib_demux_ctx {
	struct ib_device *ib_dev;
	int port;
	struct workqueue_struct *wq;
	struct workqueue_struct *wi_wq;
	struct workqueue_struct *ud_wq;
	spinlock_t ud_lock;
	atomic64_t subnet_prefix;
	__be64 guid_cache[128];
	struct mlx4_ib_dev *dev;
	/* the following lock protects both mcg_table and mcg_mgid0_list */
	struct mutex		mcg_table_lock;
	struct rb_root		mcg_table;
	struct list_head	mcg_mgid0_list;
	struct workqueue_struct	*mcg_wq;
	struct mlx4_ib_demux_pv_ctx **tun;
	atomic_t tid;
	int    flushing; /* flushing the work queue */
};

struct mlx4_ib_sriov {
	struct mlx4_ib_demux_ctx demux[MLX4_MAX_PORTS];
	struct mlx4_ib_demux_pv_ctx *sqps[MLX4_MAX_PORTS];
	/* when using this spinlock you should use "irq" because
	 * it may be taken from interrupt context. */
	spinlock_t going_down_lock;
	int is_going_down;

	struct mlx4_sriov_alias_guid alias_guid;

	/* CM paravirtualization fields */
	struct xarray pv_id_table;
	u32 pv_id_next;
	spinlock_t id_map_lock;
	struct rb_root sl_id_map;
	struct list_head cm_list;
	struct xarray xa_rej_tmout;
};

struct gid_cache_context {
	int real_index;
	int refcount;
};

struct gid_entry {
	union ib_gid	gid;
	enum ib_gid_type gid_type;
	struct gid_cache_context *ctx;
	u16 vlan_id;
};

struct mlx4_port_gid_table {
	struct gid_entry gids[MLX4_MAX_PORT_GIDS];
};

struct mlx4_ib_iboe {
	spinlock_t		lock;
	struct net_device      *netdevs[MLX4_MAX_PORTS];
	atomic64_t		mac[MLX4_MAX_PORTS];
	struct notifier_block	nb;
	struct mlx4_port_gid_table gids[MLX4_MAX_PORTS];
	enum ib_port_state	last_port_state[MLX4_MAX_PORTS];
};

struct pkey_mgt {
	u8			virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	u16			phys_pkey_cache[MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	struct list_head	pkey_port_list[MLX4_MFUNC_MAX];
	struct kobject	       *device_parent[MLX4_MFUNC_MAX];
};

struct mlx4_ib_iov_sysfs_attr {
	void *ctx;
	struct kobject *kobj;
	unsigned long data;
	u32 entry_num;
	char name[15];
	struct device_attribute dentry;
	struct device *dev;
};

struct mlx4_ib_iov_sysfs_attr_ar {
	struct mlx4_ib_iov_sysfs_attr dentries[3 * NUM_ALIAS_GUID_PER_PORT + 1];
};

struct mlx4_ib_iov_port {
	char name[100];
	u8 num;
	struct mlx4_ib_dev *dev;
	struct list_head list;
	struct mlx4_ib_iov_sysfs_attr_ar *dentr_ar;
	struct ib_port_attr attr;
	struct kobject	*cur_port;
	struct kobject	*admin_alias_parent;
	struct kobject	*gids_parent;
	struct kobject	*pkeys_parent;
	struct kobject	*mcgs_parent;
	struct mlx4_ib_iov_sysfs_attr mcg_dentry;
};

struct counter_index {
	struct  list_head       list;
	u32		index;
	u8		allocated;
};

struct mlx4_ib_counters {
	struct list_head        counters_list;
	struct mutex            mutex; /* mutex for accessing counters list */
	u32			default_counter;
};

#define MLX4_DIAG_COUNTERS_TYPES 2

struct mlx4_ib_diag_counters {
	struct rdma_stat_desc *descs;
	u32 *offset;
	u32 num_counters;
};

struct mlx4_ib_dev {
	struct ib_device	ib_dev;
	struct mlx4_dev	       *dev;
	int			num_ports;
	void __iomem	       *uar_map;

	struct mlx4_uar		priv_uar;
	u32			priv_pdn;
	MLX4_DECLARE_DOORBELL_LOCK(uar_lock);

	struct ib_mad_agent    *send_agent[MLX4_MAX_PORTS][2];
	struct ib_ah	       *sm_ah[MLX4_MAX_PORTS];
	spinlock_t		sm_lock;
	atomic64_t		sl2vl[MLX4_MAX_PORTS];
	struct mlx4_ib_sriov	sriov;

	struct mutex		cap_mask_mutex;
	bool			ib_active;
	struct mlx4_ib_iboe	iboe;
	struct mlx4_ib_counters counters_table[MLX4_MAX_PORTS];
	int		       *eq_table;
	struct kobject	       *iov_parent;
	struct kobject	       *ports_parent;
	struct kobject	       *dev_ports_parent[MLX4_MFUNC_MAX];
	struct mlx4_ib_iov_port	iov_ports[MLX4_MAX_PORTS];
	struct pkey_mgt		pkeys;
	unsigned long *ib_uc_qpns_bitmap;
	int steer_qpn_count;
	int steer_qpn_base;
	int steering_support;
	struct mlx4_ib_qp      *qp1_proxy[MLX4_MAX_PORTS];
	/* lock when destroying qp1_proxy and getting netdev events */
	struct mutex		qp1_proxy_lock[MLX4_MAX_PORTS];
	u8			bond_next_port;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head		qp_list;
	struct mlx4_ib_diag_counters diag_counters[MLX4_DIAG_COUNTERS_TYPES];
};

struct ib_event_work {
	struct work_struct	work;
	struct mlx4_ib_dev	*ib_dev;
	struct mlx4_eqe		ib_eqe;
	int			port;
};

struct mlx4_ib_qp_tunnel_init_attr {
	struct ib_qp_init_attr init_attr;
	int slave;
	enum ib_qp_type proxy_qp_type;
	u32 port;
};

struct mlx4_uverbs_ex_query_device {
	__u32 comp_mask;
	__u32 reserved;
};

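/*
 * Helpers converting core verbs objects into the mlx4_ib structures
 * that embed them, via container_of().
 */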
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
}

static inline struct mlx4_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx4_ib_ucontext, ibucontext);
}

static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx4_ib_pd, ibpd);
}

static inline struct mlx4_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx4_ib_xrcd, ibxrcd);
}

static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx4_ib_cq, ibcq);
}

static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)
{
	return container_of(mcq, struct mlx4_ib_cq, mcq);
}

static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx4_ib_mr, ibmr);
}

static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx4_ib_mw, ibmw);
}

static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
{
	return container_of(ibflow, struct mlx4_ib_flow, ibflow);
}

static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx4_ib_qp, ibqp);
}

static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_qp, mqp);
}

static inline struct mlx4_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx4_ib_srq, ibsrq);
}

static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq)
{
	return container_of(msrq, struct mlx4_ib_srq, msrq);
}

static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx4_ib_ah, ibah);
}

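/* Round-robin over the device's ports; returns a 1-based port number. */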
static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
{
	dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;

	return dev->bond_next_port + 1;
}

int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);

int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
			struct mlx4_db *db);
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem);
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
int mlx4_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx4_ib_dealloc_mw(struct ib_mw *mw);
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);
int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);

int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
			    int slave_sgid_index, u8 *s_mac, u16 vlan_tag);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
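/*
 * mlx4 address handles live entirely in host memory (union mlx4_ext_av
 * above), so destroying one requires no device command.
 */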
static inline int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}

int mlx4_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);

int mlx4_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx4_ib_drain_sq(struct ib_qp *qp);
void mlx4_ib_drain_rq(struct ib_qp *qp);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr);
int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);

int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);

int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
			 struct ib_port_attr *props, int netw_view);
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
			 u16 *pkey, int netw_view);

int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
			union ib_gid *gid, int netw_view);

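/*
 * A GRH is always present on RoCE (Ethernet) ports; on IB ports, check
 * the GRH bit of the address vector's g_slid field.
 */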
static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
	u32 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;

	if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
		return true;

	return !!(ah->av.ib.g_slid & 0x80);
}

int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx);
void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq);
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave);
int mlx4_ib_mcg_init(void);
void mlx4_ib_mcg_destroy(void);

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u32 port, __be64 guid);

int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, int slave,
				  struct ib_sa_mad *sa_mad);
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad);

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid);

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u32 port_num,
			    enum ib_event_type type);

void mlx4_ib_tunnels_update_work(struct work_struct *work);

int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u32 port,
			  enum ib_qp_type qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad);

int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u32 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
			 u32 qkey, struct rdma_ah_attr *attr, u8 *s_mac,
			 u16 vlan_id, struct ib_mad *mad);

__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);

int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
		struct ib_mad *mad);

int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
		struct ib_mad *mad);

void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev);
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave_id);

/* alias guid support */
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port);
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port);

void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num,
					  u32 port_num, u8 *p_data);

void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev,
					 int block_num, u32 port_num,
					 u8 *p_data);

int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
			    struct attribute *attr);
void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
			     struct attribute *attr);
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);
void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
				    int port, int slave_init);

int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device);

void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device);

__be64 mlx4_ib_gen_node_guid(void);

int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach);
struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
				    u64 length, u64 virt_addr,
				    int mr_access_flags, struct ib_pd *pd,
				    struct ib_udata *udata);
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    const struct ib_gid_attr *attr);

void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
				     int port);

void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);

struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);

int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata);
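/* struct mlx4_ib_rwq_ind_table carries no driver state, so destroy is a no-op. */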
static inline int
mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
{
	return 0;
}
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
				       int *num_of_mtts);

int mlx4_ib_cm_init(void);
void mlx4_ib_cm_destroy(void);

#endif /* MLX4_IB_H */