cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

iw_cxgb4.h (27021B)


/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched/mm.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/rdma_netlink.h>
#include <rdma/iw_portmap.h>
#include <rdma/restrack.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include <rdma/cxgb4-abi.h>

#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "t4.h"

#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)

static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;
}

#define C4IW_ID_TABLE_F_RANDOM 1       /* Pseudo-randomize the id's returned */
#define C4IW_ID_TABLE_F_EMPTY  2       /* Table is initially empty */

struct c4iw_id_table {
	u32 flags;
	u32 start;              /* logical minimal id */
	u32 last;               /* hint for find */
	u32 max;
	spinlock_t lock;
	unsigned long *table;
};

struct c4iw_resource {
	struct c4iw_id_table tpt_table;
	struct c4iw_id_table qid_table;
	struct c4iw_id_table pdid_table;
	struct c4iw_id_table srq_table;
};

struct c4iw_qid_list {
	struct list_head entry;
	u32 qid;
};

struct c4iw_dev_ucontext {
	struct list_head qpids;
	struct list_head cqids;
	struct mutex lock;
	struct kref kref;
};

enum c4iw_rdev_flags {
	T4_FATAL_ERROR = (1<<0),
	T4_STATUS_PAGE_DISABLED = (1<<1),
};

struct c4iw_stat {
	u64 total;
	u64 cur;
	u64 max;
	u64 fail;
};

struct c4iw_stats {
	struct mutex lock;
	struct c4iw_stat qid;
	struct c4iw_stat pd;
	struct c4iw_stat stag;
	struct c4iw_stat pbl;
	struct c4iw_stat rqt;
	struct c4iw_stat srqt;
	struct c4iw_stat srq;
	struct c4iw_stat ocqp;
	u64  db_full;
	u64  db_empty;
	u64  db_drop;
	u64  db_state_transitions;
	u64  db_fc_interruptions;
	u64  tcam_full;
	u64  act_ofld_conn_fails;
	u64  pas_ofld_conn_fails;
	u64  neg_adv;
};

struct c4iw_hw_queue {
	int t4_eq_status_entries;
	int t4_max_eq_size;
	int t4_max_iq_size;
	int t4_max_rq_size;
	int t4_max_sq_size;
	int t4_max_qp_depth;
	int t4_max_cq_depth;
	int t4_stat_len;
};

struct wr_log_entry {
	ktime_t post_host_time;
	ktime_t poll_host_time;
	u64 post_sge_ts;
	u64 cqe_sge_ts;
	u64 poll_sge_ts;
	u16 qid;
	u16 wr_id;
	u8 opcode;
	u8 valid;
};

struct c4iw_rdev {
	struct c4iw_resource resource;
	u32 qpmask;
	u32 cqmask;
	struct c4iw_dev_ucontext uctx;
	struct gen_pool *pbl_pool;
	struct gen_pool *rqt_pool;
	struct gen_pool *ocqp_pool;
	u32 flags;
	struct cxgb4_lld_info lldi;
	unsigned long bar2_pa;
	void __iomem *bar2_kva;
	unsigned long oc_mw_pa;
	void __iomem *oc_mw_kva;
	struct c4iw_stats stats;
	struct c4iw_hw_queue hw_queue;
	struct t4_dev_status_page *status_page;
	atomic_t wr_log_idx;
	struct wr_log_entry *wr_log;
	int wr_log_size;
	struct workqueue_struct *free_workq;
	struct completion rqt_compl;
	struct completion pbl_compl;
	struct kref rqt_kref;
	struct kref pbl_kref;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
	return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
	return (int)(rdev->lldi.vr->stag.size >> 5);
}

#define C4IW_WR_TO (60*HZ)

struct c4iw_wr_wait {
	struct completion completion;
	int ret;
	struct kref kref;
};

void _c4iw_free_wr_wait(struct kref *kref);

static inline void c4iw_put_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
	pr_debug("wr_wait %p ref before put %u\n", wr_waitp,
		 kref_read(&wr_waitp->kref));
	WARN_ON(kref_read(&wr_waitp->kref) == 0);
	kref_put(&wr_waitp->kref, _c4iw_free_wr_wait);
}

static inline void c4iw_get_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
	pr_debug("wr_wait %p ref before get %u\n", wr_waitp,
		 kref_read(&wr_waitp->kref));
	WARN_ON(kref_read(&wr_waitp->kref) == 0);
	kref_get(&wr_waitp->kref);
}

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	init_completion(&wr_waitp->completion);
}

static inline void _c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret,
				 bool deref)
{
	wr_waitp->ret = ret;
	complete(&wr_waitp->completion);
	if (deref)
		c4iw_put_wr_wait(wr_waitp);
}

static inline void c4iw_wake_up_noref(struct c4iw_wr_wait *wr_waitp, int ret)
{
	_c4iw_wake_up(wr_waitp, ret, false);
}

static inline void c4iw_wake_up_deref(struct c4iw_wr_wait *wr_waitp, int ret)
{
	_c4iw_wake_up(wr_waitp, ret, true);
}

static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
				 struct c4iw_wr_wait *wr_waitp,
				 u32 hwtid, u32 qpid,
				 const char *func)
{
	int ret;

	if (c4iw_fatal_error(rdev)) {
		wr_waitp->ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
	if (!ret) {
		pr_err("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
		       func, pci_name(rdev->lldi.pdev), hwtid, qpid);
		rdev->flags |= T4_FATAL_ERROR;
		wr_waitp->ret = -EIO;
		goto out;
	}
	if (wr_waitp->ret)
		pr_debug("%s: FW reply %d tid %u qpid %u\n",
			 pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
out:
	return wr_waitp->ret;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);

static inline int c4iw_ref_send_wait(struct c4iw_rdev *rdev,
				     struct sk_buff *skb,
				     struct c4iw_wr_wait *wr_waitp,
				     u32 hwtid, u32 qpid,
				     const char *func)
{
	int ret;

	pr_debug("%s wr_wait %p hwtid %u qpid %u\n", func, wr_waitp, hwtid,
		 qpid);
	c4iw_get_wr_wait(wr_waitp);
	ret = c4iw_ofld_send(rdev, skb);
	if (ret) {
		c4iw_put_wr_wait(wr_waitp);
		return ret;
	}
	return c4iw_wait_for_reply(rdev, wr_waitp, hwtid, qpid, func);
}

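/*
 * Usage sketch (illustrative only, not part of the original header): a
 * typical caller allocates and initializes a wait object, lets
 * c4iw_ref_send_wait() take the extra reference before posting so the
 * object outlives a late FW reply, then drops its own reference. The
 * construction of the FW work-request skb is elided here.
 *
 *	struct c4iw_wr_wait *wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
 *
 *	if (!wr_waitp)
 *		return -ENOMEM;
 *	c4iw_init_wr_wait(wr_waitp);
 *	... build the FW WR skb and stash wr_waitp as its reply cookie ...
 *	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, hwtid, qpid, __func__);
 *	c4iw_put_wr_wait(wr_waitp);
 */
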
enum db_state {
	NORMAL = 0,
	FLOW_CONTROL = 1,
	RECOVERY = 2,
	STOPPED = 3
};

struct c4iw_dev {
	struct ib_device ibdev;
	struct c4iw_rdev rdev;
	struct xarray cqs;
	struct xarray qps;
	struct xarray mrs;
	struct mutex db_mutex;
	struct dentry *debugfs_root;
	enum db_state db_state;
	struct xarray hwtids;
	struct xarray atids;
	struct xarray stids;
	struct list_head db_fc_list;
	u32 avail_ird;
	wait_queue_head_t wait;
};

struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
	struct work_struct reg_work;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
	return xa_load(&rhp->cqs, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
	return xa_load(&rhp->qps, qpid);
}

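/*
 * Lookup sketch (illustrative, not part of the original header): event
 * and completion paths resolve hardware queue IDs back to driver
 * objects through these xarray loads; xa_load() is RCU-safe, so the
 * read side needs no extra locking.
 *
 *	struct c4iw_cq *chp = get_chp(dev, qid);
 *
 *	if (!chp)
 *		return;		... stale ID or queue already destroyed ...
 */
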
extern uint c4iw_max_read_depth;

static inline int cur_max_read_depth(struct c4iw_dev *dev)
{
	return min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth);
}

struct c4iw_pd {
	struct ib_pd ibpd;
	u32 pdid;
	struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
	u64 len;
	u64 va_fbo;
	enum fw_ri_mem_perms perms;
	u32 stag;
	u32 pdid;
	u32 qpid;
	u32 pbl_addr;
	u32 pbl_size;
	u32 state:1;
	u32 type:2;
	u32 rsvd:1;
	u32 remote_invaliate_disable:1;
	u32 zbva:1;
	u32 mw_bind_enable:1;
	u32 page_size:5;
};

struct c4iw_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct c4iw_dev *rhp;
	struct sk_buff *dereg_skb;
	u64 kva;
	struct tpt_attributes attr;
	u64 *mpl;
	dma_addr_t mpl_addr;
	u32 max_mpl_len;
	u32 mpl_len;
	struct c4iw_wr_wait *wr_waitp;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
	struct ib_mw ibmw;
	struct c4iw_dev *rhp;
	struct sk_buff *dereg_skb;
	u64 kva;
	struct tpt_attributes attr;
	struct c4iw_wr_wait *wr_waitp;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_cq {
	struct ib_cq ibcq;
	struct c4iw_dev *rhp;
	struct sk_buff *destroy_skb;
	struct t4_cq cq;
	spinlock_t lock;
	spinlock_t comp_handler_lock;
	refcount_t refcnt;
	struct completion cq_rel_comp;
	struct c4iw_wr_wait *wr_waitp;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
	u8 initiator;
	u8 recv_marker_enabled;
	u8 xmit_marker_enabled;
	u8 crc_enabled;
	u8 enhanced_rdma_conn;
	u8 version;
	u8 p2p_type;
};

struct c4iw_qp_attributes {
	u32 scq;
	u32 rcq;
	u32 sq_num_entries;
	u32 rq_num_entries;
	u32 sq_max_sges;
	u32 sq_max_sges_rdma_write;
	u32 rq_max_sges;
	u32 state;
	u8 enable_rdma_read;
	u8 enable_rdma_write;
	u8 enable_bind;
	u8 enable_mmid0_fastreg;
	u32 max_ord;
	u32 max_ird;
	u32 pd;
	u32 next_state;
	char terminate_buffer[52];
	u32 terminate_msg_len;
	u8 is_terminate_local;
	struct c4iw_mpa_attributes mpa_attr;
	struct c4iw_ep *llp_stream_handle;
	u8 layer_etype;
	u8 ecode;
	u16 sq_db_inc;
	u16 rq_db_inc;
	u8 send_term;
};

struct c4iw_qp {
	struct ib_qp ibqp;
	struct list_head db_fc_entry;
	struct c4iw_dev *rhp;
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attr;
	struct t4_wq wq;
	spinlock_t lock;
	struct mutex mutex;
	wait_queue_head_t wait;
	int sq_sig_all;
	struct c4iw_srq *srq;
	struct c4iw_ucontext *ucontext;
	struct c4iw_wr_wait *wr_waitp;
	struct completion qp_rel_comp;
	refcount_t qp_refcnt;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_srq {
	struct ib_srq ibsrq;
	struct list_head db_fc_entry;
	struct c4iw_dev *rhp;
	struct t4_srq wq;
	struct sk_buff *destroy_skb;
	u32 srq_limit;
	u32 pdid;
	int idx;
	u32 flags;
	spinlock_t lock; /* protects srq */
	struct c4iw_wr_wait *wr_waitp;
	bool armed;
};

static inline struct c4iw_srq *to_c4iw_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct c4iw_srq, ibsrq);
}

struct c4iw_ucontext {
	struct ib_ucontext ibucontext;
	struct c4iw_dev_ucontext uctx;
	u32 key;
	spinlock_t mmap_lock;
	struct list_head mmaps;
	bool is_32b_cqe;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
	return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
	struct list_head entry;
	u64 addr;
	u32 key;
	unsigned len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
						u32 key, unsigned len)
{
	struct list_head *pos, *nxt;
	struct c4iw_mm_entry *mm;

	spin_lock(&ucontext->mmap_lock);
	list_for_each_safe(pos, nxt, &ucontext->mmaps) {

		mm = list_entry(pos, struct c4iw_mm_entry, entry);
		if (mm->key == key && mm->len == len) {
			list_del_init(&mm->entry);
			spin_unlock(&ucontext->mmap_lock);
			pr_debug("key 0x%x addr 0x%llx len %d\n", key,
				 (unsigned long long)mm->addr, mm->len);
			return mm;
		}
	}
	spin_unlock(&ucontext->mmap_lock);
	return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
			       struct c4iw_mm_entry *mm)
{
	spin_lock(&ucontext->mmap_lock);
	pr_debug("key 0x%x addr 0x%llx len %d\n",
		 mm->key, (unsigned long long)mm->addr, mm->len);
	list_add_tail(&mm->entry, &ucontext->mmaps);
	spin_unlock(&ucontext->mmap_lock);
}

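/*
 * Usage sketch (illustrative, not part of the original header): at
 * queue-create time the driver allocates a per-context key under
 * mmap_lock, publishes the (key, addr, len) triple with insert_mmap(),
 * and returns the key to userspace; the mmap() handler later claims
 * the entry with remove_mmap(), making each key single-use. queue_buf
 * and queue_len below are hypothetical names.
 *
 *	struct c4iw_mm_entry *mm = kmalloc(sizeof(*mm), GFP_KERNEL);
 *
 *	if (!mm)
 *		return -ENOMEM;
 *	spin_lock(&ucontext->mmap_lock);
 *	mm->key = ucontext->key;
 *	ucontext->key += PAGE_SIZE;
 *	spin_unlock(&ucontext->mmap_lock);
 *	mm->addr = virt_to_phys(queue_buf);
 *	mm->len = PAGE_ALIGN(queue_len);
 *	insert_mmap(ucontext, mm);
 */
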
enum c4iw_qp_attr_mask {
	C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
	C4IW_QP_ATTR_SQ_DB = 1<<1,
	C4IW_QP_ATTR_RQ_DB = 1<<2,
	C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	C4IW_QP_ATTR_MAX_ORD = 1 << 11,
	C4IW_QP_ATTR_MAX_IRD = 1 << 12,
	C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
	C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
				     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
				     C4IW_QP_ATTR_MAX_ORD |
				     C4IW_QP_ATTR_MAX_IRD |
				     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
				     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
				     C4IW_QP_ATTR_MPA_ATTR |
				     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp,
				struct c4iw_qp *qhp,
				enum c4iw_qp_attr_mask mask,
				struct c4iw_qp_attributes *attrs,
				int internal);

enum c4iw_qp_state {
	C4IW_QP_STATE_IDLE,
	C4IW_QP_STATE_RTS,
	C4IW_QP_STATE_ERROR,
	C4IW_QP_STATE_TERMINATE,
	C4IW_QP_STATE_CLOSING,
	C4IW_QP_STATE_TOT
};

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET:
	case IB_QPS_INIT:
		return C4IW_QP_STATE_IDLE;
	case IB_QPS_RTS:
		return C4IW_QP_STATE_RTS;
	case IB_QPS_SQD:
		return C4IW_QP_STATE_CLOSING;
	case IB_QPS_SQE:
		return C4IW_QP_STATE_TERMINATE;
	case IB_QPS_ERR:
		return C4IW_QP_STATE_ERROR;
	default:
		return -1;
	}
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
	switch (c4iw_qp_state) {
	case C4IW_QP_STATE_IDLE:
		return IB_QPS_INIT;
	case C4IW_QP_STATE_RTS:
		return IB_QPS_RTS;
	case C4IW_QP_STATE_CLOSING:
		return IB_QPS_SQD;
	case C4IW_QP_STATE_TERMINATE:
		return IB_QPS_SQE;
	case C4IW_QP_STATE_ERROR:
		return IB_QPS_ERR;
	}
	return IB_QPS_ERR;
}

static inline u32 c4iw_ib_to_tpt_access(int a)
{
	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
	       (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
	       FW_RI_MEM_ACCESS_LOCAL_READ;
}

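/*
 * Worked example (illustrative): registering an MR with
 * IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ yields
 * FW_RI_MEM_ACCESS_LOCAL_WRITE | FW_RI_MEM_ACCESS_REM_READ |
 * FW_RI_MEM_ACCESS_LOCAL_READ; note that local read permission is
 * always ORed in regardless of the verbs access flags.
 */
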
enum c4iw_mmid_state {
	C4IW_STAG_STATE_VALID,
	C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA	256
#define MPA_ENHANCED_RDMA_CONN	0x10
#define MPA_REJECT		0x20
#define MPA_CRC			0x40
#define MPA_MARKERS		0x80
#define MPA_FLAGS_MASK		0xE0

#define MPA_V2_PEER2PEER_MODEL          0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR        0x4000
#define MPA_V2_RDMA_WRITE_RTR           0x8000
#define MPA_V2_RDMA_READ_RTR            0x4000
#define MPA_V2_IRD_ORD_MASK             0x3FFF

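/*
 * Encoding sketch (illustrative, not part of the original header): in
 * an MPA v2 exchange the P2P/RTR control bits occupy the top two bits
 * of the 16-bit ird/ord fields and the negotiated depths the low 14
 * bits, e.g. when building the private-data trailer:
 *
 *	mpa_v2_params.ird = htons(MPA_V2_PEER2PEER_MODEL |
 *				  (ep->ird & MPA_V2_IRD_ORD_MASK));
 *	mpa_v2_params.ord = htons(MPA_V2_RDMA_READ_RTR |
 *				  (ep->ord & MPA_V2_IRD_ORD_MASK));
 */
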
#define c4iw_put_ep(ep) {						\
	pr_debug("put_ep ep %p refcnt %d\n",				\
		 ep, kref_read(&((ep)->kref)));				\
	WARN_ON(kref_read(&((ep)->kref)) < 1);				\
	kref_put(&((ep)->kref), _c4iw_free_ep);				\
}

#define c4iw_get_ep(ep) {						\
	pr_debug("get_ep ep %p, refcnt %d\n",				\
		 ep, kref_read(&((ep)->kref)));				\
	kref_get(&((ep)->kref));					\
}
void _c4iw_free_ep(struct kref *kref);

struct mpa_message {
	u8 key[16];
	u8 flags;
	u8 revision;
	__be16 private_data_size;
	u8 private_data[];
};

struct mpa_v2_conn_params {
	__be16 ird;
	__be16 ord;
};

struct terminate_message {
	u8 layer_etype;
	u8 ecode;
	__be16 hdrct_rsvd;
	u8 len_hdrs[];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
	LAYER_RDMAP		= 0x00,
	LAYER_DDP		= 0x10,
	LAYER_MPA		= 0x20,
	RDMAP_LOCAL_CATA	= 0x00,
	RDMAP_REMOTE_PROT	= 0x01,
	RDMAP_REMOTE_OP		= 0x02,
	DDP_LOCAL_CATA		= 0x00,
	DDP_TAGGED_ERR		= 0x01,
	DDP_UNTAGGED_ERR	= 0x02,
	DDP_LLP			= 0x03
};

enum c4iw_rdma_ecodes {
	RDMAP_INV_STAG		= 0x00,
	RDMAP_BASE_BOUNDS	= 0x01,
	RDMAP_ACC_VIOL		= 0x02,
	RDMAP_STAG_NOT_ASSOC	= 0x03,
	RDMAP_TO_WRAP		= 0x04,
	RDMAP_INV_VERS		= 0x05,
	RDMAP_INV_OPCODE	= 0x06,
	RDMAP_STREAM_CATA	= 0x07,
	RDMAP_GLOBAL_CATA	= 0x08,
	RDMAP_CANT_INV_STAG	= 0x09,
	RDMAP_UNSPECIFIED	= 0xff
};

enum c4iw_ddp_ecodes {
	DDPT_INV_STAG		= 0x00,
	DDPT_BASE_BOUNDS	= 0x01,
	DDPT_STAG_NOT_ASSOC	= 0x02,
	DDPT_TO_WRAP		= 0x03,
	DDPT_INV_VERS		= 0x04,
	DDPU_INV_QN		= 0x01,
	DDPU_INV_MSN_NOBUF	= 0x02,
	DDPU_INV_MSN_RANGE	= 0x03,
	DDPU_INV_MO		= 0x04,
	DDPU_MSG_TOOBIG		= 0x05,
	DDPU_INV_VERS		= 0x06
};

enum c4iw_mpa_ecodes {
	MPA_CRC_ERR		= 0x02,
	MPA_MARKER_ERR          = 0x03,
	MPA_LOCAL_CATA          = 0x05,
	MPA_INSUFF_IRD          = 0x06,
	MPA_NOMATCH_RTR         = 0x07,
};

enum c4iw_ep_state {
	IDLE = 0,
	LISTEN,
	CONNECTING,
	MPA_REQ_WAIT,
	MPA_REQ_SENT,
	MPA_REQ_RCVD,
	MPA_REP_SENT,
	FPDU_MODE,
	ABORTING,
	CLOSING,
	MORIBUND,
	DEAD,
};

enum c4iw_ep_flags {
	PEER_ABORT_IN_PROGRESS	= 0,
	ABORT_REQ_IN_PROGRESS	= 1,
	RELEASE_RESOURCES	= 2,
	CLOSE_SENT		= 3,
	TIMEOUT                 = 4,
	QP_REFERENCED           = 5,
	STOP_MPA_TIMER		= 7,
};

enum c4iw_ep_history {
	ACT_OPEN_REQ            = 0,
	ACT_OFLD_CONN           = 1,
	ACT_OPEN_RPL            = 2,
	ACT_ESTAB               = 3,
	PASS_ACCEPT_REQ         = 4,
	PASS_ESTAB              = 5,
	ABORT_UPCALL            = 6,
	ESTAB_UPCALL            = 7,
	CLOSE_UPCALL            = 8,
	ULP_ACCEPT              = 9,
	ULP_REJECT              = 10,
	TIMEDOUT                = 11,
	PEER_ABORT              = 12,
	PEER_CLOSE              = 13,
	CONNREQ_UPCALL          = 14,
	ABORT_CONN              = 15,
	DISCONN_UPCALL          = 16,
	EP_DISC_CLOSE           = 17,
	EP_DISC_ABORT           = 18,
	CONN_RPL_UPCALL         = 19,
	ACT_RETRY_NOMEM         = 20,
	ACT_RETRY_INUSE         = 21,
	CLOSE_CON_RPL		= 22,
	EP_DISC_FAIL		= 24,
	QP_REFED		= 25,
	QP_DEREFED		= 26,
	CM_ID_REFED		= 27,
	CM_ID_DEREFED		= 28,
};

enum conn_pre_alloc_buffers {
	CN_ABORT_REQ_BUF,
	CN_ABORT_RPL_BUF,
	CN_CLOSE_CON_REQ_BUF,
	CN_DESTROY_BUF,
	CN_FLOWC_BUF,
	CN_MAX_CON_BUF
};

enum {
	FLOWC_LEN = offsetof(struct fw_flowc_wr, mnemval[FW_FLOWC_MNEM_MAX])
};

union cpl_wr_size {
	struct cpl_abort_req abrt_req;
	struct cpl_abort_rpl abrt_rpl;
	struct fw_ri_wr ri_req;
	struct cpl_close_con_req close_req;
	char flowc_buf[FLOWC_LEN];
};

struct c4iw_ep_common {
	struct iw_cm_id *cm_id;
	struct c4iw_qp *qp;
	struct c4iw_dev *dev;
	struct sk_buff_head ep_skb_list;
	enum c4iw_ep_state state;
	struct kref kref;
	struct mutex mutex;
	struct sockaddr_storage local_addr;
	struct sockaddr_storage remote_addr;
	struct c4iw_wr_wait *wr_waitp;
	unsigned long flags;
	unsigned long history;
};

struct c4iw_listen_ep {
	struct c4iw_ep_common com;
	unsigned int stid;
	int backlog;
};

struct c4iw_ep_stats {
	unsigned connect_neg_adv;
	unsigned abort_neg_adv;
};

struct c4iw_ep {
	struct c4iw_ep_common com;
	struct c4iw_ep *parent_ep;
	struct timer_list timer;
	struct list_head entry;
	unsigned int atid;
	u32 hwtid;
	u32 snd_seq;
	u32 rcv_seq;
	struct l2t_entry *l2t;
	struct dst_entry *dst;
	struct sk_buff *mpa_skb;
	struct c4iw_mpa_attributes mpa_attr;
	u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
	unsigned int mpa_pkt_len;
	u32 ird;
	u32 ord;
	u32 smac_idx;
	u32 tx_chan;
	u32 mtu;
	u16 mss;
	u16 emss;
	u16 plen;
	u16 rss_qid;
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;
	u8 retry_with_mpa_v1;
	u8 tried_with_mpa_v1;
	unsigned int retry_count;
	int snd_win;
	int rcv_win;
	u32 snd_wscale;
	struct c4iw_ep_stats stats;
	u32 srqe_idx;
	u32 rx_pdu_out_cnt;
	struct sk_buff *peer_abort_skb;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
	return infop->vr->ocq.size > 0;
#else
	return 0;
#endif
}

u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
			u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);

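/*
 * Usage sketch (illustrative, not part of the original header): one
 * table is built per resource class and IDs are then handed out and
 * returned through it, e.g. for PD IDs (rscp and nr_pdid are assumed
 * to come from device setup):
 *
 *	c4iw_id_table_alloc(&rscp->pdid_table, 0, nr_pdid, 1,
 *			    C4IW_ID_TABLE_F_RANDOM);
 *	pdid = c4iw_id_alloc(&rscp->pdid_table);
 *	...
 *	c4iw_id_free(&rscp->pdid_table, pdid);
 */
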
typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
		   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
		       u32 nr_pdid, u32 nr_srqt);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
void c4iw_register_device(struct work_struct *work);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		   const struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			    u32 max_num_sg);
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		   unsigned int *sg_offset);
void c4iw_dealloc(struct uld_ctx *ctx);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
					   u64 length, u64 virt, int acc,
					   struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
void c4iw_cq_rem_ref(struct c4iw_cq *chp);
int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		   struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask srq_attr_mask,
		    struct ib_udata *udata);
int c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs,
		    struct ib_udata *udata);
int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
		   struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				 int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
			      enum cxgb4_bar2_qtype qtype,
			      unsigned int *pbar2_qid, u64 *pbar2_pa);
int c4iw_alloc_srq_idx(struct c4iw_rdev *rdev);
void c4iw_free_srq_idx(struct c4iw_rdev *rdev, int idx);
extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq);
void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16);
void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx);
int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
		       const struct ib_recv_wr **bad_wr);
struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);

int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr);
int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq);
int c4iw_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp);
int c4iw_fill_res_cm_id_entry(struct sk_buff *msg, struct rdma_cm_id *cm_id);

#endif