cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

libcxgbi.h (18169B)


/*
 * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
 */

#ifndef	__LIBCXGBI_H__
#define	__LIBCXGBI_H__

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/version.h>
#include <scsi/scsi_device.h>
#include <scsi/libiscsi_tcp.h>

#include <libcxgb_ppm.h>

enum cxgbi_dbg_flag {
	CXGBI_DBG_ISCSI,
	CXGBI_DBG_DDP,
	CXGBI_DBG_TOE,
	CXGBI_DBG_SOCK,

	CXGBI_DBG_PDU_TX,
	CXGBI_DBG_PDU_RX,
	CXGBI_DBG_DEV,
};

#define log_debug(level, fmt, ...)	\
	do {	\
		if (dbg_level & (level)) \
			pr_info(fmt, ##__VA_ARGS__); \
	} while (0)
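
/*
 * Example (editor's sketch, not part of the driver): gate a verbose
 * receive-path message on one debug bit.  Assumes the module-wide
 * dbg_level variable is in scope, as it is for users of this header:
 *
 *	log_debug(1 << CXGBI_DBG_PDU_RX,
 *		  "skb 0x%p, pdulen %u.\n", skb, pdulen);
 */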

#define pr_info_ipaddr(fmt_trail,					\
			addr1, addr2, args_trail...)			\
do {									\
	if (!((1 << CXGBI_DBG_SOCK) & dbg_level))			\
		break;							\
	pr_info("%pISpc - %pISpc, " fmt_trail,				\
		addr1, addr2, args_trail);				\
} while (0)

/* max. connections per adapter */
#define CXGBI_MAX_CONN		16384

/* always allocate room for AHS */
#define SKB_TX_ISCSI_PDU_HEADER_MAX	\
	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)

#define	ISCSI_PDU_NONPAYLOAD_LEN	312 /* bhs(48) + ahs(256) + digest(8) */

/*
 * align the pdu size down to a multiple of 512 for better performance
 */
#define cxgbi_align_pdu_size(n) do { n = (n) & (~511); } while (0)
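
/*
 * Worked example (editor's note): the mask rounds down, e.g. n = 1500
 * becomes 1500 & ~511 = 1024, and any n < 512 becomes 0, so callers
 * must hand in a size of at least 512.
 */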

#define ULP2_MODE_ISCSI		2

#define ULP2_MAX_PKT_SIZE	16224
#define ULP2_MAX_PDU_PAYLOAD	\
	(ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)

#define CXGBI_ULP2_MAX_ISO_PAYLOAD	65535

#define CXGBI_MAX_ISO_DATA_IN_SKB	\
	min_t(u32, MAX_SKB_FRAGS << PAGE_SHIFT, CXGBI_ULP2_MAX_ISO_PAYLOAD)

#define cxgbi_is_iso_config(csk)	((csk)->cdev->skb_iso_txhdr)
#define cxgbi_is_iso_disabled(csk)	((csk)->disable_iso)

/*
 * For iscsi connections the HW may insert digest bytes into the pdu.
 * Those digest bytes are not sent by the host but are part of the TCP
 * payload and therefore consume TCP sequence space.
 */
static const unsigned int ulp2_extra_len[] = { 0, 4, 4, 8 };
static inline unsigned int cxgbi_ulp_extra_len(int submode)
{
	return ulp2_extra_len[submode & 3];
}
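
/*
 * Example (editor's note): submode is the two-bit digest configuration,
 * bit 0 = header digest, bit 1 = data digest.  With both digests on
 * (submode 3), cxgbi_ulp_extra_len(3) returns 8: four bytes of header
 * CRC plus four bytes of data CRC per pdu.
 */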

#define CPL_RX_DDP_STATUS_DDP_SHIFT	16 /* ddp'able */
#define CPL_RX_DDP_STATUS_PAD_SHIFT	19 /* pad error */
#define CPL_RX_DDP_STATUS_HCRC_SHIFT	20 /* hcrc error */
#define CPL_RX_DDP_STATUS_DCRC_SHIFT	21 /* dcrc error */
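
/*
 * Example (editor's sketch): a receive handler would test these bits in
 * the CPL_RX_DDP status word and translate them into the SKCBF_RX_*
 * flags defined below, e.g.:
 *
 *	if (ddp_status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
 *		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
 */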

/*
 * sge_opaque_hdr -
 * Opaque version of the structure the SGE stores at skb->head of TX_DATA
 * packets and for which we must reserve space.
 */
struct sge_opaque_hdr {
	void *dev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

struct cxgbi_sock {
	struct cxgbi_device *cdev;

	int tid;
	int atid;
	unsigned long flags;
	unsigned int mtu;
	unsigned short rss_qid;
	unsigned short txq_idx;
	unsigned short advmss;
	unsigned int tx_chan;
	unsigned int rx_chan;
	unsigned int mss_idx;
	unsigned int smac_idx;
	unsigned char port_id;
	int wr_max_cred;
	int wr_cred;
	int wr_una_cred;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_priority;
#endif
	unsigned char hcrc_len;
	unsigned char dcrc_len;

	void *l2t;
	struct sk_buff *wr_pending_head;
	struct sk_buff *wr_pending_tail;
	struct sk_buff *cpl_close;
	struct sk_buff *cpl_abort_req;
	struct sk_buff *cpl_abort_rpl;
	struct sk_buff *skb_ulp_lhdr;
	spinlock_t lock;
	struct kref refcnt;
	unsigned int state;
	unsigned int csk_family;
	union {
		struct sockaddr_in saddr;
		struct sockaddr_in6 saddr6;
	};
	union {
		struct sockaddr_in daddr;
		struct sockaddr_in6 daddr6;
	};
	struct dst_entry *dst;
	struct sk_buff_head receive_queue;
	struct sk_buff_head write_queue;
	struct timer_list retry_timer;
	struct completion cmpl;
	int err;
	rwlock_t callback_lock;
	void *user_data;

	u32 rcv_nxt;
	u32 copied_seq;
	u32 rcv_wup;
	u32 snd_nxt;
	u32 snd_una;
	u32 write_seq;
	u32 snd_win;
	u32 rcv_win;

	bool disable_iso;
	u32 no_tx_credits;
	unsigned long prev_iso_ts;
};

/*
 * connection states
 */
enum cxgbi_sock_states {
	CTP_CLOSED,
	CTP_CONNECTING,
	CTP_ACTIVE_OPEN,
	CTP_ESTABLISHED,
	CTP_ACTIVE_CLOSE,
	CTP_PASSIVE_CLOSE,
	CTP_CLOSE_WAIT_1,
	CTP_CLOSE_WAIT_2,
	CTP_ABORTING,
};

/*
 * Connection flags -- many of them track close-related events.
 */
enum cxgbi_sock_flags {
	CTPF_ABORT_RPL_RCVD,	/* received one ABORT_RPL_RSS message */
	CTPF_ABORT_REQ_RCVD,	/* received one ABORT_REQ_RSS message */
	CTPF_ABORT_RPL_PENDING,	/* expecting an abort reply */
	CTPF_TX_DATA_SENT,	/* already sent a TX_DATA WR */
	CTPF_ACTIVE_CLOSE_NEEDED, /* need to be closed */
	CTPF_HAS_ATID,		/* reserved atid */
	CTPF_HAS_TID,		/* reserved hw tid */
	CTPF_OFFLOAD_DOWN,	/* offload function off */
	CTPF_LOGOUT_RSP_RCVD,	/* received logout response */
};

struct cxgbi_skb_rx_cb {
	__u32 ddigest;
	__u32 pdulen;
};

struct cxgbi_skb_tx_cb {
	void *handle;
	void *arp_err_handler;
	struct sk_buff *wr_next;
	u16 iscsi_hdr_len;
	u8 ulp_mode;
};

enum cxgbi_skcb_flags {
	SKCBF_TX_NEED_HDR,	/* packet needs a header */
	SKCBF_TX_MEM_WRITE,	/* memory write */
	SKCBF_TX_FLAG_COMPL,	/* wr completion flag */
	SKCBF_RX_COALESCED,	/* received whole pdu */
	SKCBF_RX_HDR,		/* received pdu header */
	SKCBF_RX_DATA,		/* received pdu payload */
	SKCBF_RX_STATUS,	/* received ddp status */
	SKCBF_RX_ISCSI_COMPL,	/* received iscsi completion */
	SKCBF_RX_DATA_DDPD,	/* pdu payload ddp'd */
	SKCBF_RX_HCRC_ERR,	/* header digest error */
	SKCBF_RX_DCRC_ERR,	/* data digest error */
	SKCBF_RX_PAD_ERR,	/* padding byte error */
	SKCBF_TX_ISO,		/* iso cpl in tx skb */
};

struct cxgbi_skb_cb {
	union {
		struct cxgbi_skb_rx_cb rx;
		struct cxgbi_skb_tx_cb tx;
	};
	unsigned long flags;
	unsigned int seq;
};

#define CXGBI_SKB_CB(skb)	((struct cxgbi_skb_cb *)&((skb)->cb[0]))
#define cxgbi_skcb_flags(skb)		(CXGBI_SKB_CB(skb)->flags)
#define cxgbi_skcb_tcp_seq(skb)		(CXGBI_SKB_CB(skb)->seq)
#define cxgbi_skcb_rx_ddigest(skb)	(CXGBI_SKB_CB(skb)->rx.ddigest)
#define cxgbi_skcb_rx_pdulen(skb)	(CXGBI_SKB_CB(skb)->rx.pdulen)
#define cxgbi_skcb_tx_wr_next(skb)	(CXGBI_SKB_CB(skb)->tx.wr_next)
#define cxgbi_skcb_tx_iscsi_hdrlen(skb)	(CXGBI_SKB_CB(skb)->tx.iscsi_hdr_len)
#define cxgbi_skcb_tx_ulp_mode(skb)	(CXGBI_SKB_CB(skb)->tx.ulp_mode)
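
/*
 * Editor's note: CXGBI_SKB_CB() overlays struct cxgbi_skb_cb on the
 * 48-byte skb->cb[] scratch area, so the struct must stay small enough
 * to fit.  A compile-time check (a sketch, not in the original driver)
 * could read:
 *
 *	BUILD_BUG_ON(sizeof(struct cxgbi_skb_cb) >
 *		     sizeof_field(struct sk_buff, cb));
 */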

static inline void cxgbi_skcb_set_flag(struct sk_buff *skb,
					enum cxgbi_skcb_flags flag)
{
	__set_bit(flag, &(cxgbi_skcb_flags(skb)));
}

static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb,
					enum cxgbi_skcb_flags flag)
{
	__clear_bit(flag, &(cxgbi_skcb_flags(skb)));
}

static inline int cxgbi_skcb_test_flag(const struct sk_buff *skb,
				       enum cxgbi_skcb_flags flag)
{
	return test_bit(flag, &(cxgbi_skcb_flags(skb)));
}
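
/*
 * Editor's note: the non-atomic __set_bit()/__clear_bit() variants
 * suffice here because an skb's cb[] area is expected to be owned by a
 * single context at a time.  Typical RX-path use (illustrative only):
 *
 *	cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
 *	if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED))
 *		... the whole pdu arrived in one skb ...
 */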

static inline void cxgbi_sock_set_flag(struct cxgbi_sock *csk,
					enum cxgbi_sock_flags flag)
{
	__set_bit(flag, &csk->flags);
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, bit %d.\n",
		csk, csk->state, csk->flags, flag);
}

static inline void cxgbi_sock_clear_flag(struct cxgbi_sock *csk,
					enum cxgbi_sock_flags flag)
{
	__clear_bit(flag, &csk->flags);
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, bit %d.\n",
		csk, csk->state, csk->flags, flag);
}

static inline int cxgbi_sock_flag(struct cxgbi_sock *csk,
				enum cxgbi_sock_flags flag)
{
	if (csk == NULL)
		return 0;
	return test_bit(flag, &csk->flags);
}

static inline void cxgbi_sock_set_state(struct cxgbi_sock *csk, int state)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, state -> %u.\n",
		csk, csk->state, csk->flags, state);
	csk->state = state;
}

static inline void cxgbi_sock_free(struct kref *kref)
{
	struct cxgbi_sock *csk = container_of(kref,
						struct cxgbi_sock,
						refcnt);
	if (csk) {
		log_debug(1 << CXGBI_DBG_SOCK,
			"free csk 0x%p, state %u, flags 0x%lx\n",
			csk, csk->state, csk->flags);
		kfree(csk);
	}
}

static inline void __cxgbi_sock_put(const char *fn, struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"%s, put csk 0x%p, ref %u-1.\n",
		fn, csk, kref_read(&csk->refcnt));
	kref_put(&csk->refcnt, cxgbi_sock_free);
}
#define cxgbi_sock_put(csk)	__cxgbi_sock_put(__func__, csk)

static inline void __cxgbi_sock_get(const char *fn, struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"%s, get csk 0x%p, ref %u+1.\n",
		fn, csk, kref_read(&csk->refcnt));
	kref_get(&csk->refcnt);
}
#define cxgbi_sock_get(csk)	__cxgbi_sock_get(__func__, csk)

static inline int cxgbi_sock_is_closing(struct cxgbi_sock *csk)
{
	return csk->state >= CTP_ACTIVE_CLOSE;
}

static inline int cxgbi_sock_is_established(struct cxgbi_sock *csk)
{
	return csk->state == CTP_ESTABLISHED;
}

static inline void cxgbi_sock_purge_write_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&csk->write_queue)))
		__kfree_skb(skb);
}

static inline unsigned int cxgbi_sock_compute_wscale(unsigned int win)
{
	unsigned int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}
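
/*
 * Worked example (editor's note): TCP advertises windows larger than
 * 65535 bytes in units of 2^wscale (RFC 7323 caps wscale at 14).  For a
 * 1 MiB receive window the loop stops at wscale = 5, since
 * 65535 << 4 = 1048560 still falls short of 1048576.
 */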

static inline struct sk_buff *alloc_wr(int wrlen, int dlen, gfp_t gfp)
{
	struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp);

	if (skb) {
		__skb_put(skb, wrlen);
		memset(skb->head, 0, wrlen + dlen);
	} else
		pr_info("alloc cpl wr skb %u+%u, OOM.\n", wrlen, dlen);
	return skb;
}
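
/*
 * Example (editor's sketch): allocating an skb for a CPL work request;
 * wrlen bytes are reserved and zeroed for the CPL header, dlen for any
 * immediate payload (the CPL type here is illustrative):
 *
 *	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_close_con_req),
 *				       0, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 */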

/*
 * The number of WRs needed for an skb depends on the number of fragments
 * in the skb and whether it has any payload in its main body.  This maps
 * the length of the gather list represented by an skb into the number of
 * necessary WRs.  The extra two fragments are for the iscsi bhs and
 * payload padding.
 */
#define SKB_WR_LIST_SIZE	(MAX_SKB_FRAGS + 2)

static inline void cxgbi_sock_reset_wr_list(struct cxgbi_sock *csk)
{
	csk->wr_pending_head = csk->wr_pending_tail = NULL;
}

static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk,
					  struct sk_buff *skb)
{
	cxgbi_skcb_tx_wr_next(skb) = NULL;
	/*
	 * We want to take an extra reference since both we and the driver
	 * need to free the packet before it's really freed.
	 */
	skb_get(skb);

	if (!csk->wr_pending_head)
		csk->wr_pending_head = skb;
	else
		cxgbi_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
	csk->wr_pending_tail = skb;
}

static inline int cxgbi_sock_count_pending_wrs(const struct cxgbi_sock *csk)
{
	int n = 0;
	const struct sk_buff *skb = csk->wr_pending_head;

	while (skb) {
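		/*
		 * Editor's note: the TX side stashes this skb's WR credit
		 * count in skb->csum, so summing it over the list yields
		 * the total pending credits.
		 */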
		n += skb->csum;
		skb = cxgbi_skcb_tx_wr_next(skb);
	}
	return n;
}

static inline struct sk_buff *cxgbi_sock_peek_wr(const struct cxgbi_sock *csk)
{
	return csk->wr_pending_head;
}

static inline struct sk_buff *cxgbi_sock_dequeue_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->wr_pending_head;

	if (likely(skb)) {
		csk->wr_pending_head = cxgbi_skcb_tx_wr_next(skb);
		cxgbi_skcb_tx_wr_next(skb) = NULL;
	}
	return skb;
}
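
/*
 * Example (editor's sketch): on a WR ack the driver walks the pending
 * list in FIFO order and drops the extra reference taken at enqueue
 * time; roughly (illustrative only, see cxgbi_sock_rcv_wr_ack()):
 *
 *	while (credits > 0 && (skb = cxgbi_sock_dequeue_wr(csk))) {
 *		credits -= skb->csum;
 *		kfree_skb(skb);
 *	}
 */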

void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *);
void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *);
void cxgbi_sock_skb_entail(struct cxgbi_sock *, struct sk_buff *);
void cxgbi_sock_fail_act_open(struct cxgbi_sock *, int);
void cxgbi_sock_act_open_req_arp_failure(void *, struct sk_buff *);
void cxgbi_sock_closed(struct cxgbi_sock *);
void cxgbi_sock_established(struct cxgbi_sock *, unsigned int, unsigned int);
void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *);
void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *);
void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *, u32);
void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *, unsigned int, unsigned int,
				int);
unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *, unsigned int);
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *);

struct cxgbi_hba {
	struct net_device *ndev;
	struct net_device *vdev;	/* vlan dev */
	struct Scsi_Host *shost;
	struct cxgbi_device *cdev;
	__be32 ipv4addr;
	unsigned char port_id;
};

struct cxgbi_ports_map {
	unsigned int max_connect;
	unsigned int used;
	unsigned short sport_base;
	spinlock_t lock;
	unsigned int next;
	struct cxgbi_sock **port_csk;
};

#define CXGBI_FLAG_DEV_T3		0x1
#define CXGBI_FLAG_DEV_T4		0x2
#define CXGBI_FLAG_ADAPTER_RESET	0x4
#define CXGBI_FLAG_IPV4_SET		0x10
#define CXGBI_FLAG_USE_PPOD_OFLDQ	0x40
#define CXGBI_FLAG_DDP_OFF		0x100
#define CXGBI_FLAG_DEV_ISO_OFF		0x400

struct cxgbi_device {
	struct list_head list_head;
	struct list_head rcu_node;
	unsigned int flags;
	struct net_device **ports;
	void *lldev;
	struct cxgbi_hba **hbas;
	const unsigned short *mtus;
	unsigned char nmtus;
	unsigned char nports;
	struct pci_dev *pdev;
	struct dentry *debugfs_root;
	struct iscsi_transport *itp;
	struct module *owner;

	unsigned int pfvf;
	unsigned int rx_credit_thres;
	unsigned int skb_tx_rsvd;
	u32 skb_iso_txhdr;
	unsigned int skb_rx_extra;	/* for msg coalesced mode */
	unsigned int tx_max_size;
	unsigned int rx_max_size;
	unsigned int rxq_idx_cntr;
	struct cxgbi_ports_map pmap;

	void (*dev_ddp_cleanup)(struct cxgbi_device *);
	struct cxgbi_ppm* (*cdev2ppm)(struct cxgbi_device *);
	int (*csk_ddp_set_map)(struct cxgbi_ppm *, struct cxgbi_sock *,
			       struct cxgbi_task_tag_info *);
	void (*csk_ddp_clear_map)(struct cxgbi_device *cdev,
				  struct cxgbi_ppm *,
				  struct cxgbi_task_tag_info *);
	int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
				    unsigned int, int, int);
	int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
				   unsigned int, int);

	void (*csk_release_offload_resources)(struct cxgbi_sock *);
	int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
	u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32);
	int (*csk_push_tx_frames)(struct cxgbi_sock *, int);
	void (*csk_send_abort_req)(struct cxgbi_sock *);
	void (*csk_send_close_req)(struct cxgbi_sock *);
	int (*csk_alloc_cpls)(struct cxgbi_sock *);
	int (*csk_init_act_open)(struct cxgbi_sock *);

	void *dd_data;
};
#define cxgbi_cdev_priv(cdev)	((cdev)->dd_data)
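
/*
 * Example (editor's sketch): a lower-level driver such as cxgb4i fills
 * in the csk_* ops when registering its adapter; roughly (the callback
 * names and priv type here are illustrative):
 *
 *	cdev = cxgbi_device_register(sizeof(*priv), nports);
 *	priv = cxgbi_cdev_priv(cdev);
 *	cdev->csk_push_tx_frames = push_tx_frames;
 *	cdev->csk_alloc_cpls = alloc_cpls;
 *	cdev->csk_init_act_open = init_act_open;
 */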

struct cxgbi_conn {
	struct cxgbi_endpoint *cep;
	struct iscsi_conn *iconn;
	struct cxgbi_hba *chba;
	u32 task_idx_bits;
	unsigned int ddp_full;
	unsigned int ddp_tag_full;
};

struct cxgbi_endpoint {
	struct cxgbi_conn *cconn;
	struct cxgbi_hba *chba;
	struct cxgbi_sock *csk;
};

struct cxgbi_task_data {
#define CXGBI_TASK_SGL_CHECKED	0x1
#define CXGBI_TASK_SGL_COPY	0x2
	u8 flags;
	unsigned short nr_frags;
	struct page_frag frags[MAX_SKB_FRAGS];
	struct sk_buff *skb;
	unsigned int dlen;
	unsigned int offset;
	unsigned int count;
	unsigned int sgoffset;
	u32 total_count;
	u32 total_offset;
	u32 max_xmit_dlength;
	struct cxgbi_task_tag_info ttinfo;
};
#define iscsi_task_cxgbi_data(task) \
	((task)->dd_data + sizeof(struct iscsi_tcp_task))

struct cxgbi_iso_info {
#define CXGBI_ISO_INFO_FSLICE		0x1
#define CXGBI_ISO_INFO_LSLICE		0x2
#define CXGBI_ISO_INFO_IMM_ENABLE	0x4
	u8 flags;
	u8 op;
	u8 ahs;
	u8 num_pdu;
	u32 mpdu;
	u32 burst_size;
	u32 len;
	u32 segment_offset;
	u32 datasn_offset;
	u32 buffer_offset;
};

static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
{
	if (chba->cdev->flags & CXGBI_FLAG_IPV4_SET)
		chba->ipv4addr = ipaddr;
	else
		pr_info("set iscsi ipv4 NOT supported, using %s ipv4.\n",
			chba->ndev->name);
}

struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int);
void cxgbi_device_unregister(struct cxgbi_device *);
void cxgbi_device_unregister_all(unsigned int flag);
struct cxgbi_device *cxgbi_device_find_by_lldev(void *);
struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *);
struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *,
						     int *);
int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int,
			struct scsi_host_template *,
			struct scsi_transport_template *);
void cxgbi_hbas_remove(struct cxgbi_device *);

int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
			unsigned int max_conn);
void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev);

void cxgbi_conn_tx_open(struct cxgbi_sock *);
void cxgbi_conn_pdu_ready(struct cxgbi_sock *);
int cxgbi_conn_alloc_pdu(struct iscsi_task *, u8);
int cxgbi_conn_init_pdu(struct iscsi_task *, unsigned int, unsigned int);
int cxgbi_conn_xmit_pdu(struct iscsi_task *);

void cxgbi_cleanup_task(struct iscsi_task *task);

umode_t cxgbi_attr_is_visible(int param_type, int param);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
int cxgbi_set_conn_param(struct iscsi_cls_conn *,
			enum iscsi_param, char *, int);
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *);
struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32);
int cxgbi_bind_conn(struct iscsi_cls_session *,
			struct iscsi_cls_conn *, u64, int);
void cxgbi_destroy_session(struct iscsi_cls_session *);
struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *,
			u16, u16, u32);
int cxgbi_set_host_param(struct Scsi_Host *,
			enum iscsi_host_param, char *, int);
int cxgbi_get_host_param(struct Scsi_Host *, enum iscsi_host_param, char *);
struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *,
			struct sockaddr *, int);
int cxgbi_ep_poll(struct iscsi_endpoint *, int);
void cxgbi_ep_disconnect(struct iscsi_endpoint *);

int cxgbi_iscsi_init(struct iscsi_transport *,
			struct scsi_transport_template **);
void cxgbi_iscsi_cleanup(struct iscsi_transport *,
			struct scsi_transport_template **);
void cxgbi_parse_pdu_itt(struct iscsi_conn *, itt_t, int *, int *);
int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int,
			unsigned int, unsigned int);
int cxgbi_ddp_cleanup(struct cxgbi_device *);
void cxgbi_ddp_page_size_factor(int *);
void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *,
			    struct cxgbi_task_tag_info *,
			    struct scatterlist **sg_pp, unsigned int *sg_off);
int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
			struct cxgbi_tag_format *tformat,
			unsigned int iscsi_size, unsigned int llimit,
			unsigned int start, unsigned int rsvd_factor,
			unsigned int edram_start, unsigned int edram_size);
#endif	/* __LIBCXGBI_H__ */