cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qed_ll2.c (78712B)


      1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
      2/* QLogic qed NIC Driver
      3 * Copyright (c) 2015-2017  QLogic Corporation
      4 * Copyright (c) 2019-2020 Marvell International Ltd.
      5 */
      6
      7#include <linux/types.h>
      8#include <asm/byteorder.h>
      9#include <linux/dma-mapping.h>
     10#include <linux/if_vlan.h>
     11#include <linux/kernel.h>
     12#include <linux/pci.h>
     13#include <linux/slab.h>
     14#include <linux/stddef.h>
     15#include <linux/workqueue.h>
     16#include <net/ipv6.h>
     17#include <linux/bitops.h>
     18#include <linux/delay.h>
     19#include <linux/errno.h>
     20#include <linux/etherdevice.h>
     21#include <linux/io.h>
     22#include <linux/list.h>
     23#include <linux/mutex.h>
     24#include <linux/spinlock.h>
     25#include <linux/string.h>
     26#include <linux/qed/qed_ll2_if.h>
     27#include "qed.h"
     28#include "qed_cxt.h"
     29#include "qed_dev_api.h"
     30#include "qed_hsi.h"
     31#include "qed_iro_hsi.h"
     32#include "qed_hw.h"
     33#include "qed_int.h"
     34#include "qed_ll2.h"
     35#include "qed_mcp.h"
     36#include "qed_ooo.h"
     37#include "qed_reg_addr.h"
     38#include "qed_sp.h"
     39#include "qed_rdma.h"
     40
     41#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registered)
     42#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registered)
     43
     44#define QED_LL2_TX_SIZE (256)
     45#define QED_LL2_RX_SIZE (4096)
     46
     47#define QED_LL2_INVALID_STATS_ID        0xff
     48
     49struct qed_cb_ll2_info {
     50	int rx_cnt;
     51	u32 rx_size;
     52	u8 handle;
     53
      54	/* Lock protecting LL2 buffer lists in atomic (non-sleeping) context */
     55	spinlock_t lock;
     56	struct list_head list;
     57
     58	const struct qed_ll2_cb_ops *cbs;
     59	void *cb_cookie;
     60};
     61
     62struct qed_ll2_buffer {
     63	struct list_head list;
     64	void *data;
     65	dma_addr_t phys_addr;
     66};
     67
     68static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn,
     69				     u8 ll2_queue_type, u8 qid)
     70{
     71	u8 stats_id;
     72
     73	/* For legacy (RAM based) queues, the stats_id will be set as the
     74	 * queue_id. Otherwise (context based queue), it will be set to
     75	 * the "abs_pf_id" offset from the end of the RAM based queue IDs.
      76	 * If the final value exceeds the total number of counters, return the
      77	 * INVALID value to indicate that stats for this connection should
     78	 * be disabled.
     79	 */
     80	if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
     81		stats_id = qid;
     82	else
     83		stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id;
     84
     85	if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS)
     86		return stats_id;
     87	else
     88		return QED_LL2_INVALID_STATS_ID;
     89}
     90
     91static void qed_ll2b_complete_tx_packet(void *cxt,
     92					u8 connection_handle,
     93					void *cookie,
     94					dma_addr_t first_frag_addr,
     95					bool b_last_fragment,
     96					bool b_last_packet)
     97{
     98	struct qed_hwfn *p_hwfn = cxt;
     99	struct qed_dev *cdev = p_hwfn->cdev;
    100	struct sk_buff *skb = cookie;
    101
    102	/* All we need to do is release the mapping */
    103	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
    104			 skb_headlen(skb), DMA_TO_DEVICE);
    105
    106	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
    107		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
    108				      b_last_fragment);
    109
    110	dev_kfree_skb_any(skb);
    111}
    112
    113static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
    114				u8 **data, dma_addr_t *phys_addr)
    115{
    116	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
    117	if (!(*data)) {
    118		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
    119		return -ENOMEM;
    120	}
    121
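        	/* Map the buffer past NET_SKB_PAD bytes of headroom; the headroom
        	 * is reclaimed with skb_reserve() once the buffer becomes an skb.
        	 */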
    122	*phys_addr = dma_map_single(&cdev->pdev->dev,
    123				    ((*data) + NET_SKB_PAD),
    124				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
    125	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
    126		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
    127		kfree((*data));
    128		return -ENOMEM;
    129	}
    130
    131	return 0;
    132}
    133
    134static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
    135				  struct qed_ll2_buffer *buffer)
    136{
    137	spin_lock_bh(&cdev->ll2->lock);
    138
    139	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
    140			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
    141	kfree(buffer->data);
    142	list_del(&buffer->list);
    143
    144	cdev->ll2->rx_cnt--;
    145	if (!cdev->ll2->rx_cnt)
    146		DP_INFO(cdev, "All LL2 entries were removed\n");
    147
    148	spin_unlock_bh(&cdev->ll2->lock);
    149
    150	return 0;
    151}
    152
    153static void qed_ll2_kill_buffers(struct qed_dev *cdev)
    154{
    155	struct qed_ll2_buffer *buffer, *tmp_buffer;
    156
    157	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
    158		qed_ll2_dealloc_buffer(cdev, buffer);
    159}
    160
    161static void qed_ll2b_complete_rx_packet(void *cxt,
    162					struct qed_ll2_comp_rx_data *data)
    163{
    164	struct qed_hwfn *p_hwfn = cxt;
    165	struct qed_ll2_buffer *buffer = data->cookie;
    166	struct qed_dev *cdev = p_hwfn->cdev;
    167	dma_addr_t new_phys_addr;
    168	struct sk_buff *skb;
    169	bool reuse = false;
    170	int rc = -EINVAL;
    171	u8 *new_data;
    172
    173	DP_VERBOSE(p_hwfn,
    174		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
    175		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
    176		   (u64)data->rx_buf_addr,
    177		   data->u.placement_offset,
    178		   data->length.packet_length,
    179		   data->parse_flags,
    180		   data->vlan, data->opaque_data_0, data->opaque_data_1);
    181
    182	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
    183		print_hex_dump(KERN_INFO, "",
    184			       DUMP_PREFIX_OFFSET, 16, 1,
    185			       buffer->data, data->length.packet_length, false);
    186	}
    187
    188	/* Determine if data is valid */
    189	if (data->length.packet_length < ETH_HLEN)
    190		reuse = true;
    191
     192	/* Allocate a replacement buffer; reuse the old one upon failure */
    193	if (!reuse)
    194		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
    195					  &new_phys_addr);
    196
     197	/* If we need to reuse or there's no replacement buffer, repost this */
    198	if (rc)
    199		goto out_post;
    200	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
    201			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
    202
    203	skb = build_skb(buffer->data, 0);
    204	if (!skb) {
    205		DP_INFO(cdev, "Failed to build SKB\n");
    206		kfree(buffer->data);
    207		goto out_post1;
    208	}
    209
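        	/* The Rx buffer was DMA-mapped NET_SKB_PAD bytes into the
        	 * allocation (see qed_ll2_alloc_buffer()), so fold that headroom
        	 * into the placement offset before reserving it in the skb.
        	 */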
    210	data->u.placement_offset += NET_SKB_PAD;
    211	skb_reserve(skb, data->u.placement_offset);
    212	skb_put(skb, data->length.packet_length);
    213	skb_checksum_none_assert(skb);
    214
     215	/* Get partial Ethernet information instead of eth_type_trans(),
     216	 * since we don't have an associated net_device.
    217	 */
    218	skb_reset_mac_header(skb);
    219	skb->protocol = eth_hdr(skb)->h_proto;
    220
    221	/* Pass SKB onward */
    222	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
    223		if (data->vlan)
    224			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
    225					       data->vlan);
    226		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
    227				      data->opaque_data_0,
    228				      data->opaque_data_1);
    229	} else {
    230		DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
    231				    QED_MSG_LL2 | QED_MSG_STORAGE),
    232			   "Dropping the packet\n");
    233		kfree(buffer->data);
    234	}
    235
    236out_post1:
    237	/* Update Buffer information and update FW producer */
    238	buffer->data = new_data;
    239	buffer->phys_addr = new_phys_addr;
    240
    241out_post:
    242	rc = qed_ll2_post_rx_buffer(p_hwfn, cdev->ll2->handle,
    243				    buffer->phys_addr, 0, buffer, 1);
    244	if (rc)
    245		qed_ll2_dealloc_buffer(cdev, buffer);
    246}
    247
    248static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
    249						    u8 connection_handle,
    250						    bool b_lock,
    251						    bool b_only_active)
    252{
    253	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
    254
    255	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
    256		return NULL;
    257
    258	if (!p_hwfn->p_ll2_info)
    259		return NULL;
    260
    261	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
    262
    263	if (b_only_active) {
    264		if (b_lock)
    265			mutex_lock(&p_ll2_conn->mutex);
    266		if (p_ll2_conn->b_active)
    267			p_ret = p_ll2_conn;
    268		if (b_lock)
    269			mutex_unlock(&p_ll2_conn->mutex);
    270	} else {
    271		p_ret = p_ll2_conn;
    272	}
    273
    274	return p_ret;
    275}
    276
    277static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
    278						  u8 connection_handle)
    279{
    280	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
    281}
    282
    283static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
    284						       u8 connection_handle)
    285{
    286	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
    287}
    288
    289static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
    290							   *p_hwfn,
    291							   u8 connection_handle)
    292{
    293	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
    294}
    295
    296static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
    297{
    298	bool b_last_packet = false, b_last_frag = false;
    299	struct qed_ll2_tx_packet *p_pkt = NULL;
    300	struct qed_ll2_info *p_ll2_conn;
    301	struct qed_ll2_tx_queue *p_tx;
    302	unsigned long flags = 0;
    303	dma_addr_t tx_frag;
    304
    305	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
    306	if (!p_ll2_conn)
    307		return;
    308
    309	p_tx = &p_ll2_conn->tx_queue;
    310
    311	spin_lock_irqsave(&p_tx->lock, flags);
    312	while (!list_empty(&p_tx->active_descq)) {
    313		p_pkt = list_first_entry(&p_tx->active_descq,
    314					 struct qed_ll2_tx_packet, list_entry);
    315		if (!p_pkt)
    316			break;
    317
    318		list_del(&p_pkt->list_entry);
    319		b_last_packet = list_empty(&p_tx->active_descq);
    320		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
    321		spin_unlock_irqrestore(&p_tx->lock, flags);
    322		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
    323			struct qed_ooo_buffer *p_buffer;
    324
    325			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
    326			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
    327						p_buffer);
    328		} else {
    329			p_tx->cur_completing_packet = *p_pkt;
    330			p_tx->cur_completing_bd_idx = 1;
    331			b_last_frag =
    332				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
    333			tx_frag = p_pkt->bds_set[0].tx_frag;
    334			p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
    335						      p_ll2_conn->my_id,
    336						      p_pkt->cookie,
    337						      tx_frag,
    338						      b_last_frag,
    339						      b_last_packet);
    340		}
    341		spin_lock_irqsave(&p_tx->lock, flags);
    342	}
    343	spin_unlock_irqrestore(&p_tx->lock, flags);
    344}
    345
    346static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
    347{
    348	struct qed_ll2_info *p_ll2_conn = p_cookie;
    349	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
    350	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
    351	struct qed_ll2_tx_packet *p_pkt;
    352	bool b_last_frag = false;
    353	unsigned long flags;
    354	int rc = -EINVAL;
    355
    356	if (!p_ll2_conn)
    357		return rc;
    358
    359	spin_lock_irqsave(&p_tx->lock, flags);
    360	if (p_tx->b_completing_packet) {
    361		rc = -EBUSY;
    362		goto out;
    363	}
    364
    365	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
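        	/* Signed 16-bit subtraction gives the number of newly completed
        	 * BDs even if the firmware consumer index has wrapped around.
        	 */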
    366	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
    367	while (num_bds) {
    368		if (list_empty(&p_tx->active_descq))
    369			goto out;
    370
    371		p_pkt = list_first_entry(&p_tx->active_descq,
    372					 struct qed_ll2_tx_packet, list_entry);
    373		if (!p_pkt)
    374			goto out;
    375
    376		p_tx->b_completing_packet = true;
    377		p_tx->cur_completing_packet = *p_pkt;
    378		num_bds_in_packet = p_pkt->bd_used;
    379		list_del(&p_pkt->list_entry);
    380
    381		if (unlikely(num_bds < num_bds_in_packet)) {
    382			DP_NOTICE(p_hwfn,
    383				  "Rest of BDs does not cover whole packet\n");
    384			goto out;
    385		}
    386
    387		num_bds -= num_bds_in_packet;
    388		p_tx->bds_idx += num_bds_in_packet;
    389		while (num_bds_in_packet--)
    390			qed_chain_consume(&p_tx->txq_chain);
    391
    392		p_tx->cur_completing_bd_idx = 1;
    393		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
    394		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
    395
    396		spin_unlock_irqrestore(&p_tx->lock, flags);
    397
    398		p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
    399					   p_ll2_conn->my_id,
    400					   p_pkt->cookie,
    401					   p_pkt->bds_set[0].tx_frag,
    402					   b_last_frag, !num_bds);
    403
    404		spin_lock_irqsave(&p_tx->lock, flags);
    405	}
    406
    407	p_tx->b_completing_packet = false;
    408	rc = 0;
    409out:
    410	spin_unlock_irqrestore(&p_tx->lock, flags);
    411	return rc;
    412}
    413
    414static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
    415				  union core_rx_cqe_union *p_cqe,
    416				  struct qed_ll2_comp_rx_data *data)
    417{
    418	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
    419	data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
    420	data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
    421	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
    422	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
    423	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
    424	data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
    425
    426	data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
    427}
    428
    429static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
    430				  union core_rx_cqe_union *p_cqe,
    431				  struct qed_ll2_comp_rx_data *data)
    432{
    433	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
    434	data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags);
    435	data->length.packet_length =
    436	    le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
    437	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
    438	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
    439	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
    440	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
    441}
    442
    443static int
    444qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
    445			struct qed_ll2_info *p_ll2_conn,
    446			union core_rx_cqe_union *p_cqe,
    447			unsigned long *p_lock_flags)
    448{
    449	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
    450	struct core_rx_slow_path_cqe *sp_cqe;
    451
    452	sp_cqe = &p_cqe->rx_cqe_sp;
    453	if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
    454		DP_NOTICE(p_hwfn,
    455			  "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
    456			  sp_cqe->ramrod_cmd_id);
    457		return -EINVAL;
    458	}
    459
    460	if (!p_ll2_conn->cbs.slowpath_cb) {
    461		DP_NOTICE(p_hwfn,
    462			  "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
    463		return -EINVAL;
    464	}
    465
    466	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
    467
    468	p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
    469				    p_ll2_conn->my_id,
    470				    le32_to_cpu(sp_cqe->opaque_data.data[0]),
    471				    le32_to_cpu(sp_cqe->opaque_data.data[1]));
    472
    473	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
    474
    475	return 0;
    476}
    477
    478static int
    479qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
    480			      struct qed_ll2_info *p_ll2_conn,
    481			      union core_rx_cqe_union *p_cqe,
    482			      unsigned long *p_lock_flags, bool b_last_cqe)
    483{
    484	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
    485	struct qed_ll2_rx_packet *p_pkt = NULL;
    486	struct qed_ll2_comp_rx_data data;
    487
    488	if (!list_empty(&p_rx->active_descq))
    489		p_pkt = list_first_entry(&p_rx->active_descq,
    490					 struct qed_ll2_rx_packet, list_entry);
    491	if (unlikely(!p_pkt)) {
    492		DP_NOTICE(p_hwfn,
    493			  "[%d] LL2 Rx completion but active_descq is empty\n",
    494			  p_ll2_conn->input.conn_type);
    495
    496		return -EIO;
    497	}
    498	list_del(&p_pkt->list_entry);
    499
    500	if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
    501		qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
    502	else
    503		qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
    504	if (unlikely(qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd))
    505		DP_NOTICE(p_hwfn,
    506			  "Mismatch between active_descq and the LL2 Rx chain\n");
    507
    508	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
    509
    510	data.connection_handle = p_ll2_conn->my_id;
    511	data.cookie = p_pkt->cookie;
    512	data.rx_buf_addr = p_pkt->rx_buf_addr;
    513	data.b_last_packet = b_last_cqe;
    514
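        	/* Drop the Rx lock across the completion callback; callbacks
        	 * commonly repost a buffer via qed_ll2_post_rx_buffer(), which
        	 * takes this same lock.
        	 */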
    515	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
    516	p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);
    517
    518	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
    519
    520	return 0;
    521}
    522
    523static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
    524{
    525	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
    526	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
    527	union core_rx_cqe_union *cqe = NULL;
    528	u16 cq_new_idx = 0, cq_old_idx = 0;
    529	unsigned long flags = 0;
    530	int rc = 0;
    531
    532	if (!p_ll2_conn)
    533		return rc;
    534
    535	spin_lock_irqsave(&p_rx->lock, flags);
    536
    537	if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
    538		spin_unlock_irqrestore(&p_rx->lock, flags);
    539		return 0;
    540	}
    541
    542	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
    543	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
    544
    545	while (cq_new_idx != cq_old_idx) {
    546		bool b_last_cqe = (cq_new_idx == cq_old_idx);
    547
    548		cqe =
    549		    (union core_rx_cqe_union *)
    550		    qed_chain_consume(&p_rx->rcq_chain);
    551		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
    552
    553		DP_VERBOSE(p_hwfn,
    554			   QED_MSG_LL2,
    555			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
    556			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
    557
    558		switch (cqe->rx_cqe_sp.type) {
    559		case CORE_RX_CQE_TYPE_SLOW_PATH:
    560			rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
    561						     cqe, &flags);
    562			break;
    563		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
    564		case CORE_RX_CQE_TYPE_REGULAR:
    565			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
    566							   cqe, &flags,
    567							   b_last_cqe);
    568			break;
    569		default:
    570			rc = -EIO;
    571		}
    572	}
    573
    574	spin_unlock_irqrestore(&p_rx->lock, flags);
    575	return rc;
    576}
    577
    578static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
    579{
    580	struct qed_ll2_info *p_ll2_conn = NULL;
    581	struct qed_ll2_rx_packet *p_pkt = NULL;
    582	struct qed_ll2_rx_queue *p_rx;
    583	unsigned long flags = 0;
    584
    585	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
    586	if (!p_ll2_conn)
    587		return;
    588
    589	p_rx = &p_ll2_conn->rx_queue;
    590
    591	spin_lock_irqsave(&p_rx->lock, flags);
    592	while (!list_empty(&p_rx->active_descq)) {
    593		p_pkt = list_first_entry(&p_rx->active_descq,
    594					 struct qed_ll2_rx_packet, list_entry);
    595		if (!p_pkt)
    596			break;
    597		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
    598		spin_unlock_irqrestore(&p_rx->lock, flags);
    599
    600		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
    601			struct qed_ooo_buffer *p_buffer;
    602
    603			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
    604			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
    605						p_buffer);
    606		} else {
    607			dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
    608			void *cookie = p_pkt->cookie;
    609			bool b_last;
    610
    611			b_last = list_empty(&p_rx->active_descq);
    612			p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
    613						      p_ll2_conn->my_id,
    614						      cookie,
    615						      rx_buf_addr, b_last);
    616		}
    617		spin_lock_irqsave(&p_rx->lock, flags);
    618	}
    619	spin_unlock_irqrestore(&p_rx->lock, flags);
    620}
    621
    622static bool
    623qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
    624				struct core_rx_slow_path_cqe *p_cqe)
    625{
    626	struct ooo_opaque *ooo_opq;
    627	u32 cid;
    628
    629	if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
    630		return false;
    631
    632	ooo_opq = (struct ooo_opaque *)&p_cqe->opaque_data;
    633	if (ooo_opq->ooo_opcode != TCP_EVENT_DELETE_ISLES)
    634		return false;
    635
     636	/* A flush is needed - release this connection's isles */
    637	cid = le32_to_cpu(ooo_opq->cid);
    638	qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
    639
    640	return true;
    641}
    642
    643static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
    644				  struct qed_ll2_info *p_ll2_conn)
    645{
    646	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
    647	u16 packet_length = 0, parse_flags = 0, vlan = 0;
    648	struct qed_ll2_rx_packet *p_pkt = NULL;
    649	u32 num_ooo_add_to_peninsula = 0, cid;
    650	union core_rx_cqe_union *cqe = NULL;
    651	u16 cq_new_idx = 0, cq_old_idx = 0;
    652	struct qed_ooo_buffer *p_buffer;
    653	struct ooo_opaque *ooo_opq;
    654	u8 placement_offset = 0;
    655	u8 cqe_type;
    656
    657	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
    658	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
    659	if (cq_new_idx == cq_old_idx)
    660		return 0;
    661
    662	while (cq_new_idx != cq_old_idx) {
    663		struct core_rx_fast_path_cqe *p_cqe_fp;
    664
    665		cqe = qed_chain_consume(&p_rx->rcq_chain);
    666		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
    667		cqe_type = cqe->rx_cqe_sp.type;
    668
    669		if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
    670			if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
    671							    &cqe->rx_cqe_sp))
    672				continue;
    673
    674		if (unlikely(cqe_type != CORE_RX_CQE_TYPE_REGULAR)) {
    675			DP_NOTICE(p_hwfn,
    676				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
    677				  cqe_type);
    678			return -EINVAL;
    679		}
    680		p_cqe_fp = &cqe->rx_cqe_fp;
    681
    682		placement_offset = p_cqe_fp->placement_offset;
    683		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
    684		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
    685		vlan = le16_to_cpu(p_cqe_fp->vlan);
    686		ooo_opq = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
    687		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, ooo_opq);
    688		cid = le32_to_cpu(ooo_opq->cid);
    689
    690		/* Process delete isle first */
    691		if (ooo_opq->drop_size)
    692			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
    693					     ooo_opq->drop_isle,
    694					     ooo_opq->drop_size);
    695
    696		if (ooo_opq->ooo_opcode == TCP_EVENT_NOP)
    697			continue;
    698
    699		/* Now process create/add/join isles */
    700		if (unlikely(list_empty(&p_rx->active_descq))) {
    701			DP_NOTICE(p_hwfn,
    702				  "LL2 OOO RX chain has no submitted buffers\n"
    703				  );
    704			return -EIO;
    705		}
    706
    707		p_pkt = list_first_entry(&p_rx->active_descq,
    708					 struct qed_ll2_rx_packet, list_entry);
    709
    710		if (likely(ooo_opq->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE ||
    711			   ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT ||
    712			   ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT ||
    713			   ooo_opq->ooo_opcode == TCP_EVENT_ADD_PEN ||
    714			   ooo_opq->ooo_opcode == TCP_EVENT_JOIN)) {
    715			if (unlikely(!p_pkt)) {
    716				DP_NOTICE(p_hwfn,
    717					  "LL2 OOO RX packet is not valid\n");
    718				return -EIO;
    719			}
    720			list_del(&p_pkt->list_entry);
    721			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
    722			p_buffer->packet_length = packet_length;
    723			p_buffer->parse_flags = parse_flags;
    724			p_buffer->vlan = vlan;
    725			p_buffer->placement_offset = placement_offset;
    726			qed_chain_consume(&p_rx->rxq_chain);
    727			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
    728
    729			switch (ooo_opq->ooo_opcode) {
    730			case TCP_EVENT_ADD_NEW_ISLE:
    731				qed_ooo_add_new_isle(p_hwfn,
    732						     p_hwfn->p_ooo_info,
    733						     cid,
    734						     ooo_opq->ooo_isle,
    735						     p_buffer);
    736				break;
    737			case TCP_EVENT_ADD_ISLE_RIGHT:
    738				qed_ooo_add_new_buffer(p_hwfn,
    739						       p_hwfn->p_ooo_info,
    740						       cid,
    741						       ooo_opq->ooo_isle,
    742						       p_buffer,
    743						       QED_OOO_RIGHT_BUF);
    744				break;
    745			case TCP_EVENT_ADD_ISLE_LEFT:
    746				qed_ooo_add_new_buffer(p_hwfn,
    747						       p_hwfn->p_ooo_info,
    748						       cid,
    749						       ooo_opq->ooo_isle,
    750						       p_buffer,
    751						       QED_OOO_LEFT_BUF);
    752				break;
    753			case TCP_EVENT_JOIN:
    754				qed_ooo_add_new_buffer(p_hwfn,
    755						       p_hwfn->p_ooo_info,
    756						       cid,
    757						       ooo_opq->ooo_isle + 1,
    758						       p_buffer,
    759						       QED_OOO_LEFT_BUF);
    760				qed_ooo_join_isles(p_hwfn,
    761						   p_hwfn->p_ooo_info,
    762						   cid, ooo_opq->ooo_isle);
    763				break;
    764			case TCP_EVENT_ADD_PEN:
    765				num_ooo_add_to_peninsula++;
    766				qed_ooo_put_ready_buffer(p_hwfn,
    767							 p_hwfn->p_ooo_info,
    768							 p_buffer, true);
    769				break;
    770			}
    771		} else {
    772			DP_NOTICE(p_hwfn,
    773				  "Unexpected event (%d) TX OOO completion\n",
    774				  ooo_opq->ooo_opcode);
    775		}
    776	}
    777
    778	return 0;
    779}
    780
    781static void
    782qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
    783			  struct qed_ll2_info *p_ll2_conn)
    784{
    785	struct qed_ll2_tx_pkt_info tx_pkt;
    786	struct qed_ooo_buffer *p_buffer;
    787	u16 l4_hdr_offset_w;
    788	dma_addr_t first_frag;
    789	u8 bd_flags;
    790	int rc;
    791
    792	/* Submit Tx buffers here */
    793	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
    794						    p_hwfn->p_ooo_info))) {
    795		l4_hdr_offset_w = 0;
    796		bd_flags = 0;
    797
    798		first_frag = p_buffer->rx_buffer_phys_addr +
    799			     p_buffer->placement_offset;
    800		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
    801		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
    802
    803		memset(&tx_pkt, 0, sizeof(tx_pkt));
    804		tx_pkt.num_of_bds = 1;
    805		tx_pkt.vlan = p_buffer->vlan;
    806		tx_pkt.bd_flags = bd_flags;
    807		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
    808		switch (p_ll2_conn->tx_dest) {
    809		case CORE_TX_DEST_NW:
    810			tx_pkt.tx_dest = QED_LL2_TX_DEST_NW;
    811			break;
    812		case CORE_TX_DEST_LB:
    813			tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
    814			break;
    815		case CORE_TX_DEST_DROP:
    816		default:
    817			tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
    818			break;
    819		}
    820		tx_pkt.first_frag = first_frag;
    821		tx_pkt.first_frag_len = p_buffer->packet_length;
    822		tx_pkt.cookie = p_buffer;
    823
    824		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
    825					       &tx_pkt, true);
    826		if (rc) {
    827			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
    828						 p_buffer, false);
    829			break;
    830		}
    831	}
    832}
    833
    834static void
    835qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
    836			  struct qed_ll2_info *p_ll2_conn)
    837{
    838	struct qed_ooo_buffer *p_buffer;
    839	int rc;
    840
    841	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
    842						   p_hwfn->p_ooo_info))) {
    843		rc = qed_ll2_post_rx_buffer(p_hwfn,
    844					    p_ll2_conn->my_id,
    845					    p_buffer->rx_buffer_phys_addr,
    846					    0, p_buffer, true);
    847		if (rc) {
    848			qed_ooo_put_free_buffer(p_hwfn,
    849						p_hwfn->p_ooo_info, p_buffer);
    850			break;
    851		}
    852	}
    853}
    854
    855static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
    856{
    857	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
    858	int rc;
    859
    860	if (!p_ll2_conn)
    861		return 0;
    862
    863	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
    864		return 0;
    865
    866	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
    867	if (rc)
    868		return rc;
    869
    870	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
    871	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
    872
    873	return 0;
    874}
    875
    876static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
    877{
    878	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
    879	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
    880	struct qed_ll2_tx_packet *p_pkt = NULL;
    881	struct qed_ooo_buffer *p_buffer;
    882	bool b_dont_submit_rx = false;
    883	u16 new_idx = 0, num_bds = 0;
    884	int rc;
    885
    886	if (unlikely(!p_ll2_conn))
    887		return 0;
    888
    889	if (unlikely(!QED_LL2_TX_REGISTERED(p_ll2_conn)))
    890		return 0;
    891
    892	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
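        	/* Wrap-safe count of newly completed BDs (signed 16-bit delta) */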
    893	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
    894
    895	if (unlikely(!num_bds))
    896		return 0;
    897
    898	while (num_bds) {
    899		if (list_empty(&p_tx->active_descq))
    900			return -EINVAL;
    901
    902		p_pkt = list_first_entry(&p_tx->active_descq,
    903					 struct qed_ll2_tx_packet, list_entry);
    904		if (unlikely(!p_pkt))
    905			return -EINVAL;
    906
    907		if (unlikely(p_pkt->bd_used != 1)) {
    908			DP_NOTICE(p_hwfn,
    909				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
    910				  p_pkt->bd_used);
    911			return -EINVAL;
    912		}
    913
    914		list_del(&p_pkt->list_entry);
    915
    916		num_bds--;
    917		p_tx->bds_idx++;
    918		qed_chain_consume(&p_tx->txq_chain);
    919
    920		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
    921		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
    922
    923		if (b_dont_submit_rx) {
    924			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
    925						p_buffer);
    926			continue;
    927		}
    928
    929		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
    930					    p_buffer->rx_buffer_phys_addr, 0,
    931					    p_buffer, true);
    932		if (rc != 0) {
    933			qed_ooo_put_free_buffer(p_hwfn,
    934						p_hwfn->p_ooo_info, p_buffer);
    935			b_dont_submit_rx = true;
    936		}
    937	}
    938
    939	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
    940
    941	return 0;
    942}
    943
    944static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn)
    945{
    946	u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
    947
    948	DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2),
    949		   "Stopping LL2 OOO queue [%02x]\n", *handle);
    950
    951	qed_ll2_terminate_connection(p_hwfn, *handle);
    952	qed_ll2_release_connection(p_hwfn, *handle);
    953	*handle = QED_LL2_UNUSED_HANDLE;
    954}
    955
    956static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
    957				     struct qed_ll2_info *p_ll2_conn,
    958				     u8 action_on_error)
    959{
    960	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
    961	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
    962	struct core_rx_start_ramrod_data *p_ramrod = NULL;
    963	struct qed_spq_entry *p_ent = NULL;
    964	struct qed_sp_init_data init_data;
    965	u16 cqe_pbl_size;
    966	int rc = 0;
    967
    968	/* Get SPQ entry */
    969	memset(&init_data, 0, sizeof(init_data));
    970	init_data.cid = p_ll2_conn->cid;
    971	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    972	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
    973
    974	rc = qed_sp_init_request(p_hwfn, &p_ent,
    975				 CORE_RAMROD_RX_QUEUE_START,
    976				 PROTOCOLID_CORE, &init_data);
    977	if (rc)
    978		return rc;
    979
    980	p_ramrod = &p_ent->ramrod.core_rx_queue_start;
    981	memset(p_ramrod, 0, sizeof(*p_ramrod));
    982	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
    983	p_ramrod->sb_index = p_rx->rx_sb_index;
    984	p_ramrod->complete_event_flg = 1;
    985
    986	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
    987	DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
    988	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
    989	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
    990	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
    991		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));
    992
    993	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
    994	p_ramrod->inner_vlan_stripping_en =
    995		p_ll2_conn->input.rx_vlan_removal_en;
    996
    997	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
    998	    p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
    999		p_ramrod->report_outer_vlan = 1;
   1000	p_ramrod->queue_id = p_ll2_conn->queue_id;
   1001	p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
   1002
   1003	if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
   1004	    p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
   1005	    conn_type != QED_LL2_TYPE_IWARP &&
   1006		(!QED_IS_NVMETCP_PERSONALITY(p_hwfn))) {
   1007		p_ramrod->mf_si_bcast_accept_all = 1;
   1008		p_ramrod->mf_si_mcast_accept_all = 1;
   1009	} else {
   1010		p_ramrod->mf_si_bcast_accept_all = 0;
   1011		p_ramrod->mf_si_mcast_accept_all = 0;
   1012	}
   1013
   1014	p_ramrod->action_on_error.error_type = action_on_error;
   1015	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
   1016	p_ramrod->zero_prod_flg = 1;
   1017
   1018	return qed_spq_post(p_hwfn, p_ent, NULL);
   1019}
   1020
   1021static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
   1022				     struct qed_ll2_info *p_ll2_conn)
   1023{
   1024	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
   1025	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
   1026	struct core_tx_start_ramrod_data *p_ramrod = NULL;
   1027	struct qed_spq_entry *p_ent = NULL;
   1028	struct qed_sp_init_data init_data;
   1029	u16 pq_id = 0, pbl_size;
   1030	int rc = -EINVAL;
   1031
   1032	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
   1033		return 0;
   1034
   1035	if (likely(p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO))
   1036		p_ll2_conn->tx_stats_en = 0;
   1037	else
   1038		p_ll2_conn->tx_stats_en = 1;
   1039
   1040	/* Get SPQ entry */
   1041	memset(&init_data, 0, sizeof(init_data));
   1042	init_data.cid = p_ll2_conn->cid;
   1043	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
   1044	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
   1045
   1046	rc = qed_sp_init_request(p_hwfn, &p_ent,
   1047				 CORE_RAMROD_TX_QUEUE_START,
   1048				 PROTOCOLID_CORE, &init_data);
   1049	if (rc)
   1050		return rc;
   1051
   1052	p_ramrod = &p_ent->ramrod.core_tx_queue_start;
   1053
   1054	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
   1055	p_ramrod->sb_index = p_tx->tx_sb_index;
   1056	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
   1057	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
   1058	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
   1059
   1060	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
   1061		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
   1062	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
   1063	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
   1064
   1065	switch (p_ll2_conn->input.tx_tc) {
   1066	case PURE_LB_TC:
   1067		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
   1068		break;
   1069	case PKT_LB_TC:
   1070		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
   1071		break;
   1072	default:
   1073		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
   1074		break;
   1075	}
   1076
   1077	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
   1078
   1079	switch (conn_type) {
   1080	case QED_LL2_TYPE_FCOE:
   1081		p_ramrod->conn_type = PROTOCOLID_FCOE;
   1082		break;
   1083	case QED_LL2_TYPE_TCP_ULP:
   1084		p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
   1085		break;
   1086	case QED_LL2_TYPE_ROCE:
   1087		p_ramrod->conn_type = PROTOCOLID_ROCE;
   1088		break;
   1089	case QED_LL2_TYPE_IWARP:
   1090		p_ramrod->conn_type = PROTOCOLID_IWARP;
   1091		break;
   1092	case QED_LL2_TYPE_OOO:
   1093		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
   1094		    p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
   1095			p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
   1096		else
   1097			p_ramrod->conn_type = PROTOCOLID_IWARP;
   1098		break;
   1099	default:
   1100		p_ramrod->conn_type = PROTOCOLID_ETH;
   1101		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
   1102	}
   1103
   1104	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
   1105
   1106	rc = qed_spq_post(p_hwfn, p_ent, NULL);
   1107	if (rc)
   1108		return rc;
   1109
   1110	rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr,
   1111				 &p_tx->db_msg, DB_REC_WIDTH_32B,
   1112				 DB_REC_KERNEL);
   1113	return rc;
   1114}
   1115
   1116static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
   1117				    struct qed_ll2_info *p_ll2_conn)
   1118{
   1119	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
   1120	struct qed_spq_entry *p_ent = NULL;
   1121	struct qed_sp_init_data init_data;
   1122	int rc = -EINVAL;
   1123
   1124	/* Get SPQ entry */
   1125	memset(&init_data, 0, sizeof(init_data));
   1126	init_data.cid = p_ll2_conn->cid;
   1127	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
   1128	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
   1129
   1130	rc = qed_sp_init_request(p_hwfn, &p_ent,
   1131				 CORE_RAMROD_RX_QUEUE_STOP,
   1132				 PROTOCOLID_CORE, &init_data);
   1133	if (rc)
   1134		return rc;
   1135
   1136	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
   1137
   1138	p_ramrod->complete_event_flg = 1;
   1139	p_ramrod->queue_id = p_ll2_conn->queue_id;
   1140
   1141	return qed_spq_post(p_hwfn, p_ent, NULL);
   1142}
   1143
   1144static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
   1145				    struct qed_ll2_info *p_ll2_conn)
   1146{
   1147	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
   1148	struct qed_spq_entry *p_ent = NULL;
   1149	struct qed_sp_init_data init_data;
   1150	int rc = -EINVAL;
   1151
   1152	qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);
   1153
   1154	/* Get SPQ entry */
   1155	memset(&init_data, 0, sizeof(init_data));
   1156	init_data.cid = p_ll2_conn->cid;
   1157	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
   1158	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
   1159
   1160	rc = qed_sp_init_request(p_hwfn, &p_ent,
   1161				 CORE_RAMROD_TX_QUEUE_STOP,
   1162				 PROTOCOLID_CORE, &init_data);
   1163	if (rc)
   1164		return rc;
   1165
   1166	return qed_spq_post(p_hwfn, p_ent, NULL);
   1167}
   1168
   1169static int
   1170qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
   1171			      struct qed_ll2_info *p_ll2_info)
   1172{
   1173	struct qed_chain_init_params params = {
   1174		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
   1175		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
   1176		.num_elems	= p_ll2_info->input.rx_num_desc,
   1177	};
   1178	struct qed_dev *cdev = p_hwfn->cdev;
   1179	struct qed_ll2_rx_packet *p_descq;
   1180	u32 capacity;
   1181	int rc = 0;
   1182
   1183	if (!p_ll2_info->input.rx_num_desc)
   1184		goto out;
   1185
   1186	params.mode = QED_CHAIN_MODE_NEXT_PTR;
   1187	params.elem_size = sizeof(struct core_rx_bd);
   1188
   1189	rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, &params);
   1190	if (rc) {
   1191		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
   1192		goto out;
   1193	}
   1194
   1195	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
   1196	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
   1197			  GFP_KERNEL);
   1198	if (!p_descq) {
   1199		rc = -ENOMEM;
   1200		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
   1201		goto out;
   1202	}
   1203	p_ll2_info->rx_queue.descq_array = p_descq;
   1204
   1205	params.mode = QED_CHAIN_MODE_PBL;
   1206	params.elem_size = sizeof(struct core_rx_fast_path_cqe);
   1207
   1208	rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, &params);
   1209	if (rc) {
   1210		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
   1211		goto out;
   1212	}
   1213
   1214	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
   1215		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
   1216		   p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);
   1217
   1218out:
   1219	return rc;
   1220}
   1221
   1222static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
   1223					 struct qed_ll2_info *p_ll2_info)
   1224{
   1225	struct qed_chain_init_params params = {
   1226		.mode		= QED_CHAIN_MODE_PBL,
   1227		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
   1228		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
   1229		.num_elems	= p_ll2_info->input.tx_num_desc,
   1230		.elem_size	= sizeof(struct core_tx_bd),
   1231	};
   1232	struct qed_ll2_tx_packet *p_descq;
   1233	size_t desc_size;
   1234	u32 capacity;
   1235	int rc = 0;
   1236
   1237	if (!p_ll2_info->input.tx_num_desc)
   1238		goto out;
   1239
   1240	rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain,
   1241			     &params);
   1242	if (rc)
   1243		goto out;
   1244
   1245	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
    1246	/* All bds_set elements are flexibly added. */
   1247	desc_size = struct_size(p_descq, bds_set,
   1248				p_ll2_info->input.tx_max_bds_per_packet);
   1249
   1250	p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
   1251	if (!p_descq) {
   1252		rc = -ENOMEM;
   1253		goto out;
   1254	}
   1255	p_ll2_info->tx_queue.descq_mem = p_descq;
   1256
   1257	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
   1258		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
   1259		   p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);
   1260
   1261out:
   1262	if (rc)
   1263		DP_NOTICE(p_hwfn,
   1264			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
   1265			  p_ll2_info->input.tx_num_desc);
   1266	return rc;
   1267}
   1268
   1269static int
   1270qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
   1271			       struct qed_ll2_info *p_ll2_info, u16 mtu)
   1272{
   1273	struct qed_ooo_buffer *p_buf = NULL;
   1274	void *p_virt;
   1275	u16 buf_idx;
   1276	int rc = 0;
   1277
   1278	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
   1279		return rc;
   1280
   1281	/* Correct number of requested OOO buffers if needed */
   1282	if (!p_ll2_info->input.rx_num_ooo_buffers) {
   1283		u16 num_desc = p_ll2_info->input.rx_num_desc;
   1284
   1285		if (!num_desc)
   1286			return -EINVAL;
   1287		p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
   1288	}
   1289
   1290	for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
   1291	     buf_idx++) {
   1292		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
   1293		if (!p_buf) {
   1294			rc = -ENOMEM;
   1295			goto out;
   1296		}
   1297
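        		/* Allow extra room beyond the MTU for headers/placement,
        		 * then round the size up to a whole number of cache lines.
        		 */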
   1298		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
   1299		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
   1300					 ETH_CACHE_LINE_SIZE - 1) &
   1301					~(ETH_CACHE_LINE_SIZE - 1);
   1302		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
   1303					    p_buf->rx_buffer_size,
   1304					    &p_buf->rx_buffer_phys_addr,
   1305					    GFP_KERNEL);
   1306		if (!p_virt) {
   1307			kfree(p_buf);
   1308			rc = -ENOMEM;
   1309			goto out;
   1310		}
   1311
   1312		p_buf->rx_buffer_virt_addr = p_virt;
   1313		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
   1314	}
   1315
   1316	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
   1317		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
   1318		   p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);
   1319
   1320out:
   1321	return rc;
   1322}
   1323
   1324static int
   1325qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
   1326{
   1327	if (!cbs || (!cbs->rx_comp_cb ||
   1328		     !cbs->rx_release_cb ||
   1329		     !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
   1330		return -EINVAL;
   1331
   1332	p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
   1333	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
   1334	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
   1335	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
   1336	p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
   1337	p_ll2_info->cbs.cookie = cbs->cookie;
   1338
   1339	return 0;
   1340}
   1341
   1342static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn,
   1343					struct qed_ll2_acquire_data *data,
   1344					u8 *start_idx, u8 *last_idx)
   1345{
    1346	/* LL2 queue handles are split as follows:
    1347	 * first the legacy queues, then the ctx based ones.
   1348	 */
   1349	if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
   1350		*start_idx = QED_LL2_LEGACY_CONN_BASE_PF;
   1351		*last_idx = *start_idx +
   1352			QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF;
   1353	} else {
   1354		/* QED_LL2_RX_TYPE_CTX */
   1355		*start_idx = QED_LL2_CTX_CONN_BASE_PF;
   1356		*last_idx = *start_idx +
   1357			QED_MAX_NUM_OF_CTX_LL2_CONNS_PF;
   1358	}
   1359}
   1360
   1361static enum core_error_handle
   1362qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
   1363{
   1364	switch (err) {
   1365	case QED_LL2_DROP_PACKET:
   1366		return LL2_DROP_PACKET;
   1367	case QED_LL2_DO_NOTHING:
   1368		return LL2_DO_NOTHING;
   1369	case QED_LL2_ASSERT:
   1370		return LL2_ASSERT;
   1371	default:
   1372		return LL2_DO_NOTHING;
   1373	}
   1374}
   1375
   1376int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
   1377{
   1378	struct qed_hwfn *p_hwfn = cxt;
   1379	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
   1380	struct qed_ll2_info *p_ll2_info = NULL;
   1381	u8 i, first_idx, last_idx, *p_tx_max;
   1382	int rc;
   1383
   1384	if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
   1385		return -EINVAL;
   1386
   1387	_qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx);
   1388
   1389	/* Find a free connection to be used */
   1390	for (i = first_idx; i < last_idx; i++) {
   1391		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
   1392		if (p_hwfn->p_ll2_info[i].b_active) {
   1393			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
   1394			continue;
   1395		}
   1396
   1397		p_hwfn->p_ll2_info[i].b_active = true;
   1398		p_ll2_info = &p_hwfn->p_ll2_info[i];
   1399		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
   1400		break;
   1401	}
   1402	if (!p_ll2_info)
   1403		return -EBUSY;
   1404
   1405	memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
   1406
   1407	switch (data->input.tx_dest) {
   1408	case QED_LL2_TX_DEST_NW:
   1409		p_ll2_info->tx_dest = CORE_TX_DEST_NW;
   1410		break;
   1411	case QED_LL2_TX_DEST_LB:
   1412		p_ll2_info->tx_dest = CORE_TX_DEST_LB;
   1413		break;
   1414	case QED_LL2_TX_DEST_DROP:
   1415		p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
   1416		break;
   1417	default:
   1418		return -EINVAL;
   1419	}
   1420
   1421	if (data->input.conn_type == QED_LL2_TYPE_OOO ||
   1422	    data->input.secondary_queue)
   1423		p_ll2_info->main_func_queue = false;
   1424	else
   1425		p_ll2_info->main_func_queue = true;
   1426
   1427	/* Correct maximum number of Tx BDs */
   1428	p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
   1429	if (*p_tx_max == 0)
   1430		*p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
   1431	else
   1432		*p_tx_max = min_t(u8, *p_tx_max,
   1433				  CORE_LL2_TX_MAX_BDS_PER_PACKET);
   1434
   1435	rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
   1436	if (rc) {
   1437		DP_NOTICE(p_hwfn, "Invalid callback functions\n");
   1438		goto q_allocate_fail;
   1439	}
   1440
   1441	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
   1442	if (rc)
   1443		goto q_allocate_fail;
   1444
   1445	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
   1446	if (rc)
   1447		goto q_allocate_fail;
   1448
   1449	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
   1450					    data->input.mtu);
   1451	if (rc)
   1452		goto q_allocate_fail;
   1453
   1454	/* Register callbacks for the Rx/Tx queues */
   1455	if (data->input.conn_type == QED_LL2_TYPE_OOO) {
   1456		comp_rx_cb = qed_ll2_lb_rxq_completion;
   1457		comp_tx_cb = qed_ll2_lb_txq_completion;
   1458	} else {
   1459		comp_rx_cb = qed_ll2_rxq_completion;
   1460		comp_tx_cb = qed_ll2_txq_completion;
   1461	}
   1462
   1463	if (data->input.rx_num_desc) {
   1464		qed_int_register_cb(p_hwfn, comp_rx_cb,
   1465				    &p_hwfn->p_ll2_info[i],
   1466				    &p_ll2_info->rx_queue.rx_sb_index,
   1467				    &p_ll2_info->rx_queue.p_fw_cons);
   1468		p_ll2_info->rx_queue.b_cb_registered = true;
   1469	}
   1470
   1471	if (data->input.tx_num_desc) {
   1472		qed_int_register_cb(p_hwfn,
   1473				    comp_tx_cb,
   1474				    &p_hwfn->p_ll2_info[i],
   1475				    &p_ll2_info->tx_queue.tx_sb_index,
   1476				    &p_ll2_info->tx_queue.p_fw_cons);
   1477		p_ll2_info->tx_queue.b_cb_registered = true;
   1478	}
   1479
   1480	*data->p_connection_handle = i;
   1481	return rc;
   1482
   1483q_allocate_fail:
   1484	qed_ll2_release_connection(p_hwfn, i);
   1485	return -ENOMEM;
   1486}
   1487
   1488static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
   1489					   struct qed_ll2_info *p_ll2_conn)
   1490{
   1491	enum qed_ll2_error_handle error_input;
   1492	enum core_error_handle error_mode;
   1493	u8 action_on_error = 0;
   1494	int rc;
   1495
   1496	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
   1497		return 0;
   1498
   1499	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
   1500	error_input = p_ll2_conn->input.ai_err_packet_too_big;
   1501	error_mode = qed_ll2_get_error_choice(error_input);
   1502	SET_FIELD(action_on_error,
   1503		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
   1504	error_input = p_ll2_conn->input.ai_err_no_buf;
   1505	error_mode = qed_ll2_get_error_choice(error_input);
   1506	SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
   1507
   1508	rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
   1509	if (rc)
   1510		return rc;
   1511
   1512	if (p_ll2_conn->rx_queue.ctx_based) {
   1513		rc = qed_db_recovery_add(p_hwfn->cdev,
   1514					 p_ll2_conn->rx_queue.set_prod_addr,
   1515					 &p_ll2_conn->rx_queue.db_data,
   1516					 DB_REC_WIDTH_64B, DB_REC_KERNEL);
   1517	}
   1518
   1519	return rc;
   1520}
   1521
   1522static void
   1523qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
   1524				 struct qed_ll2_info *p_ll2_conn)
   1525{
   1526	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
   1527		return;
   1528
   1529	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
   1530	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
   1531}
   1532
   1533static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
   1534					    u8 handle,
   1535					    u8 ll2_queue_type)
   1536{
   1537	u8 qid;
   1538
   1539	if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
   1540		return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle;
   1541
   1542	/* QED_LL2_RX_TYPE_CTX
   1543	 * FW distinguishes between the legacy queues (ram based) and the
   1544	 * ctx based queues by the queue_id.
   1545	 * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy
    1546	 * and the queue ids above that are ctx based.
   1547	 */
   1548	qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] +
   1549	      MAX_NUM_LL2_RX_RAM_QUEUES;
   1550
   1551	/* See comment on the acquire connection for how the ll2
    1552	 * queue handles are divided.
   1553	 */
   1554	qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF);
   1555
   1556	return qid;
   1557}
   1558
   1559int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
   1560{
   1561	struct core_conn_context *p_cxt;
   1562	struct qed_ll2_tx_packet *p_pkt;
   1563	struct qed_ll2_info *p_ll2_conn;
   1564	struct qed_hwfn *p_hwfn = cxt;
   1565	struct qed_ll2_rx_queue *p_rx;
   1566	struct qed_ll2_tx_queue *p_tx;
   1567	struct qed_cxt_info cxt_info;
   1568	struct qed_ptt *p_ptt;
   1569	int rc = -EINVAL;
   1570	u32 i, capacity;
   1571	size_t desc_size;
   1572	u8 qid, stats_id;
   1573
   1574	p_ptt = qed_ptt_acquire(p_hwfn);
   1575	if (!p_ptt)
   1576		return -EAGAIN;
   1577
   1578	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
   1579	if (!p_ll2_conn) {
   1580		rc = -EINVAL;
   1581		goto out;
   1582	}
   1583
   1584	p_rx = &p_ll2_conn->rx_queue;
   1585	p_tx = &p_ll2_conn->tx_queue;
   1586
   1587	qed_chain_reset(&p_rx->rxq_chain);
   1588	qed_chain_reset(&p_rx->rcq_chain);
   1589	INIT_LIST_HEAD(&p_rx->active_descq);
   1590	INIT_LIST_HEAD(&p_rx->free_descq);
   1591	INIT_LIST_HEAD(&p_rx->posting_descq);
   1592	spin_lock_init(&p_rx->lock);
   1593	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
   1594	for (i = 0; i < capacity; i++)
   1595		list_add_tail(&p_rx->descq_array[i].list_entry,
   1596			      &p_rx->free_descq);
   1597	*p_rx->p_fw_cons = 0;
   1598
   1599	qed_chain_reset(&p_tx->txq_chain);
   1600	INIT_LIST_HEAD(&p_tx->active_descq);
   1601	INIT_LIST_HEAD(&p_tx->free_descq);
   1602	INIT_LIST_HEAD(&p_tx->sending_descq);
   1603	spin_lock_init(&p_tx->lock);
   1604	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
    1605	/* All bds_set elements are flexibly added. */
   1606	desc_size = struct_size(p_pkt, bds_set,
   1607				p_ll2_conn->input.tx_max_bds_per_packet);
   1608
   1609	for (i = 0; i < capacity; i++) {
   1610		p_pkt = p_tx->descq_mem + desc_size * i;
   1611		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
   1612	}
   1613	p_tx->cur_completing_bd_idx = 0;
   1614	p_tx->bds_idx = 0;
   1615	p_tx->b_completing_packet = false;
   1616	p_tx->cur_send_packet = NULL;
   1617	p_tx->cur_send_frag_num = 0;
   1618	p_tx->cur_completing_frag_num = 0;
   1619	*p_tx->p_fw_cons = 0;
   1620
   1621	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
   1622	if (rc)
   1623		goto out;
   1624	cxt_info.iid = p_ll2_conn->cid;
   1625	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
   1626	if (rc) {
   1627		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
   1628			  p_ll2_conn->cid);
   1629		goto out;
   1630	}
   1631
   1632	p_cxt = cxt_info.p_cxt;
   1633
   1634	memset(p_cxt, 0, sizeof(*p_cxt));
   1635
   1636	qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
   1637					 p_ll2_conn->input.rx_conn_type);
   1638	stats_id = qed_ll2_handle_to_stats_id(p_hwfn,
   1639					      p_ll2_conn->input.rx_conn_type,
   1640					      qid);
   1641	p_ll2_conn->queue_id = qid;
   1642	p_ll2_conn->tx_stats_id = stats_id;
   1643
   1644	/* If there is no valid stats id for this connection, disable stats */
   1645	if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) {
   1646		p_ll2_conn->tx_stats_en = 0;
   1647		DP_VERBOSE(p_hwfn,
   1648			   QED_MSG_LL2,
   1649			   "Disabling stats for queue %d - not enough counters\n",
   1650			   qid);
   1651	}
   1652
   1653	DP_VERBOSE(p_hwfn,
   1654		   QED_MSG_LL2,
   1655		   "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d stats_id=%d\n",
   1656		   p_hwfn->rel_pf_id,
   1657		   p_ll2_conn->input.rx_conn_type, qid, stats_id);
   1658
   1659	if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
   1660		p_rx->set_prod_addr =
   1661		    (u8 __iomem *)p_hwfn->regview +
   1662		    GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
   1663				     TSTORM_LL2_RX_PRODS, qid);
   1664	} else {
   1665		/* QED_LL2_RX_TYPE_CTX - using doorbell */
   1666		p_rx->ctx_based = 1;
   1667
   1668		p_rx->set_prod_addr = p_hwfn->doorbells +
   1669			p_hwfn->dpi_start_offset +
   1670			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE);
   1671
   1672		/* prepare db data */
   1673		p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid);
   1674		SET_FIELD(p_rx->db_data.params,
   1675			  CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET);
   1676		SET_FIELD(p_rx->db_data.params,
   1677			  CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0);
   1678	}
   1679
   1680	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
   1681					    qed_db_addr(p_ll2_conn->cid,
   1682							DQ_DEMS_LEGACY);
   1683	/* prepare db data */
   1684	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
   1685	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
   1686	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
   1687		  DQ_XCM_CORE_TX_BD_PROD_CMD);
   1688	p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
   1689
   1690	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
   1691	if (rc)
   1692		goto out;
   1693
   1694	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
   1695	if (rc)
   1696		goto out;
   1697
   1698	if (!QED_IS_RDMA_PERSONALITY(p_hwfn) &&
   1699	    !QED_IS_NVMETCP_PERSONALITY(p_hwfn))
   1700		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
   1701
   1702	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
   1703
   1704	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
   1705		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
   1706			qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
   1707						    QED_LLH_FILTER_ETHERTYPE,
   1708						    ETH_P_FCOE, 0);
   1709		qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
   1710					    QED_LLH_FILTER_ETHERTYPE,
   1711					    ETH_P_FIP, 0);
   1712	}
   1713
   1714out:
   1715	qed_ptt_release(p_hwfn, p_ptt);
   1716	return rc;
   1717}
   1718
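       /* Move buffers pending on the posting list, plus the optionally
        * supplied packet, onto the active list and update the firmware Rx
        * BD/CQE producers - a 64-bit doorbell for ctx based queues, a direct
        * register write for legacy queues. Called with the Rx lock held by
        * qed_ll2_post_rx_buffer().
        */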
   1719static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
   1720					     struct qed_ll2_rx_queue *p_rx,
   1721					     struct qed_ll2_rx_packet *p_curp)
   1722{
   1723	struct qed_ll2_rx_packet *p_posting_packet = NULL;
   1724	struct core_ll2_rx_prod rx_prod = { 0, 0 };
   1725	bool b_notify_fw = false;
   1726	u16 bd_prod, cq_prod;
   1727
   1728	/* This handles the flushing of already posted buffers */
   1729	while (!list_empty(&p_rx->posting_descq)) {
   1730		p_posting_packet = list_first_entry(&p_rx->posting_descq,
   1731						    struct qed_ll2_rx_packet,
   1732						    list_entry);
   1733		list_move_tail(&p_posting_packet->list_entry,
   1734			       &p_rx->active_descq);
   1735		b_notify_fw = true;
   1736	}
   1737
   1738	/* This handles the supplied packet [if there is one] */
   1739	if (p_curp) {
   1740		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
   1741		b_notify_fw = true;
   1742	}
   1743
   1744	if (!b_notify_fw)
   1745		return;
   1746
   1747	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
   1748	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
   1749	if (p_rx->ctx_based) {
   1750		/* update producer by giving a doorbell */
   1751		p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod);
   1752		p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod);
   1753		/* Make sure chain element is updated before ringing the
   1754		 * doorbell
   1755		 */
   1756		dma_wmb();
   1757		DIRECT_REG_WR64(p_rx->set_prod_addr,
   1758				*((u64 *)&p_rx->db_data));
   1759	} else {
   1760		rx_prod.bd_prod = cpu_to_le16(bd_prod);
   1761		rx_prod.cqe_prod = cpu_to_le16(cq_prod);
   1762
   1763		/* Make sure chain element is updated before ringing the
   1764		 * doorbell
   1765		 */
   1766		dma_wmb();
   1767
   1768		DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
   1769	}
   1770}
   1771
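       /* Post a single Rx buffer (DMA address, length and caller cookie) on a
        * connection. With notify_fw clear the descriptor is only queued on
        * the posting list; otherwise the firmware producers are updated right
        * away. Returns -EBUSY when no free descriptor or chain element is
        * available, in which case already queued buffers are still flushed.
        */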
   1772int qed_ll2_post_rx_buffer(void *cxt,
   1773			   u8 connection_handle,
   1774			   dma_addr_t addr,
   1775			   u16 buf_len, void *cookie, u8 notify_fw)
   1776{
   1777	struct qed_hwfn *p_hwfn = cxt;
   1778	struct core_rx_bd_with_buff_len *p_curb = NULL;
   1779	struct qed_ll2_rx_packet *p_curp = NULL;
   1780	struct qed_ll2_info *p_ll2_conn;
   1781	struct qed_ll2_rx_queue *p_rx;
   1782	unsigned long flags;
   1783	void *p_data;
   1784	int rc = 0;
   1785
   1786	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
   1787	if (!p_ll2_conn)
   1788		return -EINVAL;
   1789	p_rx = &p_ll2_conn->rx_queue;
   1790	if (!p_rx->set_prod_addr)
   1791		return -EIO;
   1792
   1793	spin_lock_irqsave(&p_rx->lock, flags);
   1794	if (!list_empty(&p_rx->free_descq))
   1795		p_curp = list_first_entry(&p_rx->free_descq,
   1796					  struct qed_ll2_rx_packet, list_entry);
   1797	if (p_curp) {
   1798		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
   1799		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
   1800			p_data = qed_chain_produce(&p_rx->rxq_chain);
   1801			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
   1802			qed_chain_produce(&p_rx->rcq_chain);
   1803		}
   1804	}
   1805
   1806	/* If we're lacking entries, let's try to flush buffers to FW */
   1807	if (!p_curp || !p_curb) {
   1808		rc = -EBUSY;
   1809		p_curp = NULL;
   1810		goto out_notify;
   1811	}
   1812
   1813	/* We have an Rx packet we can fill */
   1814	DMA_REGPAIR_LE(p_curb->addr, addr);
   1815	p_curb->buff_length = cpu_to_le16(buf_len);
   1816	p_curp->rx_buf_addr = addr;
   1817	p_curp->cookie = cookie;
   1818	p_curp->rxq_bd = p_curb;
   1819	p_curp->buf_length = buf_len;
   1820	list_del(&p_curp->list_entry);
   1821
   1822	/* Check if we only want to enqueue this packet without informing FW */
   1823	if (!notify_fw) {
   1824		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
   1825		goto out;
   1826	}
   1827
   1828out_notify:
   1829	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
   1830out:
   1831	spin_unlock_irqrestore(&p_rx->lock, flags);
   1832	return rc;
   1833}
   1834
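       /* Claim a free Tx descriptor for the packet being sent and record its
        * first fragment; further fragments are appended later through
        * qed_ll2_set_fragment_of_tx_packet().
        */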
   1835static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
   1836					  struct qed_ll2_tx_queue *p_tx,
   1837					  struct qed_ll2_tx_packet *p_curp,
   1838					  struct qed_ll2_tx_pkt_info *pkt,
   1839					  u8 notify_fw)
   1840{
   1841	list_del(&p_curp->list_entry);
   1842	p_curp->cookie = pkt->cookie;
   1843	p_curp->bd_used = pkt->num_of_bds;
   1844	p_curp->notify_fw = notify_fw;
   1845	p_tx->cur_send_packet = p_curp;
   1846	p_tx->cur_send_frag_num = 0;
   1847
   1848	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
   1849	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
   1850	p_tx->cur_send_frag_num++;
   1851}
   1852
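       /* Build the first Tx BD of the packet: destination, VLAN or loopback
        * echo, RoCE flavor, checksum and S-tag flags, and the address/length
        * of the first fragment. BDs for the remaining fragments are produced
        * empty here and filled in as the fragments are supplied.
        */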
   1853static void
   1854qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
   1855				 struct qed_ll2_info *p_ll2,
   1856				 struct qed_ll2_tx_packet *p_curp,
   1857				 struct qed_ll2_tx_pkt_info *pkt)
   1858{
   1859	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
   1860	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
   1861	struct core_tx_bd *start_bd = NULL;
   1862	enum core_roce_flavor_type roce_flavor;
   1863	enum core_tx_dest tx_dest;
   1864	u16 bd_data = 0, frag_idx;
   1865	u16 bitfield1;
   1866
   1867	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
   1868							     : CORE_RROCE;
   1869
   1870	switch (pkt->tx_dest) {
   1871	case QED_LL2_TX_DEST_NW:
   1872		tx_dest = CORE_TX_DEST_NW;
   1873		break;
   1874	case QED_LL2_TX_DEST_LB:
   1875		tx_dest = CORE_TX_DEST_LB;
   1876		break;
   1877	case QED_LL2_TX_DEST_DROP:
   1878		tx_dest = CORE_TX_DEST_DROP;
   1879		break;
   1880	default:
   1881		tx_dest = CORE_TX_DEST_LB;
   1882		break;
   1883	}
   1884
   1885	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
   1886	if (likely(QED_IS_IWARP_PERSONALITY(p_hwfn) &&
   1887		   p_ll2->input.conn_type == QED_LL2_TYPE_OOO)) {
   1888		start_bd->nw_vlan_or_lb_echo =
   1889		    cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
   1890	} else {
   1891		start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
   1892		if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
   1893		    p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
   1894			pkt->remove_stag = true;
   1895	}
   1896
   1897	bitfield1 = le16_to_cpu(start_bd->bitfield1);
   1898	SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, pkt->l4_hdr_offset_w);
   1899	SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, tx_dest);
   1900	start_bd->bitfield1 = cpu_to_le16(bitfield1);
   1901
   1902	bd_data |= pkt->bd_flags;
   1903	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
   1904	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
   1905	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
   1906	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
   1907	SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
   1908	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
   1909	SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
   1910		  !!(pkt->remove_stag));
   1911
   1912	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
   1913	DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
   1914	start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
   1915
   1916	DP_VERBOSE(p_hwfn,
   1917		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
   1918		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
   1919		   p_ll2->queue_id,
   1920		   p_ll2->cid,
   1921		   p_ll2->input.conn_type,
   1922		   prod_idx,
   1923		   pkt->first_frag_len,
   1924		   pkt->num_of_bds,
   1925		   le32_to_cpu(start_bd->addr.hi),
   1926		   le32_to_cpu(start_bd->addr.lo));
   1927
   1928	if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
   1929		return;
   1930
   1931	/* Need to provide the packet with additional BDs for frags */
   1932	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
   1933	     frag_idx < pkt->num_of_bds; frag_idx++) {
   1934		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
   1935
   1936		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
   1937		(*p_bd)->bd_data.as_bitfield = 0;
   1938		(*p_bd)->bitfield1 = 0;
   1939		p_curp->bds_set[frag_idx].tx_frag = 0;
   1940		p_curp->bds_set[frag_idx].frag_len = 0;
   1941	}
   1942}
   1943
   1944/* This should be called while the Txq spinlock is held */
   1945static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
   1946				     struct qed_ll2_info *p_ll2_conn)
   1947{
   1948	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
   1949	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
   1950	struct qed_ll2_tx_packet *p_pkt = NULL;
   1951	u16 bd_prod;
   1952
   1953	/* If there are missing BDs, don't do anything now */
   1954	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
   1955	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
   1956		return;
   1957
   1958	/* Push the current packet to the list and clean after it */
   1959	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
   1960		      &p_ll2_conn->tx_queue.sending_descq);
   1961	p_ll2_conn->tx_queue.cur_send_packet = NULL;
   1962	p_ll2_conn->tx_queue.cur_send_frag_num = 0;
   1963
   1964	/* Notify FW of packet only if requested to */
   1965	if (!b_notify)
   1966		return;
   1967
   1968	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
   1969
   1970	while (!list_empty(&p_tx->sending_descq)) {
   1971		p_pkt = list_first_entry(&p_tx->sending_descq,
   1972					 struct qed_ll2_tx_packet, list_entry);
   1973		if (!p_pkt)
   1974			break;
   1975
   1976		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
   1977	}
   1978
   1979	p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod);
   1980
   1981	/* Make sure the BD data is updated before ringing the doorbell */
   1982	wmb();
   1983
   1984	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg));
   1985
   1986	DP_VERBOSE(p_hwfn,
   1987		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
   1988		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
   1989		   p_ll2_conn->queue_id,
   1990		   p_ll2_conn->cid,
   1991		   p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod);
   1992}
   1993
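       /* Prepare a Tx packet on a connection and, once all of its BDs are
        * set (immediately for single-BD packets), ring the Tx doorbell.
        * Returns -EEXIST if a previous packet is still being assembled and
        * -EBUSY if no free Tx descriptor or chain elements are available.
        */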
   1994int qed_ll2_prepare_tx_packet(void *cxt,
   1995			      u8 connection_handle,
   1996			      struct qed_ll2_tx_pkt_info *pkt,
   1997			      bool notify_fw)
   1998{
   1999	struct qed_hwfn *p_hwfn = cxt;
   2000	struct qed_ll2_tx_packet *p_curp = NULL;
   2001	struct qed_ll2_info *p_ll2_conn = NULL;
   2002	struct qed_ll2_tx_queue *p_tx;
   2003	struct qed_chain *p_tx_chain;
   2004	unsigned long flags;
   2005	int rc = 0;
   2006
   2007	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
   2008	if (unlikely(!p_ll2_conn))
   2009		return -EINVAL;
   2010	p_tx = &p_ll2_conn->tx_queue;
   2011	p_tx_chain = &p_tx->txq_chain;
   2012
   2013	if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet))
   2014		return -EIO;
   2015
   2016	spin_lock_irqsave(&p_tx->lock, flags);
   2017	if (unlikely(p_tx->cur_send_packet)) {
   2018		rc = -EEXIST;
   2019		goto out;
   2020	}
   2021
   2022	/* Get entry, but only if we have tx elements for it */
   2023	if (unlikely(!list_empty(&p_tx->free_descq)))
   2024		p_curp = list_first_entry(&p_tx->free_descq,
   2025					  struct qed_ll2_tx_packet, list_entry);
   2026	if (unlikely(p_curp &&
   2027		     qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds))
   2028		p_curp = NULL;
   2029
   2030	if (unlikely(!p_curp)) {
   2031		rc = -EBUSY;
   2032		goto out;
   2033	}
   2034
   2035	/* Prepare packet and BD, and perhaps send a doorbell to FW */
   2036	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);
   2037
   2038	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);
   2039
   2040	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
   2041
   2042out:
   2043	spin_unlock_irqrestore(&p_tx->lock, flags);
   2044	return rc;
   2045}
   2046
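       /* Attach one more fragment to the Tx packet currently being assembled
        * and ring the Tx doorbell once the last expected fragment is in.
        */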
   2047int qed_ll2_set_fragment_of_tx_packet(void *cxt,
   2048				      u8 connection_handle,
   2049				      dma_addr_t addr, u16 nbytes)
   2050{
   2051	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
   2052	struct qed_hwfn *p_hwfn = cxt;
   2053	struct qed_ll2_info *p_ll2_conn = NULL;
   2054	u16 cur_send_frag_num = 0;
   2055	struct core_tx_bd *p_bd;
   2056	unsigned long flags;
   2057
   2058	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
   2059	if (unlikely(!p_ll2_conn))
   2060		return -EINVAL;
   2061
   2062	if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet))
   2063		return -EINVAL;
   2064
   2065	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
   2066	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
   2067
   2068	if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used))
   2069		return -EINVAL;
   2070
   2071	/* Fill the BD information, and possibly notify FW */
   2072	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
   2073	DMA_REGPAIR_LE(p_bd->addr, addr);
   2074	p_bd->nbytes = cpu_to_le16(nbytes);
   2075	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
   2076	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
   2077
   2078	p_ll2_conn->tx_queue.cur_send_frag_num++;
   2079
   2080	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
   2081	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
   2082	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
   2083
   2084	return 0;
   2085}
   2086
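       /* Tear down an established connection: stop the Tx and Rx queues,
        * flush outstanding descriptors back to their owners, drop the
        * doorbell recovery entry of a ctx based Rx queue and remove the
        * FCoE/FIP protocol filters installed on establish.
        */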
   2087int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
   2088{
   2089	struct qed_hwfn *p_hwfn = cxt;
   2090	struct qed_ll2_info *p_ll2_conn = NULL;
   2091	int rc = -EINVAL;
   2092	struct qed_ptt *p_ptt;
   2093
   2094	p_ptt = qed_ptt_acquire(p_hwfn);
   2095	if (!p_ptt)
   2096		return -EAGAIN;
   2097
   2098	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
   2099	if (!p_ll2_conn) {
   2100		rc = -EINVAL;
   2101		goto out;
   2102	}
   2103
   2104	/* Stop Tx & Rx of connection, if needed */
   2105	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
   2106		p_ll2_conn->tx_queue.b_cb_registered = false;
   2107		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
   2108		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
   2109		if (rc)
   2110			goto out;
   2111
   2112		qed_ll2_txq_flush(p_hwfn, connection_handle);
   2113		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
   2114	}
   2115
   2116	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
   2117		p_ll2_conn->rx_queue.b_cb_registered = false;
   2118		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
   2119
   2120		if (p_ll2_conn->rx_queue.ctx_based)
   2121			qed_db_recovery_del(p_hwfn->cdev,
   2122					    p_ll2_conn->rx_queue.set_prod_addr,
   2123					    &p_ll2_conn->rx_queue.db_data);
   2124
   2125		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
   2126		if (rc)
   2127			goto out;
   2128
   2129		qed_ll2_rxq_flush(p_hwfn, connection_handle);
   2130		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
   2131	}
   2132
   2133	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
   2134		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
   2135
   2136	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
   2137		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
   2138			qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
   2139						       QED_LLH_FILTER_ETHERTYPE,
   2140						       ETH_P_FCOE, 0);
   2141		qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
   2142					       QED_LLH_FILTER_ETHERTYPE,
   2143					       ETH_P_FIP, 0);
   2144	}
   2145
   2146out:
   2147	qed_ptt_release(p_hwfn, p_ptt);
   2148	return rc;
   2149}
   2150
   2151static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
   2152					   struct qed_ll2_info *p_ll2_conn)
   2153{
   2154	struct qed_ooo_buffer *p_buffer;
   2155
   2156	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
   2157		return;
   2158
   2159	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
   2160	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
   2161						   p_hwfn->p_ooo_info))) {
   2162		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
   2163				  p_buffer->rx_buffer_size,
   2164				  p_buffer->rx_buffer_virt_addr,
   2165				  p_buffer->rx_buffer_phys_addr);
   2166		kfree(p_buffer);
   2167	}
   2168}
   2169
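       /* Free a connection's descriptor memory, Rx/RCQ/Tx chains, CORE CID
        * and any OOO buffers, then mark the handle as inactive so it can be
        * acquired again.
        */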
   2170void qed_ll2_release_connection(void *cxt, u8 connection_handle)
   2171{
   2172	struct qed_hwfn *p_hwfn = cxt;
   2173	struct qed_ll2_info *p_ll2_conn = NULL;
   2174
   2175	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
   2176	if (!p_ll2_conn)
   2177		return;
   2178
   2179	kfree(p_ll2_conn->tx_queue.descq_mem);
   2180	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
   2181
   2182	kfree(p_ll2_conn->rx_queue.descq_array);
   2183	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
   2184	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
   2185
   2186	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
   2187
   2188	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
   2189
   2190	mutex_lock(&p_ll2_conn->mutex);
   2191	p_ll2_conn->b_active = false;
   2192	mutex_unlock(&p_ll2_conn->mutex);
   2193}
   2194
   2195int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
   2196{
   2197	struct qed_ll2_info *p_ll2_connections;
   2198	u8 i;
   2199
   2200	/* Allocate the set of LL2 connection structs */
   2201	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
   2202				    sizeof(struct qed_ll2_info), GFP_KERNEL);
   2203	if (!p_ll2_connections) {
   2204		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
   2205		return -ENOMEM;
   2206	}
   2207
   2208	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
   2209		p_ll2_connections[i].my_id = i;
   2210
   2211	p_hwfn->p_ll2_info = p_ll2_connections;
   2212	return 0;
   2213}
   2214
   2215void qed_ll2_setup(struct qed_hwfn *p_hwfn)
   2216{
   2217	int i;
   2218
   2219	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
   2220		mutex_init(&p_hwfn->p_ll2_info[i].mutex);
   2221}
   2222
   2223void qed_ll2_free(struct qed_hwfn *p_hwfn)
   2224{
   2225	if (!p_hwfn->p_ll2_info)
   2226		return;
   2227
   2228	kfree(p_hwfn->p_ll2_info);
   2229	p_hwfn->p_ll2_info = NULL;
   2230}
   2231
   2232static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
   2233				    struct qed_ptt *p_ptt,
   2234				    struct qed_ll2_stats *p_stats)
   2235{
   2236	struct core_ll2_port_stats port_stats;
   2237
   2238	memset(&port_stats, 0, sizeof(port_stats));
   2239	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
   2240			BAR0_MAP_REG_TSDM_RAM +
   2241			TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
   2242			sizeof(port_stats));
   2243
   2244	p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
   2245	p_stats->gsi_invalid_pkt_length +=
   2246	    HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
   2247	p_stats->gsi_unsupported_pkt_typ +=
   2248	    HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
   2249	p_stats->gsi_crcchksm_error +=
   2250	    HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
   2251}
   2252
   2253static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
   2254				struct qed_ptt *p_ptt,
   2255				struct qed_ll2_info *p_ll2_conn,
   2256				struct qed_ll2_stats *p_stats)
   2257{
   2258	struct core_ll2_tstorm_per_queue_stat tstats;
   2259	u8 qid = p_ll2_conn->queue_id;
   2260	u32 tstats_addr;
   2261
   2262	memset(&tstats, 0, sizeof(tstats));
   2263	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
   2264		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
   2265	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
   2266
   2267	p_stats->packet_too_big_discard +=
   2268			HILO_64_REGPAIR(tstats.packet_too_big_discard);
   2269	p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard);
   2270}
   2271
   2272static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
   2273				struct qed_ptt *p_ptt,
   2274				struct qed_ll2_info *p_ll2_conn,
   2275				struct qed_ll2_stats *p_stats)
   2276{
   2277	struct core_ll2_ustorm_per_queue_stat ustats;
   2278	u8 qid = p_ll2_conn->queue_id;
   2279	u32 ustats_addr;
   2280
   2281	memset(&ustats, 0, sizeof(ustats));
   2282	ustats_addr = BAR0_MAP_REG_USDM_RAM +
   2283		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
   2284	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
   2285
   2286	p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
   2287	p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
   2288	p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
   2289	p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
   2290	p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
   2291	p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
   2292}
   2293
   2294static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
   2295				struct qed_ptt *p_ptt,
   2296				struct qed_ll2_info *p_ll2_conn,
   2297				struct qed_ll2_stats *p_stats)
   2298{
   2299	struct core_ll2_pstorm_per_queue_stat pstats;
   2300	u8 stats_id = p_ll2_conn->tx_stats_id;
   2301	u32 pstats_addr;
   2302
   2303	memset(&pstats, 0, sizeof(pstats));
   2304	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
   2305		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
   2306	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
   2307
   2308	p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
   2309	p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
   2310	p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
   2311	p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
   2312	p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
   2313	p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
   2314}
   2315
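       /* Read the per-queue statistics from the Tstorm/Ustorm/Pstorm RAM.
        * Port-level GSI counters are only read when GSI is enabled for the
        * connection, and Pstorm Tx counters only when a Tx stats counter was
        * assigned to it. Counters are accumulated into *p_stats.
        */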
   2316static int __qed_ll2_get_stats(void *cxt, u8 connection_handle,
   2317			       struct qed_ll2_stats *p_stats)
   2318{
   2319	struct qed_hwfn *p_hwfn = cxt;
   2320	struct qed_ll2_info *p_ll2_conn = NULL;
   2321	struct qed_ptt *p_ptt;
   2322
   2323	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
   2324	    !p_hwfn->p_ll2_info)
   2325		return -EINVAL;
   2326
   2327	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
   2328
   2329	p_ptt = qed_ptt_acquire(p_hwfn);
   2330	if (!p_ptt) {
   2331		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
   2332		return -EINVAL;
   2333	}
   2334
   2335	if (p_ll2_conn->input.gsi_enable)
   2336		_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);
   2337
   2338	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
   2339
   2340	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
   2341
   2342	if (p_ll2_conn->tx_stats_en)
   2343		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
   2344
   2345	qed_ptt_release(p_hwfn, p_ptt);
   2346
   2347	return 0;
   2348}
   2349
   2350int qed_ll2_get_stats(void *cxt,
   2351		      u8 connection_handle, struct qed_ll2_stats *p_stats)
   2352{
   2353	memset(p_stats, 0, sizeof(*p_stats));
   2354	return __qed_ll2_get_stats(cxt, connection_handle, p_stats);
   2355}
   2356
   2357static void qed_ll2b_release_rx_packet(void *cxt,
   2358				       u8 connection_handle,
   2359				       void *cookie,
   2360				       dma_addr_t rx_buf_addr,
   2361				       bool b_last_packet)
   2362{
   2363	struct qed_hwfn *p_hwfn = cxt;
   2364
   2365	qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
   2366}
   2367
   2368static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
   2369				    const struct qed_ll2_cb_ops *ops,
   2370				    void *cookie)
   2371{
   2372	cdev->ll2->cbs = ops;
   2373	cdev->ll2->cb_cookie = cookie;
   2374}
   2375
   2376static struct qed_ll2_cbs ll2_cbs = {
   2377	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
   2378	.rx_release_cb = &qed_ll2b_release_rx_packet,
   2379	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
   2380	.tx_release_cb = &qed_ll2b_complete_tx_packet,
   2381};
   2382
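       /* Fill a qed_ll2_acquire_data request from the generic LL2 parameters;
        * lb selects the loopback traffic class and Tx destination used by the
        * OOO queue.
        */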
   2383static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn,
   2384				  struct qed_ll2_acquire_data *data,
   2385				  struct qed_ll2_params *params,
   2386				  enum qed_ll2_conn_type conn_type,
   2387				  u8 *handle, bool lb)
   2388{
   2389	memset(data, 0, sizeof(*data));
   2390
   2391	data->input.conn_type = conn_type;
   2392	data->input.mtu = params->mtu;
   2393	data->input.rx_num_desc = QED_LL2_RX_SIZE;
   2394	data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
   2395	data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
   2396	data->input.tx_num_desc = QED_LL2_TX_SIZE;
   2397	data->p_connection_handle = handle;
   2398	data->cbs = &ll2_cbs;
   2399	ll2_cbs.cookie = p_hwfn;
   2400
   2401	if (lb) {
   2402		data->input.tx_tc = PKT_LB_TC;
   2403		data->input.tx_dest = QED_LL2_TX_DEST_LB;
   2404	} else {
   2405		data->input.tx_tc = 0;
   2406		data->input.tx_dest = QED_LL2_TX_DEST_NW;
   2407	}
   2408}
   2409
   2410static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn,
   2411			     struct qed_ll2_params *params)
   2412{
   2413	u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
   2414	struct qed_ll2_acquire_data data;
   2415	int rc;
   2416
   2417	qed_ll2_set_conn_data(p_hwfn, &data, params,
   2418			      QED_LL2_TYPE_OOO, handle, true);
   2419
   2420	rc = qed_ll2_acquire_connection(p_hwfn, &data);
   2421	if (rc) {
   2422		DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n");
   2423		goto out;
   2424	}
   2425
   2426	rc = qed_ll2_establish_connection(p_hwfn, *handle);
   2427	if (rc) {
   2428		DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n");
   2429		goto fail;
   2430	}
   2431
   2432	return 0;
   2433
   2434fail:
   2435	qed_ll2_release_connection(p_hwfn, *handle);
   2436out:
   2437	*handle = QED_LL2_UNUSED_HANDLE;
   2438	return rc;
   2439}
   2440
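       /* True for a storage (FCoE/iSCSI/NVMeTCP) PF whose affinity hwfn is
        * not the leading one, i.e. the PF is affinitized to engine 1; such a
        * PF needs an additional LL2 instance on engine 0 in CMT mode.
        */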
   2441static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
   2442{
   2443	return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
   2444		QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
   2445		QED_IS_NVMETCP_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
   2446		(QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
   2447}
   2448
   2449static int __qed_ll2_stop(struct qed_hwfn *p_hwfn)
   2450{
   2451	struct qed_dev *cdev = p_hwfn->cdev;
   2452	int rc;
   2453
   2454	rc = qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
   2455	if (rc)
   2456		DP_INFO(cdev, "Failed to terminate LL2 connection\n");
   2457
   2458	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
   2459
   2460	return rc;
   2461}
   2462
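       /* qed_ll2_ops.stop callback: remove the LLH MAC filter, stop the OOO
        * queue for iSCSI/NVMeTCP, terminate and release the connection on the
        * affinity hwfn (and on engine 0 for storage PFs in CMT mode) and free
        * the Rx buffer pool.
        */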
   2463static int qed_ll2_stop(struct qed_dev *cdev)
   2464{
   2465	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
   2466	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
   2467	int rc = 0, rc2 = 0;
   2468
   2469	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
   2470		return 0;
   2471	/* The LLH MAC filter is only installed for non-NVMeTCP personalities */
   2472	if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))
   2473		qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
   2474
   2475	eth_zero_addr(cdev->ll2_mac_address);
   2476
   2477	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
   2478		qed_ll2_stop_ooo(p_hwfn);
   2479
   2480	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
   2481	if (b_is_storage_eng1) {
   2482		rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev));
   2483		if (rc2)
   2484			DP_NOTICE(QED_LEADING_HWFN(cdev),
   2485				  "Failed to stop LL2 on engine 0\n");
   2486	}
   2487
   2488	rc = __qed_ll2_stop(p_hwfn);
   2489	if (rc)
   2490		DP_NOTICE(p_hwfn, "Failed to stop LL2\n");
   2491
   2492	qed_ll2_kill_buffers(cdev);
   2493
   2494	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
   2495
   2496	return rc | rc2;
   2497}
   2498
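       /* Acquire and establish the main LL2 connection of a hwfn, with the
        * connection type derived from the PF personality, and hand all
        * preallocated Rx buffers to the firmware. Buffers that fail to post
        * are unmapped and freed on the spot; failing to post even one buffer
        * aborts the start.
        */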
   2499static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
   2500			   struct qed_ll2_params *params)
   2501{
   2502	struct qed_ll2_buffer *buffer, *tmp_buffer;
   2503	struct qed_dev *cdev = p_hwfn->cdev;
   2504	enum qed_ll2_conn_type conn_type;
   2505	struct qed_ll2_acquire_data data;
   2506	int rc, rx_cnt;
   2507
   2508	switch (p_hwfn->hw_info.personality) {
   2509	case QED_PCI_FCOE:
   2510		conn_type = QED_LL2_TYPE_FCOE;
   2511		break;
   2512	case QED_PCI_ISCSI:
   2513	case QED_PCI_NVMETCP:
   2514		conn_type = QED_LL2_TYPE_TCP_ULP;
   2515		break;
   2516	case QED_PCI_ETH_ROCE:
   2517		conn_type = QED_LL2_TYPE_ROCE;
   2518		break;
   2519	default:
   2520
   2521		conn_type = QED_LL2_TYPE_TEST;
   2522	}
   2523
   2524	qed_ll2_set_conn_data(p_hwfn, &data, params, conn_type,
   2525			      &cdev->ll2->handle, false);
   2526
   2527	rc = qed_ll2_acquire_connection(p_hwfn, &data);
   2528	if (rc) {
   2529		DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n");
   2530		return rc;
   2531	}
   2532
   2533	rc = qed_ll2_establish_connection(p_hwfn, cdev->ll2->handle);
   2534	if (rc) {
   2535		DP_INFO(p_hwfn, "Failed to establish LL2 connection\n");
   2536		goto release_conn;
   2537	}
   2538
   2539	/* Post all Rx buffers to FW */
   2540	spin_lock_bh(&cdev->ll2->lock);
   2541	rx_cnt = cdev->ll2->rx_cnt;
   2542	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
   2543		rc = qed_ll2_post_rx_buffer(p_hwfn,
   2544					    cdev->ll2->handle,
   2545					    buffer->phys_addr, 0, buffer, 1);
   2546		if (rc) {
   2547			DP_INFO(p_hwfn,
   2548				"Failed to post an Rx buffer; Deleting it\n");
   2549			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
   2550					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
   2551			kfree(buffer->data);
   2552			list_del(&buffer->list);
   2553			kfree(buffer);
   2554		} else {
   2555			rx_cnt++;
   2556		}
   2557	}
   2558	spin_unlock_bh(&cdev->ll2->lock);
   2559
   2560	if (rx_cnt == cdev->ll2->rx_cnt) {
   2561		DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n");
   2562		goto terminate_conn;
   2563	}
   2564	cdev->ll2->rx_cnt = rx_cnt;
   2565
   2566	return 0;
   2567
   2568terminate_conn:
   2569	qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
   2570release_conn:
   2571	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
   2572	return rc;
   2573}
   2574
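       /* qed_ll2_ops.start callback: allocate the Rx buffer pool, start LL2
        * on the affinity hwfn (and on engine 0 as well for a storage PF in
        * CMT mode), start the OOO queue for iSCSI/NVMeTCP and install the
        * LLH MAC filter for non-NVMeTCP personalities.
        */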
   2575static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
   2576{
   2577	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
   2578	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
   2579	struct qed_ll2_buffer *buffer;
   2580	int rx_num_desc, i, rc;
   2581
   2582	if (!is_valid_ether_addr(params->ll2_mac_address)) {
   2583		DP_NOTICE(cdev, "Invalid Ethernet address\n");
   2584		return -EINVAL;
   2585	}
   2586
   2587	WARN_ON(!cdev->ll2->cbs);
   2588
   2589	/* Initialize LL2 locks & lists */
   2590	INIT_LIST_HEAD(&cdev->ll2->list);
   2591	spin_lock_init(&cdev->ll2->lock);
   2592
   2593	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
   2594			     L1_CACHE_BYTES + params->mtu;
   2595
   2596	/* Allocate memory for LL2.
   2597	 * In CMT mode, a storage PF which is affinitized to engine 1 also
   2598	 * starts LL2 on engine 0, so twice the number of buffers is needed.
   2599	 */
   2600	rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1);
   2601	DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n",
   2602		rx_num_desc, cdev->ll2->rx_size);
   2603	for (i = 0; i < rx_num_desc; i++) {
   2604		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
   2605		if (!buffer) {
   2606			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
   2607			rc = -ENOMEM;
   2608			goto err0;
   2609		}
   2610
   2611		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
   2612					  &buffer->phys_addr);
   2613		if (rc) {
   2614			kfree(buffer);
   2615			goto err0;
   2616		}
   2617
   2618		list_add_tail(&buffer->list, &cdev->ll2->list);
   2619	}
   2620
   2621	rc = __qed_ll2_start(p_hwfn, params);
   2622	if (rc) {
   2623		DP_NOTICE(cdev, "Failed to start LL2\n");
   2624		goto err0;
   2625	}
   2626
   2627	/* In CMT mode, LL2 must always be started on engine 0 for a storage PF,
   2628	 * since broadcast/multicast packets are routed to engine 0.
   2629	 */
   2630	if (b_is_storage_eng1) {
   2631		rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params);
   2632		if (rc) {
   2633			DP_NOTICE(QED_LEADING_HWFN(cdev),
   2634				  "Failed to start LL2 on engine 0\n");
   2635			goto err1;
   2636		}
   2637	}
   2638
   2639	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
   2640		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
   2641		rc = qed_ll2_start_ooo(p_hwfn, params);
   2642		if (rc) {
   2643			DP_NOTICE(cdev, "Failed to start OOO LL2\n");
   2644			goto err2;
   2645		}
   2646	}
   2647
   2648	if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
   2649		rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
   2650		if (rc) {
   2651			DP_NOTICE(cdev, "Failed to add an LLH filter\n");
   2652			goto err3;
   2653		}
   2654	}
   2655
   2656	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
   2657
   2658	return 0;
   2659
   2660err3:
   2661	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
   2662		qed_ll2_stop_ooo(p_hwfn);
   2663err2:
   2664	if (b_is_storage_eng1)
   2665		__qed_ll2_stop(QED_LEADING_HWFN(cdev));
   2666err1:
   2667	__qed_ll2_stop(p_hwfn);
   2668err0:
   2669	qed_ll2_kill_buffers(cdev);
   2670	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
   2671	return rc;
   2672}
   2673
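       /* Transmit entry point exposed through qed_ll2_ops: maps the linear
        * part and every fragment of the skb and feeds them to the prepare /
        * set-fragment API above. skbs that already request checksumming
        * (ip_summed != CHECKSUM_NONE) are rejected.
        */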
   2674static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
   2675			      unsigned long xmit_flags)
   2676{
   2677	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
   2678	struct qed_ll2_tx_pkt_info pkt;
   2679	const skb_frag_t *frag;
   2680	u8 flags = 0, nr_frags;
   2681	int rc = -EINVAL, i;
   2682	dma_addr_t mapping;
   2683	u16 vlan = 0;
   2684
   2685	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
   2686		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
   2687		return -EINVAL;
   2688	}
   2689
   2690	/* Cache the number of fragments from the SKB, since the SKB may be freed
   2691	 * by the completion routine after qed_ll2_prepare_tx_packet() is called.
   2692	 */
   2693	nr_frags = skb_shinfo(skb)->nr_frags;
   2694
   2695	if (unlikely(1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET)) {
   2696		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
   2697		       1 + nr_frags);
   2698		return -EINVAL;
   2699	}
   2700
   2701	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
   2702				 skb->len, DMA_TO_DEVICE);
   2703	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
   2704		DP_NOTICE(cdev, "SKB mapping failed\n");
   2705		return -EINVAL;
   2706	}
   2707
   2708	/* Request HW to calculate IP csum */
   2709	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
   2710	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
   2711		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
   2712
   2713	if (skb_vlan_tag_present(skb)) {
   2714		vlan = skb_vlan_tag_get(skb);
   2715		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
   2716	}
   2717
   2718	memset(&pkt, 0, sizeof(pkt));
   2719	pkt.num_of_bds = 1 + nr_frags;
   2720	pkt.vlan = vlan;
   2721	pkt.bd_flags = flags;
   2722	pkt.tx_dest = QED_LL2_TX_DEST_NW;
   2723	pkt.first_frag = mapping;
   2724	pkt.first_frag_len = skb->len;
   2725	pkt.cookie = skb;
   2726	if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
   2727	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
   2728		pkt.remove_stag = true;
   2729
   2730	/* qed_ll2_prepare_tx_packet() may actually send the packet if the skb
   2731	 * has no fragments, in which case the completion routine may already
   2732	 * have run and freed the SKB; do not dereference the SKB beyond this
   2733	 * point unless it has fragments.
   2734	 */
   2735	rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
   2736				       &pkt, 1);
   2737	if (unlikely(rc))
   2738		goto err;
   2739
   2740	for (i = 0; i < nr_frags; i++) {
   2741		frag = &skb_shinfo(skb)->frags[i];
   2742
   2743		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
   2744					   skb_frag_size(frag), DMA_TO_DEVICE);
   2745
   2746		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
   2747			DP_NOTICE(cdev,
   2748				  "Unable to map frag - dropping packet\n");
   2749			rc = -ENOMEM;
   2750			goto err;
   2751		}
   2752
   2753		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
   2754						       cdev->ll2->handle,
   2755						       mapping,
   2756						       skb_frag_size(frag));
   2757
   2758	/* If this failed there is not much to do here: a partial packet has
   2759	 * been posted and its memory cannot be freed until the completion runs.
   2760	 */
   2761		if (unlikely(rc))
   2762			goto err2;
   2763	}
   2764
   2765	return 0;
   2766
   2767err:
   2768	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
   2769err2:
   2770	return rc;
   2771}
   2772
   2773static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
   2774{
   2775	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
   2776	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
   2777	int rc;
   2778
   2779	if (!cdev->ll2)
   2780		return -EINVAL;
   2781
   2782	rc = qed_ll2_get_stats(p_hwfn, cdev->ll2->handle, stats);
   2783	if (rc) {
   2784		DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n");
   2785		return rc;
   2786	}
   2787
   2788	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
   2789	if (b_is_storage_eng1) {
   2790		rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
   2791					 cdev->ll2->handle, stats);
   2792		if (rc) {
   2793			DP_NOTICE(QED_LEADING_HWFN(cdev),
   2794				  "Failed to get LL2 stats on engine 0\n");
   2795			return rc;
   2796		}
   2797	}
   2798
   2799	return 0;
   2800}
   2801
   2802const struct qed_ll2_ops qed_ll2_ops_pass = {
   2803	.start = &qed_ll2_start,
   2804	.stop = &qed_ll2_stop,
   2805	.start_xmit = &qed_ll2_start_xmit,
   2806	.register_cb_ops = &qed_ll2_register_cb_ops,
   2807	.get_stats = &qed_ll2_stats,
   2808};
   2809
   2810int qed_ll2_alloc_if(struct qed_dev *cdev)
   2811{
   2812	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
   2813	return cdev->ll2 ? 0 : -ENOMEM;
   2814}
   2815
   2816void qed_ll2_dealloc_if(struct qed_dev *cdev)
   2817{
   2818	kfree(cdev->ll2);
   2819	cdev->ll2 = NULL;
   2820}