cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ixgbe_fcoe.c (28286B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe.h"
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <generated/utsrelease.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp
 *
 * Returns : none
 *
 */
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
	ddp->len = 0;
	ddp->err = 1;
	ddp->udl = NULL;
	ddp->udp = 0UL;
	ddp->sgl = NULL;
	ddp->sgc = 0;
}

/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by ULD, i.e., FCP layer of libfc
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_hw *hw;
	u32 fcbuff;

	if (!netdev)
		return 0;

	if (xid >= netdev->fcoe_ddp_xid)
		return 0;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		return 0;

	hw = &adapter->hw;
	len = ddp->len;
	/* if no error then skip ddp context invalidation */
	if (!ddp->err)
		goto skip_ddpinv;

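	/*
	 * On error the hardware may still hold the DDP context: zero the
	 * filter and buffer registers, commit the cleared context with a
	 * write-enable keyed by this xid, then read FCBUFF back to see
	 * whether its VALID bit has dropped.
	 */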
	if (hw->mac.type == ixgbe_mac_X550) {
		/* X550 does not require DDP FCoE lock */

		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid),
				(xid | IXGBE_FCFLTRW_WE));

		/* program FCBUFF */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0);

		/* program FCDMARW */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
				(xid | IXGBE_FCDMARW_WE));

		/* read FCBUFF to check context invalidated */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid));
	} else {
		/* other hardware requires DDP FCoE lock */
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		/* guaranteed to be invalidated after 100us */
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
	}

	if (fcbuff & IXGBE_FCBUFF_VALID)
		usleep_range(100, 150);

skip_ddpinv:
	if (ddp->sgl)
		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	ixgbe_fcoe_clear_ddp(ddp);

	return len;
}

/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to setup target mode, 0 to setup initiator mode
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= netdev->fcoe_ddp_xid) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from per cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed to allocate ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

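	/*
	 * Build the user descriptor list (udl): each entry holds the DMA
	 * address of one bufflen-sized (4KB) chunk. The hardware needs
	 * every buffer except the first to start on a bufflen boundary
	 * and every buffer except the last to end on one, so scatterlist
	 * layouts that can't be carved up this way fall back to no DDP.
	 */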
	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset and length of the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have non-full bufflen */
	lastsize = thisoff + thislen;

	/*
	 * lastsize can not be bufflen.
	 * If it is, chain in one more (shared extra) buffer and
	 * report lastsize = 1 instead.
	 */
	if (lastsize == bufflen) {
		if (j >= IXGBE_BUFFCNT_MAX) {
			ddp_pool->noddp_ext_buff++;
			goto out_noddp_free;
		}

		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
		j++;
		lastsize = 1;
	}
	put_cpu();

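	/*
	 * Assemble the context words: FCBUFF carries the buffer size
	 * (4KB), buffer count, first-buffer offset and the VALID bit;
	 * FCDMARW commits the DMA context for this xid and records the
	 * last buffer's size; FCFLTRW commits the filter context.
	 */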
	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	/* Set WRCONTX bit to allow DDP for target */
	if (target_mode)
		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
	fcbuff |= (IXGBE_FCBUFF_VALID);

	fcdmarw = xid;
	fcdmarw |= IXGBE_FCDMARW_WE;
	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

	fcfltrw = xid;
	fcfltrw |= IXGBE_FCFLTRW_WE;

	/* program DMA context */
	hw = &adapter->hw;

	/* turn on last frame indication for target mode, as the target is
	 * supposed to send FCP_RSP when it is done. */
	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
	}

	if (hw->mac.type == ixgbe_mac_X550) {
		/* X550 does not require DDP lock */

		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid),
				ddp->udp & DMA_BIT_MASK(32));
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32);
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff);
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw);
		/* program filter context */
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw);
	} else {
		/* DDP lock for indirect DDP context access */
		spin_lock_bh(&fcoe->lock);

		IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
		IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
		IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
		/* program filter context */
		IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

		spin_unlock_bh(&fcoe->lock);
	}

	return 1;

out_noddp_free:
	dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
out_noddp:
	put_cpu();
	return 0;
}

/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}
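
/*
 * Usage sketch (illustrative only, not part of this driver): the FCP
 * layer reaches these entry points indirectly through net_device_ops
 * around an exchange, roughly:
 *
 *	if (netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev, xid, sgl, sgc))
 *		;	// hardware will place received data directly in sgl
 *	...
 *	len = netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);
 */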

/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. The DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			    struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}

/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
 * the skb is not passed up to the ULD, > 0 is the length of the data
 * being ddped.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc,
		   struct sk_buff *skb)
{
	int rc = -EINVAL;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof *crc;
	__le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
	__le32 ddp_err;
	int ddp_max;
	u32 fctl;
	u16 xid;

	if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
	else
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct fcoe_hdr));

	fctl = ntoh24(fh->fh_f_ctl);
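	/*
	 * F_CTL EX_CTX set means the frame came from the exchange
	 * responder, so the local DDP context is keyed by the originator
	 * exchange id (OX_ID); otherwise we are the responder and the
	 * context is keyed by RX_ID (target mode).
	 */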
	if (fctl & FC_FC_EX_CTX)
		xid = be16_to_cpu(fh->fh_ox_id);
	else
		xid = be16_to_cpu(fh->fh_rx_id);

	ddp_max = IXGBE_FCOE_DDP_MAX;
	/* X550 has different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		ddp_max = IXGBE_FCOE_DDP_MAX_X550;
	if (xid >= ddp_max)
		return -EINVAL;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		return -EINVAL;

	ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
					      IXGBE_RXDADV_ERR_FCERR);
	if (ddp_err)
		return -EINVAL;

	switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
	/* return 0 to bypass going to ULD for DDPed data */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		rc = 0;
		break;
	/* unmap the sg list when FCPRSP is received */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
			     ddp->sgc, DMA_FROM_DEVICE);
		ddp->err = (__force u32)ddp_err;
		ddp->sgl = NULL;
		ddp->sgc = 0;
		fallthrough;
	/* if DDP length is present pass it through to ULD */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		if (ddp->len)
			rc = ddp->len;
		break;
	/* no match will return as an error */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
	default:
		break;
	}

	/* In target mode, check the last data frame of the sequence.
	 * For DDP in target mode, the data is already DDPed, but the
	 * header indication on the last data frame allows us to tell
	 * whether we got all the data so the ULP can send FCP_RSP back.
	 * As this is not a full fcoe frame, we fill in the trailer here
	 * so it won't be dropped by the ULP stack.
	 */
	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
	    (fctl & FC_FC_END_SEQ)) {
		skb_linearize(skb);
		crc = skb_put(skb, sizeof(*crc));
		crc->fcoe_eof = FC_EOF_T;
	}

	return rc;
}

/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns : 0 indicates success, < 0 for error
 */
int ixgbe_fso(struct ixgbe_ring *tx_ring,
	      struct ixgbe_tx_buffer *first,
	      u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct fc_frame_header *fh;
	u32 vlan_macip_lens;
	u32 fcoe_sof_eof = 0;
	u32 mss_l4len_idx;
	u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE;
	u8 sof, eof;

	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
			skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}

	/* reset the headers to point at fcoe/fc */
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len +
				 sizeof(struct fcoe_hdr));

	/* sets up SOF and ORIS */
	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
	switch (sof) {
	case FC_SOF_I2:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_I3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
			       IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_N2:
		break;
	case FC_SOF_N3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
		return -EINVAL;
	}

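	/*
	 * The frame ends with a struct fcoe_crc_eof trailer: a CRC dword
	 * followed by a dword holding the EOF byte and three reserved
	 * bytes, so the EOF sits 4 bytes from the end of the skb.
	 */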
	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, &eof, 1);
	/* sets up EOF and ORIE */
	switch (eof) {
	case FC_EOF_N:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
		break;
	case FC_EOF_T:
		/* lso needs ORIE */
		if (skb_is_gso(skb))
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
					IXGBE_ADVTXD_FCOEF_ORIE;
		else
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
		break;
	case FC_EOF_NI:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
		break;
	case FC_EOF_A:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
		return -EINVAL;
	}

	/* sets up PARINC indicating data offset */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

	/* include trailer in headlen as it is replicated per frame */
	*hdr_len = sizeof(struct fcoe_crc_eof);

	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
	if (skb_is_gso(skb)) {
		*hdr_len += skb_transport_offset(skb) +
			    sizeof(struct fc_frame_header);
		/* update gso_segs and bytecount */
		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
					       skb_shinfo(skb)->gso_size);
		first->bytecount += (first->gso_segs - 1) * *hdr_len;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO;
		/* Hardware expects L4T to be RSV for FCoE TSO */
		type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV;
	}

	/* set flag indicating FCOE to ixgbe_tx_map call */
	first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC;

	/* mss_l4len_idx: use 0 for FSO as with TSO, no need for L4LEN */
	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_transport_offset(skb) +
			  sizeof(struct fc_frame_header);
	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
			   << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	/* write context desc */
	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
			  type_tucmd, mss_l4len_idx);

	return 0;
}

static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
{
	struct ixgbe_fcoe_ddp_pool *ddp_pool;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	dma_pool_destroy(ddp_pool->pool);
	ddp_pool->pool = NULL;
}

static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
				     struct device *dev,
				     unsigned int cpu)
{
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct dma_pool *pool;
	char pool_name[32];

	snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu);

	pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
			       IXGBE_FCPTR_ALIGN, PAGE_SIZE);
	if (!pool)
		return -ENOMEM;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	ddp_pool->pool = pool;
	ddp_pool->noddp = 0;
	ddp_pool->noddp_ext_buff = 0;

	return 0;
}

/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns : none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
	struct ixgbe_hw *hw = &adapter->hw;
	int i, fcoe_q, fcoe_i, fcoe_q_h = 0;
	int fcreta_size;
	u32 etqf;

	/* Minimal functionality for FCoE requires at least CRC offloads */
	if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
		return;

	/* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
	etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= IXGBE_ETQF_POOL_ENABLE;
		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);

	/* leave registers un-configured if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return;

	/* Use one or more Rx queues for FCoE by redirection table */
	fcreta_size = IXGBE_FCRETA_SIZE;
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		fcreta_size = IXGBE_FCRETA_SIZE_X550;

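	/*
	 * Each FCRETA entry maps one hash bucket to an FCoE Rx queue; on
	 * X550 an entry additionally carries a second (high) queue index
	 * in its upper bits, which is why fcoe_q_h is folded in below.
	 */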
	for (i = 0; i < fcreta_size; i++) {
		if (adapter->hw.mac.type == ixgbe_mac_X550) {
			int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
							fcoe->indices);
			fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
			fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
				   IXGBE_FCRETA_ENTRY_HIGH_MASK;
		}

		fcoe_i = fcoe->offset + (i % fcoe->indices);
		fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		fcoe_q |= fcoe_q_h;
		IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);

	/* Enable L2 EtherType filter for FIP */
	etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= IXGBE_ETQF_POOL_ENABLE;
		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);

	/* Send FIP frames to the first FCoE queue */
	fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

	/* Configure FCoE Rx control */
	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
			IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
}

/**
 * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns : none
 */
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	int cpu, i, ddp_max;

	/* do nothing if no DDP pools were allocated */
	if (!fcoe->ddp_pool)
		return;

	ddp_max = IXGBE_FCOE_DDP_MAX;
	/* X550 has different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		ddp_max = IXGBE_FCOE_DDP_MAX_X550;

	for (i = 0; i < ddp_max; i++)
		ixgbe_fcoe_ddp_put(adapter->netdev, i);

	for_each_possible_cpu(cpu)
		ixgbe_fcoe_dma_pool_free(fcoe, cpu);

	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
	kfree(fcoe->extra_ddp_buffer);

	fcoe->extra_ddp_buffer = NULL;
	fcoe->extra_ddp_buffer_dma = 0;
}

/**
 * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Sets up ddp context resources
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	struct device *dev = &adapter->pdev->dev;
	void *buffer;
	dma_addr_t dma;
	unsigned int cpu;

	/* do nothing if no DDP pools were allocated */
	if (!fcoe->ddp_pool)
		return 0;

	/* Extra buffer to be shared by all DDPs for HW work around */
	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		e_err(drv, "failed to map extra DDP buffer\n");
		kfree(buffer);
		return -ENOMEM;
	}

	fcoe->extra_ddp_buffer = buffer;
	fcoe->extra_ddp_buffer_dma = dma;

	/* allocate pci pool for each cpu */
	for_each_possible_cpu(cpu) {
		int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
		if (!err)
			continue;

		e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
		ixgbe_free_fcoe_ddp_resources(adapter);
		return -ENOMEM;
	}

	return 0;
}

static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		return -EINVAL;

	fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);

	if (!fcoe->ddp_pool) {
		e_err(drv, "failed to allocate percpu DDP resources\n");
		return -ENOMEM;
	}

	adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
	/* X550 has different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1;

	return 0;
}

static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	adapter->netdev->fcoe_ddp_xid = 0;

	if (!fcoe->ddp_pool)
		return;

	free_percpu(fcoe->ddp_pool);
	fcoe->ddp_pool = NULL;
}

/**
 * ixgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_enable(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	atomic_inc(&fcoe->refcnt);

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		return -EINVAL;

	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		return -EINVAL;

	e_info(drv, "Enabling FCoE offload features.\n");

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	/* Allocate per CPU memory to track DDP pools */
	ixgbe_fcoe_ddp_enable(adapter);

	/* enable FCoE and notify stack */
	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
	netdev->features |= NETIF_F_FCOE_MTU;
	netdev_features_change(netdev);

	/* release existing queues and reallocate them */
	ixgbe_clear_interrupt_scheme(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);

	return 0;
}

/**
 * ixgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_disable(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
		return -EINVAL;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return -EINVAL;

	e_info(drv, "Disabling FCoE offload features.\n");
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	/* Free per CPU memory to track DDP pools */
	ixgbe_fcoe_ddp_disable(adapter);

	/* disable FCoE and notify stack */
	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
	netdev->features &= ~NETIF_F_FCOE_MTU;

	netdev_features_change(netdev);

	/* release existing queues and reallocate them */
	ixgbe_clear_interrupt_scheme(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);

	return 0;
}

/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev: the corresponding net_device
 * @wwn: the world wide name
 * @type: the type of world wide name
 *
 * Returns the node or port world wide name if both the prefix and the SAN
 * MAC address are valid; the wwn is then formed based on the NAA-2
 * IEEE Extended name identifier (ref. T11 FC-LS spec., Sec. 15.3).
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
	u16 prefix = 0xffff;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		prefix = mac->wwnn_prefix;
		break;
	case NETDEV_FCOE_WWPN:
		prefix = mac->wwpn_prefix;
		break;
	default:
		break;
	}

    943
    944	if ((prefix != 0xffff) &&
    945	    is_valid_ether_addr(mac->san_addr)) {
    946		*wwn = ((u64) prefix << 48) |
    947		       ((u64) mac->san_addr[0] << 40) |
    948		       ((u64) mac->san_addr[1] << 32) |
    949		       ((u64) mac->san_addr[2] << 24) |
    950		       ((u64) mac->san_addr[3] << 16) |
    951		       ((u64) mac->san_addr[4] << 8)  |
    952		       ((u64) mac->san_addr[5]);
    953		return 0;
    954	}
    955	return -EINVAL;
    956}
    957
    958/**
    959 * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
 * @netdev: the corresponding net_device
 * @info: HBA information
 *
 * Returns ixgbe HBA information
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u64 dsn;

	if (!info)
		return -EINVAL;

	/* Don't return information on unsupported devices */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return -EINVAL;

	/* Manufacturer */
	snprintf(info->manufacturer, sizeof(info->manufacturer),
		 "Intel Corporation");

	/* Serial Number */

	/* Get the PCI-e Device Serial Number Capability */
	dsn = pci_get_dsn(adapter->pdev);
	if (dsn)
		snprintf(info->serial_number, sizeof(info->serial_number),
			 "%016llX", dsn);
	else
		snprintf(info->serial_number, sizeof(info->serial_number),
			 "Unknown");

	/* Hardware Version */
	snprintf(info->hardware_version,
		 sizeof(info->hardware_version),
		 "Rev %d", hw->revision_id);
	/* Driver Name/Version */
	snprintf(info->driver_version,
		 sizeof(info->driver_version),
		 "%s v%s",
		 ixgbe_driver_name,
		 UTS_RELEASE);
	/* Firmware Version */
	strlcpy(info->firmware_version, adapter->eeprom_id,
		sizeof(info->firmware_version));

	/* Model */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel 82599");
	} else if (hw->mac.type == ixgbe_mac_X550) {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel X550");
	} else {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel X540");
	}

	/* Model Description */
	snprintf(info->model_description,
		 sizeof(info->model_description),
		 "%s",
		 ixgbe_default_device_descr);

	return 0;
}

/**
 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
 * @adapter: pointer to the device adapter structure
 *
 * Return : TC that FCoE is mapped to
 */
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_IXGBE_DCB
	return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up);
#else
	return 0;
#endif
}