cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

enetc.c (72875B)


      1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
      2/* Copyright 2017-2019 NXP */
      3
      4#include "enetc.h"
      5#include <linux/bpf_trace.h>
      6#include <linux/tcp.h>
      7#include <linux/udp.h>
      8#include <linux/vmalloc.h>
      9#include <linux/ptp_classify.h>
     10#include <net/ip6_checksum.h>
     11#include <net/pkt_sched.h>
     12#include <net/tso.h>
     13
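        /* When any RX ring has an XDP program attached, the last
         * num_possible_cpus() TX rings appear to be reserved for XDP_TX /
         * XDP_REDIRECT (see enetc_xdp_xmit), so only the remaining rings are
         * exposed to the network stack.
         */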
     14static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
     15{
     16	int num_tx_rings = priv->num_tx_rings;
     17	int i;
     18
     19	for (i = 0; i < priv->num_rx_rings; i++)
     20		if (priv->rx_ring[i]->xdp.prog)
     21			return num_tx_rings - num_possible_cpus();
     22
     23	return num_tx_rings;
     24}
     25
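        /* priv->xdp_tx_ring presumably points at the first per-CPU XDP TX
         * ring inside priv->tx_ring[], so the pointer difference below is the
         * XDP TX ring's index, which matches the index of the RX ring it is
         * paired with.
         */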
     26static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv,
     27							struct enetc_bdr *tx_ring)
     28{
     29	int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;
     30
     31	return priv->rx_ring[index];
     32}
     33
     34static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
     35{
     36	if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
     37		return NULL;
     38
     39	return tx_swbd->skb;
     40}
     41
     42static struct xdp_frame *
     43enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
     44{
     45	if (tx_swbd->is_xdp_redirect)
     46		return tx_swbd->xdp_frame;
     47
     48	return NULL;
     49}
     50
     51static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
     52				struct enetc_tx_swbd *tx_swbd)
     53{
     54	/* For XDP_TX, pages come from RX, whereas for the other contexts where
      55	 * we have is_dma_page set, those come from skb_frag_dma_map. We need
     56	 * to match the DMA mapping length, so we need to differentiate those.
     57	 */
     58	if (tx_swbd->is_dma_page)
     59		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
     60			       tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len,
     61			       tx_swbd->dir);
     62	else
     63		dma_unmap_single(tx_ring->dev, tx_swbd->dma,
     64				 tx_swbd->len, tx_swbd->dir);
     65	tx_swbd->dma = 0;
     66}
     67
     68static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
     69				struct enetc_tx_swbd *tx_swbd)
     70{
     71	struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
     72	struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
     73
     74	if (tx_swbd->dma)
     75		enetc_unmap_tx_buff(tx_ring, tx_swbd);
     76
     77	if (xdp_frame) {
     78		xdp_return_frame(tx_swbd->xdp_frame);
     79		tx_swbd->xdp_frame = NULL;
     80	} else if (skb) {
     81		dev_kfree_skb_any(skb);
     82		tx_swbd->skb = NULL;
     83	}
     84}
     85
     86/* Let H/W know BD ring has been updated */
     87static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring)
     88{
     89	/* includes wmb() */
     90	enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
     91}
     92
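        /* Classify an outgoing PTP frame: report whether it is UDP
         * encapsulated, its message type, whether the two-step flag is set,
         * and the offsets (relative to the MAC header) of the correction
         * field and of the message body. Returns -EINVAL for non-PTP frames.
         */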
     93static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
     94			   u8 *msgtype, u8 *twostep,
     95			   u16 *correction_offset, u16 *body_offset)
     96{
     97	unsigned int ptp_class;
     98	struct ptp_header *hdr;
     99	unsigned int type;
    100	u8 *base;
    101
    102	ptp_class = ptp_classify_raw(skb);
    103	if (ptp_class == PTP_CLASS_NONE)
    104		return -EINVAL;
    105
    106	hdr = ptp_parse_header(skb, ptp_class);
    107	if (!hdr)
    108		return -EINVAL;
    109
    110	type = ptp_class & PTP_CLASS_PMASK;
    111	if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6)
    112		*udp = 1;
    113	else
    114		*udp = 0;
    115
    116	*msgtype = ptp_get_msgtype(hdr, ptp_class);
    117	*twostep = hdr->flag_field[0] & 0x2;
    118
    119	base = skb_mac_header(skb);
    120	*correction_offset = (u8 *)&hdr->correction - base;
    121	*body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
    122
    123	return 0;
    124}
    125
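        /* Map the skb head and each page fragment for DMA and fill one TX BD
         * per mapping. An extension BD is inserted after the first BD when
         * VLAN insertion or PTP timestamping is needed. Returns the number of
         * BDs used, or 0 after unwinding on a DMA mapping error.
         */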
    126static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
    127{
    128	bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
    129	struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
    130	struct enetc_hw *hw = &priv->si->hw;
    131	struct enetc_tx_swbd *tx_swbd;
    132	int len = skb_headlen(skb);
    133	union enetc_tx_bd temp_bd;
    134	u8 msgtype, twostep, udp;
    135	union enetc_tx_bd *txbd;
    136	u16 offset1, offset2;
    137	int i, count = 0;
    138	skb_frag_t *frag;
    139	unsigned int f;
    140	dma_addr_t dma;
    141	u8 flags = 0;
    142
    143	i = tx_ring->next_to_use;
    144	txbd = ENETC_TXBD(*tx_ring, i);
    145	prefetchw(txbd);
    146
    147	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
    148	if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
    149		goto dma_err;
    150
    151	temp_bd.addr = cpu_to_le64(dma);
    152	temp_bd.buf_len = cpu_to_le16(len);
    153	temp_bd.lstatus = 0;
    154
    155	tx_swbd = &tx_ring->tx_swbd[i];
    156	tx_swbd->dma = dma;
    157	tx_swbd->len = len;
    158	tx_swbd->is_dma_page = 0;
    159	tx_swbd->dir = DMA_TO_DEVICE;
    160	count++;
    161
    162	do_vlan = skb_vlan_tag_present(skb);
    163	if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
    164		if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
    165				    &offset2) ||
    166		    msgtype != PTP_MSGTYPE_SYNC || twostep)
    167			WARN_ONCE(1, "Bad packet for one-step timestamping\n");
    168		else
    169			do_onestep_tstamp = true;
    170	} else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
    171		do_twostep_tstamp = true;
    172	}
    173
    174	tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
    175	tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
    176	tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;
    177
    178	if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
    179		flags |= ENETC_TXBD_FLAGS_EX;
    180
    181	if (tx_ring->tsd_enable)
    182		flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
    183
    184	/* first BD needs frm_len and offload flags set */
    185	temp_bd.frm_len = cpu_to_le16(skb->len);
    186	temp_bd.flags = flags;
    187
    188	if (flags & ENETC_TXBD_FLAGS_TSE)
    189		temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
    190							  flags);
    191
    192	if (flags & ENETC_TXBD_FLAGS_EX) {
    193		u8 e_flags = 0;
    194		*txbd = temp_bd;
    195		enetc_clear_tx_bd(&temp_bd);
    196
    197		/* add extension BD for VLAN and/or timestamping */
    198		flags = 0;
    199		tx_swbd++;
    200		txbd++;
    201		i++;
    202		if (unlikely(i == tx_ring->bd_count)) {
    203			i = 0;
    204			tx_swbd = tx_ring->tx_swbd;
    205			txbd = ENETC_TXBD(*tx_ring, 0);
    206		}
    207		prefetchw(txbd);
    208
    209		if (do_vlan) {
    210			temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
    211			temp_bd.ext.tpid = 0; /* < C-TAG */
    212			e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
    213		}
    214
    215		if (do_onestep_tstamp) {
    216			u32 lo, hi, val;
    217			u64 sec, nsec;
    218			u8 *data;
    219
    220			lo = enetc_rd_hot(hw, ENETC_SICTR0);
    221			hi = enetc_rd_hot(hw, ENETC_SICTR1);
    222			sec = (u64)hi << 32 | lo;
    223			nsec = do_div(sec, 1000000000);
    224
    225			/* Configure extension BD */
    226			temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
    227			e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
    228
    229			/* Update originTimestamp field of Sync packet
    230			 * - 48 bits seconds field
     231			 * - 32 bits nanoseconds field
    232			 */
    233			data = skb_mac_header(skb);
    234			*(__be16 *)(data + offset2) =
    235				htons((sec >> 32) & 0xffff);
    236			*(__be32 *)(data + offset2 + 2) =
    237				htonl(sec & 0xffffffff);
    238			*(__be32 *)(data + offset2 + 6) = htonl(nsec);
    239
    240			/* Configure single-step register */
    241			val = ENETC_PM0_SINGLE_STEP_EN;
    242			val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
    243			if (udp)
    244				val |= ENETC_PM0_SINGLE_STEP_CH;
    245
    246			enetc_port_wr(hw, ENETC_PM0_SINGLE_STEP, val);
    247			enetc_port_wr(hw, ENETC_PM1_SINGLE_STEP, val);
    248		} else if (do_twostep_tstamp) {
    249			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
    250			e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
    251		}
    252
    253		temp_bd.ext.e_flags = e_flags;
    254		count++;
    255	}
    256
    257	frag = &skb_shinfo(skb)->frags[0];
    258	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
    259		len = skb_frag_size(frag);
    260		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
    261				       DMA_TO_DEVICE);
    262		if (dma_mapping_error(tx_ring->dev, dma))
    263			goto dma_err;
    264
    265		*txbd = temp_bd;
    266		enetc_clear_tx_bd(&temp_bd);
    267
    268		flags = 0;
    269		tx_swbd++;
    270		txbd++;
    271		i++;
    272		if (unlikely(i == tx_ring->bd_count)) {
    273			i = 0;
    274			tx_swbd = tx_ring->tx_swbd;
    275			txbd = ENETC_TXBD(*tx_ring, 0);
    276		}
    277		prefetchw(txbd);
    278
    279		temp_bd.addr = cpu_to_le64(dma);
    280		temp_bd.buf_len = cpu_to_le16(len);
    281
    282		tx_swbd->dma = dma;
    283		tx_swbd->len = len;
    284		tx_swbd->is_dma_page = 1;
    285		tx_swbd->dir = DMA_TO_DEVICE;
    286		count++;
    287	}
    288
    289	/* last BD needs 'F' bit set */
    290	flags |= ENETC_TXBD_FLAGS_F;
    291	temp_bd.flags = flags;
    292	*txbd = temp_bd;
    293
    294	tx_ring->tx_swbd[i].is_eof = true;
    295	tx_ring->tx_swbd[i].skb = skb;
    296
    297	enetc_bdr_idx_inc(tx_ring, &i);
    298	tx_ring->next_to_use = i;
    299
    300	skb_tx_timestamp(skb);
    301
    302	enetc_update_tx_ring_tail(tx_ring);
    303
    304	return count;
    305
    306dma_err:
    307	dev_err(tx_ring->dev, "DMA map error");
    308
    309	do {
    310		tx_swbd = &tx_ring->tx_swbd[i];
    311		enetc_free_tx_frame(tx_ring, tx_swbd);
    312		if (i == 0)
    313			i = tx_ring->bd_count;
    314		i--;
    315	} while (count--);
    316
    317	return 0;
    318}
    319
    320static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
    321				 struct enetc_tx_swbd *tx_swbd,
    322				 union enetc_tx_bd *txbd, int *i, int hdr_len,
    323				 int data_len)
    324{
    325	union enetc_tx_bd txbd_tmp;
    326	u8 flags = 0, e_flags = 0;
    327	dma_addr_t addr;
    328
    329	enetc_clear_tx_bd(&txbd_tmp);
    330	addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
    331
    332	if (skb_vlan_tag_present(skb))
    333		flags |= ENETC_TXBD_FLAGS_EX;
    334
    335	txbd_tmp.addr = cpu_to_le64(addr);
    336	txbd_tmp.buf_len = cpu_to_le16(hdr_len);
    337
    338	/* first BD needs frm_len and offload flags set */
    339	txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len);
    340	txbd_tmp.flags = flags;
    341
    342	/* For the TSO header we do not set the dma address since we do not
    343	 * want it unmapped when we do cleanup. We still set len so that we
    344	 * count the bytes sent.
    345	 */
    346	tx_swbd->len = hdr_len;
    347	tx_swbd->do_twostep_tstamp = false;
    348	tx_swbd->check_wb = false;
    349
    350	/* Actually write the header in the BD */
    351	*txbd = txbd_tmp;
    352
    353	/* Add extension BD for VLAN */
    354	if (flags & ENETC_TXBD_FLAGS_EX) {
    355		/* Get the next BD */
    356		enetc_bdr_idx_inc(tx_ring, i);
    357		txbd = ENETC_TXBD(*tx_ring, *i);
    358		tx_swbd = &tx_ring->tx_swbd[*i];
    359		prefetchw(txbd);
    360
    361		/* Setup the VLAN fields */
    362		enetc_clear_tx_bd(&txbd_tmp);
    363		txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
    364		txbd_tmp.ext.tpid = 0; /* < C-TAG */
    365		e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
    366
    367		/* Write the BD */
    368		txbd_tmp.ext.e_flags = e_flags;
    369		*txbd = txbd_tmp;
    370	}
    371}
    372
    373static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
    374				 struct enetc_tx_swbd *tx_swbd,
    375				 union enetc_tx_bd *txbd, char *data,
    376				 int size, bool last_bd)
    377{
    378	union enetc_tx_bd txbd_tmp;
    379	dma_addr_t addr;
    380	u8 flags = 0;
    381
    382	enetc_clear_tx_bd(&txbd_tmp);
    383
    384	addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
    385	if (unlikely(dma_mapping_error(tx_ring->dev, addr))) {
    386		netdev_err(tx_ring->ndev, "DMA map error\n");
    387		return -ENOMEM;
    388	}
    389
    390	if (last_bd) {
    391		flags |= ENETC_TXBD_FLAGS_F;
    392		tx_swbd->is_eof = 1;
    393	}
    394
    395	txbd_tmp.addr = cpu_to_le64(addr);
    396	txbd_tmp.buf_len = cpu_to_le16(size);
    397	txbd_tmp.flags = flags;
    398
    399	tx_swbd->dma = addr;
    400	tx_swbd->len = size;
    401	tx_swbd->dir = DMA_TO_DEVICE;
    402
    403	*txbd = txbd_tmp;
    404
    405	return 0;
    406}
    407
    408static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb,
    409				 char *hdr, int hdr_len, int *l4_hdr_len)
    410{
    411	char *l4_hdr = hdr + skb_transport_offset(skb);
    412	int mac_hdr_len = skb_network_offset(skb);
    413
    414	if (tso->tlen != sizeof(struct udphdr)) {
    415		struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
    416
    417		tcph->check = 0;
    418	} else {
    419		struct udphdr *udph = (struct udphdr *)(l4_hdr);
    420
    421		udph->check = 0;
    422	}
    423
    424	/* Compute the IP checksum. This is necessary since tso_build_hdr()
    425	 * already incremented the IP ID field.
    426	 */
    427	if (!tso->ipv6) {
    428		struct iphdr *iph = (void *)(hdr + mac_hdr_len);
    429
    430		iph->check = 0;
    431		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
    432	}
    433
    434	/* Compute the checksum over the L4 header. */
    435	*l4_hdr_len = hdr_len - skb_transport_offset(skb);
    436	return csum_partial(l4_hdr, *l4_hdr_len, 0);
    437}
    438
    439static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso,
    440				    struct sk_buff *skb, char *hdr, int len,
    441				    __wsum sum)
    442{
    443	char *l4_hdr = hdr + skb_transport_offset(skb);
    444	__sum16 csum_final;
    445
    446	/* Complete the L4 checksum by appending the pseudo-header to the
    447	 * already computed checksum.
    448	 */
    449	if (!tso->ipv6)
    450		csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr,
    451					       ip_hdr(skb)->daddr,
    452					       len, ip_hdr(skb)->protocol, sum);
    453	else
    454		csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
    455					     &ipv6_hdr(skb)->daddr,
    456					     len, ipv6_hdr(skb)->nexthdr, sum);
    457
    458	if (tso->tlen != sizeof(struct udphdr)) {
    459		struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
    460
    461		tcph->check = csum_final;
    462	} else {
    463		struct udphdr *udph = (struct udphdr *)(l4_hdr);
    464
    465		udph->check = csum_final;
    466	}
    467}
    468
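        /* Software TSO: tso_start() parses the headers, then each gso_size'd
         * segment gets a header BD (built in the ring's coherent tso_headers
         * buffer) followed by data BDs. The L4 checksum (and the IPv4 header
         * checksum) is completed in software, since segmentation is done by
         * the driver rather than by the hardware.
         */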
    469static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
    470{
    471	int hdr_len, total_len, data_len;
    472	struct enetc_tx_swbd *tx_swbd;
    473	union enetc_tx_bd *txbd;
    474	struct tso_t tso;
    475	__wsum csum, csum2;
    476	int count = 0, pos;
    477	int err, i, bd_data_num;
    478
    479	/* Initialize the TSO handler, and prepare the first payload */
    480	hdr_len = tso_start(skb, &tso);
    481	total_len = skb->len - hdr_len;
    482	i = tx_ring->next_to_use;
    483
    484	while (total_len > 0) {
    485		char *hdr;
    486
    487		/* Get the BD */
    488		txbd = ENETC_TXBD(*tx_ring, i);
    489		tx_swbd = &tx_ring->tx_swbd[i];
    490		prefetchw(txbd);
    491
    492		/* Determine the length of this packet */
    493		data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len);
    494		total_len -= data_len;
    495
    496		/* prepare packet headers: MAC + IP + TCP */
    497		hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE;
    498		tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0);
    499
    500		/* compute the csum over the L4 header */
    501		csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
    502		enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
    503		bd_data_num = 0;
    504		count++;
    505
    506		while (data_len > 0) {
    507			int size;
    508
    509			size = min_t(int, tso.size, data_len);
    510
    511			/* Advance the index in the BDR */
    512			enetc_bdr_idx_inc(tx_ring, &i);
    513			txbd = ENETC_TXBD(*tx_ring, i);
    514			tx_swbd = &tx_ring->tx_swbd[i];
    515			prefetchw(txbd);
    516
    517			/* Compute the checksum over this segment of data and
    518			 * add it to the csum already computed (over the L4
    519			 * header and possible other data segments).
    520			 */
    521			csum2 = csum_partial(tso.data, size, 0);
    522			csum = csum_block_add(csum, csum2, pos);
    523			pos += size;
    524
    525			err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
    526						    tso.data, size,
    527						    size == data_len);
    528			if (err)
    529				goto err_map_data;
    530
    531			data_len -= size;
    532			count++;
    533			bd_data_num++;
    534			tso_build_data(skb, &tso, size);
    535
    536			if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
    537				goto err_chained_bd;
    538		}
    539
    540		enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum);
    541
    542		if (total_len == 0)
    543			tx_swbd->skb = skb;
    544
    545		/* Go to the next BD */
    546		enetc_bdr_idx_inc(tx_ring, &i);
    547	}
    548
    549	tx_ring->next_to_use = i;
    550	enetc_update_tx_ring_tail(tx_ring);
    551
    552	return count;
    553
    554err_map_data:
    555	dev_err(tx_ring->dev, "DMA map error");
    556
    557err_chained_bd:
    558	do {
    559		tx_swbd = &tx_ring->tx_swbd[i];
    560		enetc_free_tx_frame(tx_ring, tx_swbd);
    561		if (i == 0)
    562			i = tx_ring->bd_count;
    563		i--;
    564	} while (count--);
    565
    566	return 0;
    567}
    568
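        /* Common transmit path: serialize one-step PTP Sync packets via the
         * ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS bit (queueing them while one is
         * in flight), check for enough free BDs, then hand the skb to the TSO
         * or the regular mapping routine under the MDIO lock.
         */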
    569static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
    570				    struct net_device *ndev)
    571{
    572	struct enetc_ndev_priv *priv = netdev_priv(ndev);
    573	struct enetc_bdr *tx_ring;
    574	int count, err;
    575
    576	/* Queue one-step Sync packet if already locked */
    577	if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
    578		if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
    579					  &priv->flags)) {
    580			skb_queue_tail(&priv->tx_skbs, skb);
    581			return NETDEV_TX_OK;
    582		}
    583	}
    584
    585	tx_ring = priv->tx_ring[skb->queue_mapping];
    586
    587	if (skb_is_gso(skb)) {
    588		if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
    589			netif_stop_subqueue(ndev, tx_ring->index);
    590			return NETDEV_TX_BUSY;
    591		}
    592
    593		enetc_lock_mdio();
    594		count = enetc_map_tx_tso_buffs(tx_ring, skb);
    595		enetc_unlock_mdio();
    596	} else {
    597		if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
    598			if (unlikely(skb_linearize(skb)))
    599				goto drop_packet_err;
    600
    601		count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
    602		if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
    603			netif_stop_subqueue(ndev, tx_ring->index);
    604			return NETDEV_TX_BUSY;
    605		}
    606
    607		if (skb->ip_summed == CHECKSUM_PARTIAL) {
    608			err = skb_checksum_help(skb);
    609			if (err)
    610				goto drop_packet_err;
    611		}
    612		enetc_lock_mdio();
    613		count = enetc_map_tx_buffs(tx_ring, skb);
    614		enetc_unlock_mdio();
    615	}
    616
    617	if (unlikely(!count))
    618		goto drop_packet_err;
    619
    620	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
    621		netif_stop_subqueue(ndev, tx_ring->index);
    622
    623	return NETDEV_TX_OK;
    624
    625drop_packet_err:
    626	dev_kfree_skb_any(skb);
    627	return NETDEV_TX_OK;
    628}
    629
    630netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
    631{
    632	struct enetc_ndev_priv *priv = netdev_priv(ndev);
    633	u8 udp, msgtype, twostep;
    634	u16 offset1, offset2;
    635
     636	/* Mark tx timestamp type on skb->cb[0] if required */
    637	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
    638	    (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
    639		skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
    640	} else {
    641		skb->cb[0] = 0;
    642	}
    643
    644	/* Fall back to two-step timestamp if not one-step Sync packet */
    645	if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
    646		if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
    647				    &offset1, &offset2) ||
    648		    msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
    649			skb->cb[0] = ENETC_F_TX_TSTAMP;
    650	}
    651
    652	return enetc_start_xmit(skb, ndev);
    653}
    654
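        /* MSI-X handler: disable the RX and TX BD ring interrupts of this
         * vector (the RICR1 write appears to re-arm the RX interrupt
         * coalescing threshold) and defer the actual work to NAPI.
         */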
    655static irqreturn_t enetc_msix(int irq, void *data)
    656{
    657	struct enetc_int_vector	*v = data;
    658	int i;
    659
    660	enetc_lock_mdio();
    661
    662	/* disable interrupts */
    663	enetc_wr_reg_hot(v->rbier, 0);
    664	enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
    665
    666	for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
    667		enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
    668
    669	enetc_unlock_mdio();
    670
    671	napi_schedule(&v->napi);
    672
    673	return IRQ_HANDLED;
    674}
    675
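        /* Dynamic interrupt moderation: net_dim() picks a moderation profile
         * from the RX packet and byte rates sampled in enetc_rx_net_dim();
         * this work item converts the chosen usec value into ENETC timer
         * cycles (v->rx_ictt), which the interrupt handler writes to RICR1.
         */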
    676static void enetc_rx_dim_work(struct work_struct *w)
    677{
    678	struct dim *dim = container_of(w, struct dim, work);
    679	struct dim_cq_moder moder =
    680		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
    681	struct enetc_int_vector	*v =
    682		container_of(dim, struct enetc_int_vector, rx_dim);
    683
    684	v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
    685	dim->state = DIM_START_MEASURE;
    686}
    687
    688static void enetc_rx_net_dim(struct enetc_int_vector *v)
    689{
    690	struct dim_sample dim_sample = {};
    691
    692	v->comp_cnt++;
    693
    694	if (!v->rx_napi_work)
    695		return;
    696
    697	dim_update_sample(v->comp_cnt,
    698			  v->rx_ring.stats.packets,
    699			  v->rx_ring.stats.bytes,
    700			  &dim_sample);
    701	net_dim(&v->rx_dim, dim_sample);
    702}
    703
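        /* Number of TX BDs the hardware has completed past the driver's
         * clean index ci, based on the consumer index read from tcir,
         * accounting for ring wrap-around.
         */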
    704static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
    705{
    706	int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
    707
    708	return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
    709}
    710
    711static bool enetc_page_reusable(struct page *page)
    712{
    713	return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
    714}
    715
    716static void enetc_reuse_page(struct enetc_bdr *rx_ring,
    717			     struct enetc_rx_swbd *old)
    718{
    719	struct enetc_rx_swbd *new;
    720
    721	new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
    722
    723	/* next buf that may reuse a page */
    724	enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
    725
    726	/* copy page reference */
    727	*new = *old;
    728}
    729
    730static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
    731				u64 *tstamp)
    732{
    733	u32 lo, hi, tstamp_lo;
    734
    735	lo = enetc_rd_hot(hw, ENETC_SICTR0);
    736	hi = enetc_rd_hot(hw, ENETC_SICTR1);
    737	tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
    738	if (lo <= tstamp_lo)
    739		hi -= 1;
    740	*tstamp = (u64)hi << 32 | tstamp_lo;
    741}
    742
    743static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
    744{
    745	struct skb_shared_hwtstamps shhwtstamps;
    746
    747	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
    748		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
    749		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
    750		skb_txtime_consumed(skb);
    751		skb_tstamp_tx(skb, &shhwtstamps);
    752	}
    753}
    754
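        /* A transmitted XDP_TX buffer still holds an RX page mapped
         * DMA_BIDIRECTIONAL; try to give it back to the paired RX ring so it
         * can be reused without a new allocation. If the RX ring has no free
         * software BDs, unmap and free the page instead.
         */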
    755static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
    756				      struct enetc_tx_swbd *tx_swbd)
    757{
    758	struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
    759	struct enetc_rx_swbd rx_swbd = {
    760		.dma = tx_swbd->dma,
    761		.page = tx_swbd->page,
    762		.page_offset = tx_swbd->page_offset,
    763		.dir = tx_swbd->dir,
    764		.len = tx_swbd->len,
    765	};
    766	struct enetc_bdr *rx_ring;
    767
    768	rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring);
    769
    770	if (likely(enetc_swbd_unused(rx_ring))) {
    771		enetc_reuse_page(rx_ring, &rx_swbd);
    772
    773		/* sync for use by the device */
    774		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
    775						 rx_swbd.page_offset,
    776						 ENETC_RXB_DMA_SIZE_XDP,
    777						 rx_swbd.dir);
    778
    779		rx_ring->stats.recycles++;
    780	} else {
    781		/* RX ring is already full, we need to unmap and free the
    782		 * page, since there's nothing useful we can do with it.
    783		 */
    784		rx_ring->stats.recycle_failures++;
    785
    786		dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
    787			       rx_swbd.dir);
    788		__free_page(rx_swbd.page);
    789	}
    790
    791	rx_ring->xdp.xdp_tx_in_flight--;
    792}
    793
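        /* TX completion: walk the BDs the hardware has finished with, recycle
         * XDP_TX pages, unmap and free skbs / XDP frames, pick up two-step
         * timestamps and Qbv window-drop counts, and wake the subqueue once
         * enough BDs are free again. Returns true when the work budget was
         * not exhausted.
         */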
    794static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
    795{
    796	int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0;
    797	struct net_device *ndev = tx_ring->ndev;
    798	struct enetc_ndev_priv *priv = netdev_priv(ndev);
    799	struct enetc_tx_swbd *tx_swbd;
    800	int i, bds_to_clean;
    801	bool do_twostep_tstamp;
    802	u64 tstamp = 0;
    803
    804	i = tx_ring->next_to_clean;
    805	tx_swbd = &tx_ring->tx_swbd[i];
    806
    807	bds_to_clean = enetc_bd_ready_count(tx_ring, i);
    808
    809	do_twostep_tstamp = false;
    810
    811	while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
    812		struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
    813		struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
    814		bool is_eof = tx_swbd->is_eof;
    815
    816		if (unlikely(tx_swbd->check_wb)) {
    817			union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
    818
    819			if (txbd->flags & ENETC_TXBD_FLAGS_W &&
    820			    tx_swbd->do_twostep_tstamp) {
    821				enetc_get_tx_tstamp(&priv->si->hw, txbd,
    822						    &tstamp);
    823				do_twostep_tstamp = true;
    824			}
    825
    826			if (tx_swbd->qbv_en &&
    827			    txbd->wb.status & ENETC_TXBD_STATS_WIN)
    828				tx_win_drop++;
    829		}
    830
    831		if (tx_swbd->is_xdp_tx)
    832			enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
    833		else if (likely(tx_swbd->dma))
    834			enetc_unmap_tx_buff(tx_ring, tx_swbd);
    835
    836		if (xdp_frame) {
    837			xdp_return_frame(xdp_frame);
    838		} else if (skb) {
    839			if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
     840				/* Start work to release the lock for the next
     841				 * one-step timestamping packet, and to send one
     842				 * skb from the tx_skbs queue if there is one.
    843				 */
    844				schedule_work(&priv->tx_onestep_tstamp);
    845			} else if (unlikely(do_twostep_tstamp)) {
    846				enetc_tstamp_tx(skb, tstamp);
    847				do_twostep_tstamp = false;
    848			}
    849			napi_consume_skb(skb, napi_budget);
    850		}
    851
    852		tx_byte_cnt += tx_swbd->len;
    853		/* Scrub the swbd here so we don't have to do that
    854		 * when we reuse it during xmit
    855		 */
    856		memset(tx_swbd, 0, sizeof(*tx_swbd));
    857
    858		bds_to_clean--;
    859		tx_swbd++;
    860		i++;
    861		if (unlikely(i == tx_ring->bd_count)) {
    862			i = 0;
    863			tx_swbd = tx_ring->tx_swbd;
    864		}
    865
    866		/* BD iteration loop end */
    867		if (is_eof) {
    868			tx_frm_cnt++;
    869			/* re-arm interrupt source */
    870			enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
    871					 BIT(16 + tx_ring->index));
    872		}
    873
    874		if (unlikely(!bds_to_clean))
    875			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
    876	}
    877
    878	tx_ring->next_to_clean = i;
    879	tx_ring->stats.packets += tx_frm_cnt;
    880	tx_ring->stats.bytes += tx_byte_cnt;
    881	tx_ring->stats.win_drop += tx_win_drop;
    882
    883	if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
    884		     __netif_subqueue_stopped(ndev, tx_ring->index) &&
    885		     (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
    886		netif_wake_subqueue(ndev, tx_ring->index);
    887	}
    888
    889	return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
    890}
    891
    892static bool enetc_new_page(struct enetc_bdr *rx_ring,
    893			   struct enetc_rx_swbd *rx_swbd)
    894{
    895	bool xdp = !!(rx_ring->xdp.prog);
    896	struct page *page;
    897	dma_addr_t addr;
    898
    899	page = dev_alloc_page();
    900	if (unlikely(!page))
    901		return false;
    902
    903	/* For XDP_TX, we forgo dma_unmap -> dma_map */
    904	rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
    905
    906	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
    907	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
    908		__free_page(page);
    909
    910		return false;
    911	}
    912
    913	rx_swbd->dma = addr;
    914	rx_swbd->page = page;
    915	rx_swbd->page_offset = rx_ring->buffer_offset;
    916
    917	return true;
    918}
    919
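        /* Re-arm up to buff_cnt RX BDs, allocating fresh pages only for
         * software BDs that lost theirs, then update the ring's consumer
         * index register (rcir) so the hardware can use the new buffers.
         * Returns the number of BDs refilled.
         */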
    920static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
    921{
    922	struct enetc_rx_swbd *rx_swbd;
    923	union enetc_rx_bd *rxbd;
    924	int i, j;
    925
    926	i = rx_ring->next_to_use;
    927	rx_swbd = &rx_ring->rx_swbd[i];
    928	rxbd = enetc_rxbd(rx_ring, i);
    929
    930	for (j = 0; j < buff_cnt; j++) {
     931		/* try to reuse the page */
    932		if (unlikely(!rx_swbd->page)) {
    933			if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
    934				rx_ring->stats.rx_alloc_errs++;
    935				break;
    936			}
    937		}
    938
    939		/* update RxBD */
    940		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
    941					   rx_swbd->page_offset);
     942		/* clear 'R' as well */
    943		rxbd->r.lstatus = 0;
    944
    945		enetc_rxbd_next(rx_ring, &rxbd, &i);
    946		rx_swbd = &rx_ring->rx_swbd[i];
    947	}
    948
    949	if (likely(j)) {
     950		rx_ring->next_to_alloc = i; /* keep track for page reuse */
    951		rx_ring->next_to_use = i;
    952
    953		/* update ENETC's consumer index */
    954		enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
    955	}
    956
    957	return j;
    958}
    959
    960#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
    961static void enetc_get_rx_tstamp(struct net_device *ndev,
    962				union enetc_rx_bd *rxbd,
    963				struct sk_buff *skb)
    964{
    965	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
    966	struct enetc_ndev_priv *priv = netdev_priv(ndev);
    967	struct enetc_hw *hw = &priv->si->hw;
    968	u32 lo, hi, tstamp_lo;
    969	u64 tstamp;
    970
    971	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
    972		lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
    973		hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
    974		rxbd = enetc_rxbd_ext(rxbd);
    975		tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
    976		if (lo <= tstamp_lo)
    977			hi -= 1;
    978
    979		tstamp = (u64)hi << 32 | tstamp_lo;
    980		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
    981		shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
    982	}
    983}
    984#endif
    985
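        /* Propagate RX offload results from the BD write-back: the checksum
         * (when RXCSUM is enabled), the VLAN tag with its TPID decoded from
         * the 2-bit index (C-TAG, S-TAG, or the custom TPIDs in PCVLANR1/2),
         * and the RX hardware timestamp when enabled.
         */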
    986static void enetc_get_offloads(struct enetc_bdr *rx_ring,
    987			       union enetc_rx_bd *rxbd, struct sk_buff *skb)
    988{
    989	struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
    990
    991	/* TODO: hashing */
    992	if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
    993		u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
    994
    995		skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
    996		skb->ip_summed = CHECKSUM_COMPLETE;
    997	}
    998
    999	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
   1000		__be16 tpid = 0;
   1001
   1002		switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
   1003		case 0:
   1004			tpid = htons(ETH_P_8021Q);
   1005			break;
   1006		case 1:
   1007			tpid = htons(ETH_P_8021AD);
   1008			break;
   1009		case 2:
   1010			tpid = htons(enetc_port_rd(&priv->si->hw,
   1011						   ENETC_PCVLANR1));
   1012			break;
   1013		case 3:
   1014			tpid = htons(enetc_port_rd(&priv->si->hw,
   1015						   ENETC_PCVLANR2));
   1016			break;
   1017		default:
   1018			break;
   1019		}
   1020
   1021		__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
   1022	}
   1023
   1024#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
   1025	if (priv->active_offloads & ENETC_F_RX_TSTAMP)
   1026		enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
   1027#endif
   1028}
   1029
   1030/* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
   1031 * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL
   1032 * mapped buffers.
   1033 */
   1034static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
   1035					       int i, u16 size)
   1036{
   1037	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
   1038
   1039	dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
   1040				      rx_swbd->page_offset,
   1041				      size, rx_swbd->dir);
   1042	return rx_swbd;
   1043}
   1044
   1045/* Reuse the current page without performing half-page buffer flipping */
   1046static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
   1047			      struct enetc_rx_swbd *rx_swbd)
   1048{
   1049	size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset;
   1050
   1051	enetc_reuse_page(rx_ring, rx_swbd);
   1052
   1053	dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
   1054					 rx_swbd->page_offset,
   1055					 buffer_size, rx_swbd->dir);
   1056
   1057	rx_swbd->page = NULL;
   1058}
   1059
   1060/* Reuse the current page by performing half-page buffer flipping */
   1061static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
   1062			       struct enetc_rx_swbd *rx_swbd)
   1063{
   1064	if (likely(enetc_page_reusable(rx_swbd->page))) {
   1065		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
   1066		page_ref_inc(rx_swbd->page);
   1067
   1068		enetc_put_rx_buff(rx_ring, rx_swbd);
   1069	} else {
   1070		dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
   1071			       rx_swbd->dir);
   1072		rx_swbd->page = NULL;
   1073	}
   1074}
   1075
   1076static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
   1077						int i, u16 size)
   1078{
   1079	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
   1080	struct sk_buff *skb;
   1081	void *ba;
   1082
   1083	ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
   1084	skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE);
   1085	if (unlikely(!skb)) {
   1086		rx_ring->stats.rx_alloc_errs++;
   1087		return NULL;
   1088	}
   1089
   1090	skb_reserve(skb, rx_ring->buffer_offset);
   1091	__skb_put(skb, size);
   1092
   1093	enetc_flip_rx_buff(rx_ring, rx_swbd);
   1094
   1095	return skb;
   1096}
   1097
   1098static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
   1099				     u16 size, struct sk_buff *skb)
   1100{
   1101	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
   1102
   1103	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
   1104			rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
   1105
   1106	enetc_flip_rx_buff(rx_ring, rx_swbd);
   1107}
   1108
   1109static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
   1110					      u32 bd_status,
   1111					      union enetc_rx_bd **rxbd, int *i)
   1112{
   1113	if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))))
   1114		return false;
   1115
   1116	enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
   1117	enetc_rxbd_next(rx_ring, rxbd, i);
   1118
   1119	while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
   1120		dma_rmb();
   1121		bd_status = le32_to_cpu((*rxbd)->r.lstatus);
   1122
   1123		enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
   1124		enetc_rxbd_next(rx_ring, rxbd, i);
   1125	}
   1126
   1127	rx_ring->ndev->stats.rx_dropped++;
   1128	rx_ring->ndev->stats.rx_errors++;
   1129
   1130	return true;
   1131}
   1132
   1133static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
   1134				       u32 bd_status, union enetc_rx_bd **rxbd,
   1135				       int *i, int *cleaned_cnt, int buffer_size)
   1136{
   1137	struct sk_buff *skb;
   1138	u16 size;
   1139
   1140	size = le16_to_cpu((*rxbd)->r.buf_len);
   1141	skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size);
   1142	if (!skb)
   1143		return NULL;
   1144
   1145	enetc_get_offloads(rx_ring, *rxbd, skb);
   1146
   1147	(*cleaned_cnt)++;
   1148
   1149	enetc_rxbd_next(rx_ring, rxbd, i);
   1150
   1151	/* not last BD in frame? */
   1152	while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
   1153		bd_status = le32_to_cpu((*rxbd)->r.lstatus);
   1154		size = buffer_size;
   1155
   1156		if (bd_status & ENETC_RXBD_LSTATUS_F) {
   1157			dma_rmb();
   1158			size = le16_to_cpu((*rxbd)->r.buf_len);
   1159		}
   1160
   1161		enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);
   1162
   1163		(*cleaned_cnt)++;
   1164
   1165		enetc_rxbd_next(rx_ring, rxbd, i);
   1166	}
   1167
   1168	skb_record_rx_queue(skb, rx_ring->index);
   1169	skb->protocol = eth_type_trans(skb, rx_ring->ndev);
   1170
   1171	return skb;
   1172}
   1173
   1174#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
   1175
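        /* Non-XDP RX path: refill the ring in bundles of ENETC_RXBD_BUNDLE,
         * build one skb per received frame (which may span several BDs),
         * attach offload results and hand the skb to GRO, up to work_limit
         * frames per poll.
         */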
   1176static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
   1177			       struct napi_struct *napi, int work_limit)
   1178{
   1179	int rx_frm_cnt = 0, rx_byte_cnt = 0;
   1180	int cleaned_cnt, i;
   1181
   1182	cleaned_cnt = enetc_bd_unused(rx_ring);
   1183	/* next descriptor to process */
   1184	i = rx_ring->next_to_clean;
   1185
   1186	while (likely(rx_frm_cnt < work_limit)) {
   1187		union enetc_rx_bd *rxbd;
   1188		struct sk_buff *skb;
   1189		u32 bd_status;
   1190
   1191		if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
   1192			cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
   1193							    cleaned_cnt);
   1194
   1195		rxbd = enetc_rxbd(rx_ring, i);
   1196		bd_status = le32_to_cpu(rxbd->r.lstatus);
   1197		if (!bd_status)
   1198			break;
   1199
   1200		enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
   1201		dma_rmb(); /* for reading other rxbd fields */
   1202
   1203		if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
   1204						      &rxbd, &i))
   1205			break;
   1206
   1207		skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
   1208				      &cleaned_cnt, ENETC_RXB_DMA_SIZE);
   1209		if (!skb)
   1210			break;
   1211
   1212		rx_byte_cnt += skb->len;
   1213		rx_frm_cnt++;
   1214
   1215		napi_gro_receive(napi, skb);
   1216	}
   1217
   1218	rx_ring->next_to_clean = i;
   1219
   1220	rx_ring->stats.packets += rx_frm_cnt;
   1221	rx_ring->stats.bytes += rx_byte_cnt;
   1222
   1223	return rx_frm_cnt;
   1224}
   1225
   1226static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i,
   1227				  struct enetc_tx_swbd *tx_swbd,
   1228				  int frm_len)
   1229{
   1230	union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
   1231
   1232	prefetchw(txbd);
   1233
   1234	enetc_clear_tx_bd(txbd);
   1235	txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset);
   1236	txbd->buf_len = cpu_to_le16(tx_swbd->len);
   1237	txbd->frm_len = cpu_to_le16(frm_len);
   1238
   1239	memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd));
   1240}
   1241
   1242/* Puts in the TX ring one XDP frame, mapped as an array of TX software buffer
   1243 * descriptors.
   1244 */
   1245static bool enetc_xdp_tx(struct enetc_bdr *tx_ring,
   1246			 struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd)
   1247{
   1248	struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr;
   1249	int i, k, frm_len = tmp_tx_swbd->len;
   1250
   1251	if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd)))
   1252		return false;
   1253
   1254	while (unlikely(!tmp_tx_swbd->is_eof)) {
   1255		tmp_tx_swbd++;
   1256		frm_len += tmp_tx_swbd->len;
   1257	}
   1258
   1259	i = tx_ring->next_to_use;
   1260
   1261	for (k = 0; k < num_tx_swbd; k++) {
   1262		struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k];
   1263
   1264		enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len);
   1265
   1266		/* last BD needs 'F' bit set */
   1267		if (xdp_tx_swbd->is_eof) {
   1268			union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
   1269
   1270			txbd->flags = ENETC_TXBD_FLAGS_F;
   1271		}
   1272
   1273		enetc_bdr_idx_inc(tx_ring, &i);
   1274	}
   1275
   1276	tx_ring->next_to_use = i;
   1277
   1278	return true;
   1279}
   1280
   1281static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
   1282					  struct enetc_tx_swbd *xdp_tx_arr,
   1283					  struct xdp_frame *xdp_frame)
   1284{
   1285	struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0];
   1286	struct skb_shared_info *shinfo;
   1287	void *data = xdp_frame->data;
   1288	int len = xdp_frame->len;
   1289	skb_frag_t *frag;
   1290	dma_addr_t dma;
   1291	unsigned int f;
   1292	int n = 0;
   1293
   1294	dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
   1295	if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
   1296		netdev_err(tx_ring->ndev, "DMA map error\n");
   1297		return -1;
   1298	}
   1299
   1300	xdp_tx_swbd->dma = dma;
   1301	xdp_tx_swbd->dir = DMA_TO_DEVICE;
   1302	xdp_tx_swbd->len = len;
   1303	xdp_tx_swbd->is_xdp_redirect = true;
   1304	xdp_tx_swbd->is_eof = false;
   1305	xdp_tx_swbd->xdp_frame = NULL;
   1306
   1307	n++;
   1308	xdp_tx_swbd = &xdp_tx_arr[n];
   1309
   1310	shinfo = xdp_get_shared_info_from_frame(xdp_frame);
   1311
   1312	for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;
   1313	     f++, frag++) {
   1314		data = skb_frag_address(frag);
   1315		len = skb_frag_size(frag);
   1316
   1317		dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
   1318		if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
   1319			/* Undo the DMA mapping for all fragments */
   1320			while (--n >= 0)
   1321				enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]);
   1322
   1323			netdev_err(tx_ring->ndev, "DMA map error\n");
   1324			return -1;
   1325		}
   1326
   1327		xdp_tx_swbd->dma = dma;
   1328		xdp_tx_swbd->dir = DMA_TO_DEVICE;
   1329		xdp_tx_swbd->len = len;
   1330		xdp_tx_swbd->is_xdp_redirect = true;
   1331		xdp_tx_swbd->is_eof = false;
   1332		xdp_tx_swbd->xdp_frame = NULL;
   1333
   1334		n++;
   1335		xdp_tx_swbd = &xdp_tx_arr[n];
   1336	}
   1337
   1338	xdp_tx_arr[n - 1].is_eof = true;
   1339	xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
   1340
   1341	return n;
   1342}
   1343
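        /* ndo_xdp_xmit handler: convert each redirected xdp_frame into TX
         * software BDs on the current CPU's XDP TX ring. The tail register is
         * only written when XDP_XMIT_FLUSH is set or when not every frame
         * could be enqueued.
         */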
   1344int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
   1345		   struct xdp_frame **frames, u32 flags)
   1346{
   1347	struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0};
   1348	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   1349	struct enetc_bdr *tx_ring;
   1350	int xdp_tx_bd_cnt, i, k;
   1351	int xdp_tx_frm_cnt = 0;
   1352
   1353	enetc_lock_mdio();
   1354
   1355	tx_ring = priv->xdp_tx_ring[smp_processor_id()];
   1356
   1357	prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use));
   1358
   1359	for (k = 0; k < num_frames; k++) {
   1360		xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring,
   1361							       xdp_redirect_arr,
   1362							       frames[k]);
   1363		if (unlikely(xdp_tx_bd_cnt < 0))
   1364			break;
   1365
   1366		if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr,
   1367					   xdp_tx_bd_cnt))) {
   1368			for (i = 0; i < xdp_tx_bd_cnt; i++)
   1369				enetc_unmap_tx_buff(tx_ring,
   1370						    &xdp_redirect_arr[i]);
   1371			tx_ring->stats.xdp_tx_drops++;
   1372			break;
   1373		}
   1374
   1375		xdp_tx_frm_cnt++;
   1376	}
   1377
   1378	if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt))
   1379		enetc_update_tx_ring_tail(tx_ring);
   1380
   1381	tx_ring->stats.xdp_tx += xdp_tx_frm_cnt;
   1382
   1383	enetc_unlock_mdio();
   1384
   1385	return xdp_tx_frm_cnt;
   1386}
   1387
   1388static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
   1389				     struct xdp_buff *xdp_buff, u16 size)
   1390{
   1391	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
   1392	void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
   1393	struct skb_shared_info *shinfo;
   1394
   1395	/* To be used for XDP_TX */
   1396	rx_swbd->len = size;
   1397
   1398	xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
   1399			 rx_ring->buffer_offset, size, false);
   1400
   1401	shinfo = xdp_get_shared_info_from_buff(xdp_buff);
   1402	shinfo->nr_frags = 0;
   1403}
   1404
   1405static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
   1406				     u16 size, struct xdp_buff *xdp_buff)
   1407{
   1408	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
   1409	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
   1410	skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];
   1411
   1412	/* To be used for XDP_TX */
   1413	rx_swbd->len = size;
   1414
   1415	skb_frag_off_set(frag, rx_swbd->page_offset);
   1416	skb_frag_size_set(frag, size);
   1417	__skb_frag_set_page(frag, rx_swbd->page);
   1418
   1419	shinfo->nr_frags++;
   1420}
   1421
   1422static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
   1423				 union enetc_rx_bd **rxbd, int *i,
   1424				 int *cleaned_cnt, struct xdp_buff *xdp_buff)
   1425{
   1426	u16 size = le16_to_cpu((*rxbd)->r.buf_len);
   1427
   1428	xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);
   1429
   1430	enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size);
   1431	(*cleaned_cnt)++;
   1432	enetc_rxbd_next(rx_ring, rxbd, i);
   1433
   1434	/* not last BD in frame? */
   1435	while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
   1436		bd_status = le32_to_cpu((*rxbd)->r.lstatus);
   1437		size = ENETC_RXB_DMA_SIZE_XDP;
   1438
   1439		if (bd_status & ENETC_RXBD_LSTATUS_F) {
   1440			dma_rmb();
   1441			size = le16_to_cpu((*rxbd)->r.buf_len);
   1442		}
   1443
   1444		enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff);
   1445		(*cleaned_cnt)++;
   1446		enetc_rxbd_next(rx_ring, rxbd, i);
   1447	}
   1448}
   1449
   1450/* Convert RX buffer descriptors to TX buffer descriptors. These will be
   1451 * recycled back into the RX ring in enetc_clean_tx_ring.
   1452 */
   1453static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr,
   1454					struct enetc_bdr *rx_ring,
   1455					int rx_ring_first, int rx_ring_last)
   1456{
   1457	int n = 0;
   1458
   1459	for (; rx_ring_first != rx_ring_last;
   1460	     n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) {
   1461		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
   1462		struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];
   1463
   1464		/* No need to dma_map, we already have DMA_BIDIRECTIONAL */
   1465		tx_swbd->dma = rx_swbd->dma;
   1466		tx_swbd->dir = rx_swbd->dir;
   1467		tx_swbd->page = rx_swbd->page;
   1468		tx_swbd->page_offset = rx_swbd->page_offset;
   1469		tx_swbd->len = rx_swbd->len;
   1470		tx_swbd->is_dma_page = true;
   1471		tx_swbd->is_xdp_tx = true;
   1472		tx_swbd->is_eof = false;
   1473	}
   1474
    1475	/* We rely on the caller providing rx_ring_last > rx_ring_first */
   1476	xdp_tx_arr[n - 1].is_eof = true;
   1477
   1478	return n;
   1479}
   1480
   1481static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
   1482			   int rx_ring_last)
   1483{
   1484	while (rx_ring_first != rx_ring_last) {
   1485		enetc_put_rx_buff(rx_ring,
   1486				  &rx_ring->rx_swbd[rx_ring_first]);
   1487		enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
   1488	}
   1489	rx_ring->stats.xdp_drops++;
   1490}
   1491
   1492static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
   1493			   int rx_ring_last)
   1494{
   1495	while (rx_ring_first != rx_ring_last) {
   1496		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
   1497
   1498		if (rx_swbd->page) {
   1499			dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
   1500				       rx_swbd->dir);
   1501			__free_page(rx_swbd->page);
   1502			rx_swbd->page = NULL;
   1503		}
   1504		enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
   1505	}
   1506	rx_ring->stats.xdp_redirect_failures++;
   1507}
   1508
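        /* XDP RX path: build an xdp_buff per frame, run the BPF program and
         * act on the verdict (DROP, PASS, TX, REDIRECT). RX pages are mapped
         * DMA_BIDIRECTIONAL here, so XDP_TX can transmit them in place and
         * recycle them on TX completion.
         */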
   1509static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
   1510				   struct napi_struct *napi, int work_limit,
   1511				   struct bpf_prog *prog)
   1512{
   1513	int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0;
   1514	struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0};
   1515	struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
   1516	int rx_frm_cnt = 0, rx_byte_cnt = 0;
   1517	struct enetc_bdr *tx_ring;
   1518	int cleaned_cnt, i;
   1519	u32 xdp_act;
   1520
   1521	cleaned_cnt = enetc_bd_unused(rx_ring);
   1522	/* next descriptor to process */
   1523	i = rx_ring->next_to_clean;
   1524
   1525	while (likely(rx_frm_cnt < work_limit)) {
   1526		union enetc_rx_bd *rxbd, *orig_rxbd;
   1527		int orig_i, orig_cleaned_cnt;
   1528		struct xdp_buff xdp_buff;
   1529		struct sk_buff *skb;
   1530		int tmp_orig_i, err;
   1531		u32 bd_status;
   1532
   1533		rxbd = enetc_rxbd(rx_ring, i);
   1534		bd_status = le32_to_cpu(rxbd->r.lstatus);
   1535		if (!bd_status)
   1536			break;
   1537
   1538		enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
   1539		dma_rmb(); /* for reading other rxbd fields */
   1540
   1541		if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
   1542						      &rxbd, &i))
   1543			break;
   1544
   1545		orig_rxbd = rxbd;
   1546		orig_cleaned_cnt = cleaned_cnt;
   1547		orig_i = i;
   1548
   1549		enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
   1550				     &cleaned_cnt, &xdp_buff);
   1551
   1552		xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
   1553
   1554		switch (xdp_act) {
   1555		default:
   1556			bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act);
   1557			fallthrough;
   1558		case XDP_ABORTED:
   1559			trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
   1560			fallthrough;
   1561		case XDP_DROP:
   1562			enetc_xdp_drop(rx_ring, orig_i, i);
   1563			break;
   1564		case XDP_PASS:
   1565			rxbd = orig_rxbd;
   1566			cleaned_cnt = orig_cleaned_cnt;
   1567			i = orig_i;
   1568
   1569			skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
   1570					      &i, &cleaned_cnt,
   1571					      ENETC_RXB_DMA_SIZE_XDP);
   1572			if (unlikely(!skb))
   1573				goto out;
   1574
   1575			napi_gro_receive(napi, skb);
   1576			break;
   1577		case XDP_TX:
   1578			tx_ring = priv->xdp_tx_ring[rx_ring->index];
   1579			xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
   1580								     rx_ring,
   1581								     orig_i, i);
   1582
   1583			if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) {
   1584				enetc_xdp_drop(rx_ring, orig_i, i);
   1585				tx_ring->stats.xdp_tx_drops++;
   1586			} else {
   1587				tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
   1588				rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
   1589				xdp_tx_frm_cnt++;
   1590				/* The XDP_TX enqueue was successful, so we
   1591				 * need to scrub the RX software BDs because
   1592				 * the ownership of the buffers no longer
   1593				 * belongs to the RX ring, and we must prevent
   1594				 * enetc_refill_rx_ring() from reusing
   1595				 * rx_swbd->page.
   1596				 */
   1597				while (orig_i != i) {
   1598					rx_ring->rx_swbd[orig_i].page = NULL;
   1599					enetc_bdr_idx_inc(rx_ring, &orig_i);
   1600				}
   1601			}
   1602			break;
   1603		case XDP_REDIRECT:
   1604			/* xdp_return_frame does not support S/G in the sense
   1605			 * that it leaks the fragments (__xdp_return should not
   1606			 * call page_frag_free only for the initial buffer).
   1607			 * Until XDP_REDIRECT gains support for S/G let's keep
   1608			 * the code structure in place, but dead. We drop the
   1609			 * S/G frames ourselves to avoid memory leaks which
   1610			 * would otherwise leave the kernel OOM.
   1611			 */
   1612			if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) {
   1613				enetc_xdp_drop(rx_ring, orig_i, i);
   1614				rx_ring->stats.xdp_redirect_sg++;
   1615				break;
   1616			}
   1617
   1618			tmp_orig_i = orig_i;
   1619
   1620			while (orig_i != i) {
   1621				enetc_flip_rx_buff(rx_ring,
   1622						   &rx_ring->rx_swbd[orig_i]);
   1623				enetc_bdr_idx_inc(rx_ring, &orig_i);
   1624			}
   1625
   1626			err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
   1627			if (unlikely(err)) {
   1628				enetc_xdp_free(rx_ring, tmp_orig_i, i);
   1629			} else {
   1630				xdp_redirect_frm_cnt++;
   1631				rx_ring->stats.xdp_redirect++;
   1632			}
   1633		}
   1634
   1635		rx_frm_cnt++;
   1636	}
   1637
   1638out:
   1639	rx_ring->next_to_clean = i;
   1640
   1641	rx_ring->stats.packets += rx_frm_cnt;
   1642	rx_ring->stats.bytes += rx_byte_cnt;
   1643
   1644	if (xdp_redirect_frm_cnt)
   1645		xdp_do_flush_map();
   1646
   1647	if (xdp_tx_frm_cnt)
   1648		enetc_update_tx_ring_tail(tx_ring);
   1649
   1650	if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
   1651		enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
   1652				     rx_ring->xdp.xdp_tx_in_flight);
   1653
   1654	return rx_frm_cnt;
   1655}
   1656
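        /* NAPI poll: under the MDIO lock, clean this vector's TX rings and
         * its RX ring. Interrupts are re-enabled only when both TX and RX
         * finished within budget; if dynamic interrupt moderation is enabled,
         * the RX statistics are fed to DIM first.
         */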
   1657static int enetc_poll(struct napi_struct *napi, int budget)
   1658{
   1659	struct enetc_int_vector
   1660		*v = container_of(napi, struct enetc_int_vector, napi);
   1661	struct enetc_bdr *rx_ring = &v->rx_ring;
   1662	struct bpf_prog *prog;
   1663	bool complete = true;
   1664	int work_done;
   1665	int i;
   1666
   1667	enetc_lock_mdio();
   1668
   1669	for (i = 0; i < v->count_tx_rings; i++)
   1670		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
   1671			complete = false;
   1672
   1673	prog = rx_ring->xdp.prog;
   1674	if (prog)
   1675		work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog);
   1676	else
   1677		work_done = enetc_clean_rx_ring(rx_ring, napi, budget);
   1678	if (work_done == budget)
   1679		complete = false;
   1680	if (work_done)
   1681		v->rx_napi_work = true;
   1682
   1683	if (!complete) {
   1684		enetc_unlock_mdio();
   1685		return budget;
   1686	}
   1687
   1688	napi_complete_done(napi, work_done);
   1689
   1690	if (likely(v->rx_dim_en))
   1691		enetc_rx_net_dim(v);
   1692
   1693	v->rx_napi_work = false;
   1694
   1695	/* enable interrupts */
   1696	enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
   1697
   1698	for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
   1699		enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
   1700				 ENETC_TBIER_TXTIE);
   1701
   1702	enetc_unlock_mdio();
   1703
   1704	return work_done;
   1705}
   1706
   1707/* Probing and Init */
   1708#define ENETC_MAX_RFS_SIZE 64
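        /* Read the station interface capability registers: number of RX/TX
         * BD rings, number of flow steering entries (capped at
         * ENETC_MAX_RFS_SIZE), RSS table size, and the Qbv / PSFP hardware
         * features.
         */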
   1709void enetc_get_si_caps(struct enetc_si *si)
   1710{
   1711	struct enetc_hw *hw = &si->hw;
   1712	u32 val;
   1713
   1714	/* find out how many of various resources we have to work with */
   1715	val = enetc_rd(hw, ENETC_SICAPR0);
   1716	si->num_rx_rings = (val >> 16) & 0xff;
   1717	si->num_tx_rings = val & 0xff;
   1718
   1719	val = enetc_rd(hw, ENETC_SIRFSCAPR);
   1720	si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
   1721	si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
   1722
   1723	si->num_rss = 0;
   1724	val = enetc_rd(hw, ENETC_SIPCAPR0);
   1725	if (val & ENETC_SIPCAPR0_RSS) {
   1726		u32 rss;
   1727
   1728		rss = enetc_rd(hw, ENETC_SIRSSCAPR);
   1729		si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
   1730	}
   1731
   1732	if (val & ENETC_SIPCAPR0_QBV)
   1733		si->hw_features |= ENETC_SI_F_QBV;
   1734
   1735	if (val & ENETC_SIPCAPR0_PSFP)
   1736		si->hw_features |= ENETC_SI_F_PSFP;
   1737}
   1738
   1739static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
   1740{
   1741	r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
   1742					&r->bd_dma_base, GFP_KERNEL);
   1743	if (!r->bd_base)
   1744		return -ENOMEM;
   1745
   1746	/* h/w requires 128B alignment */
   1747	if (!IS_ALIGNED(r->bd_dma_base, 128)) {
   1748		dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
   1749				  r->bd_dma_base);
   1750		return -EINVAL;
   1751	}
   1752
   1753	return 0;
   1754}
   1755
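        /* Allocate the per-ring TX software descriptor array, the hardware BD
         * ring itself and the DMA-coherent buffer used for TSO headers,
         * unwinding in reverse order on failure.
         */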
   1756static int enetc_alloc_txbdr(struct enetc_bdr *txr)
   1757{
   1758	int err;
   1759
   1760	txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
   1761	if (!txr->tx_swbd)
   1762		return -ENOMEM;
   1763
   1764	err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
   1765	if (err)
   1766		goto err_alloc_bdr;
   1767
   1768	txr->tso_headers = dma_alloc_coherent(txr->dev,
   1769					      txr->bd_count * TSO_HEADER_SIZE,
   1770					      &txr->tso_headers_dma,
   1771					      GFP_KERNEL);
   1772	if (!txr->tso_headers) {
   1773		err = -ENOMEM;
   1774		goto err_alloc_tso;
   1775	}
   1776
   1777	txr->next_to_clean = 0;
   1778	txr->next_to_use = 0;
   1779
   1780	return 0;
   1781
   1782err_alloc_tso:
   1783	dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd),
   1784			  txr->bd_base, txr->bd_dma_base);
   1785	txr->bd_base = NULL;
   1786err_alloc_bdr:
   1787	vfree(txr->tx_swbd);
   1788	txr->tx_swbd = NULL;
   1789
   1790	return err;
   1791}
   1792
   1793static void enetc_free_txbdr(struct enetc_bdr *txr)
   1794{
   1795	int size, i;
   1796
   1797	for (i = 0; i < txr->bd_count; i++)
   1798		enetc_free_tx_frame(txr, &txr->tx_swbd[i]);
   1799
   1800	size = txr->bd_count * sizeof(union enetc_tx_bd);
   1801
   1802	dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE,
   1803			  txr->tso_headers, txr->tso_headers_dma);
   1804	txr->tso_headers = NULL;
   1805
   1806	dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
   1807	txr->bd_base = NULL;
   1808
   1809	vfree(txr->tx_swbd);
   1810	txr->tx_swbd = NULL;
   1811}
   1812
   1813static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
   1814{
   1815	int i, err;
   1816
   1817	for (i = 0; i < priv->num_tx_rings; i++) {
   1818		err = enetc_alloc_txbdr(priv->tx_ring[i]);
   1819
   1820		if (err)
   1821			goto fail;
   1822	}
   1823
   1824	return 0;
   1825
   1826fail:
   1827	while (i-- > 0)
   1828		enetc_free_txbdr(priv->tx_ring[i]);
   1829
   1830	return err;
   1831}
   1832
   1833static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
   1834{
   1835	int i;
   1836
   1837	for (i = 0; i < priv->num_tx_rings; i++)
   1838		enetc_free_txbdr(priv->tx_ring[i]);
   1839}
   1840
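        /* Allocate the RX software descriptor array and the hardware BD ring;
         * extended descriptors (used when RX timestamping is enabled) take
         * twice the space of a regular RX BD.
         */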
   1841static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
   1842{
   1843	size_t size = sizeof(union enetc_rx_bd);
   1844	int err;
   1845
   1846	rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
   1847	if (!rxr->rx_swbd)
   1848		return -ENOMEM;
   1849
   1850	if (extended)
   1851		size *= 2;
   1852
   1853	err = enetc_dma_alloc_bdr(rxr, size);
   1854	if (err) {
   1855		vfree(rxr->rx_swbd);
   1856		return err;
   1857	}
   1858
   1859	rxr->next_to_clean = 0;
   1860	rxr->next_to_use = 0;
   1861	rxr->next_to_alloc = 0;
   1862	rxr->ext_en = extended;
   1863
   1864	return 0;
   1865}
   1866
   1867static void enetc_free_rxbdr(struct enetc_bdr *rxr)
   1868{
   1869	int size;
   1870
   1871	size = rxr->bd_count * sizeof(union enetc_rx_bd);
   1872
   1873	dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
   1874	rxr->bd_base = NULL;
   1875
   1876	vfree(rxr->rx_swbd);
   1877	rxr->rx_swbd = NULL;
   1878}
   1879
   1880static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
   1881{
   1882	bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
   1883	int i, err;
   1884
   1885	for (i = 0; i < priv->num_rx_rings; i++) {
   1886		err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
   1887
   1888		if (err)
   1889			goto fail;
   1890	}
   1891
   1892	return 0;
   1893
   1894fail:
   1895	while (i-- > 0)
   1896		enetc_free_rxbdr(priv->rx_ring[i]);
   1897
   1898	return err;
   1899}
   1900
   1901static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
   1902{
   1903	int i;
   1904
   1905	for (i = 0; i < priv->num_rx_rings; i++)
   1906		enetc_free_rxbdr(priv->rx_ring[i]);
   1907}
   1908
   1909static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
   1910{
   1911	int i;
   1912
   1913	if (!tx_ring->tx_swbd)
   1914		return;
   1915
   1916	for (i = 0; i < tx_ring->bd_count; i++) {
   1917		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
   1918
   1919		enetc_free_tx_frame(tx_ring, tx_swbd);
   1920	}
   1921
   1922	tx_ring->next_to_clean = 0;
   1923	tx_ring->next_to_use = 0;
   1924}
   1925
   1926static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
   1927{
   1928	int i;
   1929
   1930	if (!rx_ring->rx_swbd)
   1931		return;
   1932
   1933	for (i = 0; i < rx_ring->bd_count; i++) {
   1934		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
   1935
   1936		if (!rx_swbd->page)
   1937			continue;
   1938
   1939		dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
   1940			       rx_swbd->dir);
   1941		__free_page(rx_swbd->page);
   1942		rx_swbd->page = NULL;
   1943	}
   1944
   1945	rx_ring->next_to_clean = 0;
   1946	rx_ring->next_to_use = 0;
   1947	rx_ring->next_to_alloc = 0;
   1948}
   1949
   1950static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
   1951{
   1952	int i;
   1953
   1954	for (i = 0; i < priv->num_rx_rings; i++)
   1955		enetc_free_rx_ring(priv->rx_ring[i]);
   1956
   1957	for (i = 0; i < priv->num_tx_rings; i++)
   1958		enetc_free_tx_ring(priv->tx_ring[i]);
   1959}
   1960
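        /* Populate the hardware RSS indirection table with a simple
         * round-robin spread over the available RX ring groups.
         */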
   1961static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
   1962{
   1963	int *rss_table;
   1964	int i;
   1965
   1966	rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
   1967	if (!rss_table)
   1968		return -ENOMEM;
   1969
   1970	/* Set up RSS table defaults */
   1971	for (i = 0; i < si->num_rss; i++)
   1972		rss_table[i] = i % num_groups;
   1973
   1974	enetc_set_rss_table(si, rss_table, si->num_rss);
   1975
   1976	kfree(rss_table);
   1977
   1978	return 0;
   1979}
   1980
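        /* Basic station interface bring-up: program the SI cache attributes,
         * enable the SI and, if the hardware supports RSS, install the
         * default indirection table.
         */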
   1981int enetc_configure_si(struct enetc_ndev_priv *priv)
   1982{
   1983	struct enetc_si *si = priv->si;
   1984	struct enetc_hw *hw = &si->hw;
   1985	int err;
   1986
   1987	/* set SI cache attributes */
   1988	enetc_wr(hw, ENETC_SICAR0,
   1989		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
   1990	enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
   1991	/* enable SI */
   1992	enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
   1993
   1994	if (si->num_rss) {
   1995		err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
   1996		if (err)
   1997			return err;
   1998	}
   1999
   2000	return 0;
   2001}
   2002
   2003void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
   2004{
   2005	struct enetc_si *si = priv->si;
   2006	int cpus = num_online_cpus();
   2007
   2008	priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
   2009	priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;
   2010
   2011	/* Enable all available TX rings in order to configure as many
   2012	 * priorities as possible, when needed.
   2013	 * TODO: Make # of TX rings run-time configurable
   2014	 */
   2015	priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
   2016	priv->num_tx_rings = si->num_tx_rings;
   2017	priv->bdr_int_num = cpus;
   2018	priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
   2019	priv->tx_ictt = ENETC_TXIC_TIMETHR;
   2020}
   2021
   2022int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
   2023{
   2024	struct enetc_si *si = priv->si;
   2025
   2026	priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
   2027				  GFP_KERNEL);
   2028	if (!priv->cls_rules)
   2029		return -ENOMEM;
   2030
   2031	return 0;
   2032}
   2033
   2034void enetc_free_si_resources(struct enetc_ndev_priv *priv)
   2035{
   2036	kfree(priv->cls_rules);
   2037}
   2038
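        /* Program one TX BD ring into hardware: base address, length, default
         * interrupt threshold and ring mode bits, then cache the
         * producer/consumer index register addresses used on the fast path.
         */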
   2039static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
   2040{
   2041	int idx = tx_ring->index;
   2042	u32 tbmr;
   2043
   2044	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
   2045		       lower_32_bits(tx_ring->bd_dma_base));
   2046
   2047	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
   2048		       upper_32_bits(tx_ring->bd_dma_base));
   2049
   2050	WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
   2051	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
   2052		       ENETC_RTBLENR_LEN(tx_ring->bd_count));
   2053
   2054	/* clearing PI/CI registers for Tx not supported, adjust sw indexes */
   2055	tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
   2056	tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
   2057
   2058	/* enable Tx ints by setting pkt thr to 1 */
   2059	enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
   2060
   2061	tbmr = ENETC_TBMR_EN;
   2062	if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
   2063		tbmr |= ENETC_TBMR_VIH;
   2064
   2065	/* enable ring */
   2066	enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
   2067
   2068	tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
   2069	tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
   2070	tx_ring->idr = hw->reg + ENETC_SITXIDR;
   2071}
   2072
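        /* Program one RX BD ring into hardware: base address, length and
         * buffer size (smaller when an XDP program needs headroom), seed the
         * ring with fresh buffers, then enable it.
         */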
   2073static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
   2074{
   2075	int idx = rx_ring->index;
   2076	u32 rbmr;
   2077
   2078	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
   2079		       lower_32_bits(rx_ring->bd_dma_base));
   2080
   2081	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
   2082		       upper_32_bits(rx_ring->bd_dma_base));
   2083
   2084	WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
   2085	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
   2086		       ENETC_RTBLENR_LEN(rx_ring->bd_count));
   2087
   2088	if (rx_ring->xdp.prog)
   2089		enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP);
   2090	else
   2091		enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
   2092
   2093	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
   2094
   2095	/* enable Rx ints by setting pkt thr to 1 */
   2096	enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
   2097
   2098	rbmr = ENETC_RBMR_EN;
   2099
   2100	if (rx_ring->ext_en)
   2101		rbmr |= ENETC_RBMR_BDS;
   2102
   2103	if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
   2104		rbmr |= ENETC_RBMR_VTE;
   2105
   2106	rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
   2107	rx_ring->idr = hw->reg + ENETC_SIRXIDR;
   2108
   2109	enetc_lock_mdio();
   2110	enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
   2111	enetc_unlock_mdio();
   2112
   2113	/* enable ring */
   2114	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
   2115}
   2116
   2117static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
   2118{
   2119	int i;
   2120
   2121	for (i = 0; i < priv->num_tx_rings; i++)
   2122		enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
   2123
   2124	for (i = 0; i < priv->num_rx_rings; i++)
   2125		enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
   2126}
   2127
   2128static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
   2129{
   2130	int idx = rx_ring->index;
   2131
   2132	/* disable EN bit on ring */
   2133	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
   2134}
   2135
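        /* Disable a TX BD ring and poll its status register with exponential
         * backoff until the hardware reports it is no longer busy, warning on
         * timeout.
         */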
   2136static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
   2137{
   2138	int delay = 8, timeout = 100;
   2139	int idx = tx_ring->index;
   2140
   2141	/* disable EN bit on ring */
   2142	enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
   2143
   2144	/* wait for busy to clear */
   2145	while (delay < timeout &&
   2146	       enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
   2147		msleep(delay);
   2148		delay *= 2;
   2149	}
   2150
   2151	if (delay >= timeout)
   2152		netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
   2153			    idx);
   2154}
   2155
   2156static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
   2157{
   2158	int i;
   2159
   2160	for (i = 0; i < priv->num_tx_rings; i++)
   2161		enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
   2162
   2163	for (i = 0; i < priv->num_rx_rings; i++)
   2164		enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
   2165
   2166	udelay(1);
   2167}
   2168
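        /* Request one MSI-X interrupt per RX/TX vector, map the rings to
         * their MSI-X table entries and spread the vectors across the online
         * CPUs via affinity hints; undo everything on failure.
         */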
   2169static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
   2170{
   2171	struct pci_dev *pdev = priv->si->pdev;
   2172	int i, j, err;
   2173
   2174	for (i = 0; i < priv->bdr_int_num; i++) {
   2175		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
   2176		struct enetc_int_vector *v = priv->int_vector[i];
   2177		int entry = ENETC_BDR_INT_BASE_IDX + i;
   2178		struct enetc_hw *hw = &priv->si->hw;
   2179
   2180		snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
   2181			 priv->ndev->name, i);
   2182		err = request_irq(irq, enetc_msix, 0, v->name, v);
   2183		if (err) {
   2184			dev_err(priv->dev, "request_irq() failed!\n");
   2185			goto irq_err;
   2186		}
   2187		disable_irq(irq);
   2188
   2189		v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
   2190		v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
   2191		v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);
   2192
   2193		enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
   2194
   2195		for (j = 0; j < v->count_tx_rings; j++) {
   2196			int idx = v->tx_ring[j].index;
   2197
   2198			enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
   2199		}
   2200		irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
   2201	}
   2202
   2203	return 0;
   2204
   2205irq_err:
   2206	while (i--) {
   2207		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
   2208
   2209		irq_set_affinity_hint(irq, NULL);
   2210		free_irq(irq, priv->int_vector[i]);
   2211	}
   2212
   2213	return err;
   2214}
   2215
   2216static void enetc_free_irqs(struct enetc_ndev_priv *priv)
   2217{
   2218	struct pci_dev *pdev = priv->si->pdev;
   2219	int i;
   2220
   2221	for (i = 0; i < priv->bdr_int_num; i++) {
   2222		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
   2223
   2224		irq_set_affinity_hint(irq, NULL);
   2225		free_irq(irq, priv->int_vector[i]);
   2226	}
   2227}
   2228
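        /* Program the RX and TX interrupt coalescing registers according to
         * the configured moderation mode (manual/adaptive thresholds vs. one
         * interrupt per packet) and enable the ring interrupts.
         */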
   2229static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
   2230{
   2231	struct enetc_hw *hw = &priv->si->hw;
   2232	u32 icpt, ictt;
   2233	int i;
   2234
   2235	/* enable Tx & Rx event indication */
   2236	if (priv->ic_mode &
   2237	    (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
   2238		icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
   2239		/* init to non-0 minimum, will be adjusted later */
   2240		ictt = 0x1;
   2241	} else {
   2242		icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
   2243		ictt = 0;
   2244	}
   2245
   2246	for (i = 0; i < priv->num_rx_rings; i++) {
   2247		enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
   2248		enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
   2249		enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
   2250	}
   2251
   2252	if (priv->ic_mode & ENETC_IC_TX_MANUAL)
   2253		icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
   2254	else
   2255		icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */
   2256
   2257	for (i = 0; i < priv->num_tx_rings; i++) {
   2258		enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
   2259		enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
   2260		enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
   2261	}
   2262}
   2263
   2264static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
   2265{
   2266	int i;
   2267
   2268	for (i = 0; i < priv->num_tx_rings; i++)
   2269		enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
   2270
   2271	for (i = 0; i < priv->num_rx_rings; i++)
   2272		enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
   2273}
   2274
   2275static int enetc_phylink_connect(struct net_device *ndev)
   2276{
   2277	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   2278	struct ethtool_eee edata;
   2279	int err;
   2280
   2281	if (!priv->phylink)
   2282		return 0; /* phy-less mode */
   2283
   2284	err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
   2285	if (err) {
   2286		dev_err(&ndev->dev, "could not attach to PHY\n");
   2287		return err;
   2288	}
   2289
    2290	/* disable EEE autoneg until the ENETC driver supports it */
   2291	memset(&edata, 0, sizeof(struct ethtool_eee));
   2292	phylink_ethtool_set_eee(priv->phylink, &edata);
   2293
   2294	return 0;
   2295}
   2296
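        /* Work item that clears the one-step timestamping "in progress" flag
         * and transmits the next skb queued on priv->tx_skbs, if any.
         */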
   2297static void enetc_tx_onestep_tstamp(struct work_struct *work)
   2298{
   2299	struct enetc_ndev_priv *priv;
   2300	struct sk_buff *skb;
   2301
   2302	priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
   2303
   2304	netif_tx_lock(priv->ndev);
   2305
   2306	clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
   2307	skb = skb_dequeue(&priv->tx_skbs);
   2308	if (skb)
   2309		enetc_start_xmit(skb, priv->ndev);
   2310
   2311	netif_tx_unlock(priv->ndev);
   2312}
   2313
   2314static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
   2315{
   2316	INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp);
   2317	skb_queue_head_init(&priv->tx_skbs);
   2318}
   2319
   2320void enetc_start(struct net_device *ndev)
   2321{
   2322	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   2323	int i;
   2324
   2325	enetc_setup_interrupts(priv);
   2326
   2327	for (i = 0; i < priv->bdr_int_num; i++) {
   2328		int irq = pci_irq_vector(priv->si->pdev,
   2329					 ENETC_BDR_INT_BASE_IDX + i);
   2330
   2331		napi_enable(&priv->int_vector[i]->napi);
   2332		enable_irq(irq);
   2333	}
   2334
   2335	if (priv->phylink)
   2336		phylink_start(priv->phylink);
   2337	else
   2338		netif_carrier_on(ndev);
   2339
   2340	netif_tx_start_all_queues(ndev);
   2341}
   2342
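        /* ndo_open: set up IRQs, connect the PHY, allocate TX/RX ring
         * resources, publish the real queue counts to the stack, program the
         * BD rings and start the interface; unwind in reverse order on error.
         */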
   2343int enetc_open(struct net_device *ndev)
   2344{
   2345	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   2346	int num_stack_tx_queues;
   2347	int err;
   2348
   2349	err = enetc_setup_irqs(priv);
   2350	if (err)
   2351		return err;
   2352
   2353	err = enetc_phylink_connect(ndev);
   2354	if (err)
   2355		goto err_phy_connect;
   2356
   2357	err = enetc_alloc_tx_resources(priv);
   2358	if (err)
   2359		goto err_alloc_tx;
   2360
   2361	err = enetc_alloc_rx_resources(priv);
   2362	if (err)
   2363		goto err_alloc_rx;
   2364
   2365	num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
   2366
   2367	err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
   2368	if (err)
   2369		goto err_set_queues;
   2370
   2371	err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
   2372	if (err)
   2373		goto err_set_queues;
   2374
   2375	enetc_tx_onestep_tstamp_init(priv);
   2376	enetc_setup_bdrs(priv);
   2377	enetc_start(ndev);
   2378
   2379	return 0;
   2380
   2381err_set_queues:
   2382	enetc_free_rx_resources(priv);
   2383err_alloc_rx:
   2384	enetc_free_tx_resources(priv);
   2385err_alloc_tx:
   2386	if (priv->phylink)
   2387		phylink_disconnect_phy(priv->phylink);
   2388err_phy_connect:
   2389	enetc_free_irqs(priv);
   2390
   2391	return err;
   2392}
   2393
   2394void enetc_stop(struct net_device *ndev)
   2395{
   2396	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   2397	int i;
   2398
   2399	netif_tx_stop_all_queues(ndev);
   2400
   2401	for (i = 0; i < priv->bdr_int_num; i++) {
   2402		int irq = pci_irq_vector(priv->si->pdev,
   2403					 ENETC_BDR_INT_BASE_IDX + i);
   2404
   2405		disable_irq(irq);
   2406		napi_synchronize(&priv->int_vector[i]->napi);
   2407		napi_disable(&priv->int_vector[i]->napi);
   2408	}
   2409
   2410	if (priv->phylink)
   2411		phylink_stop(priv->phylink);
   2412	else
   2413		netif_carrier_off(ndev);
   2414
   2415	enetc_clear_interrupts(priv);
   2416}
   2417
   2418int enetc_close(struct net_device *ndev)
   2419{
   2420	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   2421
   2422	enetc_stop(ndev);
   2423	enetc_clear_bdrs(priv);
   2424
   2425	if (priv->phylink)
   2426		phylink_disconnect_phy(priv->phylink);
   2427	enetc_free_rxtx_rings(priv);
   2428	enetc_free_rx_resources(priv);
   2429	enetc_free_tx_resources(priv);
   2430	enetc_free_irqs(priv);
   2431
   2432	return 0;
   2433}
   2434
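        /* mqprio offload: dedicate one TX BD ring per traffic class, with
         * ring i getting hardware priority i; num_tc == 0 restores the flat
         * queue mapping and resets all ring priorities to zero.
         */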
   2435static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
   2436{
   2437	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   2438	struct tc_mqprio_qopt *mqprio = type_data;
   2439	struct enetc_bdr *tx_ring;
   2440	int num_stack_tx_queues;
   2441	u8 num_tc;
   2442	int i;
   2443
   2444	num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
   2445	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
   2446	num_tc = mqprio->num_tc;
   2447
   2448	if (!num_tc) {
   2449		netdev_reset_tc(ndev);
   2450		netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
   2451
   2452		/* Reset all ring priorities to 0 */
   2453		for (i = 0; i < priv->num_tx_rings; i++) {
   2454			tx_ring = priv->tx_ring[i];
   2455			enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
   2456		}
   2457
   2458		return 0;
   2459	}
   2460
   2461	/* Check if we have enough BD rings available to accommodate all TCs */
   2462	if (num_tc > num_stack_tx_queues) {
   2463		netdev_err(ndev, "Max %d traffic classes supported\n",
   2464			   priv->num_tx_rings);
   2465		return -EINVAL;
   2466	}
   2467
   2468	/* For the moment, we use only one BD ring per TC.
   2469	 *
   2470	 * Configure num_tc BD rings with increasing priorities.
   2471	 */
   2472	for (i = 0; i < num_tc; i++) {
   2473		tx_ring = priv->tx_ring[i];
   2474		enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
   2475	}
   2476
   2477	/* Reset the number of netdev queues based on the TC count */
   2478	netif_set_real_num_tx_queues(ndev, num_tc);
   2479
   2480	netdev_set_num_tc(ndev, num_tc);
   2481
   2482	/* Each TC is associated with one netdev queue */
   2483	for (i = 0; i < num_tc; i++)
   2484		netdev_set_tc_queue(ndev, i, 1, i);
   2485
   2486	return 0;
   2487}
   2488
   2489int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
   2490		   void *type_data)
   2491{
   2492	switch (type) {
   2493	case TC_SETUP_QDISC_MQPRIO:
   2494		return enetc_setup_tc_mqprio(ndev, type_data);
   2495	case TC_SETUP_QDISC_TAPRIO:
   2496		return enetc_setup_tc_taprio(ndev, type_data);
   2497	case TC_SETUP_QDISC_CBS:
   2498		return enetc_setup_tc_cbs(ndev, type_data);
   2499	case TC_SETUP_QDISC_ETF:
   2500		return enetc_setup_tc_txtime(ndev, type_data);
   2501	case TC_SETUP_BLOCK:
   2502		return enetc_setup_tc_psfp(ndev, type_data);
   2503	default:
   2504		return -EOPNOTSUPP;
   2505	}
   2506}
   2507
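        /* Install or remove an XDP program: bring the interface down if
         * needed (the RX buffer layout changes), swap the program pointer on
         * every RX ring, adjust the buffer headroom, then bring it back up.
         */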
   2508static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
   2509				struct netlink_ext_ack *extack)
   2510{
   2511	struct enetc_ndev_priv *priv = netdev_priv(dev);
   2512	struct bpf_prog *old_prog;
   2513	bool is_up;
   2514	int i;
   2515
   2516	/* The buffer layout is changing, so we need to drain the old
   2517	 * RX buffers and seed new ones.
   2518	 */
   2519	is_up = netif_running(dev);
   2520	if (is_up)
   2521		dev_close(dev);
   2522
   2523	old_prog = xchg(&priv->xdp_prog, prog);
   2524	if (old_prog)
   2525		bpf_prog_put(old_prog);
   2526
   2527	for (i = 0; i < priv->num_rx_rings; i++) {
   2528		struct enetc_bdr *rx_ring = priv->rx_ring[i];
   2529
   2530		rx_ring->xdp.prog = prog;
   2531
   2532		if (prog)
   2533			rx_ring->buffer_offset = XDP_PACKET_HEADROOM;
   2534		else
   2535			rx_ring->buffer_offset = ENETC_RXB_PAD;
   2536	}
   2537
   2538	if (is_up)
   2539		return dev_open(dev, extack);
   2540
   2541	return 0;
   2542}
   2543
   2544int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp)
   2545{
   2546	switch (xdp->command) {
   2547	case XDP_SETUP_PROG:
   2548		return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack);
   2549	default:
   2550		return -EINVAL;
   2551	}
   2552
   2553	return 0;
   2554}
   2555
   2556struct net_device_stats *enetc_get_stats(struct net_device *ndev)
   2557{
   2558	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   2559	struct net_device_stats *stats = &ndev->stats;
   2560	unsigned long packets = 0, bytes = 0;
   2561	unsigned long tx_dropped = 0;
   2562	int i;
   2563
   2564	for (i = 0; i < priv->num_rx_rings; i++) {
   2565		packets += priv->rx_ring[i]->stats.packets;
   2566		bytes	+= priv->rx_ring[i]->stats.bytes;
   2567	}
   2568
   2569	stats->rx_packets = packets;
   2570	stats->rx_bytes = bytes;
   2571	bytes = 0;
   2572	packets = 0;
   2573
   2574	for (i = 0; i < priv->num_tx_rings; i++) {
   2575		packets += priv->tx_ring[i]->stats.packets;
   2576		bytes	+= priv->tx_ring[i]->stats.bytes;
   2577		tx_dropped += priv->tx_ring[i]->stats.win_drop;
   2578	}
   2579
   2580	stats->tx_packets = packets;
   2581	stats->tx_bytes = bytes;
   2582	stats->tx_dropped = tx_dropped;
   2583
   2584	return stats;
   2585}
   2586
   2587static int enetc_set_rss(struct net_device *ndev, int en)
   2588{
   2589	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   2590	struct enetc_hw *hw = &priv->si->hw;
   2591	u32 reg;
   2592
   2593	enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
   2594
   2595	reg = enetc_rd(hw, ENETC_SIMR);
   2596	reg &= ~ENETC_SIMR_RSSE;
   2597	reg |= (en) ? ENETC_SIMR_RSSE : 0;
   2598	enetc_wr(hw, ENETC_SIMR, reg);
   2599
   2600	return 0;
   2601}
   2602
   2603static int enetc_set_psfp(struct net_device *ndev, int en)
   2604{
   2605	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   2606	int err;
   2607
   2608	if (en) {
   2609		err = enetc_psfp_enable(priv);
   2610		if (err)
   2611			return err;
   2612
   2613		priv->active_offloads |= ENETC_F_QCI;
   2614		return 0;
   2615	}
   2616
   2617	err = enetc_psfp_disable(priv);
   2618	if (err)
   2619		return err;
   2620
   2621	priv->active_offloads &= ~ENETC_F_QCI;
   2622
   2623	return 0;
   2624}
   2625
   2626static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
   2627{
   2628	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   2629	int i;
   2630
   2631	for (i = 0; i < priv->num_rx_rings; i++)
   2632		enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
   2633}
   2634
   2635static void enetc_enable_txvlan(struct net_device *ndev, bool en)
   2636{
   2637	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   2638	int i;
   2639
   2640	for (i = 0; i < priv->num_tx_rings; i++)
   2641		enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
   2642}
   2643
   2644int enetc_set_features(struct net_device *ndev,
   2645		       netdev_features_t features)
   2646{
   2647	netdev_features_t changed = ndev->features ^ features;
   2648	int err = 0;
   2649
   2650	if (changed & NETIF_F_RXHASH)
   2651		enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
   2652
   2653	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
   2654		enetc_enable_rxvlan(ndev,
   2655				    !!(features & NETIF_F_HW_VLAN_CTAG_RX));
   2656
   2657	if (changed & NETIF_F_HW_VLAN_CTAG_TX)
   2658		enetc_enable_txvlan(ndev,
   2659				    !!(features & NETIF_F_HW_VLAN_CTAG_TX));
   2660
   2661	if (changed & NETIF_F_HW_TC)
   2662		err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
   2663
   2664	return err;
   2665}
   2666
   2667#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
   2668static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
   2669{
   2670	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   2671	struct hwtstamp_config config;
   2672	int ao;
   2673
   2674	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
   2675		return -EFAULT;
   2676
   2677	switch (config.tx_type) {
   2678	case HWTSTAMP_TX_OFF:
   2679		priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
   2680		break;
   2681	case HWTSTAMP_TX_ON:
   2682		priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
   2683		priv->active_offloads |= ENETC_F_TX_TSTAMP;
   2684		break;
   2685	case HWTSTAMP_TX_ONESTEP_SYNC:
   2686		priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
   2687		priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
   2688		break;
   2689	default:
   2690		return -ERANGE;
   2691	}
   2692
   2693	ao = priv->active_offloads;
   2694	switch (config.rx_filter) {
   2695	case HWTSTAMP_FILTER_NONE:
   2696		priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
   2697		break;
   2698	default:
   2699		priv->active_offloads |= ENETC_F_RX_TSTAMP;
   2700		config.rx_filter = HWTSTAMP_FILTER_ALL;
   2701	}
   2702
   2703	if (netif_running(ndev) && ao != priv->active_offloads) {
   2704		enetc_close(ndev);
   2705		enetc_open(ndev);
   2706	}
   2707
   2708	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
   2709	       -EFAULT : 0;
   2710}
   2711
   2712static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
   2713{
   2714	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   2715	struct hwtstamp_config config;
   2716
   2717	config.flags = 0;
   2718
   2719	if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
   2720		config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
   2721	else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
   2722		config.tx_type = HWTSTAMP_TX_ON;
   2723	else
   2724		config.tx_type = HWTSTAMP_TX_OFF;
   2725
   2726	config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
   2727			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
   2728
   2729	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
   2730	       -EFAULT : 0;
   2731}
   2732#endif
   2733
   2734int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
   2735{
   2736	struct enetc_ndev_priv *priv = netdev_priv(ndev);
   2737#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
   2738	if (cmd == SIOCSHWTSTAMP)
   2739		return enetc_hwtstamp_set(ndev, rq);
   2740	if (cmd == SIOCGHWTSTAMP)
   2741		return enetc_hwtstamp_get(ndev, rq);
   2742#endif
   2743
   2744	if (!priv->phylink)
   2745		return -EOPNOTSUPP;
   2746
   2747	return phylink_mii_ioctl(priv->phylink, rq, cmd);
   2748}
   2749
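        /* Allocate the MSI-X vectors and the per-vector interrupt structures,
         * each owning one RX ring and a slice of the TX rings, register the
         * XDP RX queue info and NAPI contexts, and remember where the XDP TX
         * rings start.
         */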
   2750int enetc_alloc_msix(struct enetc_ndev_priv *priv)
   2751{
   2752	struct pci_dev *pdev = priv->si->pdev;
   2753	int first_xdp_tx_ring;
   2754	int i, n, err, nvec;
   2755	int v_tx_rings;
   2756
   2757	nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
   2758	/* allocate MSIX for both messaging and Rx/Tx interrupts */
   2759	n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
   2760
   2761	if (n < 0)
   2762		return n;
   2763
   2764	if (n != nvec)
   2765		return -EPERM;
   2766
   2767	/* # of tx rings per int vector */
   2768	v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
   2769
   2770	for (i = 0; i < priv->bdr_int_num; i++) {
   2771		struct enetc_int_vector *v;
   2772		struct enetc_bdr *bdr;
   2773		int j;
   2774
   2775		v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
   2776		if (!v) {
   2777			err = -ENOMEM;
   2778			goto fail;
   2779		}
   2780
   2781		priv->int_vector[i] = v;
   2782
   2783		bdr = &v->rx_ring;
   2784		bdr->index = i;
   2785		bdr->ndev = priv->ndev;
   2786		bdr->dev = priv->dev;
   2787		bdr->bd_count = priv->rx_bd_count;
   2788		bdr->buffer_offset = ENETC_RXB_PAD;
   2789		priv->rx_ring[i] = bdr;
   2790
   2791		err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
   2792		if (err) {
   2793			kfree(v);
   2794			goto fail;
   2795		}
   2796
   2797		err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
   2798						 MEM_TYPE_PAGE_SHARED, NULL);
   2799		if (err) {
   2800			xdp_rxq_info_unreg(&bdr->xdp.rxq);
   2801			kfree(v);
   2802			goto fail;
   2803		}
   2804
   2805		/* init defaults for adaptive IC */
   2806		if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
   2807			v->rx_ictt = 0x1;
   2808			v->rx_dim_en = true;
   2809		}
   2810		INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
   2811		netif_napi_add(priv->ndev, &v->napi, enetc_poll,
   2812			       NAPI_POLL_WEIGHT);
   2813		v->count_tx_rings = v_tx_rings;
   2814
   2815		for (j = 0; j < v_tx_rings; j++) {
   2816			int idx;
   2817
   2818			/* default tx ring mapping policy */
   2819			idx = priv->bdr_int_num * j + i;
   2820			__set_bit(idx, &v->tx_rings_map);
   2821			bdr = &v->tx_ring[j];
   2822			bdr->index = idx;
   2823			bdr->ndev = priv->ndev;
   2824			bdr->dev = priv->dev;
   2825			bdr->bd_count = priv->tx_bd_count;
   2826			priv->tx_ring[idx] = bdr;
   2827		}
   2828	}
   2829
   2830	first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
   2831	priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
   2832
   2833	return 0;
   2834
   2835fail:
   2836	while (i--) {
   2837		struct enetc_int_vector *v = priv->int_vector[i];
   2838		struct enetc_bdr *rx_ring = &v->rx_ring;
   2839
   2840		xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
   2841		xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
   2842		netif_napi_del(&v->napi);
   2843		cancel_work_sync(&v->rx_dim.work);
   2844		kfree(v);
   2845	}
   2846
   2847	pci_free_irq_vectors(pdev);
   2848
   2849	return err;
   2850}
   2851
   2852void enetc_free_msix(struct enetc_ndev_priv *priv)
   2853{
   2854	int i;
   2855
   2856	for (i = 0; i < priv->bdr_int_num; i++) {
   2857		struct enetc_int_vector *v = priv->int_vector[i];
   2858		struct enetc_bdr *rx_ring = &v->rx_ring;
   2859
   2860		xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
   2861		xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
   2862		netif_napi_del(&v->napi);
   2863		cancel_work_sync(&v->rx_dim.work);
   2864	}
   2865
   2866	for (i = 0; i < priv->num_rx_rings; i++)
   2867		priv->rx_ring[i] = NULL;
   2868
   2869	for (i = 0; i < priv->num_tx_rings; i++)
   2870		priv->tx_ring[i] = NULL;
   2871
   2872	for (i = 0; i < priv->bdr_int_num; i++) {
   2873		kfree(priv->int_vector[i]);
   2874		priv->int_vector[i] = NULL;
   2875	}
   2876
   2877	/* disable all MSIX for this device */
   2878	pci_free_irq_vectors(priv->si->pdev);
   2879}
   2880
   2881static void enetc_kfree_si(struct enetc_si *si)
   2882{
   2883	char *p = (char *)si - si->pad;
   2884
   2885	kfree(p);
   2886}
   2887
   2888static void enetc_detect_errata(struct enetc_si *si)
   2889{
   2890	if (si->pdev->revision == ENETC_REV1)
   2891		si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
   2892}
   2893
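        /* Common PCI probe helper: reset the function (FLR), enable the
         * device with a 64-bit DMA mask, map the register BAR and carve out a
         * 32-byte aligned enetc_si (plus driver private area) from a single
         * allocation.
         */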
   2894int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
   2895{
   2896	struct enetc_si *si, *p;
   2897	struct enetc_hw *hw;
   2898	size_t alloc_size;
   2899	int err, len;
   2900
   2901	pcie_flr(pdev);
   2902	err = pci_enable_device_mem(pdev);
   2903	if (err)
   2904		return dev_err_probe(&pdev->dev, err, "device enable failed\n");
   2905
    2906	/* set up for 64-bit DMA */
   2907	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
   2908	if (err) {
   2909		dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
   2910		goto err_dma;
   2911	}
   2912
   2913	err = pci_request_mem_regions(pdev, name);
   2914	if (err) {
   2915		dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
   2916		goto err_pci_mem_reg;
   2917	}
   2918
   2919	pci_set_master(pdev);
   2920
   2921	alloc_size = sizeof(struct enetc_si);
   2922	if (sizeof_priv) {
   2923		/* align priv to 32B */
   2924		alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
   2925		alloc_size += sizeof_priv;
   2926	}
   2927	/* force 32B alignment for enetc_si */
   2928	alloc_size += ENETC_SI_ALIGN - 1;
   2929
   2930	p = kzalloc(alloc_size, GFP_KERNEL);
   2931	if (!p) {
   2932		err = -ENOMEM;
   2933		goto err_alloc_si;
   2934	}
   2935
   2936	si = PTR_ALIGN(p, ENETC_SI_ALIGN);
   2937	si->pad = (char *)si - (char *)p;
   2938
   2939	pci_set_drvdata(pdev, si);
   2940	si->pdev = pdev;
   2941	hw = &si->hw;
   2942
   2943	len = pci_resource_len(pdev, ENETC_BAR_REGS);
   2944	hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
   2945	if (!hw->reg) {
   2946		err = -ENXIO;
   2947		dev_err(&pdev->dev, "ioremap() failed\n");
   2948		goto err_ioremap;
   2949	}
   2950	if (len > ENETC_PORT_BASE)
   2951		hw->port = hw->reg + ENETC_PORT_BASE;
   2952	if (len > ENETC_GLOBAL_BASE)
   2953		hw->global = hw->reg + ENETC_GLOBAL_BASE;
   2954
   2955	enetc_detect_errata(si);
   2956
   2957	return 0;
   2958
   2959err_ioremap:
   2960	enetc_kfree_si(si);
   2961err_alloc_si:
   2962	pci_release_mem_regions(pdev);
   2963err_pci_mem_reg:
   2964err_dma:
   2965	pci_disable_device(pdev);
   2966
   2967	return err;
   2968}
   2969
   2970void enetc_pci_remove(struct pci_dev *pdev)
   2971{
   2972	struct enetc_si *si = pci_get_drvdata(pdev);
   2973	struct enetc_hw *hw = &si->hw;
   2974
   2975	iounmap(hw->reg);
   2976	enetc_kfree_si(si);
   2977	pci_release_mem_regions(pdev);
   2978	pci_disable_device(pdev);
   2979}