cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

lan966x_fdma.c (20344B)


// SPDX-License-Identifier: GPL-2.0+

#include "lan966x_main.h"

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
	return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

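/* Allocate one page-order buffer for an RX data block (DB), map it for DMA
 * from the device and store the bus address in the DB dataptr. Returns NULL
 * if either the allocation or the mapping fails.
 */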
static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
					       struct lan966x_db *db)
{
	struct lan966x *lan966x = rx->lan966x;
	dma_addr_t dma_addr;
	struct page *page;

	page = dev_alloc_pages(rx->page_order);
	if (unlikely(!page))
		return NULL;

	dma_addr = dma_map_page(lan966x->dev, page, 0,
				PAGE_SIZE << rx->page_order,
				DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(lan966x->dev, dma_addr)))
		goto free_page;

	db->dataptr = dma_addr;

	return page;

free_page:
	__free_pages(page, rx->page_order);
	return NULL;
}

static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_rx_dcb *dcb;
	struct lan966x_db *db;
	int i, j;

	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &rx->dcbs[i];

		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			dma_unmap_single(lan966x->dev,
					 (dma_addr_t)db->dataptr,
					 PAGE_SIZE << rx->page_order,
					 DMA_FROM_DEVICE);
			__free_pages(rx->page[i][j], rx->page_order);
		}
	}
}

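/* Chain a DCB into the RX ring: mark all of its DBs as pending
 * (FDMA_DCB_STATUS_INTR), set the data block length, terminate the new DCB
 * and link it behind the current last entry.
 */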
static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
				    struct lan966x_rx_dcb *dcb,
				    u64 nextptr)
{
	struct lan966x_db *db;
	int i;

	for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
		db = &dcb->db[i];
		db->status = FDMA_DCB_STATUS_INTR;
	}

	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);

	rx->last_entry->nextptr = nextptr;
	rx->last_entry = dcb;
}

static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_rx_dcb *dcb;
	struct lan966x_db *db;
	struct page *page;
	int i, j;
	int size;

	/* calculate how many pages are needed to allocate the dcbs */
	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);

	rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
	if (!rx->dcbs)
		return -ENOMEM;

	rx->last_entry = rx->dcbs;
	rx->db_index = 0;
	rx->dcb_index = 0;

	/* Now for each dcb allocate the dbs */
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &rx->dcbs[i];
		dcb->info = 0;

		/* For each db allocate a page and map it to the DB dataptr. */
		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			page = lan966x_fdma_rx_alloc_page(rx, db);
			if (!page)
				return -ENOMEM;

			db->status = 0;
			rx->page[i][j] = page;
		}

		lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
	}

	return 0;
}

static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 size;

	/* Now it is possible to clean up the DCBs */
	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
}

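/* Start the extraction (RX) channel: write the address of the first DCB,
 * configure the channel, clear XTR_STOP, enable the doorbell interrupt for
 * this channel and finally activate it.
 */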
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated
	 */
	lan_wr(lower_32_bits((u64)rx->dma), lan966x,
	       FDMA_DCB_LLP(rx->channel_id));
	lan_wr(upper_32_bits((u64)rx->dma), lan966x,
	       FDMA_DCB_LLP1(rx->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(rx->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
		FDMA_PORT_CTRL_XTR_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(rx->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(rx->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;

	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
				    struct lan966x_tx_dcb *dcb)
{
	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = 0;
}

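/* The TX ring keeps its DCBs in coherent DMA memory, while tx->dcbs_buf
 * mirrors them with per-entry software state (skb, DMA address, used/ptp
 * flags) in normal kernel memory.
 */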
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct lan966x_tx_dcb *dcb;
	struct lan966x_db *db;
	int size;
	int i, j;

	tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
			       GFP_KERNEL);
	if (!tx->dcbs_buf)
		return -ENOMEM;

	/* calculate how many pages are needed to allocate the dcbs */
	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
	if (!tx->dcbs)
		goto out;

	/* Now for each dcb allocate the db */
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &tx->dcbs[i];

		for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			db->dataptr = 0;
			db->status = 0;
		}

		lan966x_fdma_tx_add_dcb(tx, dcb);
	}

	return 0;

out:
	kfree(tx->dcbs_buf);
	return -ENOMEM;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	int size;

	kfree(tx->dcbs_buf);

	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
}

static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated
	 */
	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP(tx->channel_id));
	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP1(tx->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(tx->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
		FDMA_PORT_CTRL_INJ_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(tx->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(tx->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);

	tx->activated = false;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	/* Write the registers to reload the channel */
	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		if (netif_queue_stopped(port->dev))
			netif_wake_queue(port->dev);
	}
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		netif_stop_queue(port->dev);
	}
}

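/* Reclaim completed TX DCBs under tx_lock: update the netdev stats, unmap
 * the frame and free the skb (unless a two-step PTP timestamp is still
 * outstanding), then wake the stopped queues if anything was released.
 */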
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct lan966x_db *db;
	unsigned long flags;
	bool clear = false;
	int i;

	spin_lock_irqsave(&lan966x->tx_lock, flags);
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb_buf = &tx->dcbs_buf[i];

		if (!dcb_buf->used)
			continue;

		db = &tx->dcbs[i].db[0];
		if (!(db->status & FDMA_DCB_STATUS_DONE))
			continue;

		dcb_buf->dev->stats.tx_packets++;
		dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len;

		dcb_buf->used = false;
		dma_unmap_single(lan966x->dev,
				 dcb_buf->dma_addr,
				 dcb_buf->skb->len,
				 DMA_TO_DEVICE);
		if (!dcb_buf->ptp)
			dev_kfree_skb_any(dcb_buf->skb);

		clear = true;
	}

	if (clear)
		lan966x_fdma_wakeup_netdev(lan966x);

	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
	struct lan966x_db *db;

	/* Check if there is any data */
	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
		return false;

	return true;
}

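/* Build an skb around a completed RX page: unmap the buffer, read the source
 * port and timestamp from the IFH, strip the IFH, drop the FCS unless
 * NETIF_F_RXFCS is requested, attach the RX timestamp and set
 * offload_fwd_mark when the source port is part of the bridge.
 */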
static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u64 src_port, timestamp;
	struct lan966x_db *db;
	struct sk_buff *skb;
	struct page *page;

	/* Get the received frame and unmap it */
	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	page = rx->page[rx->dcb_index][rx->db_index];
	skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
	if (unlikely(!skb))
		goto unmap_page;

	dma_unmap_single(lan966x->dev, (dma_addr_t)db->dataptr,
			 FDMA_DCB_STATUS_BLOCKL(db->status),
			 DMA_FROM_DEVICE);
	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

	lan966x_ifh_get_src_port(skb->data, &src_port);
	lan966x_ifh_get_timestamp(skb->data, &timestamp);

	WARN_ON(src_port >= lan966x->num_phys_ports);

	skb->dev = lan966x->ports[src_port]->dev;
	skb_pull(skb, IFH_LEN * sizeof(u32));

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (lan966x->bridge_mask & BIT(src_port)) {
		skb->offload_fwd_mark = 1;

		skb_reset_network_header(skb);
		if (!lan966x_hw_offload(lan966x, src_port, skb))
			skb->offload_fwd_mark = 0;
	}

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

unmap_page:
	dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr,
		       FDMA_DCB_STATUS_BLOCKL(db->status),
		       DMA_FROM_DEVICE);
	__free_pages(page, rx->page_order);

	return NULL;
}

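/* NAPI poll: first reclaim finished TX entries, then pull up to "weight"
 * completed RX frames, refill the consumed DCBs with fresh pages and reload
 * the RX channel, and re-enable the doorbell interrupts if the budget was
 * not exhausted.
 */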
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
	struct lan966x_rx *rx = &lan966x->rx;
	int dcb_reload = rx->dcb_index;
	struct lan966x_rx_dcb *old_dcb;
	struct lan966x_db *db;
	struct sk_buff *skb;
	struct page *page;
	int counter = 0;
	u64 nextptr;

	lan966x_fdma_tx_clear_buf(lan966x, weight);

	/* Get all received skb */
	while (counter < weight) {
		if (!lan966x_fdma_rx_more_frames(rx))
			break;

		skb = lan966x_fdma_rx_get_frame(rx);

		rx->page[rx->dcb_index][rx->db_index] = NULL;
		rx->dcb_index++;
		rx->dcb_index &= FDMA_DCB_MAX - 1;

		if (!skb)
			break;

		napi_gro_receive(&lan966x->napi, skb);
		counter++;
	}

	/* Allocate new pages and map them */
	while (dcb_reload != rx->dcb_index) {
		db = &rx->dcbs[dcb_reload].db[rx->db_index];
		page = lan966x_fdma_rx_alloc_page(rx, db);
		if (unlikely(!page))
			break;
		rx->page[dcb_reload][rx->db_index] = page;

		old_dcb = &rx->dcbs[dcb_reload];
		dcb_reload++;
		dcb_reload &= FDMA_DCB_MAX - 1;

		nextptr = rx->dma + ((unsigned long)old_dcb -
				     (unsigned long)rx->dcbs);
		lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
		lan966x_fdma_rx_reload(rx);
	}

	if (counter < weight && napi_complete_done(napi, counter))
		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

	return counter;
}

irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;
	u32 db, err, err_type;

	db = lan_rd(lan966x, FDMA_INTR_DB);
	err = lan_rd(lan966x, FDMA_INTR_ERR);

	if (db) {
		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
		lan_wr(db, lan966x, FDMA_INTR_DB);

		napi_schedule(&lan966x->napi);
	}

	if (err) {
		err_type = lan_rd(lan966x, FDMA_ERRORS);

		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

		lan_wr(err, lan966x, FDMA_INTR_ERR);
		lan_wr(err_type, lan966x, FDMA_ERRORS);
	}

	return IRQ_HANDLED;
}

static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
	struct lan966x_tx_dcb_buf *dcb_buf;
	int i;

	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb_buf = &tx->dcbs_buf[i];
		if (!dcb_buf->used && i != tx->last_in_use)
			return i;
	}

	return -1;
}

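/* Transmit path: pick a free DCB (the last one in use stays reserved), pad
 * the frame to ETH_ZLEN, make room for the IFH in front and the FCS at the
 * end, map the frame for DMA and fill in the DB. The new DCB is then either
 * chained behind the previous one and the channel reloaded, or the channel
 * is activated for the very first frame.
 */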
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx_dcb *next_dcb, *dcb;
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_db *next_db;
	int needed_headroom;
	int needed_tailroom;
	dma_addr_t dma_addr;
	int next_to_use;
	int err;

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* skb processing */
	needed_headroom = max_t(int, IFH_LEN * sizeof(u32) - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev->stats.tx_dropped++;
			err = NETDEV_TX_OK;
			goto release;
		}
	}

	skb_tx_timestamp(skb);
	skb_push(skb, IFH_LEN * sizeof(u32));
	memcpy(skb->data, ifh, IFH_LEN * sizeof(u32));
	skb_put(skb, 4);

	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(lan966x->dev, dma_addr)) {
		dev->stats.tx_dropped++;
		err = NETDEV_TX_OK;
		goto release;
	}

	/* Set up the next DCB */
	next_dcb = &tx->dcbs[next_to_use];
	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;

	next_db = &next_dcb->db[0];
	next_db->dataptr = dma_addr;
	next_db->status = FDMA_DCB_STATUS_SOF |
			  FDMA_DCB_STATUS_EOF |
			  FDMA_DCB_STATUS_INTR |
			  FDMA_DCB_STATUS_BLOCKO(0) |
			  FDMA_DCB_STATUS_BLOCKL(skb->len);

	/* Fill up the buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->skb = skb;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = dev;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		next_dcb_buf->ptp = true;

	if (likely(lan966x->tx.activated)) {
		/* Connect the current DCB to the next one */
		dcb = &tx->dcbs[tx->last_in_use];
		dcb->nextptr = tx->dma + (next_to_use *
					  sizeof(struct lan966x_tx_dcb));

		lan966x_fdma_tx_reload(tx);
	} else {
		/* This is the first frame, so just activate the channel */
		lan966x->tx.activated = true;
		lan966x_fdma_tx_activate(tx);
	}

	/* Move on: this DCB is now the last one in use */
	tx->last_in_use = next_to_use;

	return NETDEV_TX_OK;

release:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		lan966x_ptp_txtstamp_release(port, skb);

	dev_kfree_skb_any(skb);
	return err;
}

static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
	int max_mtu = 0;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		int mtu;

		if (!lan966x->ports[i])
			continue;

		mtu = lan966x->ports[i]->dev->mtu;
		if (mtu > max_mtu)
			max_mtu = mtu;
	}

	return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

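/* Rebuild the DMA rings for a new MTU: stop NAPI and the netdev queues,
 * re-create the RX ring with the new page order and restart it, re-allocate
 * the TX ring, free the old descriptor memory and fall back to the previous
 * rings if an allocation fails.
 */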
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
	void *rx_dcbs, *tx_dcbs, *tx_dcbs_buf;
	dma_addr_t rx_dma, tx_dma;
	u32 size;
	int err;

	/* Save these so they can be freed later */
	rx_dma = lan966x->rx.dma;
	tx_dma = lan966x->tx.dma;
	rx_dcbs = lan966x->rx.dcbs;
	tx_dcbs = lan966x->tx.dcbs;
	tx_dcbs_buf = lan966x->tx.dcbs_buf;

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);
	lan966x_fdma_stop_netdev(lan966x);

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		goto restore;
	lan966x_fdma_rx_start(&lan966x->rx);

	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

	lan966x_fdma_tx_disable(&lan966x->tx);
	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err)
		goto restore_tx;

	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, tx_dcbs, tx_dma);

	kfree(tx_dcbs_buf);

	lan966x_fdma_wakeup_netdev(lan966x);
	napi_enable(&lan966x->napi);

	return err;
restore:
	lan966x->rx.dma = rx_dma;
	lan966x->rx.dcbs = rx_dcbs;
	lan966x_fdma_rx_start(&lan966x->rx);

restore_tx:
	lan966x->tx.dma = tx_dma;
	lan966x->tx.dcbs = tx_dcbs;
	lan966x->tx.dcbs_buf = tx_dcbs_buf;

	return err;
}

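/* Only reload the rings when the largest port MTU (plus the IFH) no longer
 * fits the current RX page order. The CPU port is disabled and its queues
 * drained while the rings are rebuilt.
 */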
int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
	int max_mtu;
	int err;
	u32 val;

	max_mtu = lan966x_fdma_get_max_mtu(lan966x);
	max_mtu += IFH_LEN * sizeof(u32);

	if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 ==
	    lan966x->rx.page_order)
		return 0;

	/* Disable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	/* Flush the CPU queues */
	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
			   READL_SLEEP_US, READL_TIMEOUT_US);

	/* Add a sleep in case there are frames between the queues and the CPU
	 * port
	 */
	usleep_range(1000, 2000);

	err = lan966x_fdma_reload(lan966x, max_mtu);

	/* Re-enable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	return err;
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev)
		return;

	lan966x->fdma_ndev = dev;
	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev == dev) {
		netif_napi_del(&lan966x->napi);
		lan966x->fdma_ndev = NULL;
	}
}

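/* RX is started as soon as its ring is allocated; the TX channel is left
 * inactive here and is activated lazily by lan966x_fdma_xmit() on the first
 * transmitted frame.
 */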
int lan966x_fdma_init(struct lan966x *lan966x)
{
	int err;

	if (!lan966x->fdma)
		return 0;

	lan966x->rx.lan966x = lan966x;
	lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
	lan966x->tx.lan966x = lan966x;
	lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
	lan966x->tx.last_in_use = -1;

	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		return err;

	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err) {
		lan966x_fdma_rx_free(&lan966x->rx);
		return err;
	}

	lan966x_fdma_rx_start(&lan966x->rx);

	return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
	if (!lan966x->fdma)
		return;

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_tx_disable(&lan966x->tx);

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);

	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x_fdma_rx_free(&lan966x->rx);
	lan966x_fdma_tx_free(&lan966x->tx);
}