cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

octep_tx.c (8162B)


// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>

#include "octep_config.h"
#include "octep_main.h"

/* Reset the various indices of the Tx queue data structure. */
static void octep_iq_reset_indices(struct octep_iq *iq)
{
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octep_read_index = 0;
	iq->flush_index = 0;
	iq->pkts_processed = 0;
	iq->pkt_in_done = 0;
	atomic_set(&iq->instr_pending, 0);
}

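/* Theory of operation (editorial sketch, inferred from this file): the Tx
 * "instruction queue" (IQ) is a circular ring tracked by three indices.
 * host_write_index is where the driver posts new descriptors,
 * octep_read_index mirrors how far the hardware has consumed them
 * (refreshed via hw_ops.update_iq_read_idx()), and flush_index trails
 * behind, marking the next completed descriptor whose skb and DMA
 * mappings still need to be released.  Completion processing walks
 * flush_index forward until it catches up with octep_read_index.
 *
 * For scatter/gather packets, each octep_tx_buffer points at a small
 * per-packet sglist whose descriptors appear to hold four (dma_ptr, len)
 * slots each: slot 0 of descriptor 0 maps the linear skb data and the
 * page fragments fill the following slots, which is why the unmap loops
 * below index with sglist[i >> 2] and [i & 3].
 */
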
/**
 * octep_iq_process_completions() - Process Tx queue completions.
 *
 * @iq: Octeon Tx queue data structure.
 * @budget: max number of completions to be processed in one invocation.
 */
int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
{
	u32 compl_pkts, compl_bytes, compl_sg;
	struct octep_device *oct = iq->octep_dev;
	struct octep_tx_buffer *tx_buffer;
	struct skb_shared_info *shinfo;
	u32 fi = iq->flush_index;
	struct sk_buff *skb;
	u8 frags, i;

	compl_pkts = 0;
	compl_sg = 0;
	compl_bytes = 0;
	iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq);

	while (likely(budget && (fi != iq->octep_read_index))) {
		tx_buffer = iq->buff_info + fi;
		skb = tx_buffer->skb;

		fi++;
		if (unlikely(fi == iq->max_count))
			fi = 0;
		compl_bytes += skb->len;
		compl_pkts++;
		budget--;

		if (!tx_buffer->gather) {
			dma_unmap_single(iq->dev, tx_buffer->dma,
					 tx_buffer->skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Scatter/Gather */
		shinfo = skb_shinfo(skb);
		frags = shinfo->nr_frags;
		compl_sg++;

		dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
				 tx_buffer->sglist[0].len[0], DMA_TO_DEVICE);

		i = 1; /* entry 0 is main skb, unmapped above */
		while (frags--) {
			dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
				       tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}

	iq->pkts_processed += compl_pkts;
	atomic_sub(compl_pkts, &iq->instr_pending);
	iq->stats.instr_completed += compl_pkts;
	iq->stats.bytes_sent += compl_bytes;
	iq->stats.sgentry_sent += compl_sg;
	iq->flush_index = fi;

	netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);

	if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
	    ((iq->max_count - atomic_read(&iq->instr_pending)) >
	     OCTEP_WAKE_QUEUE_THRESHOLD))
		netif_wake_subqueue(iq->netdev, iq->q_no);
	return !budget;
}

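/* Usage note (editorial, not part of the original source): the function
 * above returns !budget, i.e. non-zero only when the entire budget was
 * consumed, so the caller can tell whether completion work is left.  A
 * NAPI poll handler (presumably in octep_main.c) might consume it roughly
 * as in the following hedged sketch -- not the driver's exact code:
 *
 *	static int octep_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct octep_ioq_vector *v =
 *			container_of(napi, struct octep_ioq_vector, napi);
 *		int tx_busy = octep_iq_process_completions(v->iq, budget);
 *		int rx_done = octep_oq_process_rx(v->oq, budget);
 *
 *		if (tx_busy || rx_done >= budget)
 *			return budget;		// keep polling
 *		napi_complete_done(napi, rx_done);
 *		// re-enable the per-queue interrupt here
 *		return rx_done;
 *	}
 */
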
/**
 * octep_iq_free_pending() - Free Tx buffers for pending completions.
 *
 * @iq: Octeon Tx queue data structure.
 */
static void octep_iq_free_pending(struct octep_iq *iq)
{
	struct octep_tx_buffer *tx_buffer;
	struct skb_shared_info *shinfo;
	u32 fi = iq->flush_index;
	struct sk_buff *skb;
	u8 frags, i;

	while (fi != iq->host_write_index) {
		tx_buffer = iq->buff_info + fi;
		skb = tx_buffer->skb;

		fi++;
		if (unlikely(fi == iq->max_count))
			fi = 0;

		if (!tx_buffer->gather) {
			dma_unmap_single(iq->dev, tx_buffer->dma,
					 tx_buffer->skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Scatter/Gather */
		shinfo = skb_shinfo(skb);
		frags = shinfo->nr_frags;

		dma_unmap_single(iq->dev,
				 tx_buffer->sglist[0].dma_ptr[0],
				 tx_buffer->sglist[0].len[0],
				 DMA_TO_DEVICE);

		i = 1; /* entry 0 is main skb, unmapped above */
		while (frags--) {
			dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
				       tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}

	atomic_set(&iq->instr_pending, 0);
	iq->flush_index = fi;
	netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
}

/**
 * octep_clean_iqs() - Clean Tx queues to shut down the device.
 *
 * @oct: Octeon device private data structure.
 *
 * Free the buffers in Tx queue descriptors pending completion and
 * reset queue indices.
 */
void octep_clean_iqs(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_iqs; i++) {
		octep_iq_free_pending(oct->iq[i]);
		octep_iq_reset_indices(oct->iq[i]);
	}
}
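
/* Editorial note: octep_clean_iqs() only drops the Tx buffers still
 * pending completion and rewinds the ring indices; the descriptor rings
 * and sglist memory stay allocated.  That makes it suitable for the
 * stop/reset path, presumably run before the queues are either reused or
 * finally torn down with octep_free_iqs().
 */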

/**
 * octep_setup_iq() - Set up a Tx queue.
 *
 * @oct: Octeon device private data structure.
 * @q_no: Tx queue number to be set up.
 *
 * Allocate resources for a Tx queue.
 */
static int octep_setup_iq(struct octep_device *oct, int q_no)
{
	u32 desc_ring_size, buff_info_size, sglist_size;
	struct octep_iq *iq;
	int i;

	iq = vzalloc(sizeof(*iq));
	if (!iq)
		goto iq_alloc_err;
	oct->iq[q_no] = iq;

	iq->octep_dev = oct;
	iq->netdev = oct->netdev;
	iq->dev = &oct->pdev->dev;
	iq->q_no = q_no;
	iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
	iq->ring_size_mask = iq->max_count - 1;
	iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
	iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);

	/* Allocate memory for hardware queue descriptors */
	desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
	iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
					   &iq->desc_ring_dma, GFP_KERNEL);
	if (unlikely(!iq->desc_ring)) {
		dev_err(iq->dev,
			"Failed to allocate DMA memory for IQ-%d\n", q_no);
		goto desc_dma_alloc_err;
	}

	/* Allocate memory for hardware SGLIST descriptors */
	sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
		      CFG_GET_IQ_NUM_DESC(oct->conf);
	iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
					&iq->sglist_dma, GFP_KERNEL);
	if (unlikely(!iq->sglist)) {
		dev_err(iq->dev,
			"Failed to allocate DMA memory for IQ-%d SGLIST\n",
			q_no);
		goto sglist_alloc_err;
	}

	/* allocate memory to manage Tx packets pending completion */
	buff_info_size = OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count;
	iq->buff_info = vzalloc(buff_info_size);
	if (!iq->buff_info) {
		dev_err(iq->dev,
			"Failed to allocate buff info for IQ-%d\n", q_no);
		goto buff_info_err;
	}

	/* Set up sglist addresses in tx_buffer entries */
	for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
		struct octep_tx_buffer *tx_buffer;

		tx_buffer = &iq->buff_info[i];
		tx_buffer->sglist =
			&iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT];
		tx_buffer->sglist_dma =
			iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT);
	}

	octep_iq_reset_indices(iq);
	oct->hw_ops.setup_iq_regs(oct, q_no);

	oct->num_iqs++;
	return 0;

buff_info_err:
	dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
sglist_alloc_err:
	dma_free_coherent(iq->dev, desc_ring_size,
			  iq->desc_ring, iq->desc_ring_dma);
desc_dma_alloc_err:
	vfree(iq);
	oct->iq[q_no] = NULL;
iq_alloc_err:
	return -1;
}

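/* Editorial note: ring_size_mask is set to max_count - 1 above, which only
 * works as a wrap-around mask if CFG_GET_IQ_NUM_DESC() yields a power of
 * two -- presumably guaranteed by the queue configuration.  Note also that
 * the error labels mirror the allocation order exactly, so each failure
 * point unwinds just the resources acquired before it.
 */
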
/**
 * octep_free_iq() - Free Tx queue resources.
 *
 * @iq: Octeon Tx queue data structure.
 *
 * Free all the resources allocated for a Tx queue.
 */
static void octep_free_iq(struct octep_iq *iq)
{
	struct octep_device *oct = iq->octep_dev;
	u64 desc_ring_size, sglist_size;
	int q_no = iq->q_no;

	desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);

	vfree(iq->buff_info);

	if (iq->desc_ring)
		dma_free_coherent(iq->dev, desc_ring_size,
				  iq->desc_ring, iq->desc_ring_dma);

	sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
		      CFG_GET_IQ_NUM_DESC(oct->conf);
	if (iq->sglist)
		dma_free_coherent(iq->dev, sglist_size,
				  iq->sglist, iq->sglist_dma);

	vfree(iq);
	oct->iq[q_no] = NULL;
	oct->num_iqs--;
}

/**
 * octep_setup_iqs() - Set up resources for all Tx queues.
 *
 * @oct: Octeon device private data structure.
 */
int octep_setup_iqs(struct octep_device *oct)
{
	int i;

	oct->num_iqs = 0;
	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
		if (octep_setup_iq(oct, i)) {
			dev_err(&oct->pdev->dev,
				"Failed to setup IQ(TxQ)-%d.\n", i);
			goto iq_setup_err;
		}
		dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
	}

	return 0;

iq_setup_err:
	while (i) {
		i--;
		octep_free_iq(oct->iq[i]);
	}
	return -1;
}
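
/* Editorial note: on partial failure the unwind loop above releases only
 * the queues that were already set up; octep_setup_iq() increments
 * oct->num_iqs and octep_free_iq() decrements it, so the counter is
 * consistent again once the unwind completes.
 */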

/**
 * octep_free_iqs() - Free resources of all Tx queues.
 *
 * @oct: Octeon device private data structure.
 */
void octep_free_iqs(struct octep_device *oct)
{
	int i;

	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
		octep_free_iq(oct->iq[i]);
		dev_dbg(&oct->pdev->dev,
			"Successfully destroyed IQ(TxQ)-%d.\n", i);
	}
	oct->num_iqs = 0;
}