cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

aq_vec.c (8329B)


// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings.
 * Definition of functions for Rx and Tx rings. Friendly module for aq_nic.
 */

#include "aq_vec.h"

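/* Per-interrupt-vector context: one NAPI instance servicing a Tx/Rx
 * ring pair for each traffic class handled by this vector.
 */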
struct aq_vec_s {
	const struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};

#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1

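/* NAPI poll callback: for each traffic class, reap completed Tx
 * descriptors, then receive, clean, and refill the Rx ring. If Tx was
 * not fully cleaned, the full budget is reported so NAPI polls again;
 * otherwise polling completes and the vector's interrupt is re-enabled.
 */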
static int aq_vec_poll(struct napi_struct *napi, int budget)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	unsigned int sw_tail_old = 0U;
	struct aq_ring_s *ring = NULL;
	bool was_tx_cleaned = true;
	unsigned int i = 0U;
	int work_done = 0;
	int err = 0;

	if (!self) {
		err = -EINVAL;
	} else {
		for (i = 0U; self->tx_rings > i; ++i) {
			ring = self->ring[i];
			u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
			ring[AQ_VEC_RX_ID].stats.rx.polls++;
			u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
			if (self->aq_hw_ops->hw_ring_tx_head_update) {
				err = self->aq_hw_ops->hw_ring_tx_head_update(
							self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
			}

			if (ring[AQ_VEC_TX_ID].sw_head !=
			    ring[AQ_VEC_TX_ID].hw_head) {
				was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
				aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
			}

			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
					    &ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			if (ring[AQ_VEC_RX_ID].sw_head !=
			    ring[AQ_VEC_RX_ID].hw_head) {
				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
						       napi,
						       &work_done,
						       budget - work_done);
				if (err < 0)
					goto err_exit;

				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
				if (err < 0)
					goto err_exit;

				err = self->aq_hw_ops->hw_ring_rx_fill(
					self->aq_hw,
					&ring[AQ_VEC_RX_ID], sw_tail_old);
				if (err < 0)
					goto err_exit;
			}
		}

err_exit:
		if (!was_tx_cleaned)
			work_done = budget;

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		}
	}

	return work_done;
}

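/* Allocate a vector, bind it to a CPU derived from the vector index and
 * the RSS base CPU number, and register the NAPI poll handler.
 */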
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_vec_s *self = NULL;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self)
		goto err_exit;

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	self->tx_rings = 0;
	self->rx_rings = 0;

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
		       aq_vec_poll, NAPI_POLL_WEIGHT);

err_exit:
	return self;
}

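/* Allocate the Tx/Rx ring pair for every traffic class served by this
 * vector and register the Rx rings with the XDP core. On failure,
 * everything allocated so far is released via aq_vec_ring_free().
 */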
int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
		      unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
								    i, idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
				     aq_nic->ndev, idx,
				     self->napi.napi_id) < 0) {
			err = -ENOMEM;
			goto err_exit;
		}
		if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
					       MEM_TYPE_PAGE_SHARED, NULL) < 0) {
			xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
			err = -ENOMEM;
			goto err_exit;
		}

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
			err = -ENOMEM;
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		aq_vec_ring_free(self);
		self = NULL;
	}

	return err;
}

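/* Program the allocated rings into hardware and pre-fill the Rx rings
 * with receive buffers.
 */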
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID], ATL_RING_RX);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID], 0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}

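/* Start the hardware Tx/Rx rings and enable NAPI polling. */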
int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}

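/* Stop the hardware Tx/Rx rings and disable NAPI polling. */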
void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}

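/* Drain the rings: reap Tx buffers still held by the rings and release
 * the Rx buffers posted during aq_vec_init().
 */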
void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}

err_exit:;
}

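/* Unregister the NAPI instance and free the vector itself; ring memory
 * is released separately by aq_vec_ring_free().
 */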
void aq_vec_free(struct aq_vec_s *self)
{
	if (!self)
		goto err_exit;

	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}

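/* Free the ring memory for every traffic class and unregister the Rx
 * rings from XDP. rx_rings can trail tx_rings when aq_vec_ring_alloc()
 * failed partway through a ring pair.
 */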
void aq_vec_ring_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		if (i < self->rx_rings) {
			xdp_rxq_info_unreg(&ring[AQ_VEC_RX_ID].xdp_rxq);
			aq_ring_free(&ring[AQ_VEC_RX_ID]);
		}
	}

	self->tx_rings = 0;
	self->rx_rings = 0;
err_exit:;
}

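/* MSI-X interrupt handler: simply schedule NAPI for this vector. */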
irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

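/* Legacy (INTx) interrupt handler: read the interrupt status and, if
 * this vector raised it, mask the source and schedule NAPI; otherwise
 * re-enable the line and report the interrupt as not ours.
 */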
irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	int err;

	if (!self)
		return IRQ_NONE;
	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		return IRQ_NONE;

	if (irq_mask) {
		self->aq_hw_ops->hw_irq_disable(self->aq_hw,
			      1U << self->aq_ring_param.vec_idx);
		napi_schedule(&self->napi);
	} else {
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

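/* Expose the CPU affinity mask computed in aq_vec_alloc(). */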
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}

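/* A traffic class is valid only if both of its rings were allocated. */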
bool aq_vec_is_valid_tc(struct aq_vec_s *self, const unsigned int tc)
{
	return tc < self->rx_rings && tc < self->tx_rings;
}

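/* Copy the software Rx then Tx counters for one traffic class into
 * @data; returns the number of u64 values written.
 */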
unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data)
{
	unsigned int count;

	if (!aq_vec_is_valid_tc(self, tc))
		return 0;

	count = aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_RX_ID], data);
	count += aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_TX_ID], data + count);

	return count;
}