cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

enetc.h (14307B)


/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2017-2019 NXP */

#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
#include <linux/dim.h>

#include "enetc_hw.h"

#define ENETC_MAC_MAXFRM_SIZE	9600
#define ENETC_MAX_MTU		(ENETC_MAC_MAXFRM_SIZE - \
				(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))

#define ENETC_CBD_DATA_MEM_ALIGN 64

struct enetc_tx_swbd {
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdp_frame;
	};
	dma_addr_t dma;
	struct page *page;	/* valid only if is_xdp_tx */
	u16 page_offset;	/* valid only if is_xdp_tx */
	u16 len;
	enum dma_data_direction dir;
	u8 is_dma_page:1;
	u8 check_wb:1;
	u8 do_twostep_tstamp:1;
	u8 is_eof:1;
	u8 is_xdp_tx:1;
	u8 is_xdp_redirect:1;
	u8 qbv_en:1;
};

#define ENETC_RX_MAXFRM_SIZE	ENETC_MAC_MAXFRM_SIZE
#define ENETC_RXB_TRUESIZE	2048 /* PAGE_SIZE >> 1 */
#define ENETC_RXB_PAD		NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE	\
	(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
#define ENETC_RXB_DMA_SIZE_XDP	\
	(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM)

struct enetc_rx_swbd {
	dma_addr_t dma;
	struct page *page;
	u16 page_offset;
	enum dma_data_direction dir;
	u16 len;
};

/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val)	((val) + 2)
/* max # of chained Tx BDs is 15, including head and extension BD */
#define ENETC_MAX_SKB_FRAGS	13
#define ENETC_TXBDS_MAX_NEEDED	ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)

struct enetc_ring_stats {
	unsigned int packets;
	unsigned int bytes;
	unsigned int rx_alloc_errs;
	unsigned int xdp_drops;
	unsigned int xdp_tx;
	unsigned int xdp_tx_drops;
	unsigned int xdp_redirect;
	unsigned int xdp_redirect_failures;
	unsigned int xdp_redirect_sg;
	unsigned int recycles;
	unsigned int recycle_failures;
	unsigned int win_drop;
};

struct enetc_xdp_data {
	struct xdp_rxq_info rxq;
	struct bpf_prog *prog;
	int xdp_tx_in_flight;
};

#define ENETC_RX_RING_DEFAULT_SIZE	2048
#define ENETC_TX_RING_DEFAULT_SIZE	2048
#define ENETC_DEFAULT_TX_WORK		(ENETC_TX_RING_DEFAULT_SIZE / 2)

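/* State for one Rx or Tx buffer descriptor ring */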
struct enetc_bdr {
	struct device *dev; /* for DMA mapping */
	struct net_device *ndev;
	void *bd_base; /* points to Rx or Tx BD ring */
	union {
		void __iomem *tpir;
		void __iomem *rcir;
	};
	u16 index;
	int bd_count; /* # of BDs */
	int next_to_use;
	int next_to_clean;
	union {
		struct enetc_tx_swbd *tx_swbd;
		struct enetc_rx_swbd *rx_swbd;
	};
	union {
		void __iomem *tcir; /* Tx */
		int next_to_alloc; /* Rx */
	};
	void __iomem *idr; /* Interrupt Detect Register pointer */

	int buffer_offset;
	struct enetc_xdp_data xdp;

	struct enetc_ring_stats stats;

	dma_addr_t bd_dma_base;
	u8 tsd_enable; /* Time specific departure */
	bool ext_en; /* enable h/w descriptor extensions */

	/* DMA buffer for TSO headers */
	char *tso_headers;
	dma_addr_t tso_headers_dma;
} ____cacheline_aligned_in_smp;

static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
{
	if (unlikely(++*i == bdr->bd_count))
		*i = 0;
}

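/* Number of free BDs between consumer and producer, keeping one BD as a gap
 * so that next_to_use never catches up with next_to_clean on a full ring.
 */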
static inline int enetc_bd_unused(struct enetc_bdr *bdr)
{
	if (bdr->next_to_clean > bdr->next_to_use)
		return bdr->next_to_clean - bdr->next_to_use - 1;

	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
}

static inline int enetc_swbd_unused(struct enetc_bdr *bdr)
{
	if (bdr->next_to_clean > bdr->next_to_alloc)
		return bdr->next_to_clean - bdr->next_to_alloc - 1;

	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_alloc - 1;
}

/* Control BD ring */
#define ENETC_CBDR_DEFAULT_SIZE	64
struct enetc_cbdr {
	void *bd_base; /* points to Rx or Tx BD ring */
	void __iomem *pir;
	void __iomem *cir;
	void __iomem *mr; /* mode register */

	int bd_count; /* # of BDs */
	int next_to_use;
	int next_to_clean;

	dma_addr_t bd_dma_base;
	struct device *dma_dev;
};

#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))

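/* With extended BDs enabled (Rx timestamping, when the PTP clock driver is
 * built in), each Rx BD occupies two consecutive hardware descriptors, so
 * the hardware index is the software index doubled.
 */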
static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
{
	int hw_idx = i;

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
	if (rx_ring->ext_en)
		hw_idx = 2 * i;
#endif
	return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
}

static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
				   union enetc_rx_bd **old_rxbd, int *old_index)
{
	union enetc_rx_bd *new_rxbd = *old_rxbd;
	int new_index = *old_index;

	new_rxbd++;

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
	if (rx_ring->ext_en)
		new_rxbd++;
#endif

	if (unlikely(++new_index == rx_ring->bd_count)) {
		new_rxbd = rx_ring->bd_base;
		new_index = 0;
	}

	*old_rxbd = new_rxbd;
	*old_index = new_index;
}

static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd)
{
	return ++rxbd;
}

struct enetc_msg_swbd {
	void *vaddr;
	dma_addr_t dma;
	int size;
};

#define ENETC_REV1	0x1
enum enetc_errata {
	ENETC_ERR_VLAN_ISOL	= BIT(0),
	ENETC_ERR_UCMCSWP	= BIT(1),
};

#define ENETC_SI_F_QBV BIT(0)
#define ENETC_SI_F_PSFP BIT(1)

/* PCI IEP device data */
struct enetc_si {
	struct pci_dev *pdev;
	struct enetc_hw hw;
	enum enetc_errata errata;

	struct net_device *ndev; /* back ref. */

	struct enetc_cbdr cbd_ring;

	int num_rx_rings; /* how many rings are available in the SI */
	int num_tx_rings;
	int num_fs_entries;
	int num_rss; /* number of RSS buckets */
	unsigned short pad;
	int hw_features;
};

#define ENETC_SI_ALIGN	32

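/* Driver private data is laid out immediately after struct enetc_si, at the
 * next ENETC_SI_ALIGN boundary.
 */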
static inline void *enetc_si_priv(const struct enetc_si *si)
{
	return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
}

static inline bool enetc_si_is_pf(struct enetc_si *si)
{
	return !!(si->hw.port);
}

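/* Map a PF's PCI function number to its port index: devfn 0/1/2 are ports
 * 0-2, devfn 6 is port 3; returns -1 for any other function.
 */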
static inline int enetc_pf_to_port(struct pci_dev *pf_pdev)
{
	switch (pf_pdev->devfn) {
	case 0:
		return 0;
	case 1:
		return 1;
	case 2:
		return 2;
	case 6:
		return 3;
	default:
		return -1;
	}
}

#define ENETC_MAX_NUM_TXQS	8
#define ENETC_INT_NAME_MAX	(IFNAMSIZ + 8)

struct enetc_int_vector {
	void __iomem *rbier;
	void __iomem *tbier_base;
	void __iomem *ricr1;
	unsigned long tx_rings_map;
	int count_tx_rings;
	u32 rx_ictt;
	u16 comp_cnt;
	bool rx_dim_en, rx_napi_work;
	struct napi_struct napi ____cacheline_aligned_in_smp;
	struct dim rx_dim ____cacheline_aligned_in_smp;
	char name[ENETC_INT_NAME_MAX];

	struct enetc_bdr rx_ring;
	struct enetc_bdr tx_ring[];
} ____cacheline_aligned_in_smp;

struct enetc_cls_rule {
	struct ethtool_rx_flow_spec fs;
	int used;
};

#define ENETC_MAX_BDR_INT	2 /* fixed to max # of available cpus */
struct psfp_cap {
	u32 max_streamid;
	u32 max_psfp_filter;
	u32 max_psfp_gate;
	u32 max_psfp_gatelist;
	u32 max_psfp_meter;
};

#define ENETC_F_TX_TSTAMP_MASK	0xff
/* TODO: more hardware offloads */
enum enetc_active_offloads {
	/* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */
	ENETC_F_TX_TSTAMP		= BIT(0),
	ENETC_F_TX_ONESTEP_SYNC_TSTAMP	= BIT(1),

	ENETC_F_RX_TSTAMP		= BIT(8),
	ENETC_F_QBV			= BIT(9),
	ENETC_F_QCI			= BIT(10),
};

enum enetc_flags_bit {
	ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
};

/* interrupt coalescing modes */
enum enetc_ic_mode {
	/* one interrupt per frame */
	ENETC_IC_NONE = 0,
	/* activated when int coalescing time is set to a non-0 value */
	ENETC_IC_RX_MANUAL = BIT(0),
	ENETC_IC_TX_MANUAL = BIT(1),
	/* use dynamic interrupt moderation */
	ENETC_IC_RX_ADAPTIVE = BIT(2),
};

#define ENETC_RXIC_PKTTHR	min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2)
#define ENETC_TXIC_PKTTHR	min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2)
#define ENETC_TXIC_TIMETHR	enetc_usecs_to_cycles(600)

struct enetc_ndev_priv {
	struct net_device *ndev;
	struct device *dev; /* dma-mapping device */
	struct enetc_si *si;

	int bdr_int_num; /* number of Rx/Tx ring interrupts */
	struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT];
	u16 num_rx_rings, num_tx_rings;
	u16 rx_bd_count, tx_bd_count;

	u16 msg_enable;
	enum enetc_active_offloads active_offloads;

	u32 speed; /* store speed for compare update pspeed */

	struct enetc_bdr **xdp_tx_ring;
	struct enetc_bdr *tx_ring[16];
	struct enetc_bdr *rx_ring[16];

	struct enetc_cls_rule *cls_rules;

	struct psfp_cap psfp_cap;

	struct phylink *phylink;
	int ic_mode;
	u32 tx_ictt;

	struct bpf_prog *xdp_prog;

	unsigned long flags;

	struct work_struct	tx_onestep_tstamp;
	struct sk_buff_head	tx_skbs;
};

/* Messaging */

/* VF-PF set primary MAC address message format */
struct enetc_msg_cmd_set_primary_mac {
	struct enetc_msg_cmd_header header;
	struct sockaddr mac;
};

#define ENETC_CBD(R, i)	(&(((struct enetc_cbd *)((R).bd_base))[i]))

#define ENETC_CBDR_TIMEOUT	1000 /* usecs */

/* PTP driver exports */
extern int enetc_phc_index;

/* SI common */
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
void enetc_pci_remove(struct pci_dev *pdev);
int enetc_alloc_msix(struct enetc_ndev_priv *priv);
void enetc_free_msix(struct enetc_ndev_priv *priv);
void enetc_get_si_caps(struct enetc_si *si);
void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
void enetc_free_si_resources(struct enetc_ndev_priv *priv);
int enetc_configure_si(struct enetc_ndev_priv *priv);

int enetc_open(struct net_device *ndev);
int enetc_close(struct net_device *ndev);
void enetc_start(struct net_device *ndev);
void enetc_stop(struct net_device *ndev);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
int enetc_set_features(struct net_device *ndev,
		       netdev_features_t features);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
		   void *type_data);
int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
		   struct xdp_frame **frames, u32 flags);

/* ethtool */
void enetc_set_ethtool_ops(struct net_device *ndev);

/* control buffer descriptor ring (CBDR) */
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
		     struct enetc_cbdr *cbdr);
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
			    char *mac_addr, int si_map);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
		       int index);
void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);

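/* Allocate coherent data memory for a control BD: over-allocate by
 * ENETC_CBD_DATA_MEM_ALIGN so an aligned DMA address can be programmed into
 * the BD, and return both the raw allocation (needed for freeing) and the
 * aligned CPU pointer via *data_align.
 */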
static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
					     struct enetc_cbd *cbd,
					     int size, dma_addr_t *dma,
					     void **data_align)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	dma_addr_t dma_align;
	void *data;

	data = dma_alloc_coherent(ring->dma_dev,
				  size + ENETC_CBD_DATA_MEM_ALIGN,
				  dma, GFP_KERNEL);
	if (!data) {
		dev_err(ring->dma_dev, "CBD alloc data memory failed!\n");
		return NULL;
	}

	dma_align = ALIGN(*dma, ENETC_CBD_DATA_MEM_ALIGN);
	*data_align = PTR_ALIGN(data, ENETC_CBD_DATA_MEM_ALIGN);

	cbd->addr[0] = cpu_to_le32(lower_32_bits(dma_align));
	cbd->addr[1] = cpu_to_le32(upper_32_bits(dma_align));
	cbd->length = cpu_to_le16(size);

	return data;
}

static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
					   void *data, dma_addr_t *dma)
{
	struct enetc_cbdr *ring = &si->cbd_ring;

	dma_free_coherent(ring->dma_dev, size + ENETC_CBD_DATA_MEM_ALIGN,
			  data, *dma);
}

#ifdef CONFIG_FSL_ENETC_QOS
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv);
int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
int enetc_psfp_init(struct enetc_ndev_priv *priv);
int enetc_psfp_clean(struct enetc_ndev_priv *priv);

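/* Read the port PSFP capability registers (stream identification, stream
 * filter, stream gate and flow meter limits) into priv->psfp_cap.
 */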
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
	u32 reg;

	reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR);
	priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
	/* Port stream filter capability */
	reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR);
	priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
	/* Port stream gate capability */
	reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR);
	priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
	priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
	/* Port flow meter capability */
	reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR);
	priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
}

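/* Initialize PSFP state and enable stream filtering in the port PSFP mode
 * register; enetc_psfp_disable() below undoes this and clears the cached
 * capabilities.
 */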
static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	int err;

	enetc_get_max_cap(priv);

	err = enetc_psfp_init(priv);
	if (err)
		return err;

	enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) |
		 ENETC_PPSFPMR_PSFPEN | ENETC_PPSFPMR_VS |
		 ENETC_PPSFPMR_PVC | ENETC_PPSFPMR_PVZC);

	return 0;
}

static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	int err;

	err = enetc_psfp_clean(priv);
	if (err)
		return err;

	enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) &
		 ~ENETC_PPSFPMR_PSFPEN & ~ENETC_PPSFPMR_VS &
		 ~ENETC_PPSFPMR_PVC & ~ENETC_PPSFPMR_PVZC);

	memset(&priv->psfp_cap, 0, sizeof(struct psfp_cap));

	return 0;
}

#else
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(priv, speed) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_psfp(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_block_cb NULL

#define enetc_get_max_cap(p)		\
	memset(&((p)->psfp_cap), 0, sizeof(struct psfp_cap))

static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
{
	return 0;
}

static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
	return 0;
}
#endif