cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sparx5_packet.c (9239B)
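Register-based (manual) frame injection and extraction for the Sparx5 switch: IFH parsing, the extraction IRQ handler, and the driver xmit path.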


// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

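/* Words read from the manual extraction FIFO are either frame data or
 * control words of the form 0x8000000X; the ntohl() wrapping keeps the
 * constants comparable with the byte order the FIFO words are read in.
 * XTR_VALID_BYTES() maps the low two bits of an EOF control word to the
 * number of valid bytes in the final data word.
 */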
#define XTR_EOF_0     ntohl((__force __be32)0x80000000u)
#define XTR_EOF_1     ntohl((__force __be32)0x80000001u)
#define XTR_EOF_2     ntohl((__force __be32)0x80000002u)
#define XTR_EOF_3     ntohl((__force __be32)0x80000003u)
#define XTR_PRUNED    ntohl((__force __be32)0x80000004u)
#define XTR_ABORT     ntohl((__force __be32)0x80000005u)
#define XTR_ESCAPE    ntohl((__force __be32)0x80000006u)
#define XTR_NOT_READY ntohl((__force __be32)0x80000007u)

#define XTR_VALID_BYTES(x)      (4 - ((x) & 3))

#define INJ_TIMEOUT_NS 50000

void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp)
{
	/* Start flush */
	spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH);

	/* Allow to drain */
	mdelay(1);

	/* All Queues normal */
	spx5_wr(0, sparx5, QS_XTR_FLUSH);
}

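/* Decode the fields needed from the extraction IFH: the source port
 * (seven bits of the FWD field) and the 32-bit RX timestamp carried in
 * bytes 2-5. Field bit positions are counted from the end of the header,
 * which is why bit 45 of FWD is found in byte 30.
 */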
void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
{
	u8 *xtr_hdr = (u8 *)ifh;

	/* FWD is bit 45-72 (28 bits), but we only read the 27 LSB for now */
	u32 fwd =
		((u32)xtr_hdr[27] << 24) |
		((u32)xtr_hdr[28] << 16) |
		((u32)xtr_hdr[29] <<  8) |
		((u32)xtr_hdr[30] <<  0);
	fwd = (fwd >> 5);
	info->src_port = FIELD_GET(GENMASK(7, 1), fwd);

	info->timestamp =
		((u64)xtr_hdr[2] << 24) |
		((u64)xtr_hdr[3] << 16) |
		((u64)xtr_hdr[4] <<  8) |
		((u64)xtr_hdr[5] <<  0);
}

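/* Drain one frame from extraction group @grp: read the IFH words first,
 * then copy data words into an skb until an EOF or ABORT control word
 * terminates the frame. Aborted and pruned frames are counted and
 * dropped.
 */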
static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
{
	bool eof_flag = false, pruned_flag = false, abort_flag = false;
	struct net_device *netdev;
	struct sparx5_port *port;
	struct frame_info fi;
	int i, byte_cnt = 0;
	struct sk_buff *skb;
	u32 ifh[IFH_LEN];
	u32 *rxbuf;

	/* Get IFH */
	for (i = 0; i < IFH_LEN; i++)
		ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));

	/* Decode IFH (what's needed) */
	sparx5_ifh_parse(ifh, &fi);

	/* Map to port netdev */
	port = fi.src_port < SPX5_PORTS ?
		sparx5->ports[fi.src_port] : NULL;
	if (!port || !port->ndev) {
		dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
		sparx5_xtr_flush(sparx5, grp);
		return;
	}

	/* Have netdev, get skb */
	netdev = port->ndev;
	skb = netdev_alloc_skb(netdev, netdev->mtu + ETH_HLEN);
	if (!skb) {
		sparx5_xtr_flush(sparx5, grp);
		dev_err(sparx5->dev, "No skb allocated\n");
		netdev->stats.rx_dropped++;
		return;
	}
	rxbuf = (u32 *)skb->data;

	/* Now, pull frame data */
	while (!eof_flag) {
		u32 val = spx5_rd(sparx5, QS_XTR_RD(grp));
		u32 cmp = val;

		if (byte_swap)
			cmp = ntohl((__force __be32)val);

		switch (cmp) {
		case XTR_NOT_READY:
			break;
		case XTR_ABORT:
			/* No accompanying data */
			abort_flag = true;
			eof_flag = true;
			break;
		case XTR_EOF_0:
		case XTR_EOF_1:
		case XTR_EOF_2:
		case XTR_EOF_3:
			/* This assumes STATUS_WORD_POS == 1, Status
			 * just after last data
			 */
			byte_cnt -= (4 - XTR_VALID_BYTES(val));
			eof_flag = true;
			break;
		case XTR_PRUNED:
			/* But get the last 4 bytes as well */
			eof_flag = true;
			pruned_flag = true;
			fallthrough;
		case XTR_ESCAPE:
			*rxbuf = spx5_rd(sparx5, QS_XTR_RD(grp));
			byte_cnt += 4;
			rxbuf++;
			break;
		default:
			*rxbuf = val;
			byte_cnt += 4;
			rxbuf++;
		}
	}

	if (abort_flag || pruned_flag || !eof_flag) {
		netdev_err(netdev, "Discarded frame: abort:%d pruned:%d eof:%d\n",
			   abort_flag, pruned_flag, eof_flag);
		kfree_skb(skb);
		netdev->stats.rx_dropped++;
		return;
	}

	/* Everything we see on an interface that is in the HW bridge
	 * has already been forwarded
	 */
	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;

	/* Finish up skb */
	skb_put(skb, byte_cnt - ETH_FCS_LEN);
	eth_skb_pad(skb);
	sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
	skb->protocol = eth_type_trans(skb, netdev);
	netdev->stats.rx_bytes += skb->len;
	netdev->stats.rx_packets++;
	netif_rx(skb);
}

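/* Register-based injection: signal SOF, write the IFH and the frame
 * data (padded to the 60-byte minimum), then signal EOF with the number
 * of valid bytes in the last word, followed by a dummy CRC word. If the
 * injection watermark is reached afterwards, the TX queue is stopped and
 * re-enabled from the injection timer.
 */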
static int sparx5_inject(struct sparx5 *sparx5,
			 u32 *ifh,
			 struct sk_buff *skb,
			 struct net_device *ndev)
{
	int grp = INJ_QUEUE;
	u32 val, w, count;
	u8 *buf;

	val = spx5_rd(sparx5, QS_INJ_STATUS);
	if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp))) {
		pr_err_ratelimited("Injection: Queue not ready: 0x%lx\n",
				   QS_INJ_STATUS_FIFO_RDY_GET(val));
		return -EBUSY;
	}

	/* Indicate SOF */
	spx5_wr(QS_INJ_CTRL_SOF_SET(1) |
		QS_INJ_CTRL_GAP_SIZE_SET(1),
		sparx5, QS_INJ_CTRL(grp));

	/* Write the IFH to the chip. */
	for (w = 0; w < IFH_LEN; w++)
		spx5_wr(ifh[w], sparx5, QS_INJ_WR(grp));

	/* Write words, round up */
	count = DIV_ROUND_UP(skb->len, 4);
	buf = skb->data;
	for (w = 0; w < count; w++, buf += 4) {
		val = get_unaligned((const u32 *)buf);
		spx5_wr(val, sparx5, QS_INJ_WR(grp));
	}

	/* Add padding */
	while (w < (60 / 4)) {
		spx5_wr(0, sparx5, QS_INJ_WR(grp));
		w++;
	}

	/* Indicate EOF and valid bytes in last word */
	spx5_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
		QS_INJ_CTRL_VLD_BYTES_SET(skb->len < 60 ? 0 : skb->len % 4) |
		QS_INJ_CTRL_EOF_SET(1),
		sparx5, QS_INJ_CTRL(grp));

	/* Add dummy CRC */
	spx5_wr(0, sparx5, QS_INJ_WR(grp));
	w++;

	val = spx5_rd(sparx5, QS_INJ_STATUS);
	if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
		struct sparx5_port *port = netdev_priv(ndev);

		pr_err_ratelimited("Injection: Watermark reached: 0x%lx\n",
				   QS_INJ_STATUS_WMARK_REACHED_GET(val));
		netif_stop_queue(ndev);
		hrtimer_start(&port->inj_timer, INJ_TIMEOUT_NS,
			      HRTIMER_MODE_REL);
	}

	return NETDEV_TX_OK;
}

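/* Driver xmit entry point: build the injection IFH for the egress port,
 * request a PTP TX timestamp when one is asked for, then hand the frame
 * to FDMA when available, falling back to register-based injection
 * otherwise. For two-step PTP the skb is kept alive for the timestamp
 * completion path instead of being freed here.
 */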
int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u32 ifh[IFH_LEN];
	int ret;

	memset(ifh, 0, IFH_LEN * 4);
	sparx5_set_port_ifh(ifh, port->portno);

	if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		ret = sparx5_ptp_txtstamp_request(port, skb);
		if (ret)
			return ret;

		sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
		sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type);
		sparx5_set_port_ifh_pdu_w16_offset(ifh, SPARX5_SKB_CB(skb)->pdu_w16_offset);
		sparx5_set_port_ifh_timestamp(ifh, SPARX5_SKB_CB(skb)->ts_id);
	}

	skb_tx_timestamp(skb);
	if (sparx5->fdma_irq > 0)
		ret = sparx5_fdma_xmit(sparx5, ifh, skb);
	else
		ret = sparx5_inject(sparx5, ifh, skb, dev);

	if (ret == NETDEV_TX_OK) {
		stats->tx_bytes += skb->len;
		stats->tx_packets++;

		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
		    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
			return ret;

		dev_kfree_skb_any(skb);
	} else {
		stats->tx_dropped++;

		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
		    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
			sparx5_ptp_txtstamp_release(port, skb);
	}
	return ret;
}

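/* hrtimer callback armed by sparx5_inject() when the injection watermark
 * is reached: clear the DSM TX counter if the watermark is still set,
 * then wake the stopped TX queue.
 */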
static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr)
{
	struct sparx5_port *port = container_of(tmr, struct sparx5_port,
						inj_timer);
	int grp = INJ_QUEUE;
	u32 val;

	val = spx5_rd(port->sparx5, QS_INJ_STATUS);
	if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
		pr_err_ratelimited("Injection: Reset watermark count\n");
		/* Reset Watermark count to restart */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 port->sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(port->portno));
	}
	netif_wake_queue(port->ndev);
	return HRTIMER_NORESTART;
}

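/* Switch the extraction and injection groups to register (manual) mode
 * and prepare both internal CPU ports: IFH-prefixed frames without
 * preamble, padding enabled, and DSM watermark state cleared so queued
 * frames do not stall injection.
 */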
int sparx5_manual_injection_mode(struct sparx5 *sparx5)
{
	const int byte_swap = 1;
	int portno;

	/* Change mode to manual extraction and injection */
	spx5_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
		QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
		QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
	spx5_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
		QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));

	/* CPU ports capture setup */
	for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
		/* ASM CPU port: No preamble, IFH, enable padding */
		spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
			ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
			ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
			sparx5, ASM_PORT_CFG(portno));

		/* Reset WM cnt to unclog queued frames */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Set Disassembler Stop Watermark level */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(0),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Enable Disassembler buffer underrun watchdog */
		spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(0),
			 DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
			 sparx5,
			 DSM_BUF_CFG(portno));
	}
	return 0;
}

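/* Extraction IRQ handler: drain frames from the extraction queue while
 * data is present, bounded to 64 frames per invocation to limit the time
 * spent in the handler.
 */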
irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
{
	struct sparx5 *s5 = _sparx5;
	int poll = 64;

	/* Check data in queue */
	while (spx5_rd(s5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE) && poll-- > 0)
		sparx5_xtr_grp(s5, XTR_QUEUE, false);

	return IRQ_HANDLED;
}

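/* Set up the per-port injection backpressure timer used by
 * sparx5_inject() and sparx5_injection_timeout().
 */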
void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
	hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->inj_timer.function = sparx5_injection_timeout;
}