cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gve_desc.h (4589B)


/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

/* GVE Transmit Descriptor formats */

#ifndef _GVE_DESC_H_
#define _GVE_DESC_H_

#include <linux/build_bug.h>
/* A note on seg_addrs
 *
 * Base addresses encoded in seg_addr are not assumed to be physical
 * addresses. The ring format assumes these come from some linear address
 * space. This could be physical memory, kernel virtual memory, or user
 * virtual memory.
 * If raw DMA addressing is not supported then gVNIC uses lists of registered
 * pages. Each queue is assumed to be associated with a single such linear
 * address space to ensure a consistent meaning for seg_addrs posted to its
 * rings.
 */

struct gve_tx_pkt_desc {
	u8	type_flags;  /* desc type is upper 4 bits, flags lower */
	u8	l4_csum_offset;  /* relative offset of L4 csum word */
	u8	l4_hdr_offset;  /* Offset of start of L4 headers in packet */
	u8	desc_cnt;  /* Total descriptors for this packet */
	__be16	len;  /* Total length of this packet (in bytes) */
	__be16	seg_len;  /* Length of this descriptor's segment */
	__be64	seg_addr;  /* Base address (see note) of this segment */
} __packed;

struct gve_tx_mtd_desc {
	u8      type_flags;     /* type is upper 4 bits, subtype lower  */
	u8      path_state;     /* state is lower 4 bits, hash type upper */
	__be16  reserved0;
	__be32  path_hash;
	__be64  reserved1;
} __packed;

struct gve_tx_seg_desc {
	u8	type_flags;	/* type is upper 4 bits, flags lower	*/
	u8	l3_offset;	/* TSO: 2 byte units to start of IPH	*/
	__be16	reserved;
	__be16	mss;		/* TSO MSS				*/
	__be16	seg_len;
	__be64	seg_addr;
} __packed;

/* GVE Transmit Descriptor Types */
#define	GVE_TXD_STD		(0x0 << 4) /* Std with Host Address	*/
#define	GVE_TXD_TSO		(0x1 << 4) /* TSO with Host Address	*/
#define	GVE_TXD_SEG		(0x2 << 4) /* Seg with Host Address	*/
#define	GVE_TXD_MTD		(0x3 << 4) /* Metadata			*/

/* GVE Transmit Descriptor Flags for Std Pkts */
#define	GVE_TXF_L4CSUM	BIT(0)	/* Need csum offload */
#define	GVE_TXF_TSTAMP	BIT(2)	/* Timestamp required */

/* GVE Transmit Descriptor Flags for TSO Segs */
#define	GVE_TXSF_IPV6	BIT(1)	/* IPv6 TSO */
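
/* Illustrative sketch (not part of the upstream gve driver): how a standard
 * packet descriptor and a TSO segment descriptor might be composed from the
 * definitions above, loosely modeled on the driver's TX path. Helper names
 * and parameters are hypothetical; csum and header offsets are encoded in
 * 16-bit words on the wire, hence the shifts. Assumes the usual kernel
 * byteorder helpers (cpu_to_be16/cpu_to_be64) are in scope.
 */
static inline void gve_example_fill_std_desc(struct gve_tx_pkt_desc *pkt,
					     u8 desc_cnt, u16 csum_offset,
					     u16 l4_hdr_offset, u16 pkt_len,
					     u16 seg_len, u64 addr)
{
	pkt->type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
	pkt->l4_csum_offset = csum_offset >> 1;	/* bytes -> 16-bit words */
	pkt->l4_hdr_offset = l4_hdr_offset >> 1;
	pkt->desc_cnt = desc_cnt;
	pkt->len = cpu_to_be16(pkt_len);
	pkt->seg_len = cpu_to_be16(seg_len);
	pkt->seg_addr = cpu_to_be64(addr);
}

static inline void gve_example_fill_tso_seg(struct gve_tx_seg_desc *seg,
					    bool is_ipv6, u16 l3_offset,
					    u16 mss, u16 len, u64 addr)
{
	seg->type_flags = GVE_TXD_SEG | (is_ipv6 ? GVE_TXSF_IPV6 : 0);
	seg->l3_offset = l3_offset >> 1;	/* 2-byte units, per the struct */
	seg->mss = cpu_to_be16(mss);
	seg->seg_len = cpu_to_be16(len);
	seg->seg_addr = cpu_to_be64(addr);
}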

/* GVE Transmit Descriptor Options for MTD Segs */
#define GVE_MTD_SUBTYPE_PATH		0

#define GVE_MTD_PATH_STATE_DEFAULT	0
#define GVE_MTD_PATH_STATE_TIMEOUT	1
#define GVE_MTD_PATH_STATE_CONGESTION	2
#define GVE_MTD_PATH_STATE_RETRANSMIT	3

#define GVE_MTD_PATH_HASH_NONE         (0x0 << 4)
#define GVE_MTD_PATH_HASH_L4           (0x1 << 4)
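
/* Illustrative sketch (not part of the upstream gve driver): an MTD
 * descriptor carries per-path metadata rather than packet data. This shows
 * how one might be composed from the definitions above; the helper name is
 * hypothetical and cpu_to_be32 is assumed to be in scope.
 */
static inline void gve_example_fill_mtd_desc(struct gve_tx_mtd_desc *mtd,
					     u32 path_hash)
{
	mtd->type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
	mtd->path_state = GVE_MTD_PATH_STATE_DEFAULT | GVE_MTD_PATH_HASH_L4;
	mtd->path_hash = cpu_to_be32(path_hash);
	mtd->reserved0 = 0;
	mtd->reserved1 = 0;
}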

/* GVE Receive Packet Descriptor */
/* The start of an Ethernet packet comes 2 bytes into the rx buffer.
 * gVNIC adds this padding so that both the DMA and the L3/4 protocol header
 * accesses are aligned.
 */
#define GVE_RX_PAD 2

struct gve_rx_desc {
	u8	padding[48];
	__be32	rss_hash;  /* Receive-side scaling hash (Toeplitz for gVNIC) */
	__be16	mss;
	__be16	reserved;  /* Reserved to zero */
	u8	hdr_len;  /* Header length (L2-L4) including padding */
	u8	hdr_off;  /* 64-byte-scaled offset into RX_DATA entry */
	__sum16	csum;  /* 1's-complement partial checksum of L3+ bytes */
	__be16	len;  /* Length of the received packet */
	__be16	flags_seq;  /* Flags [15:3] and sequence number [2:0] (1-7) */
} __packed;
static_assert(sizeof(struct gve_rx_desc) == 64);
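
/* Illustrative sketch (not part of the upstream gve driver): the
 * descriptor's len field presumably covers the GVE_RX_PAD bytes preceding
 * the Ethernet header, so a receive path skips the pad and shortens the
 * length to match. Helper names are hypothetical; be16_to_cpu is assumed
 * to be in scope.
 */
static inline u16 gve_example_payload_len(const struct gve_rx_desc *desc)
{
	return be16_to_cpu(desc->len) - GVE_RX_PAD;
}

static inline void *gve_example_payload(void *buf)
{
	return (u8 *)buf + GVE_RX_PAD;	/* first byte of the Ethernet header */
}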

/* If the device supports raw DMA addressing then the addr in the data slot
 * is the DMA address of the buffer.
 * If the device only supports registered segments then the addr is a byte
 * offset into the registered segment (an ordered list of pages) where the
 * buffer is.
 */
union gve_rx_data_slot {
	__be64 qpl_offset;
	__be64 addr;
};
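
/* Illustrative sketch (not part of the upstream gve driver): the same slot
 * is written differently depending on which addressing mode was negotiated
 * with the device. Helper names are hypothetical.
 */
static inline void gve_example_set_slot_raw(union gve_rx_data_slot *slot,
					    u64 dma_addr)
{
	slot->addr = cpu_to_be64(dma_addr);	/* raw DMA address */
}

static inline void gve_example_set_slot_qpl(union gve_rx_data_slot *slot,
					    u64 byte_offset)
{
	slot->qpl_offset = cpu_to_be64(byte_offset);	/* into registered pages */
}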

/* GVE Receive Packet Descriptor Seq No */
#define GVE_SEQNO(x) (be16_to_cpu(x) & 0x7)

/* GVE Receive Packet Descriptor Flags */
#define GVE_RXFLG(x)	cpu_to_be16(1 << (3 + (x)))
#define	GVE_RXF_FRAG		GVE_RXFLG(3)	/* IP Fragment			*/
#define	GVE_RXF_IPV4		GVE_RXFLG(4)	/* IPv4				*/
#define	GVE_RXF_IPV6		GVE_RXFLG(5)	/* IPv6				*/
#define	GVE_RXF_TCP		GVE_RXFLG(6)	/* TCP Packet			*/
#define	GVE_RXF_UDP		GVE_RXFLG(7)	/* UDP Packet			*/
#define	GVE_RXF_ERR		GVE_RXFLG(8)	/* Packet Error Detected	*/
#define	GVE_RXF_PKT_CONT	GVE_RXFLG(10)	/* Multi Fragment RX packet	*/
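
/* Illustrative sketch (not part of the upstream gve driver): GVE_RXFLG()
 * builds big-endian masks, so flags can be tested directly against the
 * big-endian flags_seq field without byte swapping; only GVE_SEQNO()
 * converts. The helper name is hypothetical.
 */
static inline bool gve_example_rx_ok_tcp(const struct gve_rx_desc *desc)
{
	return (desc->flags_seq & GVE_RXF_TCP) &&
	       !(desc->flags_seq & GVE_RXF_ERR);
}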

/* GVE IRQ */
#define GVE_IRQ_ACK	BIT(31)
#define GVE_IRQ_MASK	BIT(30)
#define GVE_IRQ_EVENT	BIT(29)
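
/* Illustrative sketch (not part of the upstream gve driver): these bits
 * are written big-endian to a per-queue interrupt doorbell register.
 * Re-arming an IRQ after polling might look like this; the doorbell
 * pointer is hypothetical, and <linux/io.h> would need to be included
 * for iowrite32be().
 */
static inline void gve_example_rearm_irq(__be32 __iomem *irq_doorbell)
{
	iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
}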

/* An RSS hash is meaningful only for unfragmented IPv4 or IPv6 packets. */
static inline bool gve_needs_rss(__be16 flag)
{
	if (flag & GVE_RXF_FRAG)
		return false;
	if (flag & (GVE_RXF_IPV4 | GVE_RXF_IPV6))
		return true;
	return false;
}

/* The 3-bit sequence number cycles through 1-7; 0 is skipped. */
static inline u8 gve_next_seqno(u8 seq)
{
	return (seq + 1) == 8 ? 1 : seq + 1;
}
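
/* Illustrative sketch (not part of the upstream gve driver): since the
 * device writes sequence numbers 1-7 and a zeroed descriptor reads back
 * as 0, a driver can detect a newly completed descriptor by comparing
 * against the sequence number it expects next. The helper name is
 * hypothetical.
 */
static inline bool gve_example_desc_ready(const struct gve_rx_desc *desc,
					  u8 expected_seqno)
{
	return GVE_SEQNO(desc->flags_seq) == expected_seqno;
}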
#endif /* _GVE_DESC_H_ */