cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

udp.h (15911B)


/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP module.
 *
 * Version:	@(#)udp.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Alan Cox	: Turned on udp checksums. I don't want to
 *				  chase 'memory corruption' bugs that aren't!
 */
#ifndef _UDP_H
#define _UDP_H

#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/indirect_call_wrapper.h>

/**
 *	struct udp_skb_cb  -  UDP(-Lite) private variables
 *
 *	@header:      private variables used by IPv4/IPv6
 *	@cscov:       checksum coverage length (UDP-Lite only)
 *	@partial_cov: if set indicates partial csum coverage
 */
struct udp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	__u16		cscov;
	__u8		partial_cov;
};
#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))
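
/* Illustrative sketch (not part of the original header): UDP keeps its
 * per-packet state in the generic 48-byte skb->cb[] scratch area, and
 * UDP_SKB_CB() is the typed accessor for it. A UDP-Lite receive path
 * could test for partial checksum coverage like this (the helper name
 * is made up for illustration):
 */
static inline bool udp_skb_partial_cov_example(struct sk_buff *skb)
{
	struct udp_skb_cb *cb = UDP_SKB_CB(skb);

	return cb->partial_cov && cb->cscov < skb->len;
}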

/**
 *	struct udp_hslot - UDP hash slot
 *
 *	@head:	head of list of sockets
 *	@count:	number of sockets in 'head' list
 *	@lock:	spinlock protecting changes to head/count
 */
struct udp_hslot {
	struct hlist_head	head;
	int			count;
	spinlock_t		lock;
} __attribute__((aligned(2 * sizeof(long))));

/**
 *	struct udp_table - UDP table
 *
 *	@hash:	hash table, sockets are hashed on (local port)
 *	@hash2:	hash table, sockets are hashed on (local port, local address)
 *	@mask:	number of slots in hash tables, minus 1
 *	@log:	log2(number of slots in hash table)
 */
struct udp_table {
	struct udp_hslot	*hash;
	struct udp_hslot	*hash2;
	unsigned int		mask;
	unsigned int		log;
};
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
					     struct net *net, unsigned int num)
{
	return &table->hash[udp_hashfn(net, num, table->mask)];
}
/*
 * For the secondary hash, net_hash_mix() is performed before calling
 * udp_hashslot2(); this explains the difference from udp_hashslot().
 */
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash2[hash & table->mask];
}
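
/* Illustrative sketch (not part of the original header): both tables are
 * used with the same pattern - pick the slot for the key, take its lock,
 * walk the hlist. The helper below (name and filter made up for
 * illustration; the real lookups live in net/ipv4/udp.c) counts sockets
 * bound to one local port; the filter is needed because a slot holds
 * every socket whose port hashes there, not just this port.
 */
static inline int udp_hslot_count_port_example(struct net *net, u16 port)
{
	struct udp_hslot *hslot = udp_hashslot(&udp_table, net, port);
	struct sock *sk;
	int n = 0;

	spin_lock_bh(&hslot->lock);
	sk_for_each(sk, &hslot->head)
		if (udp_sk(sk)->udp_port_hash == port)
			n++;
	spin_unlock_bh(&hslot->lock);

	return n;
}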

extern struct proto udp_prot;

extern atomic_long_t udp_memory_allocated;

/* sysctl variables for udp */
extern long sysctl_udp_mem[3];
extern int sysctl_udp_rmem_min;
extern int sysctl_udp_wmem_min;

struct sk_buff;

/*
 *	Generic checksumming routines for UDP(-Lite) v4 and v6
 */
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
	return (UDP_SKB_CB(skb)->cscov == skb->len ?
		__skb_checksum_complete(skb) :
		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
}

static inline int udp_lib_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__udp_lib_checksum_complete(skb);
}
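
/* Usage sketch (assumed caller pattern, cf. net/ipv4/udp.c): receive paths
 * use this as a late, full software verification only when hardware has not
 * already vouched for the checksum:
 *
 *	if (udp_lib_checksum_complete(skb))
 *		goto csum_error;
 *
 * skb_csum_unnecessary() short-circuits the common offloaded case, so the
 * expensive checksum walk runs only for packets that still need it.
 */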

/**
 * 	udp_csum_outgoing  -  compute UDPv4/v6 checksum over fragments
 * 	@sk: 	socket we are writing to
 * 	@skb: 	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
 */
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), 0);
	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __wsum udp_csum(struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), skb->csum);

	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __sum16 udp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}
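
/* Usage sketch (paraphrased from udp_send_skb() in net/ipv4/udp.c): on
 * transmit the checksum field is zeroed, the pseudo-header and payload sum
 * is folded via udp_v4_check(), and an all-zero result is mangled, since
 * zero means "no checksum" for UDP over IPv4:
 *
 *	uh->check = 0;
 *	uh->check = udp_v4_check(len, saddr, daddr, udp_csum(skb));
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */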

void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len);

static inline void udp_csum_pull_header(struct sk_buff *skb)
{
	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
					 skb->csum);
	skb_pull_rcsum(skb, sizeof(struct udphdr));
	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}

typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport,
				     __be16 dport);

INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features, bool is_ipv6);

/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
	BUG();
	return 0;
}

void udp_lib_unhash(struct sock *sk);
void udp_lib_rehash(struct sock *sk, u16 new_hash);

static inline void udp_lib_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr);

u32 udp_flow_hashrnd(void);

static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
				       int min, int max, bool use_eth)
{
	u32 hash;

	if (min >= max) {
		/* Use default range */
		inet_get_local_port_range(net, &min, &max);
	}

	hash = skb_get_hash(skb);
	if (unlikely(!hash)) {
		if (use_eth) {
			/* Can't find a normal hash, caller has indicated an
			 * Ethernet packet so use that to compute a hash.
			 */
			hash = jhash(skb->data, 2 * ETH_ALEN,
				     (__force u32) skb->protocol);
		} else {
			/* Can't derive any sort of hash for the packet, set
			 * to some consistent random value.
			 */
			hash = udp_flow_hashrnd();
		}
	}

	/* Since this is being sent on the wire, obfuscate the hash a bit
	 * to minimize the possibility that any useful information is
	 * leaked to an attacker. Only the upper 16 bits are relevant in
	 * the computation for the 16-bit port value.
	 */
	hash ^= hash << 16;

	return htons((((u64) hash * (max - min)) >> 32) + min);
}
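
/* Worked example (illustrative, values assumed): with the default local
 * port range min = 32768, max = 60999 and a mixed hash of 0xdeadbeef,
 *
 *	((u64)0xdeadbeef * (60999 - 32768)) >> 32  =  24556
 *	24556 + 32768                              =  57324
 *
 * The multiply-and-shift scales the 32-bit hash into [0, max - min) and
 * the final add lands in [min, max), so a valid source port is produced
 * without a division.
 */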

static inline int udp_rqueue_get(struct sock *sk)
{
	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}

static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
				       int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}

/* net/ipv4/udp.c */
void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, int *off,
			       int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
					   int *err)
{
	int off = 0;

	return __skb_recv_udp(sk, flags, &off, err);
}

int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*saddr_cmp)(const struct sock *,
				  const struct sock *));
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       sockptr_t optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			       __be32 daddr, __be16 dport, int dif, int sdif,
			       struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
			     const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport,
			     int dif);
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *tbl,
			       struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport);
int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);

/* UDP uses skb->dev_scratch to cache as much information as possible and
 * avoid multiple potential cache misses on dequeue()
 */
struct udp_dev_scratch {
	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */
	u32 _tsize_state;

#if BITS_PER_LONG == 64
	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored on 16 bits since the udp header has
	 * already been validated and pulled.
	 */
	u16 len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};

static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
{
	return (struct udp_dev_scratch *)&skb->dev_scratch;
}

#if BITS_PER_LONG == 64
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->csum_unnecessary;
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->is_linear;
}

#else
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return skb->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb);
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return !skb_is_nonlinear(skb);
}
#endif

static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
				  struct iov_iter *to)
{
	int n;

	n = copy_to_iter(skb->data + off, len, to);
	if (n == len)
		return 0;

	iov_iter_revert(to, n);
	return -EFAULT;
}
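
/* Usage sketch (assumed caller pattern, cf. udp_recvmsg() in
 * net/ipv4/udp.c): the accessors above let the dequeue path read length
 * and checksum state from the cached scratch space instead of cold skb
 * fields, and copy_linear_skb() handles the common linear case:
 *
 *	ulen = udp_skb_len(skb);
 *	copied = min_t(int, len, ulen - off);
 *	if (udp_skb_is_linear(skb))
 *		err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
 *	else
 *		err = skb_copy_datagram_msg(skb, off, msg, copied);
 *
 * The variable names (ulen, copied, off, len) are hypothetical here.
 */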

/*
 * 	SNMP statistics for UDP and UDP-Lite
 */
#define UDP_INC_STATS(net, field, is_udplite)		      do { \
	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field);       \
	else		SNMP_INC_STATS((net)->mib.udp_statistics, field);  }  while(0)
#define __UDP_INC_STATS(net, field, is_udplite) 	      do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field);         \
	else		__SNMP_INC_STATS((net)->mib.udp_statistics, field);    }  while(0)

#define __UDP6_INC_STATS(net, field, is_udplite)	    do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
	else		__SNMP_INC_STATS((net)->mib.udp_stats_in6, field);  \
} while(0)
#define UDP6_INC_STATS(net, field, __lite)		    do { \
	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);  \
	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field);      \
} while(0)
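
/* Usage sketch (illustrative): the is_udplite flag routes a single counter
 * bump to the right MIB, e.g. on a checksum failure in an IPv4 receive path:
 *
 *	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, IS_UDPLITE(sk));
 *	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, IS_UDPLITE(sk));
 *
 * The double-underscore variants are the non-atomic flavors for callers
 * that are already serialized (e.g. softirq context), mirroring the
 * SNMP_INC_STATS/__SNMP_INC_STATS split.
 */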

#if IS_ENABLED(CONFIG_IPV6)
#define __UDPX_MIB(sk, ipv4)						\
({									\
	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
				 sock_net(sk)->mib.udp_statistics) :	\
		(IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :	\
				 sock_net(sk)->mib.udp_stats_in6);	\
})
#else
#define __UDPX_MIB(sk, ipv4)						\
({									\
	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :		\
			 sock_net(sk)->mib.udp_statistics;		\
})
#endif

#define __UDPX_INC_STATS(sk, field) \
	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)

#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
	sa_family_t			family;
	struct udp_table		*udp_table;
};

struct udp_iter_state {
	struct seq_net_private  p;
	int			bucket;
	struct udp_seq_afinfo	*bpf_seq_afinfo;
};

void *udp_seq_start(struct seq_file *seq, loff_t *pos);
void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void udp_seq_stop(struct seq_file *seq, void *v);

extern const struct seq_operations udp_seq_ops;
extern const struct seq_operations udp6_seq_ops;

int udp4_proc_init(void);
void udp4_proc_exit(void);
#endif /* CONFIG_PROC_FS */

int udpv4_offload_init(void);

void udp_init(void);

DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
void udp_encap_disable(void);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif

static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
					      struct sk_buff *skb, bool ipv4)
{
	netdev_features_t features = NETIF_F_SG;
	struct sk_buff *segs;

	/* Avoid csum recalculation by skb_segment unless userspace explicitly
	 * asks for the final checksum values
	 */
	if (!inet_get_convert_csum(sk))
		features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
	 * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
	 * packets in udp_gro_complete_segment. As does UDP GSO, verified by
	 * udp_send_skb. But when those packets are looped in dev_loopback_xmit
	 * their ip_summed CHECKSUM_PARTIAL is changed to CHECKSUM_UNNECESSARY.
	 * Reset it in this specific case, where PARTIAL is both correct and
	 * required.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		skb->ip_summed = CHECKSUM_PARTIAL;

	/* the GSO CB lies after the UDP one, no need to save and restore any
	 * CB fragment
	 */
	segs = __skb_gso_segment(skb, features, false);
	if (IS_ERR_OR_NULL(segs)) {
		int segs_nr = skb_shinfo(skb)->gso_segs;

		atomic_add(segs_nr, &sk->sk_drops);
		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
		kfree_skb(skb);
		return NULL;
	}

	consume_skb(skb);
	return segs;
}

static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
{
	/* UDP-lite can't land here - no GRO */
	WARN_ON_ONCE(UDP_SKB_CB(skb)->partial_cov);

	/* UDP packets generated with UDP_SEGMENT and traversing:
	 *
	 * UDP tunnel(xmit) -> veth (segmentation) -> veth (gro) -> UDP tunnel (rx)
	 *
	 * can reach a UDP socket with CHECKSUM_NONE, because
	 * __iptunnel_pull_header() converts CHECKSUM_PARTIAL into NONE.
	 * SKB_GSO_UDP_L4 or SKB_GSO_FRAGLIST packets with no UDP tunnel will
	 * have a valid checksum, as the GRO engine validates the UDP csum
	 * before the aggregation and nobody strips such info in between.
	 * Instead of adding another check in the tunnel fastpath, we can force
	 * a valid csum after the segmentation.
	 * Additionally, fix up the UDP CB.
	 */
	UDP_SKB_CB(skb)->cscov = skb->len;
	if (skb->ip_summed == CHECKSUM_NONE && !skb->csum_valid)
		skb->csum_valid = 1;
}
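
/* Illustrative sketch (assumed caller pattern, cf. udp_queue_rcv_skb() in
 * net/ipv4/udp.c): a GSO packet is resegmented, then every segment has its
 * checksum state fixed up before being enqueued individually. The helper
 * name and the enqueue_one callback are made up for illustration:
 */
static inline void udp_rcv_segment_example(struct sock *sk, struct sk_buff *skb,
					   int (*enqueue_one)(struct sock *,
							      struct sk_buff *))
{
	struct sk_buff *next, *segs;

	segs = udp_rcv_segment(sk, skb, true);

	/* on error udp_rcv_segment() has already dropped and accounted the
	 * original skb and returned NULL, so the walk below is a no-op
	 */
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));
		udp_post_segment_fix_csum(skb);
		enqueue_one(sk, skb);
	}
}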

#ifdef CONFIG_BPF_SYSCALL
struct sk_psock;
struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
#endif

#endif	/* _UDP_H */