cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

udp.c (86470B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
      4 *		operating system.  INET is implemented using the  BSD Socket
      5 *		interface as the means of communication with the user level.
      6 *
      7 *		The User Datagram Protocol (UDP).
      8 *
      9 * Authors:	Ross Biro
     10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
     11 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
     12 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
     13 *		Hirokazu Takahashi, <taka@valinux.co.jp>
     14 *
     15 * Fixes:
     16 *		Alan Cox	:	verify_area() calls
     17 *		Alan Cox	: 	stopped close while in use off icmp
     18 *					messages. Not a fix but a botch that
     19 *					for udp at least is 'valid'.
     20 *		Alan Cox	:	Fixed icmp handling properly
     21 *		Alan Cox	: 	Correct error for oversized datagrams
     22 *		Alan Cox	:	Tidied select() semantics.
     23 *		Alan Cox	:	udp_err() fixed properly, also now
     24 *					select and read wake correctly on errors
     25 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
     26 *		Alan Cox	:	UDP can count its memory
     27 *		Alan Cox	:	send to an unknown connection causes
     28 *					an ECONNREFUSED off the icmp, but
     29 *					does NOT close.
     30 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
     31 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
     32 *					bug no longer crashes it.
     33 *		Fred Van Kempen	: 	Net2e support for sk->broadcast.
     34 *		Alan Cox	:	Uses skb_free_datagram
     35 *		Alan Cox	:	Added get/set sockopt support.
     36 *		Alan Cox	:	Broadcasting without option set returns EACCES.
     37 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
     38 *		Alan Cox	:	Use ip_tos and ip_ttl
     39 *		Alan Cox	:	SNMP Mibs
     40 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
     41 *		Matt Dillon	:	UDP length checks.
     42 *		Alan Cox	:	Smarter af_inet used properly.
     43 *		Alan Cox	:	Use new kernel side addressing.
     44 *		Alan Cox	:	Incorrect return on truncated datagram receive.
     45 *	Arnt Gulbrandsen 	:	New udp_send and stuff
     46 *		Alan Cox	:	Cache last socket
     47 *		Alan Cox	:	Route cache
     48 *		Jon Peatfield	:	Minor efficiency fix to sendto().
     49 *		Mike Shaver	:	RFC1122 checks.
     50 *		Alan Cox	:	Nonblocking error fix.
     51 *	Willy Konynenberg	:	Transparent proxying support.
     52 *		Mike McLagan	:	Routing by source
     53 *		David S. Miller	:	New socket lookup architecture.
     54 *					Last socket cache retained as it
     55 *					does have a high hit rate.
     56 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
     57 *		Andi Kleen	:	Some cleanups, cache destination entry
     58 *					for connect.
     59 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
     60 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
     61 *					return ENOTCONN for unconnected sockets (POSIX)
     62 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
     63 *					bound-to-device socket
     64 *	Hirokazu Takahashi	:	HW checksumming for outgoing UDP
     65 *					datagrams.
     66 *	Hirokazu Takahashi	:	sendfile() on UDP works now.
     67 *		Arnaldo C. Melo :	convert /proc/net/udp to seq_file
     68 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
     69 *	Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
     70 *					a single port at the same time.
      71 *	Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
     72 *	James Chapman		:	Add L2TP encapsulation type.
     73 */
     74
     75#define pr_fmt(fmt) "UDP: " fmt
     76
     77#include <linux/bpf-cgroup.h>
     78#include <linux/uaccess.h>
     79#include <asm/ioctls.h>
     80#include <linux/memblock.h>
     81#include <linux/highmem.h>
     82#include <linux/types.h>
     83#include <linux/fcntl.h>
     84#include <linux/module.h>
     85#include <linux/socket.h>
     86#include <linux/sockios.h>
     87#include <linux/igmp.h>
     88#include <linux/inetdevice.h>
     89#include <linux/in.h>
     90#include <linux/errno.h>
     91#include <linux/timer.h>
     92#include <linux/mm.h>
     93#include <linux/inet.h>
     94#include <linux/netdevice.h>
     95#include <linux/slab.h>
     96#include <net/tcp_states.h>
     97#include <linux/skbuff.h>
     98#include <linux/proc_fs.h>
     99#include <linux/seq_file.h>
    100#include <net/net_namespace.h>
    101#include <net/icmp.h>
    102#include <net/inet_hashtables.h>
    103#include <net/ip_tunnels.h>
    104#include <net/route.h>
    105#include <net/checksum.h>
    106#include <net/xfrm.h>
    107#include <trace/events/udp.h>
    108#include <linux/static_key.h>
    109#include <linux/btf_ids.h>
    110#include <trace/events/skb.h>
    111#include <net/busy_poll.h>
    112#include "udp_impl.h"
    113#include <net/sock_reuseport.h>
    114#include <net/addrconf.h>
    115#include <net/udp_tunnel.h>
    116#if IS_ENABLED(CONFIG_IPV6)
    117#include <net/ipv6_stubs.h>
    118#endif
    119
    120struct udp_table udp_table __read_mostly;
    121EXPORT_SYMBOL(udp_table);
    122
    123long sysctl_udp_mem[3] __read_mostly;
    124EXPORT_SYMBOL(sysctl_udp_mem);
    125
    126atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
    127EXPORT_SYMBOL(udp_memory_allocated);
    128
    129#define MAX_UDP_PORTS 65536
    130#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
    131
    132static int udp_lib_lport_inuse(struct net *net, __u16 num,
    133			       const struct udp_hslot *hslot,
    134			       unsigned long *bitmap,
    135			       struct sock *sk, unsigned int log)
    136{
    137	struct sock *sk2;
    138	kuid_t uid = sock_i_uid(sk);
    139
    140	sk_for_each(sk2, &hslot->head) {
    141		if (net_eq(sock_net(sk2), net) &&
    142		    sk2 != sk &&
    143		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
    144		    (!sk2->sk_reuse || !sk->sk_reuse) &&
    145		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
    146		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
    147		    inet_rcv_saddr_equal(sk, sk2, true)) {
    148			if (sk2->sk_reuseport && sk->sk_reuseport &&
    149			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
    150			    uid_eq(uid, sock_i_uid(sk2))) {
    151				if (!bitmap)
    152					return 0;
    153			} else {
    154				if (!bitmap)
    155					return 1;
    156				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
    157					  bitmap);
    158			}
    159		}
    160	}
    161	return 0;
    162}
    163
    164/*
    165 * Note: we still hold spinlock of primary hash chain, so no other writer
    166 * can insert/delete a socket with local_port == num
    167 */
    168static int udp_lib_lport_inuse2(struct net *net, __u16 num,
    169				struct udp_hslot *hslot2,
    170				struct sock *sk)
    171{
    172	struct sock *sk2;
    173	kuid_t uid = sock_i_uid(sk);
    174	int res = 0;
    175
    176	spin_lock(&hslot2->lock);
    177	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
    178		if (net_eq(sock_net(sk2), net) &&
    179		    sk2 != sk &&
    180		    (udp_sk(sk2)->udp_port_hash == num) &&
    181		    (!sk2->sk_reuse || !sk->sk_reuse) &&
    182		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
    183		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
    184		    inet_rcv_saddr_equal(sk, sk2, true)) {
    185			if (sk2->sk_reuseport && sk->sk_reuseport &&
    186			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
    187			    uid_eq(uid, sock_i_uid(sk2))) {
    188				res = 0;
    189			} else {
    190				res = 1;
    191			}
    192			break;
    193		}
    194	}
    195	spin_unlock(&hslot2->lock);
    196	return res;
    197}
    198
    199static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
    200{
    201	struct net *net = sock_net(sk);
    202	kuid_t uid = sock_i_uid(sk);
    203	struct sock *sk2;
    204
    205	sk_for_each(sk2, &hslot->head) {
    206		if (net_eq(sock_net(sk2), net) &&
    207		    sk2 != sk &&
    208		    sk2->sk_family == sk->sk_family &&
    209		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
    210		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
    211		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
    212		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
    213		    inet_rcv_saddr_equal(sk, sk2, false)) {
    214			return reuseport_add_sock(sk, sk2,
    215						  inet_rcv_saddr_any(sk));
    216		}
    217	}
    218
    219	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
    220}
    221
    222/**
    223 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
    224 *
    225 *  @sk:          socket struct in question
    226 *  @snum:        port number to look up
    227 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
    228 *                   with NULL address
    229 */
    230int udp_lib_get_port(struct sock *sk, unsigned short snum,
    231		     unsigned int hash2_nulladdr)
    232{
    233	struct udp_hslot *hslot, *hslot2;
    234	struct udp_table *udptable = sk->sk_prot->h.udp_table;
    235	int    error = 1;
    236	struct net *net = sock_net(sk);
    237
    238	if (!snum) {
    239		int low, high, remaining;
    240		unsigned int rand;
    241		unsigned short first, last;
    242		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
    243
    244		inet_get_local_port_range(net, &low, &high);
    245		remaining = (high - low) + 1;
    246
    247		rand = prandom_u32();
    248		first = reciprocal_scale(rand, remaining) + low;
    249		/*
    250		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
    251		 */
    252		rand = (rand | 1) * (udptable->mask + 1);
    253		last = first + udptable->mask + 1;
    254		do {
    255			hslot = udp_hashslot(udptable, net, first);
    256			bitmap_zero(bitmap, PORTS_PER_CHAIN);
    257			spin_lock_bh(&hslot->lock);
    258			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
    259					    udptable->log);
    260
    261			snum = first;
    262			/*
    263			 * Iterate on all possible values of snum for this hash.
    264			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
     265			 * gives us randomization and full range coverage.
    266			 */
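			/*
			 * (rand is an odd multiple of the table size N = mask + 1,
			 * so each step keeps snum in the same hash slot, and the
			 * odd factor is coprime to 65536 / N, so snum visits every
			 * port of this slot exactly once before returning to first.)
			 */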
    267			do {
    268				if (low <= snum && snum <= high &&
    269				    !test_bit(snum >> udptable->log, bitmap) &&
    270				    !inet_is_local_reserved_port(net, snum))
    271					goto found;
    272				snum += rand;
    273			} while (snum != first);
    274			spin_unlock_bh(&hslot->lock);
    275			cond_resched();
    276		} while (++first != last);
    277		goto fail;
    278	} else {
    279		hslot = udp_hashslot(udptable, net, snum);
    280		spin_lock_bh(&hslot->lock);
    281		if (hslot->count > 10) {
    282			int exist;
    283			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;
    284
    285			slot2          &= udptable->mask;
    286			hash2_nulladdr &= udptable->mask;
    287
    288			hslot2 = udp_hashslot2(udptable, slot2);
    289			if (hslot->count < hslot2->count)
    290				goto scan_primary_hash;
    291
    292			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
    293			if (!exist && (hash2_nulladdr != slot2)) {
    294				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
    295				exist = udp_lib_lport_inuse2(net, snum, hslot2,
    296							     sk);
    297			}
    298			if (exist)
    299				goto fail_unlock;
    300			else
    301				goto found;
    302		}
    303scan_primary_hash:
    304		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
    305			goto fail_unlock;
    306	}
    307found:
    308	inet_sk(sk)->inet_num = snum;
    309	udp_sk(sk)->udp_port_hash = snum;
    310	udp_sk(sk)->udp_portaddr_hash ^= snum;
    311	if (sk_unhashed(sk)) {
    312		if (sk->sk_reuseport &&
    313		    udp_reuseport_add_sock(sk, hslot)) {
    314			inet_sk(sk)->inet_num = 0;
    315			udp_sk(sk)->udp_port_hash = 0;
    316			udp_sk(sk)->udp_portaddr_hash ^= snum;
    317			goto fail_unlock;
    318		}
    319
    320		sk_add_node_rcu(sk, &hslot->head);
    321		hslot->count++;
    322		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
    323
    324		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
    325		spin_lock(&hslot2->lock);
    326		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
    327		    sk->sk_family == AF_INET6)
    328			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
    329					   &hslot2->head);
    330		else
    331			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
    332					   &hslot2->head);
    333		hslot2->count++;
    334		spin_unlock(&hslot2->lock);
    335	}
    336	sock_set_flag(sk, SOCK_RCU_FREE);
    337	error = 0;
    338fail_unlock:
    339	spin_unlock_bh(&hslot->lock);
    340fail:
    341	return error;
    342}
    343EXPORT_SYMBOL(udp_lib_get_port);
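
/*
 * A minimal userspace sketch (not part of this file) of the bind rule that
 * udp_lib_lport_inuse() enforces above: two UDP sockets owned by the same
 * user may share one local port if both set SO_REUSEPORT before bind().
 * The port number below is an arbitrary illustration.
 */
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int bound_udp_socket(uint16_t port)
{
	struct sockaddr_in addr;
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	/* must be set before bind(), or the second bind() gets EADDRINUSE */
	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	int a = bound_udp_socket(9000);	/* example port */
	int b = bound_udp_socket(9000);	/* also succeeds: same uid, both reuseport */

	printf("bound fds: %d %d\n", a, b);
	return 0;
}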
    344
    345int udp_v4_get_port(struct sock *sk, unsigned short snum)
    346{
    347	unsigned int hash2_nulladdr =
    348		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
    349	unsigned int hash2_partial =
    350		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
    351
    352	/* precompute partial secondary hash */
    353	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
    354	return udp_lib_get_port(sk, snum, hash2_nulladdr);
    355}
    356
    357static int compute_score(struct sock *sk, struct net *net,
    358			 __be32 saddr, __be16 sport,
    359			 __be32 daddr, unsigned short hnum,
    360			 int dif, int sdif)
    361{
    362	int score;
    363	struct inet_sock *inet;
    364	bool dev_match;
    365
    366	if (!net_eq(sock_net(sk), net) ||
    367	    udp_sk(sk)->udp_port_hash != hnum ||
    368	    ipv6_only_sock(sk))
    369		return -1;
    370
    371	if (sk->sk_rcv_saddr != daddr)
    372		return -1;
    373
    374	score = (sk->sk_family == PF_INET) ? 2 : 1;
    375
    376	inet = inet_sk(sk);
    377	if (inet->inet_daddr) {
    378		if (inet->inet_daddr != saddr)
    379			return -1;
    380		score += 4;
    381	}
    382
    383	if (inet->inet_dport) {
    384		if (inet->inet_dport != sport)
    385			return -1;
    386		score += 4;
    387	}
    388
    389	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
    390					dif, sdif);
    391	if (!dev_match)
    392		return -1;
    393	if (sk->sk_bound_dev_if)
    394		score += 4;
    395
    396	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
    397		score++;
    398	return score;
    399}
    400
    401static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
    402		       const __u16 lport, const __be32 faddr,
    403		       const __be16 fport)
    404{
    405	static u32 udp_ehash_secret __read_mostly;
    406
    407	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
    408
    409	return __inet_ehashfn(laddr, lport, faddr, fport,
    410			      udp_ehash_secret + net_hash_mix(net));
    411}
    412
    413static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
    414				     struct sk_buff *skb,
    415				     __be32 saddr, __be16 sport,
    416				     __be32 daddr, unsigned short hnum)
    417{
    418	struct sock *reuse_sk = NULL;
    419	u32 hash;
    420
    421	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
    422		hash = udp_ehashfn(net, daddr, hnum, saddr, sport);
    423		reuse_sk = reuseport_select_sock(sk, hash, skb,
    424						 sizeof(struct udphdr));
    425	}
    426	return reuse_sk;
    427}
    428
    429/* called with rcu_read_lock() */
    430static struct sock *udp4_lib_lookup2(struct net *net,
    431				     __be32 saddr, __be16 sport,
    432				     __be32 daddr, unsigned int hnum,
    433				     int dif, int sdif,
    434				     struct udp_hslot *hslot2,
    435				     struct sk_buff *skb)
    436{
    437	struct sock *sk, *result;
    438	int score, badness;
    439
    440	result = NULL;
    441	badness = 0;
    442	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
    443		score = compute_score(sk, net, saddr, sport,
    444				      daddr, hnum, dif, sdif);
    445		if (score > badness) {
    446			result = lookup_reuseport(net, sk, skb,
    447						  saddr, sport, daddr, hnum);
    448			/* Fall back to scoring if group has connections */
    449			if (result && !reuseport_has_conns(sk, false))
    450				return result;
    451
    452			result = result ? : sk;
    453			badness = score;
    454		}
    455	}
    456	return result;
    457}
    458
    459static struct sock *udp4_lookup_run_bpf(struct net *net,
    460					struct udp_table *udptable,
    461					struct sk_buff *skb,
    462					__be32 saddr, __be16 sport,
    463					__be32 daddr, u16 hnum, const int dif)
    464{
    465	struct sock *sk, *reuse_sk;
    466	bool no_reuseport;
    467
    468	if (udptable != &udp_table)
    469		return NULL; /* only UDP is supported */
    470
    471	no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, saddr, sport,
    472					    daddr, hnum, dif, &sk);
    473	if (no_reuseport || IS_ERR_OR_NULL(sk))
    474		return sk;
    475
    476	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
    477	if (reuse_sk)
    478		sk = reuse_sk;
    479	return sk;
    480}
    481
    482/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
    483 * harder than this. -DaveM
    484 */
    485struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
    486		__be16 sport, __be32 daddr, __be16 dport, int dif,
    487		int sdif, struct udp_table *udptable, struct sk_buff *skb)
    488{
    489	unsigned short hnum = ntohs(dport);
    490	unsigned int hash2, slot2;
    491	struct udp_hslot *hslot2;
    492	struct sock *result, *sk;
    493
    494	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
    495	slot2 = hash2 & udptable->mask;
    496	hslot2 = &udptable->hash2[slot2];
    497
    498	/* Lookup connected or non-wildcard socket */
    499	result = udp4_lib_lookup2(net, saddr, sport,
    500				  daddr, hnum, dif, sdif,
    501				  hslot2, skb);
    502	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
    503		goto done;
    504
    505	/* Lookup redirect from BPF */
    506	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
    507		sk = udp4_lookup_run_bpf(net, udptable, skb,
    508					 saddr, sport, daddr, hnum, dif);
    509		if (sk) {
    510			result = sk;
    511			goto done;
    512		}
    513	}
    514
    515	/* Got non-wildcard socket or error on first lookup */
    516	if (result)
    517		goto done;
    518
    519	/* Lookup wildcard sockets */
    520	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
    521	slot2 = hash2 & udptable->mask;
    522	hslot2 = &udptable->hash2[slot2];
    523
    524	result = udp4_lib_lookup2(net, saddr, sport,
    525				  htonl(INADDR_ANY), hnum, dif, sdif,
    526				  hslot2, skb);
    527done:
    528	if (IS_ERR(result))
    529		return NULL;
    530	return result;
    531}
    532EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
    533
    534static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
    535						 __be16 sport, __be16 dport,
    536						 struct udp_table *udptable)
    537{
    538	const struct iphdr *iph = ip_hdr(skb);
    539
    540	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
    541				 iph->daddr, dport, inet_iif(skb),
    542				 inet_sdif(skb), udptable, skb);
    543}
    544
    545struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
    546				 __be16 sport, __be16 dport)
    547{
    548	const struct iphdr *iph = ip_hdr(skb);
    549
    550	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
    551				 iph->daddr, dport, inet_iif(skb),
    552				 inet_sdif(skb), &udp_table, NULL);
    553}
    554
    555/* Must be called under rcu_read_lock().
    556 * Does increment socket refcount.
    557 */
    558#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
    559struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
    560			     __be32 daddr, __be16 dport, int dif)
    561{
    562	struct sock *sk;
    563
    564	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
    565			       dif, 0, &udp_table, NULL);
    566	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
    567		sk = NULL;
    568	return sk;
    569}
    570EXPORT_SYMBOL_GPL(udp4_lib_lookup);
    571#endif
    572
    573static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
    574				       __be16 loc_port, __be32 loc_addr,
    575				       __be16 rmt_port, __be32 rmt_addr,
    576				       int dif, int sdif, unsigned short hnum)
    577{
    578	struct inet_sock *inet = inet_sk(sk);
    579
    580	if (!net_eq(sock_net(sk), net) ||
    581	    udp_sk(sk)->udp_port_hash != hnum ||
    582	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
    583	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
    584	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
    585	    ipv6_only_sock(sk) ||
    586	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
    587		return false;
    588	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
    589		return false;
    590	return true;
    591}
    592
    593DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
    594void udp_encap_enable(void)
    595{
    596	static_branch_inc(&udp_encap_needed_key);
    597}
    598EXPORT_SYMBOL(udp_encap_enable);
    599
    600void udp_encap_disable(void)
    601{
    602	static_branch_dec(&udp_encap_needed_key);
    603}
    604EXPORT_SYMBOL(udp_encap_disable);
    605
    606/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
    607 * through error handlers in encapsulations looking for a match.
    608 */
    609static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
    610{
    611	int i;
    612
    613	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
    614		int (*handler)(struct sk_buff *skb, u32 info);
    615		const struct ip_tunnel_encap_ops *encap;
    616
    617		encap = rcu_dereference(iptun_encaps[i]);
    618		if (!encap)
    619			continue;
    620		handler = encap->err_handler;
    621		if (handler && !handler(skb, info))
    622			return 0;
    623	}
    624
    625	return -ENOENT;
    626}
    627
    628/* Try to match ICMP errors to UDP tunnels by looking up a socket without
    629 * reversing source and destination port: this will match tunnels that force the
    630 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
    631 * lwtunnels might actually break this assumption by being configured with
    632 * different destination ports on endpoints, in this case we won't be able to
    633 * trace ICMP messages back to them.
    634 *
    635 * If this doesn't match any socket, probe tunnels with arbitrary destination
    636 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
    637 * we've sent packets to won't necessarily match the local destination port.
    638 *
    639 * Then ask the tunnel implementation to match the error against a valid
    640 * association.
    641 *
    642 * Return an error if we can't find a match, the socket if we need further
    643 * processing, zero otherwise.
    644 */
    645static struct sock *__udp4_lib_err_encap(struct net *net,
    646					 const struct iphdr *iph,
    647					 struct udphdr *uh,
    648					 struct udp_table *udptable,
    649					 struct sock *sk,
    650					 struct sk_buff *skb, u32 info)
    651{
    652	int (*lookup)(struct sock *sk, struct sk_buff *skb);
    653	int network_offset, transport_offset;
    654	struct udp_sock *up;
    655
    656	network_offset = skb_network_offset(skb);
    657	transport_offset = skb_transport_offset(skb);
    658
    659	/* Network header needs to point to the outer IPv4 header inside ICMP */
    660	skb_reset_network_header(skb);
    661
    662	/* Transport header needs to point to the UDP header */
    663	skb_set_transport_header(skb, iph->ihl << 2);
    664
    665	if (sk) {
    666		up = udp_sk(sk);
    667
    668		lookup = READ_ONCE(up->encap_err_lookup);
    669		if (lookup && lookup(sk, skb))
    670			sk = NULL;
    671
    672		goto out;
    673	}
    674
    675	sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
    676			       iph->saddr, uh->dest, skb->dev->ifindex, 0,
    677			       udptable, NULL);
    678	if (sk) {
    679		up = udp_sk(sk);
    680
    681		lookup = READ_ONCE(up->encap_err_lookup);
    682		if (!lookup || lookup(sk, skb))
    683			sk = NULL;
    684	}
    685
    686out:
    687	if (!sk)
    688		sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));
    689
    690	skb_set_transport_header(skb, transport_offset);
    691	skb_set_network_header(skb, network_offset);
    692
    693	return sk;
    694}
    695
    696/*
    697 * This routine is called by the ICMP module when it gets some
    698 * sort of error condition.  If err < 0 then the socket should
    699 * be closed and the error returned to the user.  If err > 0
    700 * it's just the icmp type << 8 | icmp code.
    701 * Header points to the ip header of the error packet. We move
    702 * on past this. Then (as it used to claim before adjustment)
    703 * header points to the first 8 bytes of the udp header.  We need
    704 * to find the appropriate port.
    705 */
    706
    707int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
    708{
    709	struct inet_sock *inet;
    710	const struct iphdr *iph = (const struct iphdr *)skb->data;
    711	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
    712	const int type = icmp_hdr(skb)->type;
    713	const int code = icmp_hdr(skb)->code;
    714	bool tunnel = false;
    715	struct sock *sk;
    716	int harderr;
    717	int err;
    718	struct net *net = dev_net(skb->dev);
    719
    720	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
    721			       iph->saddr, uh->source, skb->dev->ifindex,
    722			       inet_sdif(skb), udptable, NULL);
    723
    724	if (!sk || udp_sk(sk)->encap_type) {
    725		/* No socket for error: try tunnels before discarding */
    726		if (static_branch_unlikely(&udp_encap_needed_key)) {
    727			sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
    728						  info);
    729			if (!sk)
    730				return 0;
    731		} else
    732			sk = ERR_PTR(-ENOENT);
    733
    734		if (IS_ERR(sk)) {
    735			__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
    736			return PTR_ERR(sk);
    737		}
    738
    739		tunnel = true;
    740	}
    741
    742	err = 0;
    743	harderr = 0;
    744	inet = inet_sk(sk);
    745
    746	switch (type) {
    747	default:
    748	case ICMP_TIME_EXCEEDED:
    749		err = EHOSTUNREACH;
    750		break;
    751	case ICMP_SOURCE_QUENCH:
    752		goto out;
    753	case ICMP_PARAMETERPROB:
    754		err = EPROTO;
    755		harderr = 1;
    756		break;
    757	case ICMP_DEST_UNREACH:
    758		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
    759			ipv4_sk_update_pmtu(skb, sk, info);
    760			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
    761				err = EMSGSIZE;
    762				harderr = 1;
    763				break;
    764			}
    765			goto out;
    766		}
    767		err = EHOSTUNREACH;
    768		if (code <= NR_ICMP_UNREACH) {
    769			harderr = icmp_err_convert[code].fatal;
    770			err = icmp_err_convert[code].errno;
    771		}
    772		break;
    773	case ICMP_REDIRECT:
    774		ipv4_sk_redirect(skb, sk);
    775		goto out;
    776	}
    777
    778	/*
    779	 *      RFC1122: OK.  Passes ICMP errors back to application, as per
    780	 *	4.1.3.3.
    781	 */
    782	if (tunnel) {
    783		/* ...not for tunnels though: we don't have a sending socket */
    784		goto out;
    785	}
    786	if (!inet->recverr) {
    787		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
    788			goto out;
    789	} else
    790		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
    791
    792	sk->sk_err = err;
    793	sk_error_report(sk);
    794out:
    795	return 0;
    796}
    797
    798int udp_err(struct sk_buff *skb, u32 info)
    799{
    800	return __udp4_lib_err(skb, info, &udp_table);
    801}
    802
    803/*
    804 * Throw away all pending data and cancel the corking. Socket is locked.
    805 */
    806void udp_flush_pending_frames(struct sock *sk)
    807{
    808	struct udp_sock *up = udp_sk(sk);
    809
    810	if (up->pending) {
    811		up->len = 0;
    812		up->pending = 0;
    813		ip_flush_pending_frames(sk);
    814	}
    815}
    816EXPORT_SYMBOL(udp_flush_pending_frames);
    817
    818/**
    819 * 	udp4_hwcsum  -  handle outgoing HW checksumming
    820 * 	@skb: 	sk_buff containing the filled-in UDP header
    821 * 	        (checksum field must be zeroed out)
    822 *	@src:	source IP address
    823 *	@dst:	destination IP address
    824 */
    825void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
    826{
    827	struct udphdr *uh = udp_hdr(skb);
    828	int offset = skb_transport_offset(skb);
    829	int len = skb->len - offset;
    830	int hlen = len;
    831	__wsum csum = 0;
    832
    833	if (!skb_has_frag_list(skb)) {
    834		/*
    835		 * Only one fragment on the socket.
    836		 */
    837		skb->csum_start = skb_transport_header(skb) - skb->head;
    838		skb->csum_offset = offsetof(struct udphdr, check);
    839		uh->check = ~csum_tcpudp_magic(src, dst, len,
    840					       IPPROTO_UDP, 0);
    841	} else {
    842		struct sk_buff *frags;
    843
    844		/*
     845		 * HW checksum won't work since there are two or more
     846		 * fragments on the socket, so the csums of all sk_buffs
     847		 * must be combined.
    848		 */
    849		skb_walk_frags(skb, frags) {
    850			csum = csum_add(csum, frags->csum);
    851			hlen -= frags->len;
    852		}
    853
    854		csum = skb_checksum(skb, offset, hlen, csum);
    855		skb->ip_summed = CHECKSUM_NONE;
    856
    857		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
    858		if (uh->check == 0)
    859			uh->check = CSUM_MANGLED_0;
    860	}
    861}
    862EXPORT_SYMBOL_GPL(udp4_hwcsum);
    863
     864/* Function to set the UDP checksum for an IPv4 UDP packet. This is intended
     865 * for simple cases, such as setting the checksum for a UDP tunnel.
    866 */
    867void udp_set_csum(bool nocheck, struct sk_buff *skb,
    868		  __be32 saddr, __be32 daddr, int len)
    869{
    870	struct udphdr *uh = udp_hdr(skb);
    871
    872	if (nocheck) {
    873		uh->check = 0;
    874	} else if (skb_is_gso(skb)) {
    875		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
    876	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
    877		uh->check = 0;
    878		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
    879		if (uh->check == 0)
    880			uh->check = CSUM_MANGLED_0;
    881	} else {
    882		skb->ip_summed = CHECKSUM_PARTIAL;
    883		skb->csum_start = skb_transport_header(skb) - skb->head;
    884		skb->csum_offset = offsetof(struct udphdr, check);
    885		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
    886	}
    887}
    888EXPORT_SYMBOL(udp_set_csum);
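
/*
 * A minimal standalone sketch (not part of this file) of the RFC 768
 * checksum that the code above assembles via csum_tcpudp_magic(): a
 * one's-complement sum over the IPv4 pseudo-header (saddr, daddr,
 * zero + protocol, UDP length) plus the UDP header and payload, with a
 * result of 0 transmitted as 0xffff (the CSUM_MANGLED_0 rule).
 * Addresses and the return value are in host byte order here; store the
 * result into the header with htons().
 */
#include <netinet/in.h>	/* IPPROTO_UDP */
#include <stddef.h>
#include <stdint.h>

static uint16_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* @udp: UDP header plus payload, @len bytes, checksum field zeroed */
static uint16_t udp4_checksum(uint32_t saddr, uint32_t daddr,
			      const uint8_t *udp, size_t len)
{
	uint32_t sum = 0;
	uint16_t folded;
	size_t i;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += IPPROTO_UDP;
	sum += len;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)((udp[i] << 8) | udp[i + 1]);
	if (len & 1)
		sum += (uint32_t)udp[len - 1] << 8;

	folded = csum_fold32(sum);
	return folded ? folded : 0xffff;
}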
    889
    890static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
    891			struct inet_cork *cork)
    892{
    893	struct sock *sk = skb->sk;
    894	struct inet_sock *inet = inet_sk(sk);
    895	struct udphdr *uh;
    896	int err;
    897	int is_udplite = IS_UDPLITE(sk);
    898	int offset = skb_transport_offset(skb);
    899	int len = skb->len - offset;
    900	int datalen = len - sizeof(*uh);
    901	__wsum csum = 0;
    902
    903	/*
    904	 * Create a UDP header
    905	 */
    906	uh = udp_hdr(skb);
    907	uh->source = inet->inet_sport;
    908	uh->dest = fl4->fl4_dport;
    909	uh->len = htons(len);
    910	uh->check = 0;
    911
    912	if (cork->gso_size) {
    913		const int hlen = skb_network_header_len(skb) +
    914				 sizeof(struct udphdr);
    915
    916		if (hlen + cork->gso_size > cork->fragsize) {
    917			kfree_skb(skb);
    918			return -EINVAL;
    919		}
    920		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
    921			kfree_skb(skb);
    922			return -EINVAL;
    923		}
    924		if (sk->sk_no_check_tx) {
    925			kfree_skb(skb);
    926			return -EINVAL;
    927		}
    928		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
    929		    dst_xfrm(skb_dst(skb))) {
    930			kfree_skb(skb);
    931			return -EIO;
    932		}
    933
    934		if (datalen > cork->gso_size) {
    935			skb_shinfo(skb)->gso_size = cork->gso_size;
    936			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
    937			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
    938								 cork->gso_size);
    939		}
    940		goto csum_partial;
    941	}
    942
    943	if (is_udplite)  				 /*     UDP-Lite      */
    944		csum = udplite_csum(skb);
    945
    946	else if (sk->sk_no_check_tx) {			 /* UDP csum off */
    947
    948		skb->ip_summed = CHECKSUM_NONE;
    949		goto send;
    950
    951	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
    952csum_partial:
    953
    954		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
    955		goto send;
    956
    957	} else
    958		csum = udp_csum(skb);
    959
    960	/* add protocol-dependent pseudo-header */
    961	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
    962				      sk->sk_protocol, csum);
    963	if (uh->check == 0)
    964		uh->check = CSUM_MANGLED_0;
    965
    966send:
    967	err = ip_send_skb(sock_net(sk), skb);
    968	if (err) {
    969		if (err == -ENOBUFS && !inet->recverr) {
    970			UDP_INC_STATS(sock_net(sk),
    971				      UDP_MIB_SNDBUFERRORS, is_udplite);
    972			err = 0;
    973		}
    974	} else
    975		UDP_INC_STATS(sock_net(sk),
    976			      UDP_MIB_OUTDATAGRAMS, is_udplite);
    977	return err;
    978}
    979
    980/*
    981 * Push out all pending data as one UDP datagram. Socket is locked.
    982 */
    983int udp_push_pending_frames(struct sock *sk)
    984{
    985	struct udp_sock  *up = udp_sk(sk);
    986	struct inet_sock *inet = inet_sk(sk);
    987	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
    988	struct sk_buff *skb;
    989	int err = 0;
    990
    991	skb = ip_finish_skb(sk, fl4);
    992	if (!skb)
    993		goto out;
    994
    995	err = udp_send_skb(skb, fl4, &inet->cork.base);
    996
    997out:
    998	up->len = 0;
    999	up->pending = 0;
   1000	return err;
   1001}
   1002EXPORT_SYMBOL(udp_push_pending_frames);
   1003
   1004static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
   1005{
   1006	switch (cmsg->cmsg_type) {
   1007	case UDP_SEGMENT:
   1008		if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
   1009			return -EINVAL;
   1010		*gso_size = *(__u16 *)CMSG_DATA(cmsg);
   1011		return 0;
   1012	default:
   1013		return -EINVAL;
   1014	}
   1015}
   1016
   1017int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
   1018{
   1019	struct cmsghdr *cmsg;
   1020	bool need_ip = false;
   1021	int err;
   1022
   1023	for_each_cmsghdr(cmsg, msg) {
   1024		if (!CMSG_OK(msg, cmsg))
   1025			return -EINVAL;
   1026
   1027		if (cmsg->cmsg_level != SOL_UDP) {
   1028			need_ip = true;
   1029			continue;
   1030		}
   1031
   1032		err = __udp_cmsg_send(cmsg, gso_size);
   1033		if (err)
   1034			return err;
   1035	}
   1036
   1037	return need_ip;
   1038}
   1039EXPORT_SYMBOL_GPL(udp_cmsg_send);
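
/*
 * A minimal userspace sketch (not part of this file) of the UDP_SEGMENT
 * ancillary message parsed by __udp_cmsg_send() above: one large sendmsg()
 * payload is split by the kernel into gso_size-byte datagrams. The socket
 * is assumed to be already connected; sizes are illustrative.
 */
#include <netinet/in.h>
#include <netinet/udp.h>	/* SOL_UDP */
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT	103	/* include/uapi/linux/udp.h */
#endif

static ssize_t send_gso(int connected_fd, const void *buf, size_t len,
			uint16_t gso_size)
{
	char control[CMSG_SPACE(sizeof(uint16_t))] = { 0 };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = control,
		.msg_controllen = sizeof(control),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	/* must be exactly CMSG_LEN(sizeof(__u16)), as checked above */
	cm->cmsg_level = SOL_UDP;
	cm->cmsg_type = UDP_SEGMENT;
	cm->cmsg_len = CMSG_LEN(sizeof(uint16_t));
	memcpy(CMSG_DATA(cm), &gso_size, sizeof(gso_size));

	return sendmsg(connected_fd, &msg, 0);
}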
   1040
   1041int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
   1042{
   1043	struct inet_sock *inet = inet_sk(sk);
   1044	struct udp_sock *up = udp_sk(sk);
   1045	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
   1046	struct flowi4 fl4_stack;
   1047	struct flowi4 *fl4;
   1048	int ulen = len;
   1049	struct ipcm_cookie ipc;
   1050	struct rtable *rt = NULL;
   1051	int free = 0;
   1052	int connected = 0;
   1053	__be32 daddr, faddr, saddr;
   1054	__be16 dport;
   1055	u8  tos;
   1056	int err, is_udplite = IS_UDPLITE(sk);
   1057	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
   1058	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
   1059	struct sk_buff *skb;
   1060	struct ip_options_data opt_copy;
   1061
   1062	if (len > 0xFFFF)
   1063		return -EMSGSIZE;
   1064
   1065	/*
   1066	 *	Check the flags.
   1067	 */
   1068
   1069	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
   1070		return -EOPNOTSUPP;
   1071
   1072	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
   1073
   1074	fl4 = &inet->cork.fl.u.ip4;
   1075	if (up->pending) {
   1076		/*
   1077		 * There are pending frames.
   1078		 * The socket lock must be held while it's corked.
   1079		 */
   1080		lock_sock(sk);
   1081		if (likely(up->pending)) {
   1082			if (unlikely(up->pending != AF_INET)) {
   1083				release_sock(sk);
   1084				return -EINVAL;
   1085			}
   1086			goto do_append_data;
   1087		}
   1088		release_sock(sk);
   1089	}
   1090	ulen += sizeof(struct udphdr);
   1091
   1092	/*
   1093	 *	Get and verify the address.
   1094	 */
   1095	if (usin) {
   1096		if (msg->msg_namelen < sizeof(*usin))
   1097			return -EINVAL;
   1098		if (usin->sin_family != AF_INET) {
   1099			if (usin->sin_family != AF_UNSPEC)
   1100				return -EAFNOSUPPORT;
   1101		}
   1102
   1103		daddr = usin->sin_addr.s_addr;
   1104		dport = usin->sin_port;
   1105		if (dport == 0)
   1106			return -EINVAL;
   1107	} else {
   1108		if (sk->sk_state != TCP_ESTABLISHED)
   1109			return -EDESTADDRREQ;
   1110		daddr = inet->inet_daddr;
   1111		dport = inet->inet_dport;
   1112		/* Open fast path for connected socket.
    1113		   Route will not be used if at least one option is set.
   1114		 */
   1115		connected = 1;
   1116	}
   1117
   1118	ipcm_init_sk(&ipc, inet);
   1119	ipc.gso_size = READ_ONCE(up->gso_size);
   1120
   1121	if (msg->msg_controllen) {
   1122		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
   1123		if (err > 0)
   1124			err = ip_cmsg_send(sk, msg, &ipc,
   1125					   sk->sk_family == AF_INET6);
   1126		if (unlikely(err < 0)) {
   1127			kfree(ipc.opt);
   1128			return err;
   1129		}
   1130		if (ipc.opt)
   1131			free = 1;
   1132		connected = 0;
   1133	}
   1134	if (!ipc.opt) {
   1135		struct ip_options_rcu *inet_opt;
   1136
   1137		rcu_read_lock();
   1138		inet_opt = rcu_dereference(inet->inet_opt);
   1139		if (inet_opt) {
   1140			memcpy(&opt_copy, inet_opt,
   1141			       sizeof(*inet_opt) + inet_opt->opt.optlen);
   1142			ipc.opt = &opt_copy.opt;
   1143		}
   1144		rcu_read_unlock();
   1145	}
   1146
   1147	if (cgroup_bpf_enabled(CGROUP_UDP4_SENDMSG) && !connected) {
   1148		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
   1149					    (struct sockaddr *)usin, &ipc.addr);
   1150		if (err)
   1151			goto out_free;
   1152		if (usin) {
   1153			if (usin->sin_port == 0) {
   1154				/* BPF program set invalid port. Reject it. */
   1155				err = -EINVAL;
   1156				goto out_free;
   1157			}
   1158			daddr = usin->sin_addr.s_addr;
   1159			dport = usin->sin_port;
   1160		}
   1161	}
   1162
   1163	saddr = ipc.addr;
   1164	ipc.addr = faddr = daddr;
   1165
   1166	if (ipc.opt && ipc.opt->opt.srr) {
   1167		if (!daddr) {
   1168			err = -EINVAL;
   1169			goto out_free;
   1170		}
   1171		faddr = ipc.opt->opt.faddr;
   1172		connected = 0;
   1173	}
   1174	tos = get_rttos(&ipc, inet);
   1175	if (sock_flag(sk, SOCK_LOCALROUTE) ||
   1176	    (msg->msg_flags & MSG_DONTROUTE) ||
   1177	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
   1178		tos |= RTO_ONLINK;
   1179		connected = 0;
   1180	}
   1181
   1182	if (ipv4_is_multicast(daddr)) {
   1183		if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
   1184			ipc.oif = inet->mc_index;
   1185		if (!saddr)
   1186			saddr = inet->mc_addr;
   1187		connected = 0;
   1188	} else if (!ipc.oif) {
   1189		ipc.oif = inet->uc_index;
   1190	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
   1191		/* oif is set, packet is to local broadcast and
   1192		 * uc_index is set. oif is most likely set
   1193		 * by sk_bound_dev_if. If uc_index != oif check if the
   1194		 * oif is an L3 master and uc_index is an L3 slave.
   1195		 * If so, we want to allow the send using the uc_index.
   1196		 */
   1197		if (ipc.oif != inet->uc_index &&
   1198		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
   1199							      inet->uc_index)) {
   1200			ipc.oif = inet->uc_index;
   1201		}
   1202	}
   1203
   1204	if (connected)
   1205		rt = (struct rtable *)sk_dst_check(sk, 0);
   1206
   1207	if (!rt) {
   1208		struct net *net = sock_net(sk);
   1209		__u8 flow_flags = inet_sk_flowi_flags(sk);
   1210
   1211		fl4 = &fl4_stack;
   1212
   1213		flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos,
   1214				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
   1215				   flow_flags,
   1216				   faddr, saddr, dport, inet->inet_sport,
   1217				   sk->sk_uid);
   1218
   1219		security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
   1220		rt = ip_route_output_flow(net, fl4, sk);
   1221		if (IS_ERR(rt)) {
   1222			err = PTR_ERR(rt);
   1223			rt = NULL;
   1224			if (err == -ENETUNREACH)
   1225				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
   1226			goto out;
   1227		}
   1228
   1229		err = -EACCES;
   1230		if ((rt->rt_flags & RTCF_BROADCAST) &&
   1231		    !sock_flag(sk, SOCK_BROADCAST))
   1232			goto out;
   1233		if (connected)
   1234			sk_dst_set(sk, dst_clone(&rt->dst));
   1235	}
   1236
   1237	if (msg->msg_flags&MSG_CONFIRM)
   1238		goto do_confirm;
   1239back_from_confirm:
   1240
   1241	saddr = fl4->saddr;
   1242	if (!ipc.addr)
   1243		daddr = ipc.addr = fl4->daddr;
   1244
   1245	/* Lockless fast path for the non-corking case. */
   1246	if (!corkreq) {
   1247		struct inet_cork cork;
   1248
   1249		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
   1250				  sizeof(struct udphdr), &ipc, &rt,
   1251				  &cork, msg->msg_flags);
   1252		err = PTR_ERR(skb);
   1253		if (!IS_ERR_OR_NULL(skb))
   1254			err = udp_send_skb(skb, fl4, &cork);
   1255		goto out;
   1256	}
   1257
   1258	lock_sock(sk);
   1259	if (unlikely(up->pending)) {
   1260		/* The socket is already corked while preparing it. */
   1261		/* ... which is an evident application bug. --ANK */
   1262		release_sock(sk);
   1263
   1264		net_dbg_ratelimited("socket already corked\n");
   1265		err = -EINVAL;
   1266		goto out;
   1267	}
   1268	/*
   1269	 *	Now cork the socket to pend data.
   1270	 */
   1271	fl4 = &inet->cork.fl.u.ip4;
   1272	fl4->daddr = daddr;
   1273	fl4->saddr = saddr;
   1274	fl4->fl4_dport = dport;
   1275	fl4->fl4_sport = inet->inet_sport;
   1276	up->pending = AF_INET;
   1277
   1278do_append_data:
   1279	up->len += ulen;
   1280	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
   1281			     sizeof(struct udphdr), &ipc, &rt,
   1282			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
   1283	if (err)
   1284		udp_flush_pending_frames(sk);
   1285	else if (!corkreq)
   1286		err = udp_push_pending_frames(sk);
   1287	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
   1288		up->pending = 0;
   1289	release_sock(sk);
   1290
   1291out:
   1292	ip_rt_put(rt);
   1293out_free:
   1294	if (free)
   1295		kfree(ipc.opt);
   1296	if (!err)
   1297		return len;
   1298	/*
   1299	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
   1300	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
   1301	 * we don't have a good statistic (IpOutDiscards but it can be too many
   1302	 * things).  We could add another new stat but at least for now that
   1303	 * seems like overkill.
   1304	 */
   1305	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
   1306		UDP_INC_STATS(sock_net(sk),
   1307			      UDP_MIB_SNDBUFERRORS, is_udplite);
   1308	}
   1309	return err;
   1310
   1311do_confirm:
   1312	if (msg->msg_flags & MSG_PROBE)
   1313		dst_confirm_neigh(&rt->dst, &fl4->daddr);
   1314	if (!(msg->msg_flags&MSG_PROBE) || len)
   1315		goto back_from_confirm;
   1316	err = 0;
   1317	goto out;
   1318}
   1319EXPORT_SYMBOL(udp_sendmsg);
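
/*
 * A minimal userspace sketch (not part of this file) of the corking path in
 * udp_sendmsg() above: while UDP_CORK is set, payloads are appended to the
 * pending frame (ip_append_data()), and a single datagram goes out when the
 * cork is released (udp_push_pending_frames()). The connected socket fd is
 * an assumption.
 */
#include <netinet/in.h>
#include <netinet/udp.h>	/* SOL_UDP, UDP_CORK */
#include <sys/socket.h>

static void send_one_datagram_in_two_writes(int connected_fd)
{
	int on = 1, off = 0;

	setsockopt(connected_fd, SOL_UDP, UDP_CORK, &on, sizeof(on));
	send(connected_fd, "hello ", 6, 0);
	send(connected_fd, "world", 5, 0);
	/* dropping the cork pushes one 11-byte UDP datagram */
	setsockopt(connected_fd, SOL_UDP, UDP_CORK, &off, sizeof(off));
}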
   1320
   1321int udp_sendpage(struct sock *sk, struct page *page, int offset,
   1322		 size_t size, int flags)
   1323{
   1324	struct inet_sock *inet = inet_sk(sk);
   1325	struct udp_sock *up = udp_sk(sk);
   1326	int ret;
   1327
   1328	if (flags & MSG_SENDPAGE_NOTLAST)
   1329		flags |= MSG_MORE;
   1330
   1331	if (!up->pending) {
   1332		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };
   1333
    1334		/* Call udp_sendmsg to specify the destination address, which
    1335		 * the sendpage interface can't pass.
   1336		 * This will succeed only when the socket is connected.
   1337		 */
   1338		ret = udp_sendmsg(sk, &msg, 0);
   1339		if (ret < 0)
   1340			return ret;
   1341	}
   1342
   1343	lock_sock(sk);
   1344
   1345	if (unlikely(!up->pending)) {
   1346		release_sock(sk);
   1347
   1348		net_dbg_ratelimited("cork failed\n");
   1349		return -EINVAL;
   1350	}
   1351
   1352	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
   1353			     page, offset, size, flags);
   1354	if (ret == -EOPNOTSUPP) {
   1355		release_sock(sk);
   1356		return sock_no_sendpage(sk->sk_socket, page, offset,
   1357					size, flags);
   1358	}
   1359	if (ret < 0) {
   1360		udp_flush_pending_frames(sk);
   1361		goto out;
   1362	}
   1363
   1364	up->len += size;
   1365	if (!(READ_ONCE(up->corkflag) || (flags&MSG_MORE)))
   1366		ret = udp_push_pending_frames(sk);
   1367	if (!ret)
   1368		ret = size;
   1369out:
   1370	release_sock(sk);
   1371	return ret;
   1372}
   1373
   1374#define UDP_SKB_IS_STATELESS 0x80000000
   1375
   1376/* all head states (dst, sk, nf conntrack) except skb extensions are
   1377 * cleared by udp_rcv().
   1378 *
   1379 * We need to preserve secpath, if present, to eventually process
   1380 * IP_CMSG_PASSSEC at recvmsg() time.
   1381 *
   1382 * Other extensions can be cleared.
   1383 */
   1384static bool udp_try_make_stateless(struct sk_buff *skb)
   1385{
   1386	if (!skb_has_extensions(skb))
   1387		return true;
   1388
   1389	if (!secpath_exists(skb)) {
   1390		skb_ext_reset(skb);
   1391		return true;
   1392	}
   1393
   1394	return false;
   1395}
   1396
   1397static void udp_set_dev_scratch(struct sk_buff *skb)
   1398{
   1399	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);
   1400
   1401	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
   1402	scratch->_tsize_state = skb->truesize;
   1403#if BITS_PER_LONG == 64
   1404	scratch->len = skb->len;
   1405	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
   1406	scratch->is_linear = !skb_is_nonlinear(skb);
   1407#endif
   1408	if (udp_try_make_stateless(skb))
   1409		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
   1410}
   1411
   1412static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
   1413{
   1414	/* We come here after udp_lib_checksum_complete() returned 0.
   1415	 * This means that __skb_checksum_complete() might have
   1416	 * set skb->csum_valid to 1.
   1417	 * On 64bit platforms, we can set csum_unnecessary
   1418	 * to true, but only if the skb is not shared.
   1419	 */
   1420#if BITS_PER_LONG == 64
   1421	if (!skb_shared(skb))
   1422		udp_skb_scratch(skb)->csum_unnecessary = true;
   1423#endif
   1424}
   1425
   1426static int udp_skb_truesize(struct sk_buff *skb)
   1427{
   1428	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
   1429}
   1430
   1431static bool udp_skb_has_head_state(struct sk_buff *skb)
   1432{
   1433	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
   1434}
   1435
   1436/* fully reclaim rmem/fwd memory allocated for skb */
   1437static void udp_rmem_release(struct sock *sk, int size, int partial,
   1438			     bool rx_queue_lock_held)
   1439{
   1440	struct udp_sock *up = udp_sk(sk);
   1441	struct sk_buff_head *sk_queue;
   1442	int amt;
   1443
   1444	if (likely(partial)) {
   1445		up->forward_deficit += size;
   1446		size = up->forward_deficit;
   1447		if (size < (sk->sk_rcvbuf >> 2) &&
   1448		    !skb_queue_empty(&up->reader_queue))
   1449			return;
   1450	} else {
   1451		size += up->forward_deficit;
   1452	}
   1453	up->forward_deficit = 0;
   1454
   1455	/* acquire the sk_receive_queue for fwd allocated memory scheduling,
    1456	 * if the caller doesn't hold it already
   1457	 */
   1458	sk_queue = &sk->sk_receive_queue;
   1459	if (!rx_queue_lock_held)
   1460		spin_lock(&sk_queue->lock);
   1461
   1462
   1463	sk->sk_forward_alloc += size;
   1464	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
   1465	sk->sk_forward_alloc -= amt;
   1466
   1467	if (amt)
   1468		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
   1469
   1470	atomic_sub(size, &sk->sk_rmem_alloc);
   1471
   1472	/* this can save us from acquiring the rx queue lock on next receive */
   1473	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);
   1474
   1475	if (!rx_queue_lock_held)
   1476		spin_unlock(&sk_queue->lock);
   1477}
   1478
   1479/* Note: called with reader_queue.lock held.
   1480 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
   1481 * This avoids a cache line miss while receive_queue lock is held.
   1482 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
   1483 */
   1484void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
   1485{
   1486	prefetch(&skb->data);
   1487	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
   1488}
   1489EXPORT_SYMBOL(udp_skb_destructor);
   1490
   1491/* as above, but the caller held the rx queue lock, too */
   1492static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
   1493{
   1494	prefetch(&skb->data);
   1495	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
   1496}
   1497
   1498/* Idea of busylocks is to let producers grab an extra spinlock
   1499 * to relieve pressure on the receive_queue spinlock shared by consumer.
   1500 * Under flood, this means that only one producer can be in line
   1501 * trying to acquire the receive_queue spinlock.
    1502 * These busylocks can be allocated on a per-cpu basis, instead of a
    1503 * per-socket one (that would consume a cache line per socket)
   1504 */
   1505static int udp_busylocks_log __read_mostly;
   1506static spinlock_t *udp_busylocks __read_mostly;
   1507
   1508static spinlock_t *busylock_acquire(void *ptr)
   1509{
   1510	spinlock_t *busy;
   1511
   1512	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
   1513	spin_lock(busy);
   1514	return busy;
   1515}
   1516
   1517static void busylock_release(spinlock_t *busy)
   1518{
   1519	if (busy)
   1520		spin_unlock(busy);
   1521}
   1522
   1523int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
   1524{
   1525	struct sk_buff_head *list = &sk->sk_receive_queue;
   1526	int rmem, delta, amt, err = -ENOMEM;
   1527	spinlock_t *busy = NULL;
   1528	int size;
   1529
   1530	/* try to avoid the costly atomic add/sub pair when the receive
   1531	 * queue is full; always allow at least a packet
   1532	 */
   1533	rmem = atomic_read(&sk->sk_rmem_alloc);
   1534	if (rmem > sk->sk_rcvbuf)
   1535		goto drop;
   1536
    1537	/* Under memory pressure, it can be helpful for udp_recvmsg()
    1538	 * to have linear skbs:
   1539	 * - Reduce memory overhead and thus increase receive queue capacity
   1540	 * - Less cache line misses at copyout() time
   1541	 * - Less work at consume_skb() (less alien page frag freeing)
   1542	 */
   1543	if (rmem > (sk->sk_rcvbuf >> 1)) {
   1544		skb_condense(skb);
   1545
   1546		busy = busylock_acquire(sk);
   1547	}
   1548	size = skb->truesize;
   1549	udp_set_dev_scratch(skb);
   1550
   1551	/* we drop only if the receive buf is full and the receive
   1552	 * queue contains some other skb
   1553	 */
   1554	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
   1555	if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
   1556		goto uncharge_drop;
   1557
   1558	spin_lock(&list->lock);
   1559	if (size >= sk->sk_forward_alloc) {
   1560		amt = sk_mem_pages(size);
   1561		delta = amt << SK_MEM_QUANTUM_SHIFT;
   1562		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
   1563			err = -ENOBUFS;
   1564			spin_unlock(&list->lock);
   1565			goto uncharge_drop;
   1566		}
   1567
   1568		sk->sk_forward_alloc += delta;
   1569	}
   1570
   1571	sk->sk_forward_alloc -= size;
   1572
   1573	/* no need to setup a destructor, we will explicitly release the
   1574	 * forward allocated memory on dequeue
   1575	 */
   1576	sock_skb_set_dropcount(sk, skb);
   1577
   1578	__skb_queue_tail(list, skb);
   1579	spin_unlock(&list->lock);
   1580
   1581	if (!sock_flag(sk, SOCK_DEAD))
   1582		sk->sk_data_ready(sk);
   1583
   1584	busylock_release(busy);
   1585	return 0;
   1586
   1587uncharge_drop:
   1588	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
   1589
   1590drop:
   1591	atomic_inc(&sk->sk_drops);
   1592	busylock_release(busy);
   1593	return err;
   1594}
   1595EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
   1596
   1597void udp_destruct_sock(struct sock *sk)
   1598{
   1599	/* reclaim completely the forward allocated memory */
   1600	struct udp_sock *up = udp_sk(sk);
   1601	unsigned int total = 0;
   1602	struct sk_buff *skb;
   1603
   1604	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
   1605	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
   1606		total += skb->truesize;
   1607		kfree_skb(skb);
   1608	}
   1609	udp_rmem_release(sk, total, 0, true);
   1610
   1611	inet_sock_destruct(sk);
   1612}
   1613EXPORT_SYMBOL_GPL(udp_destruct_sock);
   1614
   1615int udp_init_sock(struct sock *sk)
   1616{
   1617	skb_queue_head_init(&udp_sk(sk)->reader_queue);
   1618	sk->sk_destruct = udp_destruct_sock;
   1619	return 0;
   1620}
   1621EXPORT_SYMBOL_GPL(udp_init_sock);
   1622
   1623void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
   1624{
   1625	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
   1626		bool slow = lock_sock_fast(sk);
   1627
   1628		sk_peek_offset_bwd(sk, len);
   1629		unlock_sock_fast(sk, slow);
   1630	}
   1631
   1632	if (!skb_unref(skb))
   1633		return;
   1634
   1635	/* In the more common cases we cleared the head states previously,
   1636	 * see __udp_queue_rcv_skb().
   1637	 */
   1638	if (unlikely(udp_skb_has_head_state(skb)))
   1639		skb_release_head_state(skb);
   1640	__consume_stateless_skb(skb);
   1641}
   1642EXPORT_SYMBOL_GPL(skb_consume_udp);
   1643
   1644static struct sk_buff *__first_packet_length(struct sock *sk,
   1645					     struct sk_buff_head *rcvq,
   1646					     int *total)
   1647{
   1648	struct sk_buff *skb;
   1649
   1650	while ((skb = skb_peek(rcvq)) != NULL) {
   1651		if (udp_lib_checksum_complete(skb)) {
   1652			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
   1653					IS_UDPLITE(sk));
   1654			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
   1655					IS_UDPLITE(sk));
   1656			atomic_inc(&sk->sk_drops);
   1657			__skb_unlink(skb, rcvq);
   1658			*total += skb->truesize;
   1659			kfree_skb(skb);
   1660		} else {
   1661			udp_skb_csum_unnecessary_set(skb);
   1662			break;
   1663		}
   1664	}
   1665	return skb;
   1666}
   1667
   1668/**
   1669 *	first_packet_length	- return length of first packet in receive queue
   1670 *	@sk: socket
   1671 *
   1672 *	Drops all bad checksum frames, until a valid one is found.
   1673 *	Returns the length of found skb, or -1 if none is found.
   1674 */
   1675static int first_packet_length(struct sock *sk)
   1676{
   1677	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
   1678	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
   1679	struct sk_buff *skb;
   1680	int total = 0;
   1681	int res;
   1682
   1683	spin_lock_bh(&rcvq->lock);
   1684	skb = __first_packet_length(sk, rcvq, &total);
   1685	if (!skb && !skb_queue_empty_lockless(sk_queue)) {
   1686		spin_lock(&sk_queue->lock);
   1687		skb_queue_splice_tail_init(sk_queue, rcvq);
   1688		spin_unlock(&sk_queue->lock);
   1689
   1690		skb = __first_packet_length(sk, rcvq, &total);
   1691	}
   1692	res = skb ? skb->len : -1;
   1693	if (total)
   1694		udp_rmem_release(sk, total, 1, false);
   1695	spin_unlock_bh(&rcvq->lock);
   1696	return res;
   1697}
   1698
   1699/*
   1700 *	IOCTL requests applicable to the UDP protocol
   1701 */
   1702
   1703int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
   1704{
   1705	switch (cmd) {
   1706	case SIOCOUTQ:
   1707	{
   1708		int amount = sk_wmem_alloc_get(sk);
   1709
   1710		return put_user(amount, (int __user *)arg);
   1711	}
   1712
   1713	case SIOCINQ:
   1714	{
   1715		int amount = max_t(int, 0, first_packet_length(sk));
   1716
   1717		return put_user(amount, (int __user *)arg);
   1718	}
   1719
   1720	default:
   1721		return -ENOIOCTLCMD;
   1722	}
   1723
   1724	return 0;
   1725}
   1726EXPORT_SYMBOL(udp_ioctl);
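/* SIOCINQ reports the payload length of the next pending datagram (via
 * first_packet_length() above) and SIOCOUTQ the amount of data queued but
 * not yet sent.  A minimal userspace sketch, assuming "fd" is a bound UDP
 * socket (not part of this file, error handling omitted):
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int pending, unsent;
 *
 *	if (ioctl(fd, SIOCINQ, &pending) == 0)
 *		printf("next datagram: %d bytes\n", pending);
 *	if (ioctl(fd, SIOCOUTQ, &unsent) == 0)
 *		printf("not yet sent: %d bytes\n", unsent);
 */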
   1727
   1728struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
   1729			       int *off, int *err)
   1730{
   1731	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
   1732	struct sk_buff_head *queue;
   1733	struct sk_buff *last;
   1734	long timeo;
   1735	int error;
   1736
   1737	queue = &udp_sk(sk)->reader_queue;
   1738	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
   1739	do {
   1740		struct sk_buff *skb;
   1741
   1742		error = sock_error(sk);
   1743		if (error)
   1744			break;
   1745
   1746		error = -EAGAIN;
   1747		do {
   1748			spin_lock_bh(&queue->lock);
   1749			skb = __skb_try_recv_from_queue(sk, queue, flags, off,
   1750							err, &last);
   1751			if (skb) {
   1752				if (!(flags & MSG_PEEK))
   1753					udp_skb_destructor(sk, skb);
   1754				spin_unlock_bh(&queue->lock);
   1755				return skb;
   1756			}
   1757
   1758			if (skb_queue_empty_lockless(sk_queue)) {
   1759				spin_unlock_bh(&queue->lock);
   1760				goto busy_check;
   1761			}
   1762
    1763			/* refill the reader queue and walk it again;
   1764			 * keep both queues locked to avoid re-acquiring
   1765			 * the sk_receive_queue lock if fwd memory scheduling
   1766			 * is needed.
   1767			 */
   1768			spin_lock(&sk_queue->lock);
   1769			skb_queue_splice_tail_init(sk_queue, queue);
   1770
   1771			skb = __skb_try_recv_from_queue(sk, queue, flags, off,
   1772							err, &last);
   1773			if (skb && !(flags & MSG_PEEK))
   1774				udp_skb_dtor_locked(sk, skb);
   1775			spin_unlock(&sk_queue->lock);
   1776			spin_unlock_bh(&queue->lock);
   1777			if (skb)
   1778				return skb;
   1779
   1780busy_check:
   1781			if (!sk_can_busy_loop(sk))
   1782				break;
   1783
   1784			sk_busy_loop(sk, flags & MSG_DONTWAIT);
   1785		} while (!skb_queue_empty_lockless(sk_queue));
   1786
   1787		/* sk_queue is empty, reader_queue may contain peeked packets */
   1788	} while (timeo &&
   1789		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
   1790					      &error, &timeo,
   1791					      (struct sk_buff *)sk_queue));
   1792
   1793	*err = error;
   1794	return NULL;
   1795}
   1796EXPORT_SYMBOL(__skb_recv_udp);
   1797
   1798int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
   1799		  sk_read_actor_t recv_actor)
   1800{
   1801	int copied = 0;
   1802
   1803	while (1) {
   1804		struct sk_buff *skb;
   1805		int err, used;
   1806
   1807		skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
   1808		if (!skb)
   1809			return err;
   1810
   1811		if (udp_lib_checksum_complete(skb)) {
   1812			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
   1813					IS_UDPLITE(sk));
   1814			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
   1815					IS_UDPLITE(sk));
   1816			atomic_inc(&sk->sk_drops);
   1817			kfree_skb(skb);
   1818			continue;
   1819		}
   1820
   1821		used = recv_actor(desc, skb, 0, skb->len);
   1822		if (used <= 0) {
   1823			if (!copied)
   1824				copied = used;
   1825			kfree_skb(skb);
   1826			break;
   1827		} else if (used <= skb->len) {
   1828			copied += used;
   1829		}
   1830
   1831		kfree_skb(skb);
   1832		if (!desc->count)
   1833			break;
   1834	}
   1835
   1836	return copied;
   1837}
   1838EXPORT_SYMBOL(udp_read_sock);
   1839
   1840/*
    1841 * 	This should be easy: if there is something there, we
    1842 * 	return it; otherwise we block.
   1843 */
   1844
   1845int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
   1846		int *addr_len)
   1847{
   1848	struct inet_sock *inet = inet_sk(sk);
   1849	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
   1850	struct sk_buff *skb;
   1851	unsigned int ulen, copied;
   1852	int off, err, peeking = flags & MSG_PEEK;
   1853	int is_udplite = IS_UDPLITE(sk);
   1854	bool checksum_valid = false;
   1855
   1856	if (flags & MSG_ERRQUEUE)
   1857		return ip_recv_error(sk, msg, len, addr_len);
   1858
   1859try_again:
   1860	off = sk_peek_offset(sk, flags);
   1861	skb = __skb_recv_udp(sk, flags, &off, &err);
   1862	if (!skb)
   1863		return err;
   1864
   1865	ulen = udp_skb_len(skb);
   1866	copied = len;
   1867	if (copied > ulen - off)
   1868		copied = ulen - off;
   1869	else if (copied < ulen)
   1870		msg->msg_flags |= MSG_TRUNC;
   1871
   1872	/*
   1873	 * If checksum is needed at all, try to do it while copying the
   1874	 * data.  If the data is truncated, or if we only want a partial
   1875	 * coverage checksum (UDP-Lite), do it before the copy.
   1876	 */
   1877
   1878	if (copied < ulen || peeking ||
   1879	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
   1880		checksum_valid = udp_skb_csum_unnecessary(skb) ||
   1881				!__udp_lib_checksum_complete(skb);
   1882		if (!checksum_valid)
   1883			goto csum_copy_err;
   1884	}
   1885
   1886	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
   1887		if (udp_skb_is_linear(skb))
   1888			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
   1889		else
   1890			err = skb_copy_datagram_msg(skb, off, msg, copied);
   1891	} else {
   1892		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
   1893
   1894		if (err == -EINVAL)
   1895			goto csum_copy_err;
   1896	}
   1897
   1898	if (unlikely(err)) {
   1899		if (!peeking) {
   1900			atomic_inc(&sk->sk_drops);
   1901			UDP_INC_STATS(sock_net(sk),
   1902				      UDP_MIB_INERRORS, is_udplite);
   1903		}
   1904		kfree_skb(skb);
   1905		return err;
   1906	}
   1907
   1908	if (!peeking)
   1909		UDP_INC_STATS(sock_net(sk),
   1910			      UDP_MIB_INDATAGRAMS, is_udplite);
   1911
   1912	sock_recv_cmsgs(msg, sk, skb);
   1913
   1914	/* Copy the address. */
   1915	if (sin) {
   1916		sin->sin_family = AF_INET;
   1917		sin->sin_port = udp_hdr(skb)->source;
   1918		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
   1919		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
   1920		*addr_len = sizeof(*sin);
   1921
   1922		BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
   1923						      (struct sockaddr *)sin);
   1924	}
   1925
   1926	if (udp_sk(sk)->gro_enabled)
   1927		udp_cmsg_recv(msg, sk, skb);
   1928
   1929	if (inet->cmsg_flags)
   1930		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);
   1931
   1932	err = copied;
   1933	if (flags & MSG_TRUNC)
   1934		err = ulen;
   1935
   1936	skb_consume_udp(sk, skb, peeking ? -err : err);
   1937	return err;
   1938
   1939csum_copy_err:
   1940	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
   1941				 udp_skb_destructor)) {
   1942		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
   1943		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
   1944	}
   1945	kfree_skb(skb);
   1946
   1947	/* starting over for a new packet, but check if we need to yield */
   1948	cond_resched();
   1949	msg->msg_flags &= ~MSG_TRUNC;
   1950	goto try_again;
   1951}
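/* When the receive buffer is smaller than the datagram, the excess bytes
 * are discarded and MSG_TRUNC is set in msg_flags; if MSG_TRUNC was also
 * passed in flags, the return value is the full datagram length (ulen)
 * rather than the number of bytes copied.  MSG_PEEK leaves the datagram
 * queued.  A minimal userspace sketch, assuming "fd" is a UDP socket with
 * a datagram pending (not part of this file):
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *
 *	char buf[16];
 *	ssize_t n;
 *
 *	n = recv(fd, buf, sizeof(buf), MSG_PEEK | MSG_TRUNC);
 *	if (n > (ssize_t)sizeof(buf))
 *		printf("datagram is %zd bytes, buffer too small\n", n);
 *	n = recv(fd, buf, sizeof(buf), 0);
 */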
   1952
   1953int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
   1954{
   1955	/* This check is replicated from __ip4_datagram_connect() and
    1956	 * intended to prevent the BPF program called below from accessing
    1957	 * bytes that are outside the bounds specified by the user in addr_len.
   1958	 */
   1959	if (addr_len < sizeof(struct sockaddr_in))
   1960		return -EINVAL;
   1961
   1962	return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
   1963}
   1964EXPORT_SYMBOL(udp_pre_connect);
   1965
   1966int __udp_disconnect(struct sock *sk, int flags)
   1967{
   1968	struct inet_sock *inet = inet_sk(sk);
   1969	/*
   1970	 *	1003.1g - break association.
   1971	 */
   1972
   1973	sk->sk_state = TCP_CLOSE;
   1974	inet->inet_daddr = 0;
   1975	inet->inet_dport = 0;
   1976	sock_rps_reset_rxhash(sk);
   1977	sk->sk_bound_dev_if = 0;
   1978	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) {
   1979		inet_reset_saddr(sk);
   1980		if (sk->sk_prot->rehash &&
   1981		    (sk->sk_userlocks & SOCK_BINDPORT_LOCK))
   1982			sk->sk_prot->rehash(sk);
   1983	}
   1984
   1985	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
   1986		sk->sk_prot->unhash(sk);
   1987		inet->inet_sport = 0;
   1988	}
   1989	sk_dst_reset(sk);
   1990	return 0;
   1991}
   1992EXPORT_SYMBOL(__udp_disconnect);
   1993
   1994int udp_disconnect(struct sock *sk, int flags)
   1995{
   1996	lock_sock(sk);
   1997	__udp_disconnect(sk, flags);
   1998	release_sock(sk);
   1999	return 0;
   2000}
   2001EXPORT_SYMBOL(udp_disconnect);
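/* __udp_disconnect() is also reached from userspace: connect() with an
 * AF_UNSPEC address on a datagram socket dissolves the association (see
 * __ip4_datagram_connect()), after which the socket can be connected to a
 * different peer.  A minimal userspace sketch, assuming "fd" is a UDP
 * socket and "peer" a filled-in struct sockaddr_in (not part of this file,
 * error handling omitted):
 *
 *	#include <sys/socket.h>
 *
 *	struct sockaddr unspec = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 *	connect(fd, &unspec, sizeof(unspec));
 */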
   2002
   2003void udp_lib_unhash(struct sock *sk)
   2004{
   2005	if (sk_hashed(sk)) {
   2006		struct udp_table *udptable = sk->sk_prot->h.udp_table;
   2007		struct udp_hslot *hslot, *hslot2;
   2008
   2009		hslot  = udp_hashslot(udptable, sock_net(sk),
   2010				      udp_sk(sk)->udp_port_hash);
   2011		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
   2012
   2013		spin_lock_bh(&hslot->lock);
   2014		if (rcu_access_pointer(sk->sk_reuseport_cb))
   2015			reuseport_detach_sock(sk);
   2016		if (sk_del_node_init_rcu(sk)) {
   2017			hslot->count--;
   2018			inet_sk(sk)->inet_num = 0;
   2019			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
   2020
   2021			spin_lock(&hslot2->lock);
   2022			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
   2023			hslot2->count--;
   2024			spin_unlock(&hslot2->lock);
   2025		}
   2026		spin_unlock_bh(&hslot->lock);
   2027	}
   2028}
   2029EXPORT_SYMBOL(udp_lib_unhash);
   2030
   2031/*
    2032 * inet_rcv_saddr was changed, we must rehash the secondary hash
   2033 */
   2034void udp_lib_rehash(struct sock *sk, u16 newhash)
   2035{
   2036	if (sk_hashed(sk)) {
   2037		struct udp_table *udptable = sk->sk_prot->h.udp_table;
   2038		struct udp_hslot *hslot, *hslot2, *nhslot2;
   2039
   2040		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
   2041		nhslot2 = udp_hashslot2(udptable, newhash);
   2042		udp_sk(sk)->udp_portaddr_hash = newhash;
   2043
   2044		if (hslot2 != nhslot2 ||
   2045		    rcu_access_pointer(sk->sk_reuseport_cb)) {
   2046			hslot = udp_hashslot(udptable, sock_net(sk),
   2047					     udp_sk(sk)->udp_port_hash);
   2048			/* we must lock primary chain too */
   2049			spin_lock_bh(&hslot->lock);
   2050			if (rcu_access_pointer(sk->sk_reuseport_cb))
   2051				reuseport_detach_sock(sk);
   2052
   2053			if (hslot2 != nhslot2) {
   2054				spin_lock(&hslot2->lock);
   2055				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
   2056				hslot2->count--;
   2057				spin_unlock(&hslot2->lock);
   2058
   2059				spin_lock(&nhslot2->lock);
   2060				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
   2061							 &nhslot2->head);
   2062				nhslot2->count++;
   2063				spin_unlock(&nhslot2->lock);
   2064			}
   2065
   2066			spin_unlock_bh(&hslot->lock);
   2067		}
   2068	}
   2069}
   2070EXPORT_SYMBOL(udp_lib_rehash);
   2071
   2072void udp_v4_rehash(struct sock *sk)
   2073{
   2074	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
   2075					  inet_sk(sk)->inet_rcv_saddr,
   2076					  inet_sk(sk)->inet_num);
   2077	udp_lib_rehash(sk, new_hash);
   2078}
   2079
   2080static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
   2081{
   2082	int rc;
   2083
   2084	if (inet_sk(sk)->inet_daddr) {
   2085		sock_rps_save_rxhash(sk, skb);
   2086		sk_mark_napi_id(sk, skb);
   2087		sk_incoming_cpu_update(sk);
   2088	} else {
   2089		sk_mark_napi_id_once(sk, skb);
   2090	}
   2091
   2092	rc = __udp_enqueue_schedule_skb(sk, skb);
   2093	if (rc < 0) {
   2094		int is_udplite = IS_UDPLITE(sk);
   2095		int drop_reason;
   2096
   2097		/* Note that an ENOMEM error is charged twice */
   2098		if (rc == -ENOMEM) {
   2099			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
   2100					is_udplite);
   2101			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
   2102		} else {
   2103			UDP_INC_STATS(sock_net(sk), UDP_MIB_MEMERRORS,
   2104				      is_udplite);
   2105			drop_reason = SKB_DROP_REASON_PROTO_MEM;
   2106		}
   2107		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
   2108		kfree_skb_reason(skb, drop_reason);
   2109		trace_udp_fail_queue_rcv_skb(rc, sk);
   2110		return -1;
   2111	}
   2112
   2113	return 0;
   2114}
   2115
   2116/* returns:
   2117 *  -1: error
   2118 *   0: success
   2119 *  >0: "udp encap" protocol resubmission
   2120 *
   2121 * Note that in the success and error cases, the skb is assumed to
   2122 * have either been requeued or freed.
   2123 */
   2124static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
   2125{
   2126	int drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
   2127	struct udp_sock *up = udp_sk(sk);
   2128	int is_udplite = IS_UDPLITE(sk);
   2129
   2130	/*
   2131	 *	Charge it to the socket, dropping if the queue is full.
   2132	 */
   2133	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
   2134		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
   2135		goto drop;
   2136	}
   2137	nf_reset_ct(skb);
   2138
   2139	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
   2140		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
   2141
   2142		/*
   2143		 * This is an encapsulation socket so pass the skb to
   2144		 * the socket's udp_encap_rcv() hook. Otherwise, just
   2145		 * fall through and pass this up the UDP socket.
   2146		 * up->encap_rcv() returns the following value:
   2147		 * =0 if skb was successfully passed to the encap
   2148		 *    handler or was discarded by it.
   2149		 * >0 if skb should be passed on to UDP.
   2150		 * <0 if skb should be resubmitted as proto -N
   2151		 */
   2152
   2153		/* if we're overly short, let UDP handle it */
   2154		encap_rcv = READ_ONCE(up->encap_rcv);
   2155		if (encap_rcv) {
   2156			int ret;
   2157
   2158			/* Verify checksum before giving to encap */
   2159			if (udp_lib_checksum_complete(skb))
   2160				goto csum_error;
   2161
   2162			ret = encap_rcv(sk, skb);
   2163			if (ret <= 0) {
   2164				__UDP_INC_STATS(sock_net(sk),
   2165						UDP_MIB_INDATAGRAMS,
   2166						is_udplite);
   2167				return -ret;
   2168			}
   2169		}
   2170
   2171		/* FALLTHROUGH -- it's a UDP Packet */
   2172	}
   2173
   2174	/*
   2175	 * 	UDP-Lite specific tests, ignored on UDP sockets
   2176	 */
   2177	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
   2178
   2179		/*
   2180		 * MIB statistics other than incrementing the error count are
   2181		 * disabled for the following two types of errors: these depend
   2182		 * on the application settings, not on the functioning of the
   2183		 * protocol stack as such.
   2184		 *
   2185		 * RFC 3828 here recommends (sec 3.3): "There should also be a
   2186		 * way ... to ... at least let the receiving application block
   2187		 * delivery of packets with coverage values less than a value
   2188		 * provided by the application."
   2189		 */
   2190		if (up->pcrlen == 0) {          /* full coverage was set  */
   2191			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
   2192					    UDP_SKB_CB(skb)->cscov, skb->len);
   2193			goto drop;
   2194		}
   2195		/* The next case involves violating the min. coverage requested
    2196		 * by the receiver. This is subtle: if the receiver wants x and x is
    2197		 * greater than the buffer size/MTU, then the receiver will complain
    2198		 * that it wants x while the sender emits packets of smaller size y.
   2199		 * Therefore the above ...()->partial_cov statement is essential.
   2200		 */
   2201		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
   2202			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
   2203					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
   2204			goto drop;
   2205		}
   2206	}
   2207
   2208	prefetch(&sk->sk_rmem_alloc);
   2209	if (rcu_access_pointer(sk->sk_filter) &&
   2210	    udp_lib_checksum_complete(skb))
    2211		goto csum_error;
   2212
   2213	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
   2214		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
   2215		goto drop;
   2216	}
   2217
   2218	udp_csum_pull_header(skb);
   2219
   2220	ipv4_pktinfo_prepare(sk, skb);
   2221	return __udp_queue_rcv_skb(sk, skb);
   2222
   2223csum_error:
   2224	drop_reason = SKB_DROP_REASON_UDP_CSUM;
   2225	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
   2226drop:
   2227	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
   2228	atomic_inc(&sk->sk_drops);
   2229	kfree_skb_reason(skb, drop_reason);
   2230	return -1;
   2231}
   2232
   2233static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
   2234{
   2235	struct sk_buff *next, *segs;
   2236	int ret;
   2237
   2238	if (likely(!udp_unexpected_gso(sk, skb)))
   2239		return udp_queue_rcv_one_skb(sk, skb);
   2240
   2241	BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_GSO_CB_OFFSET);
   2242	__skb_push(skb, -skb_mac_offset(skb));
   2243	segs = udp_rcv_segment(sk, skb, true);
   2244	skb_list_walk_safe(segs, skb, next) {
   2245		__skb_pull(skb, skb_transport_offset(skb));
   2246
   2247		udp_post_segment_fix_csum(skb);
   2248		ret = udp_queue_rcv_one_skb(sk, skb);
   2249		if (ret > 0)
   2250			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret);
   2251	}
   2252	return 0;
   2253}
   2254
   2255/* For TCP sockets, sk_rx_dst is protected by socket lock
   2256 * For UDP, we use xchg() to guard against concurrent changes.
   2257 */
   2258bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
   2259{
   2260	struct dst_entry *old;
   2261
   2262	if (dst_hold_safe(dst)) {
   2263		old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst);
   2264		dst_release(old);
   2265		return old != dst;
   2266	}
   2267	return false;
   2268}
   2269EXPORT_SYMBOL(udp_sk_rx_dst_set);
   2270
   2271/*
   2272 *	Multicasts and broadcasts go to each listener.
   2273 *
   2274 *	Note: called only from the BH handler context.
   2275 */
   2276static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
   2277				    struct udphdr  *uh,
   2278				    __be32 saddr, __be32 daddr,
   2279				    struct udp_table *udptable,
   2280				    int proto)
   2281{
   2282	struct sock *sk, *first = NULL;
   2283	unsigned short hnum = ntohs(uh->dest);
   2284	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
   2285	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
   2286	unsigned int offset = offsetof(typeof(*sk), sk_node);
   2287	int dif = skb->dev->ifindex;
   2288	int sdif = inet_sdif(skb);
   2289	struct hlist_node *node;
   2290	struct sk_buff *nskb;
   2291
   2292	if (use_hash2) {
   2293		hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
   2294			    udptable->mask;
   2295		hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
   2296start_lookup:
   2297		hslot = &udptable->hash2[hash2];
   2298		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
   2299	}
   2300
   2301	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
   2302		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
   2303					 uh->source, saddr, dif, sdif, hnum))
   2304			continue;
   2305
   2306		if (!first) {
   2307			first = sk;
   2308			continue;
   2309		}
   2310		nskb = skb_clone(skb, GFP_ATOMIC);
   2311
   2312		if (unlikely(!nskb)) {
   2313			atomic_inc(&sk->sk_drops);
   2314			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
   2315					IS_UDPLITE(sk));
   2316			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
   2317					IS_UDPLITE(sk));
   2318			continue;
   2319		}
   2320		if (udp_queue_rcv_skb(sk, nskb) > 0)
   2321			consume_skb(nskb);
   2322	}
   2323
   2324	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
   2325	if (use_hash2 && hash2 != hash2_any) {
   2326		hash2 = hash2_any;
   2327		goto start_lookup;
   2328	}
   2329
   2330	if (first) {
   2331		if (udp_queue_rcv_skb(first, skb) > 0)
   2332			consume_skb(skb);
   2333	} else {
   2334		kfree_skb(skb);
   2335		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
   2336				proto == IPPROTO_UDPLITE);
   2337	}
   2338	return 0;
   2339}
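/* Each matching listener above receives its own clone of the datagram.  A
 * minimal userspace sketch of such a listener, assuming the hypothetical
 * group 239.0.0.1 and port 5000 (not part of this file, error handling
 * omitted):
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in addr = { 0 };
 *	struct ip_mreq mreq = { 0 };
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 *	addr.sin_family = AF_INET;
 *	addr.sin_addr.s_addr = htonl(INADDR_ANY);
 *	addr.sin_port = htons(5000);
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	mreq.imr_multiaddr.s_addr = inet_addr("239.0.0.1");
 *	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
 *	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 */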
   2340
    2341/* Initialize the UDP checksum. If this returns zero (success) and
    2342 * skb->ip_summed is CHECKSUM_UNNECESSARY, no more checks are required.
    2343 * Otherwise, csum completion requires checksumming the packet body,
    2344 * including the UDP header, and folding it into skb->csum.
   2345 */
   2346static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
   2347				 int proto)
   2348{
   2349	int err;
   2350
   2351	UDP_SKB_CB(skb)->partial_cov = 0;
   2352	UDP_SKB_CB(skb)->cscov = skb->len;
   2353
   2354	if (proto == IPPROTO_UDPLITE) {
   2355		err = udplite_checksum_init(skb, uh);
   2356		if (err)
   2357			return err;
   2358
   2359		if (UDP_SKB_CB(skb)->partial_cov) {
   2360			skb->csum = inet_compute_pseudo(skb, proto);
   2361			return 0;
   2362		}
   2363	}
   2364
   2365	/* Note, we are only interested in != 0 or == 0, thus the
   2366	 * force to int.
   2367	 */
   2368	err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
   2369							inet_compute_pseudo);
   2370	if (err)
   2371		return err;
   2372
   2373	if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
   2374		/* If SW calculated the value, we know it's bad */
   2375		if (skb->csum_complete_sw)
   2376			return 1;
   2377
   2378		/* HW says the value is bad. Let's validate that.
   2379		 * skb->csum is no longer the full packet checksum,
   2380		 * so don't treat it as such.
   2381		 */
   2382		skb_checksum_complete_unset(skb);
   2383	}
   2384
   2385	return 0;
   2386}
   2387
    2388/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
    2389 * return code conversion for IP layer consumption
   2390 */
   2391static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
   2392			       struct udphdr *uh)
   2393{
   2394	int ret;
   2395
   2396	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
   2397		skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo);
   2398
   2399	ret = udp_queue_rcv_skb(sk, skb);
   2400
    2401	/* a return value > 0 means the input should be resubmitted, but
    2402	 * the IP layer wants the return value to be -protocol or 0
   2403	 */
   2404	if (ret > 0)
   2405		return -ret;
   2406	return 0;
   2407}
   2408
   2409/*
   2410 *	All we need to do is get the socket, and then do a checksum.
   2411 */
   2412
   2413int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
   2414		   int proto)
   2415{
   2416	struct sock *sk;
   2417	struct udphdr *uh;
   2418	unsigned short ulen;
   2419	struct rtable *rt = skb_rtable(skb);
   2420	__be32 saddr, daddr;
   2421	struct net *net = dev_net(skb->dev);
   2422	bool refcounted;
   2423	int drop_reason;
   2424
   2425	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
   2426
   2427	/*
   2428	 *  Validate the packet.
   2429	 */
   2430	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
   2431		goto drop;		/* No space for header. */
   2432
   2433	uh   = udp_hdr(skb);
   2434	ulen = ntohs(uh->len);
   2435	saddr = ip_hdr(skb)->saddr;
   2436	daddr = ip_hdr(skb)->daddr;
   2437
   2438	if (ulen > skb->len)
   2439		goto short_packet;
   2440
   2441	if (proto == IPPROTO_UDP) {
   2442		/* UDP validates ulen. */
   2443		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
   2444			goto short_packet;
   2445		uh = udp_hdr(skb);
   2446	}
   2447
   2448	if (udp4_csum_init(skb, uh, proto))
   2449		goto csum_error;
   2450
   2451	sk = skb_steal_sock(skb, &refcounted);
   2452	if (sk) {
   2453		struct dst_entry *dst = skb_dst(skb);
   2454		int ret;
   2455
   2456		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
   2457			udp_sk_rx_dst_set(sk, dst);
   2458
   2459		ret = udp_unicast_rcv_skb(sk, skb, uh);
   2460		if (refcounted)
   2461			sock_put(sk);
   2462		return ret;
   2463	}
   2464
   2465	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
   2466		return __udp4_lib_mcast_deliver(net, skb, uh,
   2467						saddr, daddr, udptable, proto);
   2468
   2469	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
   2470	if (sk)
   2471		return udp_unicast_rcv_skb(sk, skb, uh);
   2472
   2473	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
   2474		goto drop;
   2475	nf_reset_ct(skb);
   2476
    2477	/* No socket. Drop the packet silently if the checksum is wrong */
   2478	if (udp_lib_checksum_complete(skb))
   2479		goto csum_error;
   2480
   2481	drop_reason = SKB_DROP_REASON_NO_SOCKET;
   2482	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
   2483	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
   2484
   2485	/*
    2486	 * Hmm.  We got a UDP packet to a port on which we
    2487	 * are not listening.  Ignore it.
   2488	 */
   2489	kfree_skb_reason(skb, drop_reason);
   2490	return 0;
   2491
   2492short_packet:
   2493	drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
   2494	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
   2495			    proto == IPPROTO_UDPLITE ? "Lite" : "",
   2496			    &saddr, ntohs(uh->source),
   2497			    ulen, skb->len,
   2498			    &daddr, ntohs(uh->dest));
   2499	goto drop;
   2500
   2501csum_error:
   2502	/*
   2503	 * RFC1122: OK.  Discards the bad packet silently (as far as
   2504	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
   2505	 */
   2506	drop_reason = SKB_DROP_REASON_UDP_CSUM;
   2507	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
   2508			    proto == IPPROTO_UDPLITE ? "Lite" : "",
   2509			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
   2510			    ulen);
   2511	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
   2512drop:
   2513	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
   2514	kfree_skb_reason(skb, drop_reason);
   2515	return 0;
   2516}
   2517
   2518/* We can only early demux multicast if there is a single matching socket.
    2519 * If more than one socket is found, return NULL.
   2520 */
   2521static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
   2522						  __be16 loc_port, __be32 loc_addr,
   2523						  __be16 rmt_port, __be32 rmt_addr,
   2524						  int dif, int sdif)
   2525{
   2526	struct sock *sk, *result;
   2527	unsigned short hnum = ntohs(loc_port);
   2528	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
   2529	struct udp_hslot *hslot = &udp_table.hash[slot];
   2530
    2531	/* Do not bother scanning an overly long list */
   2532	if (hslot->count > 10)
   2533		return NULL;
   2534
   2535	result = NULL;
   2536	sk_for_each_rcu(sk, &hslot->head) {
   2537		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
   2538					rmt_port, rmt_addr, dif, sdif, hnum)) {
   2539			if (result)
   2540				return NULL;
   2541			result = sk;
   2542		}
   2543	}
   2544
   2545	return result;
   2546}
   2547
   2548/* For unicast we should only early demux connected sockets or we can
   2549 * break forwarding setups.  The chains here can be long so only check
   2550 * if the first socket is an exact match and if not move on.
   2551 */
   2552static struct sock *__udp4_lib_demux_lookup(struct net *net,
   2553					    __be16 loc_port, __be32 loc_addr,
   2554					    __be16 rmt_port, __be32 rmt_addr,
   2555					    int dif, int sdif)
   2556{
   2557	unsigned short hnum = ntohs(loc_port);
   2558	unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
   2559	unsigned int slot2 = hash2 & udp_table.mask;
   2560	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
   2561	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
   2562	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
   2563	struct sock *sk;
   2564
   2565	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
   2566		if (inet_match(net, sk, acookie, ports, dif, sdif))
   2567			return sk;
   2568		/* Only check first socket in chain */
   2569		break;
   2570	}
   2571	return NULL;
   2572}
   2573
   2574int udp_v4_early_demux(struct sk_buff *skb)
   2575{
   2576	struct net *net = dev_net(skb->dev);
   2577	struct in_device *in_dev = NULL;
   2578	const struct iphdr *iph;
   2579	const struct udphdr *uh;
   2580	struct sock *sk = NULL;
   2581	struct dst_entry *dst;
   2582	int dif = skb->dev->ifindex;
   2583	int sdif = inet_sdif(skb);
   2584	int ours;
   2585
   2586	/* validate the packet */
   2587	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
   2588		return 0;
   2589
   2590	iph = ip_hdr(skb);
   2591	uh = udp_hdr(skb);
   2592
   2593	if (skb->pkt_type == PACKET_MULTICAST) {
   2594		in_dev = __in_dev_get_rcu(skb->dev);
   2595
   2596		if (!in_dev)
   2597			return 0;
   2598
   2599		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
   2600				       iph->protocol);
   2601		if (!ours)
   2602			return 0;
   2603
   2604		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
   2605						   uh->source, iph->saddr,
   2606						   dif, sdif);
   2607	} else if (skb->pkt_type == PACKET_HOST) {
   2608		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
   2609					     uh->source, iph->saddr, dif, sdif);
   2610	}
   2611
   2612	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
   2613		return 0;
   2614
   2615	skb->sk = sk;
   2616	skb->destructor = sock_efree;
   2617	dst = rcu_dereference(sk->sk_rx_dst);
   2618
   2619	if (dst)
   2620		dst = dst_check(dst, 0);
   2621	if (dst) {
   2622		u32 itag = 0;
   2623
   2624		/* set noref for now.
   2625		 * any place which wants to hold dst has to call
   2626		 * dst_hold_safe()
   2627		 */
   2628		skb_dst_set_noref(skb, dst);
   2629
   2630		/* for unconnected multicast sockets we need to validate
   2631		 * the source on each packet
   2632		 */
   2633		if (!inet_sk(sk)->inet_daddr && in_dev)
   2634			return ip_mc_validate_source(skb, iph->daddr,
   2635						     iph->saddr,
   2636						     iph->tos & IPTOS_RT_MASK,
   2637						     skb->dev, in_dev, &itag);
   2638	}
   2639	return 0;
   2640}
   2641
   2642int udp_rcv(struct sk_buff *skb)
   2643{
   2644	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
   2645}
   2646
   2647void udp_destroy_sock(struct sock *sk)
   2648{
   2649	struct udp_sock *up = udp_sk(sk);
   2650	bool slow = lock_sock_fast(sk);
   2651
   2652	/* protects from races with udp_abort() */
   2653	sock_set_flag(sk, SOCK_DEAD);
   2654	udp_flush_pending_frames(sk);
   2655	unlock_sock_fast(sk, slow);
   2656	if (static_branch_unlikely(&udp_encap_needed_key)) {
   2657		if (up->encap_type) {
   2658			void (*encap_destroy)(struct sock *sk);
   2659			encap_destroy = READ_ONCE(up->encap_destroy);
   2660			if (encap_destroy)
   2661				encap_destroy(sk);
   2662		}
   2663		if (up->encap_enabled)
   2664			static_branch_dec(&udp_encap_needed_key);
   2665	}
   2666}
   2667
   2668/*
   2669 *	Socket option code for UDP
   2670 */
   2671int udp_lib_setsockopt(struct sock *sk, int level, int optname,
   2672		       sockptr_t optval, unsigned int optlen,
   2673		       int (*push_pending_frames)(struct sock *))
   2674{
   2675	struct udp_sock *up = udp_sk(sk);
   2676	int val, valbool;
   2677	int err = 0;
   2678	int is_udplite = IS_UDPLITE(sk);
   2679
   2680	if (optlen < sizeof(int))
   2681		return -EINVAL;
   2682
   2683	if (copy_from_sockptr(&val, optval, sizeof(val)))
   2684		return -EFAULT;
   2685
   2686	valbool = val ? 1 : 0;
   2687
   2688	switch (optname) {
   2689	case UDP_CORK:
   2690		if (val != 0) {
   2691			WRITE_ONCE(up->corkflag, 1);
   2692		} else {
   2693			WRITE_ONCE(up->corkflag, 0);
   2694			lock_sock(sk);
   2695			push_pending_frames(sk);
   2696			release_sock(sk);
   2697		}
   2698		break;
   2699
   2700	case UDP_ENCAP:
   2701		switch (val) {
   2702		case 0:
   2703#ifdef CONFIG_XFRM
   2704		case UDP_ENCAP_ESPINUDP:
   2705		case UDP_ENCAP_ESPINUDP_NON_IKE:
   2706#if IS_ENABLED(CONFIG_IPV6)
   2707			if (sk->sk_family == AF_INET6)
   2708				up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv;
   2709			else
   2710#endif
   2711				up->encap_rcv = xfrm4_udp_encap_rcv;
   2712#endif
   2713			fallthrough;
   2714		case UDP_ENCAP_L2TPINUDP:
   2715			up->encap_type = val;
   2716			lock_sock(sk);
   2717			udp_tunnel_encap_enable(sk->sk_socket);
   2718			release_sock(sk);
   2719			break;
   2720		default:
   2721			err = -ENOPROTOOPT;
   2722			break;
   2723		}
   2724		break;
   2725
   2726	case UDP_NO_CHECK6_TX:
   2727		up->no_check6_tx = valbool;
   2728		break;
   2729
   2730	case UDP_NO_CHECK6_RX:
   2731		up->no_check6_rx = valbool;
   2732		break;
   2733
   2734	case UDP_SEGMENT:
   2735		if (val < 0 || val > USHRT_MAX)
   2736			return -EINVAL;
   2737		WRITE_ONCE(up->gso_size, val);
   2738		break;
   2739
   2740	case UDP_GRO:
   2741		lock_sock(sk);
   2742
   2743		/* when enabling GRO, accept the related GSO packet type */
   2744		if (valbool)
   2745			udp_tunnel_encap_enable(sk->sk_socket);
   2746		up->gro_enabled = valbool;
   2747		up->accept_udp_l4 = valbool;
   2748		release_sock(sk);
   2749		break;
   2750
   2751	/*
   2752	 * 	UDP-Lite's partial checksum coverage (RFC 3828).
   2753	 */
    2754	/* The sender sets the actual checksum coverage length via this option.
    2755	 * The case coverage > packet length is handled by the send module. */
   2756	case UDPLITE_SEND_CSCOV:
   2757		if (!is_udplite)         /* Disable the option on UDP sockets */
   2758			return -ENOPROTOOPT;
   2759		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
   2760			val = 8;
   2761		else if (val > USHRT_MAX)
   2762			val = USHRT_MAX;
   2763		up->pcslen = val;
   2764		up->pcflag |= UDPLITE_SEND_CC;
   2765		break;
   2766
   2767	/* The receiver specifies a minimum checksum coverage value. To make
   2768	 * sense, this should be set to at least 8 (as done below). If zero is
   2769	 * used, this again means full checksum coverage.                     */
   2770	case UDPLITE_RECV_CSCOV:
   2771		if (!is_udplite)         /* Disable the option on UDP sockets */
   2772			return -ENOPROTOOPT;
   2773		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
   2774			val = 8;
   2775		else if (val > USHRT_MAX)
   2776			val = USHRT_MAX;
   2777		up->pcrlen = val;
   2778		up->pcflag |= UDPLITE_RECV_CC;
   2779		break;
   2780
   2781	default:
   2782		err = -ENOPROTOOPT;
   2783		break;
   2784	}
   2785
   2786	return err;
   2787}
   2788EXPORT_SYMBOL(udp_lib_setsockopt);
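/* UDP_SEGMENT and UDP_GRO above take a plain int.  A minimal userspace
 * sketch that enables UDP GSO with 1200-byte segments on a sending socket
 * and GRO on a receiving one ("tx_fd", "rx_fd" and the value 1200 are
 * illustrative assumptions; the UDP_SEGMENT/UDP_GRO macros may need
 * <linux/udp.h> with older libc headers):
 *
 *	#include <netinet/in.h>
 *	#include <netinet/udp.h>
 *	#include <sys/socket.h>
 *
 *	int gso_size = 1200;
 *	int on = 1;
 *
 *	setsockopt(tx_fd, IPPROTO_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));
 *	setsockopt(rx_fd, IPPROTO_UDP, UDP_GRO, &on, sizeof(on));
 */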
   2789
   2790int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
   2791		   unsigned int optlen)
   2792{
   2793	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
   2794		return udp_lib_setsockopt(sk, level, optname,
   2795					  optval, optlen,
   2796					  udp_push_pending_frames);
   2797	return ip_setsockopt(sk, level, optname, optval, optlen);
   2798}
   2799
   2800int udp_lib_getsockopt(struct sock *sk, int level, int optname,
   2801		       char __user *optval, int __user *optlen)
   2802{
   2803	struct udp_sock *up = udp_sk(sk);
   2804	int val, len;
   2805
   2806	if (get_user(len, optlen))
   2807		return -EFAULT;
   2808
    2809	if (len < 0)
    2810		return -EINVAL;
    2811
    2812	len = min_t(unsigned int, len, sizeof(int));
   2813
   2814	switch (optname) {
   2815	case UDP_CORK:
   2816		val = READ_ONCE(up->corkflag);
   2817		break;
   2818
   2819	case UDP_ENCAP:
   2820		val = up->encap_type;
   2821		break;
   2822
   2823	case UDP_NO_CHECK6_TX:
   2824		val = up->no_check6_tx;
   2825		break;
   2826
   2827	case UDP_NO_CHECK6_RX:
   2828		val = up->no_check6_rx;
   2829		break;
   2830
   2831	case UDP_SEGMENT:
   2832		val = READ_ONCE(up->gso_size);
   2833		break;
   2834
   2835	case UDP_GRO:
   2836		val = up->gro_enabled;
   2837		break;
   2838
    2839	/* The following two cannot be changed on UDP sockets; the return is
   2840	 * always 0 (which corresponds to the full checksum coverage of UDP). */
   2841	case UDPLITE_SEND_CSCOV:
   2842		val = up->pcslen;
   2843		break;
   2844
   2845	case UDPLITE_RECV_CSCOV:
   2846		val = up->pcrlen;
   2847		break;
   2848
   2849	default:
   2850		return -ENOPROTOOPT;
   2851	}
   2852
   2853	if (put_user(len, optlen))
   2854		return -EFAULT;
   2855	if (copy_to_user(optval, &val, len))
   2856		return -EFAULT;
   2857	return 0;
   2858}
   2859EXPORT_SYMBOL(udp_lib_getsockopt);
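/* The UDPLITE_*_CSCOV options only make sense on UDP-Lite sockets: setting
 * them on plain UDP fails with -ENOPROTOOPT in udp_lib_setsockopt() above,
 * and reading them there always yields 0 (full coverage).  A minimal
 * userspace sketch with a hypothetical coverage of 20 bytes (the UDPLITE_*
 * macros come from <linux/udp.h>, error handling omitted):
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *	#include <linux/udp.h>
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cov = 20;
 *	socklen_t optlen = sizeof(cov);
 *
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 *	getsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, &optlen);
 */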
   2860
   2861int udp_getsockopt(struct sock *sk, int level, int optname,
   2862		   char __user *optval, int __user *optlen)
   2863{
   2864	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
   2865		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
   2866	return ip_getsockopt(sk, level, optname, optval, optlen);
   2867}
   2868
   2869/**
   2870 * 	udp_poll - wait for a UDP event.
   2871 *	@file: - file struct
   2872 *	@sock: - socket
   2873 *	@wait: - poll table
   2874 *
    2875 *	This is the same as datagram poll, except for the special case of
    2876 *	blocking sockets. If an application is using a blocking fd
    2877 *	and a packet with a checksum error is in the queue,
    2878 *	then select could indicate that data is available,
    2879 *	but the subsequent read would block. Add special-case code
    2880 *	to work around these arguably broken applications.
   2881 */
   2882__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
   2883{
   2884	__poll_t mask = datagram_poll(file, sock, wait);
   2885	struct sock *sk = sock->sk;
   2886
   2887	if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
   2888		mask |= EPOLLIN | EPOLLRDNORM;
   2889
   2890	/* Check for false positives due to checksum errors */
   2891	if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
   2892	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
   2893		mask &= ~(EPOLLIN | EPOLLRDNORM);
   2894
   2895	/* psock ingress_msg queue should not contain any bad checksum frames */
   2896	if (sk_is_readable(sk))
   2897		mask |= EPOLLIN | EPOLLRDNORM;
   2898	return mask;
   2899
   2900}
   2901EXPORT_SYMBOL(udp_poll);
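/* A minimal userspace sketch of the blocking pattern the workaround above
 * targets, assuming "fd" is a blocking UDP socket (not part of this file):
 *
 *	#include <poll.h>
 *	#include <sys/socket.h>
 *
 *	char buf[2048];
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		recv(fd, buf, sizeof(buf), 0);
 *
 * Without the first_packet_length() check, a queued datagram with a bad
 * checksum could make poll() report readability and then leave the
 * following recv() blocked.
 */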
   2902
   2903int udp_abort(struct sock *sk, int err)
   2904{
   2905	lock_sock(sk);
   2906
   2907	/* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
   2908	 * with close()
   2909	 */
   2910	if (sock_flag(sk, SOCK_DEAD))
   2911		goto out;
   2912
   2913	sk->sk_err = err;
   2914	sk_error_report(sk);
   2915	__udp_disconnect(sk, 0);
   2916
   2917out:
   2918	release_sock(sk);
   2919
   2920	return 0;
   2921}
   2922EXPORT_SYMBOL_GPL(udp_abort);
   2923
   2924struct proto udp_prot = {
   2925	.name			= "UDP",
   2926	.owner			= THIS_MODULE,
   2927	.close			= udp_lib_close,
   2928	.pre_connect		= udp_pre_connect,
   2929	.connect		= ip4_datagram_connect,
   2930	.disconnect		= udp_disconnect,
   2931	.ioctl			= udp_ioctl,
   2932	.init			= udp_init_sock,
   2933	.destroy		= udp_destroy_sock,
   2934	.setsockopt		= udp_setsockopt,
   2935	.getsockopt		= udp_getsockopt,
   2936	.sendmsg		= udp_sendmsg,
   2937	.recvmsg		= udp_recvmsg,
   2938	.sendpage		= udp_sendpage,
   2939	.release_cb		= ip4_datagram_release_cb,
   2940	.hash			= udp_lib_hash,
   2941	.unhash			= udp_lib_unhash,
   2942	.rehash			= udp_v4_rehash,
   2943	.get_port		= udp_v4_get_port,
   2944	.put_port		= udp_lib_unhash,
   2945#ifdef CONFIG_BPF_SYSCALL
   2946	.psock_update_sk_prot	= udp_bpf_update_proto,
   2947#endif
   2948	.memory_allocated	= &udp_memory_allocated,
   2949	.sysctl_mem		= sysctl_udp_mem,
   2950	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
   2951	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
   2952	.obj_size		= sizeof(struct udp_sock),
   2953	.h.udp_table		= &udp_table,
   2954	.diag_destroy		= udp_abort,
   2955};
   2956EXPORT_SYMBOL(udp_prot);
   2957
   2958/* ------------------------------------------------------------------------ */
   2959#ifdef CONFIG_PROC_FS
   2960
   2961static struct sock *udp_get_first(struct seq_file *seq, int start)
   2962{
   2963	struct sock *sk;
   2964	struct udp_seq_afinfo *afinfo;
   2965	struct udp_iter_state *state = seq->private;
   2966	struct net *net = seq_file_net(seq);
   2967
   2968	if (state->bpf_seq_afinfo)
   2969		afinfo = state->bpf_seq_afinfo;
   2970	else
   2971		afinfo = pde_data(file_inode(seq->file));
   2972
   2973	for (state->bucket = start; state->bucket <= afinfo->udp_table->mask;
   2974	     ++state->bucket) {
   2975		struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket];
   2976
   2977		if (hlist_empty(&hslot->head))
   2978			continue;
   2979
   2980		spin_lock_bh(&hslot->lock);
   2981		sk_for_each(sk, &hslot->head) {
   2982			if (!net_eq(sock_net(sk), net))
   2983				continue;
   2984			if (afinfo->family == AF_UNSPEC ||
   2985			    sk->sk_family == afinfo->family)
   2986				goto found;
   2987		}
   2988		spin_unlock_bh(&hslot->lock);
   2989	}
   2990	sk = NULL;
   2991found:
   2992	return sk;
   2993}
   2994
   2995static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
   2996{
   2997	struct udp_seq_afinfo *afinfo;
   2998	struct udp_iter_state *state = seq->private;
   2999	struct net *net = seq_file_net(seq);
   3000
   3001	if (state->bpf_seq_afinfo)
   3002		afinfo = state->bpf_seq_afinfo;
   3003	else
   3004		afinfo = pde_data(file_inode(seq->file));
   3005
   3006	do {
   3007		sk = sk_next(sk);
   3008	} while (sk && (!net_eq(sock_net(sk), net) ||
   3009			(afinfo->family != AF_UNSPEC &&
   3010			 sk->sk_family != afinfo->family)));
   3011
   3012	if (!sk) {
   3013		if (state->bucket <= afinfo->udp_table->mask)
   3014			spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
   3015		return udp_get_first(seq, state->bucket + 1);
   3016	}
   3017	return sk;
   3018}
   3019
   3020static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
   3021{
   3022	struct sock *sk = udp_get_first(seq, 0);
   3023
   3024	if (sk)
   3025		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
   3026			--pos;
   3027	return pos ? NULL : sk;
   3028}
   3029
   3030void *udp_seq_start(struct seq_file *seq, loff_t *pos)
   3031{
   3032	struct udp_iter_state *state = seq->private;
   3033	state->bucket = MAX_UDP_PORTS;
   3034
   3035	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
   3036}
   3037EXPORT_SYMBOL(udp_seq_start);
   3038
   3039void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
   3040{
   3041	struct sock *sk;
   3042
   3043	if (v == SEQ_START_TOKEN)
   3044		sk = udp_get_idx(seq, 0);
   3045	else
   3046		sk = udp_get_next(seq, v);
   3047
   3048	++*pos;
   3049	return sk;
   3050}
   3051EXPORT_SYMBOL(udp_seq_next);
   3052
   3053void udp_seq_stop(struct seq_file *seq, void *v)
   3054{
   3055	struct udp_seq_afinfo *afinfo;
   3056	struct udp_iter_state *state = seq->private;
   3057
   3058	if (state->bpf_seq_afinfo)
   3059		afinfo = state->bpf_seq_afinfo;
   3060	else
   3061		afinfo = pde_data(file_inode(seq->file));
   3062
   3063	if (state->bucket <= afinfo->udp_table->mask)
   3064		spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
   3065}
   3066EXPORT_SYMBOL(udp_seq_stop);
   3067
   3068/* ------------------------------------------------------------------------ */
   3069static void udp4_format_sock(struct sock *sp, struct seq_file *f,
   3070		int bucket)
   3071{
   3072	struct inet_sock *inet = inet_sk(sp);
   3073	__be32 dest = inet->inet_daddr;
   3074	__be32 src  = inet->inet_rcv_saddr;
   3075	__u16 destp	  = ntohs(inet->inet_dport);
   3076	__u16 srcp	  = ntohs(inet->inet_sport);
   3077
   3078	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
   3079		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u",
   3080		bucket, src, srcp, dest, destp, sp->sk_state,
   3081		sk_wmem_alloc_get(sp),
   3082		udp_rqueue_get(sp),
   3083		0, 0L, 0,
   3084		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
   3085		0, sock_i_ino(sp),
   3086		refcount_read(&sp->sk_refcnt), sp,
   3087		atomic_read(&sp->sk_drops));
   3088}
   3089
   3090int udp4_seq_show(struct seq_file *seq, void *v)
   3091{
   3092	seq_setwidth(seq, 127);
   3093	if (v == SEQ_START_TOKEN)
   3094		seq_puts(seq, "   sl  local_address rem_address   st tx_queue "
   3095			   "rx_queue tr tm->when retrnsmt   uid  timeout "
   3096			   "inode ref pointer drops");
   3097	else {
   3098		struct udp_iter_state *state = seq->private;
   3099
   3100		udp4_format_sock(v, seq, state->bucket);
   3101	}
   3102	seq_pad(seq, '\n');
   3103	return 0;
   3104}
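/* udp4_format_sock() emits the per-socket lines of /proc/net/udp; the tr,
 * tm->when, retrnsmt and timeout columns are hard-coded to 0 for UDP and
 * exist only to mirror the /proc/net/tcp layout.  A hypothetical entry for
 * a socket bound to 0.0.0.0:68 could look like (all values illustrative):
 *
 *	  64: 00000000:0044 00000000:0000 07 00000000:00000000 00:00000000 00000000     0        0 12345 2 0000000000000000 0
 */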
   3105
   3106#ifdef CONFIG_BPF_SYSCALL
   3107struct bpf_iter__udp {
   3108	__bpf_md_ptr(struct bpf_iter_meta *, meta);
   3109	__bpf_md_ptr(struct udp_sock *, udp_sk);
   3110	uid_t uid __aligned(8);
   3111	int bucket __aligned(8);
   3112};
   3113
   3114static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
   3115			     struct udp_sock *udp_sk, uid_t uid, int bucket)
   3116{
   3117	struct bpf_iter__udp ctx;
   3118
   3119	meta->seq_num--;  /* skip SEQ_START_TOKEN */
   3120	ctx.meta = meta;
   3121	ctx.udp_sk = udp_sk;
   3122	ctx.uid = uid;
   3123	ctx.bucket = bucket;
   3124	return bpf_iter_run_prog(prog, &ctx);
   3125}
   3126
   3127static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
   3128{
   3129	struct udp_iter_state *state = seq->private;
   3130	struct bpf_iter_meta meta;
   3131	struct bpf_prog *prog;
   3132	struct sock *sk = v;
   3133	uid_t uid;
   3134
   3135	if (v == SEQ_START_TOKEN)
   3136		return 0;
   3137
   3138	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
   3139	meta.seq = seq;
   3140	prog = bpf_iter_get_info(&meta, false);
   3141	return udp_prog_seq_show(prog, &meta, v, uid, state->bucket);
   3142}
   3143
   3144static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
   3145{
   3146	struct bpf_iter_meta meta;
   3147	struct bpf_prog *prog;
   3148
   3149	if (!v) {
   3150		meta.seq = seq;
   3151		prog = bpf_iter_get_info(&meta, true);
   3152		if (prog)
   3153			(void)udp_prog_seq_show(prog, &meta, v, 0, 0);
   3154	}
   3155
   3156	udp_seq_stop(seq, v);
   3157}
   3158
   3159static const struct seq_operations bpf_iter_udp_seq_ops = {
   3160	.start		= udp_seq_start,
   3161	.next		= udp_seq_next,
   3162	.stop		= bpf_iter_udp_seq_stop,
   3163	.show		= bpf_iter_udp_seq_show,
   3164};
   3165#endif
   3166
   3167const struct seq_operations udp_seq_ops = {
   3168	.start		= udp_seq_start,
   3169	.next		= udp_seq_next,
   3170	.stop		= udp_seq_stop,
   3171	.show		= udp4_seq_show,
   3172};
   3173EXPORT_SYMBOL(udp_seq_ops);
   3174
   3175static struct udp_seq_afinfo udp4_seq_afinfo = {
   3176	.family		= AF_INET,
   3177	.udp_table	= &udp_table,
   3178};
   3179
   3180static int __net_init udp4_proc_init_net(struct net *net)
   3181{
   3182	if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops,
   3183			sizeof(struct udp_iter_state), &udp4_seq_afinfo))
   3184		return -ENOMEM;
   3185	return 0;
   3186}
   3187
   3188static void __net_exit udp4_proc_exit_net(struct net *net)
   3189{
   3190	remove_proc_entry("udp", net->proc_net);
   3191}
   3192
   3193static struct pernet_operations udp4_net_ops = {
   3194	.init = udp4_proc_init_net,
   3195	.exit = udp4_proc_exit_net,
   3196};
   3197
   3198int __init udp4_proc_init(void)
   3199{
   3200	return register_pernet_subsys(&udp4_net_ops);
   3201}
   3202
   3203void udp4_proc_exit(void)
   3204{
   3205	unregister_pernet_subsys(&udp4_net_ops);
   3206}
   3207#endif /* CONFIG_PROC_FS */
   3208
   3209static __initdata unsigned long uhash_entries;
   3210static int __init set_uhash_entries(char *str)
   3211{
   3212	ssize_t ret;
   3213
   3214	if (!str)
   3215		return 0;
   3216
   3217	ret = kstrtoul(str, 0, &uhash_entries);
   3218	if (ret)
   3219		return 0;
   3220
   3221	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
   3222		uhash_entries = UDP_HTABLE_SIZE_MIN;
   3223	return 1;
   3224}
   3225__setup("uhash_entries=", set_uhash_entries);
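/* The table size can be forced at boot, e.g. with "uhash_entries=65536" on
 * the kernel command line; non-zero values below UDP_HTABLE_SIZE_MIN are
 * rounded up above, and the default of 0 lets alloc_large_system_hash()
 * size the table from available memory in udp_table_init() below.
 */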
   3226
   3227void __init udp_table_init(struct udp_table *table, const char *name)
   3228{
   3229	unsigned int i;
   3230
   3231	table->hash = alloc_large_system_hash(name,
   3232					      2 * sizeof(struct udp_hslot),
   3233					      uhash_entries,
   3234					      21, /* one slot per 2 MB */
   3235					      0,
   3236					      &table->log,
   3237					      &table->mask,
   3238					      UDP_HTABLE_SIZE_MIN,
   3239					      64 * 1024);
   3240
   3241	table->hash2 = table->hash + (table->mask + 1);
   3242	for (i = 0; i <= table->mask; i++) {
   3243		INIT_HLIST_HEAD(&table->hash[i].head);
   3244		table->hash[i].count = 0;
   3245		spin_lock_init(&table->hash[i].lock);
   3246	}
   3247	for (i = 0; i <= table->mask; i++) {
   3248		INIT_HLIST_HEAD(&table->hash2[i].head);
   3249		table->hash2[i].count = 0;
   3250		spin_lock_init(&table->hash2[i].lock);
   3251	}
   3252}
   3253
   3254u32 udp_flow_hashrnd(void)
   3255{
   3256	static u32 hashrnd __read_mostly;
   3257
   3258	net_get_random_once(&hashrnd, sizeof(hashrnd));
   3259
   3260	return hashrnd;
   3261}
   3262EXPORT_SYMBOL(udp_flow_hashrnd);
   3263
   3264static void __udp_sysctl_init(struct net *net)
   3265{
   3266	net->ipv4.sysctl_udp_rmem_min = SK_MEM_QUANTUM;
   3267	net->ipv4.sysctl_udp_wmem_min = SK_MEM_QUANTUM;
   3268
   3269#ifdef CONFIG_NET_L3_MASTER_DEV
   3270	net->ipv4.sysctl_udp_l3mdev_accept = 0;
   3271#endif
   3272}
   3273
   3274static int __net_init udp_sysctl_init(struct net *net)
   3275{
   3276	__udp_sysctl_init(net);
   3277	return 0;
   3278}
   3279
   3280static struct pernet_operations __net_initdata udp_sysctl_ops = {
   3281	.init	= udp_sysctl_init,
   3282};
   3283
   3284#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
   3285DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
   3286		     struct udp_sock *udp_sk, uid_t uid, int bucket)
   3287
   3288static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux)
   3289{
   3290	struct udp_iter_state *st = priv_data;
   3291	struct udp_seq_afinfo *afinfo;
   3292	int ret;
   3293
   3294	afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN);
   3295	if (!afinfo)
   3296		return -ENOMEM;
   3297
   3298	afinfo->family = AF_UNSPEC;
   3299	afinfo->udp_table = &udp_table;
   3300	st->bpf_seq_afinfo = afinfo;
   3301	ret = bpf_iter_init_seq_net(priv_data, aux);
   3302	if (ret)
   3303		kfree(afinfo);
   3304	return ret;
   3305}
   3306
   3307static void bpf_iter_fini_udp(void *priv_data)
   3308{
   3309	struct udp_iter_state *st = priv_data;
   3310
   3311	kfree(st->bpf_seq_afinfo);
   3312	bpf_iter_fini_seq_net(priv_data);
   3313}
   3314
   3315static const struct bpf_iter_seq_info udp_seq_info = {
   3316	.seq_ops		= &bpf_iter_udp_seq_ops,
   3317	.init_seq_private	= bpf_iter_init_udp,
   3318	.fini_seq_private	= bpf_iter_fini_udp,
   3319	.seq_priv_size		= sizeof(struct udp_iter_state),
   3320};
   3321
   3322static struct bpf_iter_reg udp_reg_info = {
   3323	.target			= "udp",
   3324	.ctx_arg_info_size	= 1,
   3325	.ctx_arg_info		= {
   3326		{ offsetof(struct bpf_iter__udp, udp_sk),
   3327		  PTR_TO_BTF_ID_OR_NULL },
   3328	},
   3329	.seq_info		= &udp_seq_info,
   3330};
   3331
   3332static void __init bpf_iter_register(void)
   3333{
   3334	udp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UDP];
   3335	if (bpf_iter_reg_target(&udp_reg_info))
   3336		pr_warn("Warning: could not register bpf iterator udp\n");
   3337}
   3338#endif
   3339
   3340void __init udp_init(void)
   3341{
   3342	unsigned long limit;
   3343	unsigned int i;
   3344
   3345	udp_table_init(&udp_table, "UDP");
   3346	limit = nr_free_buffer_pages() / 8;
   3347	limit = max(limit, 128UL);
   3348	sysctl_udp_mem[0] = limit / 4 * 3;
   3349	sysctl_udp_mem[1] = limit;
   3350	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
   3351
   3352	__udp_sysctl_init(&init_net);
   3353
   3354	/* 16 spinlocks per cpu */
   3355	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
   3356	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
   3357				GFP_KERNEL);
   3358	if (!udp_busylocks)
   3359		panic("UDP: failed to alloc udp_busylocks\n");
   3360	for (i = 0; i < (1U << udp_busylocks_log); i++)
   3361		spin_lock_init(udp_busylocks + i);
   3362
   3363	if (register_pernet_subsys(&udp_sysctl_ops))
   3364		panic("UDP: failed to init sysctl parameters.\n");
   3365
   3366#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
   3367	bpf_iter_register();
   3368#endif
   3369}