cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

socket.c (16702B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * File: socket.c
 *
 * Phonet sockets
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
 *          Rémi Denis-Courmont
 */

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp_states.h>

#include <linux/phonet.h>
#include <linux/export.h>
#include <net/phonet/phonet.h>
#include <net/phonet/pep.h>
#include <net/phonet/pn_dev.h>

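/* Detach the sock from its struct socket and let the protocol close() tear it down. */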
static int pn_socket_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		sock->sk = NULL;
		sk->sk_prot->close(sk, 0);
	}
	return 0;
}

#define PN_HASHSIZE	16
#define PN_HASHMASK	(PN_HASHSIZE-1)


static struct  {
	struct hlist_head hlist[PN_HASHSIZE];
	struct mutex lock;
} pnsocks;

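/* Initialise the global hash table of bound Phonet sockets. */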
void __init pn_sock_init(void)
{
	unsigned int i;

	for (i = 0; i < PN_HASHSIZE; i++)
		INIT_HLIST_HEAD(pnsocks.hlist + i);
	mutex_init(&pnsocks.lock);
}

static struct hlist_head *pn_hash_list(u16 obj)
{
	return pnsocks.hlist + (obj & PN_HASHMASK);
}

/*
 * Find address based on socket address, match only certain fields.
 * Also grab sock if it was found. Remember to sock_put it later.
 */
struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
{
	struct sock *sknode;
	struct sock *rval = NULL;
	u16 obj = pn_sockaddr_get_object(spn);
	u8 res = spn->spn_resource;
	struct hlist_head *hlist = pn_hash_list(obj);

	rcu_read_lock();
	sk_for_each_rcu(sknode, hlist) {
		struct pn_sock *pn = pn_sk(sknode);
		BUG_ON(!pn->sobject); /* unbound socket */

		if (!net_eq(sock_net(sknode), net))
			continue;
		if (pn_port(obj)) {
			/* Look up socket by port */
			if (pn_port(pn->sobject) != pn_port(obj))
				continue;
		} else {
			/* If port is zero, look up by resource */
			if (pn->resource != res)
				continue;
		}
		if (pn_addr(pn->sobject) &&
		    pn_addr(pn->sobject) != pn_addr(obj))
			continue;

		rval = sknode;
		sock_hold(sknode);
		break;
	}
	rcu_read_unlock();

	return rval;
}

/* Deliver a broadcast packet (only in bottom-half) */
void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
{
	struct hlist_head *hlist = pnsocks.hlist;
	unsigned int h;

	rcu_read_lock();
	for (h = 0; h < PN_HASHSIZE; h++) {
		struct sock *sknode;

		sk_for_each(sknode, hlist) {
			struct sk_buff *clone;

			if (!net_eq(sock_net(sknode), net))
				continue;
			if (!sock_flag(sknode, SOCK_BROADCAST))
				continue;

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone) {
				sock_hold(sknode);
				sk_receive_skb(sknode, clone, 0);
			}
		}
		hlist++;
	}
	rcu_read_unlock();
}

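/* Add the socket to the bound-socket hash so the RX path can find it. */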
int pn_sock_hash(struct sock *sk)
{
	struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);

	mutex_lock(&pnsocks.lock);
	sk_add_node_rcu(sk, hlist);
	mutex_unlock(&pnsocks.lock);

	return 0;
}
EXPORT_SYMBOL(pn_sock_hash);

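/*
 * Remove the socket from the bound-socket hash and drop its resource
 * bindings; synchronize_rcu() ensures the RX path no longer sees it.
 */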
void pn_sock_unhash(struct sock *sk)
{
	mutex_lock(&pnsocks.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&pnsocks.lock);
	pn_sock_unbind_all_res(sk);
	synchronize_rcu();
}
EXPORT_SYMBOL(pn_sock_unhash);

static DEFINE_MUTEX(port_mutex);

static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	int err;
	u16 handle;
	u8 saddr;

	if (sk->sk_prot->bind)
		return sk->sk_prot->bind(sk, addr, len);

	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
	saddr = pn_addr(handle);
	if (saddr && phonet_address_lookup(sock_net(sk), saddr))
		return -EADDRNOTAVAIL;

	lock_sock(sk);
	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
		err = -EINVAL; /* attempt to rebind */
		goto out;
	}
	WARN_ON(sk_hashed(sk));
	mutex_lock(&port_mutex);
	err = sk->sk_prot->get_port(sk, pn_port(handle));
	if (err)
		goto out_port;

	/* get_port() sets the port, bind() sets the address if applicable */
	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
	pn->resource = spn->spn_resource;

	/* Enable RX on the socket */
	err = sk->sk_prot->hash(sk);
out_port:
	mutex_unlock(&port_mutex);
out:
	release_sock(sk);
	return err;
}

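/*
 * Bind to a dynamically allocated port if the socket is not bound yet.
 * A -EINVAL from pn_socket_bind() means the socket was already bound,
 * which is fine here.
 */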
static int pn_socket_autobind(struct socket *sock)
{
	struct sockaddr_pn sa;
	int err;

	memset(&sa, 0, sizeof(sa));
	sa.spn_family = AF_PHONET;
	err = pn_socket_bind(sock, (struct sockaddr *)&sa,
				sizeof(struct sockaddr_pn));
	if (err != -EINVAL)
		return err;
	BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
	return 0; /* socket was already bound */
}

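/*
 * Connect the socket to a remote object and wait for the handshake to
 * complete, or return -EINPROGRESS when non-blocking.
 */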
static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
		int len, int flags)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	struct task_struct *tsk = current;
	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	int err;

	if (pn_socket_autobind(sock))
		return -ENOBUFS;
	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	lock_sock(sk);

	switch (sock->state) {
	case SS_UNCONNECTED:
		if (sk->sk_state != TCP_CLOSE) {
			err = -EISCONN;
			goto out;
		}
		break;
	case SS_CONNECTING:
		err = -EALREADY;
		goto out;
	default:
		err = -EISCONN;
		goto out;
	}

	pn->dobject = pn_sockaddr_get_object(spn);
	pn->resource = pn_sockaddr_get_resource(spn);
	sock->state = SS_CONNECTING;

	err = sk->sk_prot->connect(sk, addr, len);
	if (err) {
		sock->state = SS_UNCONNECTED;
		pn->dobject = 0;
		goto out;
	}

	while (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		if (!timeo) {
			err = -EINPROGRESS;
			goto out;
		}
		if (signal_pending(tsk)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
						TASK_INTERRUPTIBLE);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		finish_wait(sk_sleep(sk), &wait);
	}

	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
		err = 0;
	else if (sk->sk_state == TCP_CLOSE_WAIT)
		err = -ECONNRESET;
	else
		err = -ECONNREFUSED;
	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
out:
	release_sock(sk);
	return err;
}

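/* Accept a pending connection on a listening socket and graft the new sock onto newsock. */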
static int pn_socket_accept(struct socket *sock, struct socket *newsock,
			    int flags, bool kern)
{
	struct sock *sk = sock->sk;
	struct sock *newsk;
	int err;

	if (unlikely(sk->sk_state != TCP_LISTEN))
		return -EINVAL;

	newsk = sk->sk_prot->accept(sk, flags, &err, kern);
	if (!newsk)
		return err;

	lock_sock(newsk);
	sock_graft(newsk, newsock);
	newsock->state = SS_CONNECTED;
	release_sock(newsk);
	return 0;
}

static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
				int peer)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);

	memset(addr, 0, sizeof(struct sockaddr_pn));
	addr->sa_family = AF_PHONET;
	if (!peer) /* Race with bind() here is userland's problem. */
		pn_sockaddr_set_object((struct sockaddr_pn *)addr,
					pn->sobject);

	return sizeof(struct sockaddr_pn);
}

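/*
 * Poll: readable when the receive queue is non-empty, EPOLLPRI when a
 * control request is pending, writable when established with send
 * buffer space and flow-control credits available.
 */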
static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
					poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct pep_sock *pn = pep_sk(sk);
	__poll_t mask = 0;

	poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == TCP_CLOSE)
		return EPOLLERR;
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (!skb_queue_empty_lockless(&pn->ctrlreq_queue))
		mask |= EPOLLPRI;
	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
		return EPOLLHUP;

	if (sk->sk_state == TCP_ESTABLISHED &&
		refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
		atomic_read(&pn->tx_credits))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	return mask;
}

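/*
 * SIOCPNGETOBJECT maps a handle to a full object: the local Phonet
 * device address combined with the socket's bound port. Other ioctls
 * are passed down to the protocol.
 */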
static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);

	if (cmd == SIOCPNGETOBJECT) {
		struct net_device *dev;
		u16 handle;
		u8 saddr;

		if (get_user(handle, (__u16 __user *)arg))
			return -EFAULT;

		lock_sock(sk);
		if (sk->sk_bound_dev_if)
			dev = dev_get_by_index(sock_net(sk),
						sk->sk_bound_dev_if);
		else
			dev = phonet_device_get(sock_net(sk));
		if (dev && (dev->flags & IFF_UP))
			saddr = phonet_address_get(dev, pn_addr(handle));
		else
			saddr = PN_NO_ADDR;
		release_sock(sk);

		dev_put(dev);
		if (saddr == PN_NO_ADDR)
			return -EHOSTUNREACH;

		handle = pn_object(saddr, pn_port(pn->sobject));
		return put_user(handle, (__u16 __user *)arg);
	}

	return sk->sk_prot->ioctl(sk, cmd, arg);
}

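/* Autobind if necessary and switch the socket to the listening state. */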
static int pn_socket_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (pn_socket_autobind(sock))
		return -ENOBUFS;

	lock_sock(sk);
	if (sock->state != SS_UNCONNECTED) {
		err = -EINVAL;
		goto out;
	}

	if (sk->sk_state != TCP_LISTEN) {
		sk->sk_state = TCP_LISTEN;
		sk->sk_ack_backlog = 0;
	}
	sk->sk_max_ack_backlog = backlog;
out:
	release_sock(sk);
	return err;
}

static int pn_socket_sendmsg(struct socket *sock, struct msghdr *m,
			     size_t total_len)
{
	struct sock *sk = sock->sk;

	if (pn_socket_autobind(sock))
		return -EAGAIN;

	return sk->sk_prot->sendmsg(sk, m, total_len);
}

const struct proto_ops phonet_dgram_ops = {
	.family		= AF_PHONET,
	.owner		= THIS_MODULE,
	.release	= pn_socket_release,
	.bind		= pn_socket_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pn_socket_getname,
	.poll		= datagram_poll,
	.ioctl		= pn_socket_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.sendmsg	= pn_socket_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

const struct proto_ops phonet_stream_ops = {
	.family		= AF_PHONET,
	.owner		= THIS_MODULE,
	.release	= pn_socket_release,
	.bind		= pn_socket_bind,
	.connect	= pn_socket_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= pn_socket_accept,
	.getname	= pn_socket_getname,
	.poll		= pn_socket_poll,
	.ioctl		= pn_socket_ioctl,
	.listen		= pn_socket_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.sendmsg	= pn_socket_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
EXPORT_SYMBOL(phonet_stream_ops);

/* allocate port for a socket */
int pn_sock_get_port(struct sock *sk, unsigned short sport)
{
	static int port_cur;
	struct net *net = sock_net(sk);
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn try_sa;
	struct sock *tmpsk;

	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
	try_sa.spn_family = AF_PHONET;
	WARN_ON(!mutex_is_locked(&port_mutex));
	if (!sport) {
		/* search free port */
		int port, pmin, pmax;

		phonet_get_local_port_range(&pmin, &pmax);
		for (port = pmin; port <= pmax; port++) {
			port_cur++;
			if (port_cur < pmin || port_cur > pmax)
				port_cur = pmin;

			pn_sockaddr_set_port(&try_sa, port_cur);
			tmpsk = pn_find_sock_by_sa(net, &try_sa);
			if (tmpsk == NULL) {
				sport = port_cur;
				goto found;
			} else
				sock_put(tmpsk);
		}
	} else {
		/* try to find specific port */
		pn_sockaddr_set_port(&try_sa, sport);
		tmpsk = pn_find_sock_by_sa(net, &try_sa);
		if (tmpsk == NULL)
			/* No sock there! We can use that port... */
			goto found;
		else
			sock_put(tmpsk);
	}
	/* the port must be in use already */
	return -EADDRINUSE;

found:
	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
	return 0;
}
EXPORT_SYMBOL(pn_sock_get_port);

#ifdef CONFIG_PROC_FS
static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
{
	struct net *net = seq_file_net(seq);
	struct hlist_head *hlist = pnsocks.hlist;
	struct sock *sknode;
	unsigned int h;

	for (h = 0; h < PN_HASHSIZE; h++) {
		sk_for_each_rcu(sknode, hlist) {
			if (!net_eq(net, sock_net(sknode)))
				continue;
			if (!pos)
				return sknode;
			pos--;
		}
		hlist++;
	}
	return NULL;
}

static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
{
	struct net *net = seq_file_net(seq);

	do
		sk = sk_next(sk);
	while (sk && !net_eq(net, sock_net(sk)));

	return sk;
}

static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = pn_sock_get_idx(seq, 0);
	else
		sk = pn_sock_get_next(seq, v);
	(*pos)++;
	return sk;
}

static void pn_sock_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

static int pn_sock_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "pt  loc  rem rs st tx_queue rx_queue "
			"  uid inode ref pointer drops");
	else {
		struct sock *sk = v;
		struct pn_sock *pn = pn_sk(sk);

		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
			"%d %pK %u",
			sk->sk_protocol, pn->sobject, pn->dobject,
			pn->resource, sk->sk_state,
			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
			sock_i_ino(sk),
			refcount_read(&sk->sk_refcnt), sk,
			atomic_read(&sk->sk_drops));
	}
	seq_pad(seq, '\n');
	return 0;
}

const struct seq_operations pn_sock_seq_ops = {
	.start = pn_sock_seq_start,
	.next = pn_sock_seq_next,
	.stop = pn_sock_seq_stop,
	.show = pn_sock_seq_show,
};
#endif

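/* Sockets bound to Phonet resources, indexed by resource ID (init_net only). */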
static struct  {
	struct sock *sk[256];
} pnres;

/*
 * Find and hold socket based on resource.
 */
struct sock *pn_find_sock_by_res(struct net *net, u8 res)
{
	struct sock *sk;

	if (!net_eq(net, &init_net))
		return NULL;

	rcu_read_lock();
	sk = rcu_dereference(pnres.sk[res]);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();
	return sk;
}

static DEFINE_MUTEX(resource_mutex);

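/*
 * Bind the socket to a Phonet resource ID; requires CAP_SYS_ADMIN, and
 * only one socket may hold a given resource at a time.
 */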
int pn_sock_bind_res(struct sock *sk, u8 res)
{
	int ret = -EADDRINUSE;

	if (!net_eq(sock_net(sk), &init_net))
		return -ENOIOCTLCMD;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (pn_socket_autobind(sk->sk_socket))
		return -EAGAIN;

	mutex_lock(&resource_mutex);
	if (pnres.sk[res] == NULL) {
		sock_hold(sk);
		rcu_assign_pointer(pnres.sk[res], sk);
		ret = 0;
	}
	mutex_unlock(&resource_mutex);
	return ret;
}

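/* Drop a resource binding; waits for RCU readers before releasing the socket reference. */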
int pn_sock_unbind_res(struct sock *sk, u8 res)
{
	int ret = -ENOENT;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&resource_mutex);
	if (pnres.sk[res] == sk) {
		RCU_INIT_POINTER(pnres.sk[res], NULL);
		ret = 0;
	}
	mutex_unlock(&resource_mutex);

	if (ret == 0) {
		synchronize_rcu();
		sock_put(sk);
	}
	return ret;
}

void pn_sock_unbind_all_res(struct sock *sk)
{
	unsigned int res, match = 0;

	mutex_lock(&resource_mutex);
	for (res = 0; res < 256; res++) {
		if (pnres.sk[res] == sk) {
			RCU_INIT_POINTER(pnres.sk[res], NULL);
			match++;
		}
	}
	mutex_unlock(&resource_mutex);

	while (match > 0) {
		__sock_put(sk);
		match--;
	}
	/* Caller is responsible for RCU sync before final sock_put() */
}

#ifdef CONFIG_PROC_FS
static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
{
	struct net *net = seq_file_net(seq);
	unsigned int i;

	if (!net_eq(net, &init_net))
		return NULL;

	for (i = 0; i < 256; i++) {
		if (pnres.sk[i] == NULL)
			continue;
		if (!pos)
			return pnres.sk + i;
		pos--;
	}
	return NULL;
}

static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
{
	struct net *net = seq_file_net(seq);
	unsigned int i;

	BUG_ON(!net_eq(net, &init_net));

	for (i = (sk - pnres.sk) + 1; i < 256; i++)
		if (pnres.sk[i])
			return pnres.sk + i;
	return NULL;
}

static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(resource_mutex)
{
	mutex_lock(&resource_mutex);
	return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock **sk;

	if (v == SEQ_START_TOKEN)
		sk = pn_res_get_idx(seq, 0);
	else
		sk = pn_res_get_next(seq, v);
	(*pos)++;
	return sk;
}

static void pn_res_seq_stop(struct seq_file *seq, void *v)
	__releases(resource_mutex)
{
	mutex_unlock(&resource_mutex);
}

static int pn_res_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 63);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "rs   uid inode");
	else {
		struct sock **psk = v;
		struct sock *sk = *psk;

		seq_printf(seq, "%02X %5u %lu",
			   (int) (psk - pnres.sk),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
			   sock_i_ino(sk));
	}
	seq_pad(seq, '\n');
	return 0;
}

const struct seq_operations pn_res_seq_ops = {
	.start = pn_res_seq_start,
	.next = pn_res_seq_next,
	.stop = pn_res_seq_stop,
	.show = pn_res_seq_show,
};
#endif