cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

af_iucv.c (56454B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 *  IUCV protocol stack for Linux on zSeries
      4 *
      5 *  Copyright IBM Corp. 2006, 2009
      6 *
      7 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
      8 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
      9 *  PM functions:
     10 *		Ursula Braun <ursula.braun@de.ibm.com>
     11 */
     12
     13#define KMSG_COMPONENT "af_iucv"
     14#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
     15
     16#include <linux/filter.h>
     17#include <linux/module.h>
     18#include <linux/netdevice.h>
     19#include <linux/types.h>
     20#include <linux/limits.h>
     21#include <linux/list.h>
     22#include <linux/errno.h>
     23#include <linux/kernel.h>
     24#include <linux/sched/signal.h>
     25#include <linux/slab.h>
     26#include <linux/skbuff.h>
     27#include <linux/init.h>
     28#include <linux/poll.h>
     29#include <linux/security.h>
     30#include <net/sock.h>
     31#include <asm/ebcdic.h>
     32#include <asm/cpcmd.h>
     33#include <linux/kmod.h>
     34
     35#include <net/iucv/af_iucv.h>
     36
     37#define VERSION "1.2"
     38
     39static char iucv_userid[80];
     40
     41static struct proto iucv_proto = {
     42	.name		= "AF_IUCV",
     43	.owner		= THIS_MODULE,
     44	.obj_size	= sizeof(struct iucv_sock),
     45};
     46
     47static struct iucv_interface *pr_iucv;
     48static struct iucv_handler af_iucv_handler;
     49
     50/* special AF_IUCV IPRM messages */
     51static const u8 iprm_shutdown[8] =
     52	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
     53
     54#define TRGCLS_SIZE	sizeof_field(struct iucv_message, class)
     55
     56#define __iucv_sock_wait(sk, condition, timeo, ret)			\
     57do {									\
     58	DEFINE_WAIT(__wait);						\
     59	long __timeo = timeo;						\
     60	ret = 0;							\
     61	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
     62	while (!(condition)) {						\
     63		if (!__timeo) {						\
     64			ret = -EAGAIN;					\
     65			break;						\
     66		}							\
     67		if (signal_pending(current)) {				\
     68			ret = sock_intr_errno(__timeo);			\
     69			break;						\
     70		}							\
     71		release_sock(sk);					\
     72		__timeo = schedule_timeout(__timeo);			\
     73		lock_sock(sk);						\
     74		ret = sock_error(sk);					\
     75		if (ret)						\
     76			break;						\
     77	}								\
     78	finish_wait(sk_sleep(sk), &__wait);				\
     79} while (0)
     80
     81#define iucv_sock_wait(sk, condition, timeo)				\
     82({									\
     83	int __ret = 0;							\
     84	if (!(condition))						\
     85		__iucv_sock_wait(sk, condition, timeo, __ret);		\
     86	__ret;								\
     87})
     88
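/*
 * Usage sketch (illustration only): the typical pattern is to pass a state
 * predicate plus a timeout, exactly as iucv_sock_connect() below does:
 *
 *	err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
 *						    IUCV_DISCONN),
 *			     sock_sndtimeo(sk, flags & O_NONBLOCK));
 *
 * The condition is re-checked after every wakeup; the macro evaluates to 0
 * once the condition holds, -EAGAIN on timeout, a signal-derived error when
 * interrupted, or a pending socket error.
 */
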
     89static struct sock *iucv_accept_dequeue(struct sock *parent,
     90					struct socket *newsock);
     91static void iucv_sock_kill(struct sock *sk);
     92static void iucv_sock_close(struct sock *sk);
     93
     94static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify);
     95
     96static struct iucv_sock_list iucv_sk_list = {
     97	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
     98	.autobind_name = ATOMIC_INIT(0)
     99};
    100
    101static inline void high_nmcpy(unsigned char *dst, char *src)
    102{
    103       memcpy(dst, src, 8);
    104}
    105
    106static inline void low_nmcpy(unsigned char *dst, char *src)
    107{
    108       memcpy(&dst[8], src, 8);
    109}
    110
    111/**
    112 * iucv_msg_length() - Returns the length of an iucv message.
    113 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
    114 *
     115 * The function returns the length of the specified iucv message @msg, both
     116 * for data stored in a buffer and for data stored in the parameter list (PRMDATA).
    117 *
    118 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
    119 * data:
    120 *	PRMDATA[0..6]	socket data (max 7 bytes);
    121 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
    122 *
    123 * The socket data length is computed by subtracting the socket data length
    124 * value from 0xFF.
     125 * If the socket data len is greater than 7, then PRMDATA can be used for special
    126 * notifications (see iucv_sock_shutdown); and further,
    127 * if the socket data len is > 7, the function returns 8.
    128 *
    129 * Use this function to allocate socket buffers to store iucv message data.
    130 */
    131static inline size_t iucv_msg_length(struct iucv_message *msg)
    132{
    133	size_t datalen;
    134
    135	if (msg->flags & IUCV_IPRMDATA) {
    136		datalen = 0xff - msg->rmmsg[7];
    137		return (datalen < 8) ? datalen : 8;
    138	}
    139	return msg->length;
    140}
    141
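/*
 * Worked example of the IPRM length encoding described above (illustration
 * only): to carry the three bytes "abc" in the parameter list,
 * iucv_send_iprm() below stores them in PRMDATA[0..2] and sets
 * PRMDATA[7] = 0xff - 3 = 0xfc; on receive, iucv_msg_length() recovers the
 * length as 0xff - 0xfc = 3.
 */
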
    142/**
    143 * iucv_sock_in_state() - check for specific states
    144 * @sk:		sock structure
    145 * @state:	first iucv sk state
    146 * @state2:	second iucv sk state
    147 *
     148 * Returns true if the socket is in either the first or the second state.
    149 */
    150static int iucv_sock_in_state(struct sock *sk, int state, int state2)
    151{
    152	return (sk->sk_state == state || sk->sk_state == state2);
    153}
    154
    155/**
    156 * iucv_below_msglim() - function to check if messages can be sent
    157 * @sk:		sock structure
    158 *
    159 * Returns true if the send queue length is lower than the message limit.
    160 * Always returns true if the socket is not connected (no iucv path for
    161 * checking the message limit).
    162 */
    163static inline int iucv_below_msglim(struct sock *sk)
    164{
    165	struct iucv_sock *iucv = iucv_sk(sk);
    166
    167	if (sk->sk_state != IUCV_CONNECTED)
    168		return 1;
    169	if (iucv->transport == AF_IUCV_TRANS_IUCV)
    170		return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
    171	else
    172		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
    173			(atomic_read(&iucv->pendings) <= 0));
    174}
    175
    176/*
    177 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
    178 */
    179static void iucv_sock_wake_msglim(struct sock *sk)
    180{
    181	struct socket_wq *wq;
    182
    183	rcu_read_lock();
    184	wq = rcu_dereference(sk->sk_wq);
    185	if (skwq_has_sleeper(wq))
    186		wake_up_interruptible_all(&wq->wait);
    187	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
    188	rcu_read_unlock();
    189}
    190
    191/*
    192 * afiucv_hs_send() - send a message through HiperSockets transport
    193 */
    194static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
    195		   struct sk_buff *skb, u8 flags)
    196{
    197	struct iucv_sock *iucv = iucv_sk(sock);
    198	struct af_iucv_trans_hdr *phs_hdr;
    199	int err, confirm_recv = 0;
    200
    201	phs_hdr = skb_push(skb, sizeof(*phs_hdr));
    202	memset(phs_hdr, 0, sizeof(*phs_hdr));
    203	skb_reset_network_header(skb);
    204
    205	phs_hdr->magic = ETH_P_AF_IUCV;
    206	phs_hdr->version = 1;
    207	phs_hdr->flags = flags;
    208	if (flags == AF_IUCV_FLAG_SYN)
    209		phs_hdr->window = iucv->msglimit;
    210	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
    211		confirm_recv = atomic_read(&iucv->msg_recv);
    212		phs_hdr->window = confirm_recv;
    213		if (confirm_recv)
    214			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
    215	}
    216	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
    217	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
    218	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
    219	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
    220	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
    221	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
    222	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
    223	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
    224	if (imsg)
    225		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
    226
    227	skb->dev = iucv->hs_dev;
    228	if (!skb->dev) {
    229		err = -ENODEV;
    230		goto err_free;
    231	}
    232
    233	dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);
    234
    235	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
    236		err = -ENETDOWN;
    237		goto err_free;
    238	}
    239	if (skb->len > skb->dev->mtu) {
    240		if (sock->sk_type == SOCK_SEQPACKET) {
    241			err = -EMSGSIZE;
    242			goto err_free;
    243		}
    244		err = pskb_trim(skb, skb->dev->mtu);
    245		if (err)
    246			goto err_free;
    247	}
    248	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
    249
    250	atomic_inc(&iucv->skbs_in_xmit);
    251	err = dev_queue_xmit(skb);
    252	if (net_xmit_eval(err)) {
    253		atomic_dec(&iucv->skbs_in_xmit);
    254	} else {
    255		atomic_sub(confirm_recv, &iucv->msg_recv);
    256		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
    257	}
    258	return net_xmit_eval(err);
    259
    260err_free:
    261	kfree_skb(skb);
    262	return err;
    263}
    264
    265static struct sock *__iucv_get_sock_by_name(char *nm)
    266{
    267	struct sock *sk;
    268
    269	sk_for_each(sk, &iucv_sk_list.head)
    270		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
    271			return sk;
    272
    273	return NULL;
    274}
    275
    276static void iucv_sock_destruct(struct sock *sk)
    277{
    278	skb_queue_purge(&sk->sk_receive_queue);
    279	skb_queue_purge(&sk->sk_error_queue);
    280
    281	sk_mem_reclaim(sk);
    282
    283	if (!sock_flag(sk, SOCK_DEAD)) {
    284		pr_err("Attempt to release alive iucv socket %p\n", sk);
    285		return;
    286	}
    287
    288	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
    289	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
    290	WARN_ON(sk->sk_wmem_queued);
    291	WARN_ON(sk->sk_forward_alloc);
    292}
    293
    294/* Cleanup Listen */
    295static void iucv_sock_cleanup_listen(struct sock *parent)
    296{
    297	struct sock *sk;
    298
    299	/* Close non-accepted connections */
    300	while ((sk = iucv_accept_dequeue(parent, NULL))) {
    301		iucv_sock_close(sk);
    302		iucv_sock_kill(sk);
    303	}
    304
    305	parent->sk_state = IUCV_CLOSED;
    306}
    307
    308static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
    309{
    310	write_lock_bh(&l->lock);
    311	sk_add_node(sk, &l->head);
    312	write_unlock_bh(&l->lock);
    313}
    314
    315static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
    316{
    317	write_lock_bh(&l->lock);
    318	sk_del_node_init(sk);
    319	write_unlock_bh(&l->lock);
    320}
    321
    322/* Kill socket (only if zapped and orphaned) */
    323static void iucv_sock_kill(struct sock *sk)
    324{
    325	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
    326		return;
    327
    328	iucv_sock_unlink(&iucv_sk_list, sk);
    329	sock_set_flag(sk, SOCK_DEAD);
    330	sock_put(sk);
    331}
    332
    333/* Terminate an IUCV path */
    334static void iucv_sever_path(struct sock *sk, int with_user_data)
    335{
    336	unsigned char user_data[16];
    337	struct iucv_sock *iucv = iucv_sk(sk);
    338	struct iucv_path *path = iucv->path;
    339
    340	if (iucv->path) {
    341		iucv->path = NULL;
    342		if (with_user_data) {
    343			low_nmcpy(user_data, iucv->src_name);
    344			high_nmcpy(user_data, iucv->dst_name);
    345			ASCEBC(user_data, sizeof(user_data));
    346			pr_iucv->path_sever(path, user_data);
    347		} else
    348			pr_iucv->path_sever(path, NULL);
    349		iucv_path_free(path);
    350	}
    351}
    352
    353/* Send controlling flags through an IUCV socket for HIPER transport */
    354static int iucv_send_ctrl(struct sock *sk, u8 flags)
    355{
    356	struct iucv_sock *iucv = iucv_sk(sk);
    357	int err = 0;
    358	int blen;
    359	struct sk_buff *skb;
    360	u8 shutdown = 0;
    361
    362	blen = sizeof(struct af_iucv_trans_hdr) +
    363	       LL_RESERVED_SPACE(iucv->hs_dev);
    364	if (sk->sk_shutdown & SEND_SHUTDOWN) {
    365		/* controlling flags should be sent anyway */
    366		shutdown = sk->sk_shutdown;
    367		sk->sk_shutdown &= RCV_SHUTDOWN;
    368	}
    369	skb = sock_alloc_send_skb(sk, blen, 1, &err);
    370	if (skb) {
    371		skb_reserve(skb, blen);
    372		err = afiucv_hs_send(NULL, sk, skb, flags);
    373	}
    374	if (shutdown)
    375		sk->sk_shutdown = shutdown;
    376	return err;
    377}
    378
    379/* Close an IUCV socket */
    380static void iucv_sock_close(struct sock *sk)
    381{
    382	struct iucv_sock *iucv = iucv_sk(sk);
    383	unsigned long timeo;
    384	int err = 0;
    385
    386	lock_sock(sk);
    387
    388	switch (sk->sk_state) {
    389	case IUCV_LISTEN:
    390		iucv_sock_cleanup_listen(sk);
    391		break;
    392
    393	case IUCV_CONNECTED:
    394		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
    395			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
    396			sk->sk_state = IUCV_DISCONN;
    397			sk->sk_state_change(sk);
    398		}
    399		fallthrough;
    400
    401	case IUCV_DISCONN:
    402		sk->sk_state = IUCV_CLOSING;
    403		sk->sk_state_change(sk);
    404
    405		if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) {
    406			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
    407				timeo = sk->sk_lingertime;
    408			else
    409				timeo = IUCV_DISCONN_TIMEOUT;
    410			iucv_sock_wait(sk,
    411					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
    412					timeo);
    413		}
    414		fallthrough;
    415
    416	case IUCV_CLOSING:
    417		sk->sk_state = IUCV_CLOSED;
    418		sk->sk_state_change(sk);
    419
    420		sk->sk_err = ECONNRESET;
    421		sk->sk_state_change(sk);
    422
    423		skb_queue_purge(&iucv->send_skb_q);
    424		skb_queue_purge(&iucv->backlog_skb_q);
    425		fallthrough;
    426
    427	default:
    428		iucv_sever_path(sk, 1);
    429	}
    430
    431	if (iucv->hs_dev) {
    432		dev_put(iucv->hs_dev);
    433		iucv->hs_dev = NULL;
    434		sk->sk_bound_dev_if = 0;
    435	}
    436
    437	/* mark socket for deletion by iucv_sock_kill() */
    438	sock_set_flag(sk, SOCK_ZAPPED);
    439
    440	release_sock(sk);
    441}
    442
    443static void iucv_sock_init(struct sock *sk, struct sock *parent)
    444{
    445	if (parent) {
    446		sk->sk_type = parent->sk_type;
    447		security_sk_clone(parent, sk);
    448	}
    449}
    450
    451static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
    452{
    453	struct sock *sk;
    454	struct iucv_sock *iucv;
    455
    456	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
    457	if (!sk)
    458		return NULL;
    459	iucv = iucv_sk(sk);
    460
    461	sock_init_data(sock, sk);
    462	INIT_LIST_HEAD(&iucv->accept_q);
    463	spin_lock_init(&iucv->accept_q_lock);
    464	skb_queue_head_init(&iucv->send_skb_q);
    465	INIT_LIST_HEAD(&iucv->message_q.list);
    466	spin_lock_init(&iucv->message_q.lock);
    467	skb_queue_head_init(&iucv->backlog_skb_q);
    468	iucv->send_tag = 0;
    469	atomic_set(&iucv->pendings, 0);
    470	iucv->flags = 0;
    471	iucv->msglimit = 0;
    472	atomic_set(&iucv->skbs_in_xmit, 0);
    473	atomic_set(&iucv->msg_sent, 0);
    474	atomic_set(&iucv->msg_recv, 0);
    475	iucv->path = NULL;
    476	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
    477	memset(&iucv->init, 0, sizeof(iucv->init));
    478	if (pr_iucv)
    479		iucv->transport = AF_IUCV_TRANS_IUCV;
    480	else
    481		iucv->transport = AF_IUCV_TRANS_HIPER;
    482
    483	sk->sk_destruct = iucv_sock_destruct;
    484	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
    485
    486	sock_reset_flag(sk, SOCK_ZAPPED);
    487
    488	sk->sk_protocol = proto;
    489	sk->sk_state	= IUCV_OPEN;
    490
    491	iucv_sock_link(&iucv_sk_list, sk);
    492	return sk;
    493}
    494
    495static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
    496{
    497	unsigned long flags;
    498	struct iucv_sock *par = iucv_sk(parent);
    499
    500	sock_hold(sk);
    501	spin_lock_irqsave(&par->accept_q_lock, flags);
    502	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
    503	spin_unlock_irqrestore(&par->accept_q_lock, flags);
    504	iucv_sk(sk)->parent = parent;
    505	sk_acceptq_added(parent);
    506}
    507
    508static void iucv_accept_unlink(struct sock *sk)
    509{
    510	unsigned long flags;
    511	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
    512
    513	spin_lock_irqsave(&par->accept_q_lock, flags);
    514	list_del_init(&iucv_sk(sk)->accept_q);
    515	spin_unlock_irqrestore(&par->accept_q_lock, flags);
    516	sk_acceptq_removed(iucv_sk(sk)->parent);
    517	iucv_sk(sk)->parent = NULL;
    518	sock_put(sk);
    519}
    520
    521static struct sock *iucv_accept_dequeue(struct sock *parent,
    522					struct socket *newsock)
    523{
    524	struct iucv_sock *isk, *n;
    525	struct sock *sk;
    526
    527	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
    528		sk = (struct sock *) isk;
    529		lock_sock(sk);
    530
    531		if (sk->sk_state == IUCV_CLOSED) {
    532			iucv_accept_unlink(sk);
    533			release_sock(sk);
    534			continue;
    535		}
    536
    537		if (sk->sk_state == IUCV_CONNECTED ||
    538		    sk->sk_state == IUCV_DISCONN ||
    539		    !newsock) {
    540			iucv_accept_unlink(sk);
    541			if (newsock)
    542				sock_graft(sk, newsock);
    543
    544			release_sock(sk);
    545			return sk;
    546		}
    547
    548		release_sock(sk);
    549	}
    550	return NULL;
    551}
    552
    553static void __iucv_auto_name(struct iucv_sock *iucv)
    554{
    555	char name[12];
    556
    557	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
    558	while (__iucv_get_sock_by_name(name)) {
    559		sprintf(name, "%08x",
    560			atomic_inc_return(&iucv_sk_list.autobind_name));
    561	}
    562	memcpy(iucv->src_name, name, 8);
    563}
    564
    565/* Bind an unbound socket */
    566static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
    567			  int addr_len)
    568{
    569	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
    570	char uid[sizeof(sa->siucv_user_id)];
    571	struct sock *sk = sock->sk;
    572	struct iucv_sock *iucv;
    573	int err = 0;
    574	struct net_device *dev;
    575
    576	/* Verify the input sockaddr */
    577	if (addr_len < sizeof(struct sockaddr_iucv) ||
    578	    addr->sa_family != AF_IUCV)
    579		return -EINVAL;
    580
    581	lock_sock(sk);
    582	if (sk->sk_state != IUCV_OPEN) {
    583		err = -EBADFD;
    584		goto done;
    585	}
    586
    587	write_lock_bh(&iucv_sk_list.lock);
    588
    589	iucv = iucv_sk(sk);
    590	if (__iucv_get_sock_by_name(sa->siucv_name)) {
    591		err = -EADDRINUSE;
    592		goto done_unlock;
    593	}
    594	if (iucv->path)
    595		goto done_unlock;
    596
    597	/* Bind the socket */
    598	if (pr_iucv)
    599		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
    600			goto vm_bind; /* VM IUCV transport */
    601
    602	/* try hiper transport */
    603	memcpy(uid, sa->siucv_user_id, sizeof(uid));
    604	ASCEBC(uid, 8);
    605	rcu_read_lock();
    606	for_each_netdev_rcu(&init_net, dev) {
    607		if (!memcmp(dev->perm_addr, uid, 8)) {
    608			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
    609			/* Check for uninitialized siucv_name */
    610			if (strncmp(sa->siucv_name, "        ", 8) == 0)
    611				__iucv_auto_name(iucv);
    612			else
    613				memcpy(iucv->src_name, sa->siucv_name, 8);
    614			sk->sk_bound_dev_if = dev->ifindex;
    615			iucv->hs_dev = dev;
    616			dev_hold(dev);
    617			sk->sk_state = IUCV_BOUND;
    618			iucv->transport = AF_IUCV_TRANS_HIPER;
    619			if (!iucv->msglimit)
    620				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
    621			rcu_read_unlock();
    622			goto done_unlock;
    623		}
    624	}
    625	rcu_read_unlock();
    626vm_bind:
    627	if (pr_iucv) {
    628		/* use local userid for backward compat */
    629		memcpy(iucv->src_name, sa->siucv_name, 8);
    630		memcpy(iucv->src_user_id, iucv_userid, 8);
    631		sk->sk_state = IUCV_BOUND;
    632		iucv->transport = AF_IUCV_TRANS_IUCV;
    633		sk->sk_allocation |= GFP_DMA;
    634		if (!iucv->msglimit)
    635			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
    636		goto done_unlock;
    637	}
    638	/* found no dev to bind */
    639	err = -ENODEV;
    640done_unlock:
    641	/* Release the socket list lock */
    642	write_unlock_bh(&iucv_sk_list.lock);
    643done:
    644	release_sock(sk);
    645	return err;
    646}
    647
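/*
 * Userspace sketch (hypothetical, for illustration): binding an AF_IUCV
 * socket, assuming the sockaddr_iucv layout used by iucv_sock_getname()
 * below is visible to the application; the user id and name are blank-padded
 * 8-byte placeholders:
 *
 *	struct sockaddr_iucv sa;
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.siucv_family = AF_IUCV;
 *	memcpy(sa.siucv_user_id, "VMGUEST1", 8);	// own z/VM user id or HiperSockets device address
 *	memcpy(sa.siucv_name,    "MYAPP   ", 8);	// application name; "        " requests an auto-generated one
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * With the classic VM transport the user id must match the local guest's
 * iucv_userid; for HiperSockets it is matched against a device's permanent
 * hardware address, as the for_each_netdev_rcu() loop above shows.
 */
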
    648/* Automatically bind an unbound socket */
    649static int iucv_sock_autobind(struct sock *sk)
    650{
    651	struct iucv_sock *iucv = iucv_sk(sk);
    652	int err = 0;
    653
    654	if (unlikely(!pr_iucv))
    655		return -EPROTO;
    656
    657	memcpy(iucv->src_user_id, iucv_userid, 8);
    658	iucv->transport = AF_IUCV_TRANS_IUCV;
    659	sk->sk_allocation |= GFP_DMA;
    660
    661	write_lock_bh(&iucv_sk_list.lock);
    662	__iucv_auto_name(iucv);
    663	write_unlock_bh(&iucv_sk_list.lock);
    664
    665	if (!iucv->msglimit)
    666		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
    667
    668	return err;
    669}
    670
    671static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
    672{
    673	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
    674	struct sock *sk = sock->sk;
    675	struct iucv_sock *iucv = iucv_sk(sk);
    676	unsigned char user_data[16];
    677	int err;
    678
    679	high_nmcpy(user_data, sa->siucv_name);
    680	low_nmcpy(user_data, iucv->src_name);
    681	ASCEBC(user_data, sizeof(user_data));
    682
    683	/* Create path. */
    684	iucv->path = iucv_path_alloc(iucv->msglimit,
    685				     IUCV_IPRMDATA, GFP_KERNEL);
    686	if (!iucv->path) {
    687		err = -ENOMEM;
    688		goto done;
    689	}
    690	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
    691				    sa->siucv_user_id, NULL, user_data,
    692				    sk);
    693	if (err) {
    694		iucv_path_free(iucv->path);
    695		iucv->path = NULL;
    696		switch (err) {
    697		case 0x0b:	/* Target communicator is not logged on */
    698			err = -ENETUNREACH;
    699			break;
    700		case 0x0d:	/* Max connections for this guest exceeded */
    701		case 0x0e:	/* Max connections for target guest exceeded */
    702			err = -EAGAIN;
    703			break;
    704		case 0x0f:	/* Missing IUCV authorization */
    705			err = -EACCES;
    706			break;
    707		default:
    708			err = -ECONNREFUSED;
    709			break;
    710		}
    711	}
    712done:
    713	return err;
    714}
    715
    716/* Connect an unconnected socket */
    717static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
    718			     int alen, int flags)
    719{
    720	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
    721	struct sock *sk = sock->sk;
    722	struct iucv_sock *iucv = iucv_sk(sk);
    723	int err;
    724
    725	if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
    726		return -EINVAL;
    727
    728	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
    729		return -EBADFD;
    730
    731	if (sk->sk_state == IUCV_OPEN &&
    732	    iucv->transport == AF_IUCV_TRANS_HIPER)
    733		return -EBADFD; /* explicit bind required */
    734
    735	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
    736		return -EINVAL;
    737
    738	if (sk->sk_state == IUCV_OPEN) {
    739		err = iucv_sock_autobind(sk);
    740		if (unlikely(err))
    741			return err;
    742	}
    743
    744	lock_sock(sk);
    745
    746	/* Set the destination information */
    747	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
    748	memcpy(iucv->dst_name, sa->siucv_name, 8);
    749
    750	if (iucv->transport == AF_IUCV_TRANS_HIPER)
    751		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
    752	else
    753		err = afiucv_path_connect(sock, addr);
    754	if (err)
    755		goto done;
    756
    757	if (sk->sk_state != IUCV_CONNECTED)
    758		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
    759							    IUCV_DISCONN),
    760				     sock_sndtimeo(sk, flags & O_NONBLOCK));
    761
    762	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
    763		err = -ECONNREFUSED;
    764
    765	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
    766		iucv_sever_path(sk, 0);
    767
    768done:
    769	release_sock(sk);
    770	return err;
    771}
    772
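/*
 * Userspace sketch, continued (hypothetical, for illustration): connecting
 * to a peer; destination user id and application name are placeholders:
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.siucv_family = AF_IUCV;
 *	memcpy(sa.siucv_user_id, "VMGUEST2", 8);	// peer z/VM guest or peer device
 *	memcpy(sa.siucv_name,    "PEERAPP ", 8);	// peer application name
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * On the classic VM transport an unbound socket is autobound first; the
 * HiperSockets transport requires an explicit bind(), as the -EBADFD check
 * above enforces.
 */
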
    773/* Move a socket into listening state. */
    774static int iucv_sock_listen(struct socket *sock, int backlog)
    775{
    776	struct sock *sk = sock->sk;
    777	int err;
    778
    779	lock_sock(sk);
    780
    781	err = -EINVAL;
    782	if (sk->sk_state != IUCV_BOUND)
    783		goto done;
    784
    785	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
    786		goto done;
    787
    788	sk->sk_max_ack_backlog = backlog;
    789	sk->sk_ack_backlog = 0;
    790	sk->sk_state = IUCV_LISTEN;
    791	err = 0;
    792
    793done:
    794	release_sock(sk);
    795	return err;
    796}
    797
    798/* Accept a pending connection */
    799static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
    800			    int flags, bool kern)
    801{
    802	DECLARE_WAITQUEUE(wait, current);
    803	struct sock *sk = sock->sk, *nsk;
    804	long timeo;
    805	int err = 0;
    806
    807	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
    808
    809	if (sk->sk_state != IUCV_LISTEN) {
    810		err = -EBADFD;
    811		goto done;
    812	}
    813
    814	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
    815
    816	/* Wait for an incoming connection */
    817	add_wait_queue_exclusive(sk_sleep(sk), &wait);
    818	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
    819		set_current_state(TASK_INTERRUPTIBLE);
    820		if (!timeo) {
    821			err = -EAGAIN;
    822			break;
    823		}
    824
    825		release_sock(sk);
    826		timeo = schedule_timeout(timeo);
    827		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
    828
    829		if (sk->sk_state != IUCV_LISTEN) {
    830			err = -EBADFD;
    831			break;
    832		}
    833
    834		if (signal_pending(current)) {
    835			err = sock_intr_errno(timeo);
    836			break;
    837		}
    838	}
    839
    840	set_current_state(TASK_RUNNING);
    841	remove_wait_queue(sk_sleep(sk), &wait);
    842
    843	if (err)
    844		goto done;
    845
    846	newsock->state = SS_CONNECTED;
    847
    848done:
    849	release_sock(sk);
    850	return err;
    851}
    852
    853static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
    854			     int peer)
    855{
    856	DECLARE_SOCKADDR(struct sockaddr_iucv *, siucv, addr);
    857	struct sock *sk = sock->sk;
    858	struct iucv_sock *iucv = iucv_sk(sk);
    859
    860	addr->sa_family = AF_IUCV;
    861
    862	if (peer) {
    863		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
    864		memcpy(siucv->siucv_name, iucv->dst_name, 8);
    865	} else {
    866		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
    867		memcpy(siucv->siucv_name, iucv->src_name, 8);
    868	}
    869	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
    870	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
    871	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
    872
    873	return sizeof(struct sockaddr_iucv);
    874}
    875
    876/**
    877 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
    878 * @path:	IUCV path
    879 * @msg:	Pointer to a struct iucv_message
    880 * @skb:	The socket data to send, skb->len MUST BE <= 7
    881 *
    882 * Send the socket data in the parameter list in the iucv message
    883 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
    884 * list and the socket data len at index 7 (last byte).
    885 * See also iucv_msg_length().
    886 *
    887 * Returns the error code from the iucv_message_send() call.
    888 */
    889static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
    890			  struct sk_buff *skb)
    891{
    892	u8 prmdata[8];
    893
    894	memcpy(prmdata, (void *) skb->data, skb->len);
    895	prmdata[7] = 0xff - (u8) skb->len;
    896	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
    897				 (void *) prmdata, 8);
    898}
    899
    900static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
    901			     size_t len)
    902{
    903	struct sock *sk = sock->sk;
    904	struct iucv_sock *iucv = iucv_sk(sk);
    905	size_t headroom = 0;
    906	size_t linear;
    907	struct sk_buff *skb;
    908	struct iucv_message txmsg = {0};
    909	struct cmsghdr *cmsg;
    910	int cmsg_done;
    911	long timeo;
    912	char user_id[9];
    913	char appl_id[9];
    914	int err;
    915	int noblock = msg->msg_flags & MSG_DONTWAIT;
    916
    917	err = sock_error(sk);
    918	if (err)
    919		return err;
    920
    921	if (msg->msg_flags & MSG_OOB)
    922		return -EOPNOTSUPP;
    923
    924	/* SOCK_SEQPACKET: we do not support segmented records */
    925	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
    926		return -EOPNOTSUPP;
    927
    928	lock_sock(sk);
    929
    930	if (sk->sk_shutdown & SEND_SHUTDOWN) {
    931		err = -EPIPE;
    932		goto out;
    933	}
    934
    935	/* Return if the socket is not in connected state */
    936	if (sk->sk_state != IUCV_CONNECTED) {
    937		err = -ENOTCONN;
    938		goto out;
    939	}
    940
    941	/* initialize defaults */
    942	cmsg_done   = 0;	/* check for duplicate headers */
    943
    944	/* iterate over control messages */
    945	for_each_cmsghdr(cmsg, msg) {
    946		if (!CMSG_OK(msg, cmsg)) {
    947			err = -EINVAL;
    948			goto out;
    949		}
    950
    951		if (cmsg->cmsg_level != SOL_IUCV)
    952			continue;
    953
    954		if (cmsg->cmsg_type & cmsg_done) {
    955			err = -EINVAL;
    956			goto out;
    957		}
    958		cmsg_done |= cmsg->cmsg_type;
    959
    960		switch (cmsg->cmsg_type) {
    961		case SCM_IUCV_TRGCLS:
    962			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
    963				err = -EINVAL;
    964				goto out;
    965			}
    966
    967			/* set iucv message target class */
    968			memcpy(&txmsg.class,
    969				(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
    970
    971			break;
    972
    973		default:
    974			err = -EINVAL;
    975			goto out;
    976		}
    977	}
    978
    979	/* allocate one skb for each iucv message:
    980	 * this is fine for SOCK_SEQPACKET (unless we want to support
    981	 * segmented records using the MSG_EOR flag), but
    982	 * for SOCK_STREAM we might want to improve it in future */
    983	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
    984		headroom = sizeof(struct af_iucv_trans_hdr) +
    985			   LL_RESERVED_SPACE(iucv->hs_dev);
    986		linear = min(len, PAGE_SIZE - headroom);
    987	} else {
    988		if (len < PAGE_SIZE) {
    989			linear = len;
    990		} else {
    991			/* In nonlinear "classic" iucv skb,
    992			 * reserve space for iucv_array
    993			 */
    994			headroom = sizeof(struct iucv_array) *
    995				   (MAX_SKB_FRAGS + 1);
    996			linear = PAGE_SIZE - headroom;
    997		}
    998	}
    999	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
   1000				   noblock, &err, 0);
   1001	if (!skb)
   1002		goto out;
   1003	if (headroom)
   1004		skb_reserve(skb, headroom);
   1005	skb_put(skb, linear);
   1006	skb->len = len;
   1007	skb->data_len = len - linear;
   1008	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
   1009	if (err)
   1010		goto fail;
   1011
    1012	/* wait if the outstanding message count for the iucv path has reached the msglimit */
   1013	timeo = sock_sndtimeo(sk, noblock);
   1014	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
   1015	if (err)
   1016		goto fail;
   1017
   1018	/* return -ECONNRESET if the socket is no longer connected */
   1019	if (sk->sk_state != IUCV_CONNECTED) {
   1020		err = -ECONNRESET;
   1021		goto fail;
   1022	}
   1023
   1024	/* increment and save iucv message tag for msg_completion cbk */
   1025	txmsg.tag = iucv->send_tag++;
   1026	IUCV_SKB_CB(skb)->tag = txmsg.tag;
   1027
   1028	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
   1029		atomic_inc(&iucv->msg_sent);
   1030		err = afiucv_hs_send(&txmsg, sk, skb, 0);
   1031		if (err) {
   1032			atomic_dec(&iucv->msg_sent);
   1033			goto out;
   1034		}
   1035	} else { /* Classic VM IUCV transport */
   1036		skb_queue_tail(&iucv->send_skb_q, skb);
   1037		atomic_inc(&iucv->skbs_in_xmit);
   1038
   1039		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
   1040		    skb->len <= 7) {
   1041			err = iucv_send_iprm(iucv->path, &txmsg, skb);
   1042
   1043			/* on success: there is no message_complete callback */
   1044			/* for an IPRMDATA msg; remove skb from send queue   */
   1045			if (err == 0) {
   1046				atomic_dec(&iucv->skbs_in_xmit);
   1047				skb_unlink(skb, &iucv->send_skb_q);
   1048				consume_skb(skb);
   1049			}
   1050
   1051			/* this error should never happen since the	*/
   1052			/* IUCV_IPRMDATA path flag is set... sever path */
   1053			if (err == 0x15) {
   1054				pr_iucv->path_sever(iucv->path, NULL);
   1055				atomic_dec(&iucv->skbs_in_xmit);
   1056				skb_unlink(skb, &iucv->send_skb_q);
   1057				err = -EPIPE;
   1058				goto fail;
   1059			}
   1060		} else if (skb_is_nonlinear(skb)) {
   1061			struct iucv_array *iba = (struct iucv_array *)skb->head;
   1062			int i;
   1063
   1064			/* skip iucv_array lying in the headroom */
   1065			iba[0].address = (u32)(addr_t)skb->data;
   1066			iba[0].length = (u32)skb_headlen(skb);
   1067			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
   1068				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
   1069
   1070				iba[i + 1].address =
   1071					(u32)(addr_t)skb_frag_address(frag);
   1072				iba[i + 1].length = (u32)skb_frag_size(frag);
   1073			}
   1074			err = pr_iucv->message_send(iucv->path, &txmsg,
   1075						    IUCV_IPBUFLST, 0,
   1076						    (void *)iba, skb->len);
   1077		} else { /* non-IPRM Linear skb */
   1078			err = pr_iucv->message_send(iucv->path, &txmsg,
   1079					0, 0, (void *)skb->data, skb->len);
   1080		}
   1081		if (err) {
   1082			if (err == 3) {
   1083				user_id[8] = 0;
   1084				memcpy(user_id, iucv->dst_user_id, 8);
   1085				appl_id[8] = 0;
   1086				memcpy(appl_id, iucv->dst_name, 8);
   1087				pr_err(
   1088		"Application %s on z/VM guest %s exceeds message limit\n",
   1089					appl_id, user_id);
   1090				err = -EAGAIN;
   1091			} else {
   1092				err = -EPIPE;
   1093			}
   1094
   1095			atomic_dec(&iucv->skbs_in_xmit);
   1096			skb_unlink(skb, &iucv->send_skb_q);
   1097			goto fail;
   1098		}
   1099	}
   1100
   1101	release_sock(sk);
   1102	return len;
   1103
   1104fail:
   1105	kfree_skb(skb);
   1106out:
   1107	release_sock(sk);
   1108	return err;
   1109}
   1110
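/*
 * Userspace sketch (hypothetical, for illustration): tagging one message
 * with an IUCV target class via ancillary data, which the for_each_cmsghdr()
 * loop above consumes; the class value is an arbitrary placeholder and the
 * SOL_IUCV/SCM_IUCV_TRGCLS constants are assumed to come from the AF_IUCV
 * headers:
 *
 *	char data[] = "hello";
 *	uint32_t trgcls = 0x00010000;
 *	char cbuf[CMSG_SPACE(sizeof(trgcls))];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) - 1 };
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *
 *	cm->cmsg_level = SOL_IUCV;
 *	cm->cmsg_type  = SCM_IUCV_TRGCLS;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(trgcls));
 *	memcpy(CMSG_DATA(cm), &trgcls, sizeof(trgcls));
 *	sendmsg(fd, &mh, 0);
 *
 * On the receiving side, iucv_sock_recvmsg() hands the same class back as an
 * SCM_IUCV_TRGCLS control message via put_cmsg().
 */
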
   1111static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
   1112{
   1113	size_t headroom, linear;
   1114	struct sk_buff *skb;
   1115	int err;
   1116
   1117	if (len < PAGE_SIZE) {
   1118		headroom = 0;
   1119		linear = len;
   1120	} else {
   1121		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
   1122		linear = PAGE_SIZE - headroom;
   1123	}
   1124	skb = alloc_skb_with_frags(headroom + linear, len - linear,
   1125				   0, &err, GFP_ATOMIC | GFP_DMA);
   1126	WARN_ONCE(!skb,
   1127		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
   1128		  len, err);
   1129	if (skb) {
   1130		if (headroom)
   1131			skb_reserve(skb, headroom);
   1132		skb_put(skb, linear);
   1133		skb->len = len;
   1134		skb->data_len = len - linear;
   1135	}
   1136	return skb;
   1137}
   1138
   1139/* iucv_process_message() - Receive a single outstanding IUCV message
   1140 *
   1141 * Locking: must be called with message_q.lock held
   1142 */
   1143static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
   1144				 struct iucv_path *path,
   1145				 struct iucv_message *msg)
   1146{
   1147	int rc;
   1148	unsigned int len;
   1149
   1150	len = iucv_msg_length(msg);
   1151
   1152	/* store msg target class in the second 4 bytes of skb ctrl buffer */
   1153	/* Note: the first 4 bytes are reserved for msg tag */
   1154	IUCV_SKB_CB(skb)->class = msg->class;
   1155
   1156	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
   1157	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
   1158		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
   1159			skb->data = NULL;
   1160			skb->len = 0;
   1161		}
   1162	} else {
   1163		if (skb_is_nonlinear(skb)) {
   1164			struct iucv_array *iba = (struct iucv_array *)skb->head;
   1165			int i;
   1166
   1167			iba[0].address = (u32)(addr_t)skb->data;
   1168			iba[0].length = (u32)skb_headlen(skb);
   1169			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
   1170				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
   1171
   1172				iba[i + 1].address =
   1173					(u32)(addr_t)skb_frag_address(frag);
   1174				iba[i + 1].length = (u32)skb_frag_size(frag);
   1175			}
   1176			rc = pr_iucv->message_receive(path, msg,
   1177					      IUCV_IPBUFLST,
   1178					      (void *)iba, len, NULL);
   1179		} else {
   1180			rc = pr_iucv->message_receive(path, msg,
   1181					      msg->flags & IUCV_IPRMDATA,
   1182					      skb->data, len, NULL);
   1183		}
   1184		if (rc) {
   1185			kfree_skb(skb);
   1186			return;
   1187		}
   1188		WARN_ON_ONCE(skb->len != len);
   1189	}
   1190
   1191	IUCV_SKB_CB(skb)->offset = 0;
   1192	if (sk_filter(sk, skb)) {
   1193		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
   1194		kfree_skb(skb);
   1195		return;
   1196	}
   1197	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
   1198		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
   1199}
   1200
   1201/* iucv_process_message_q() - Process outstanding IUCV messages
   1202 *
   1203 * Locking: must be called with message_q.lock held
   1204 */
   1205static void iucv_process_message_q(struct sock *sk)
   1206{
   1207	struct iucv_sock *iucv = iucv_sk(sk);
   1208	struct sk_buff *skb;
   1209	struct sock_msg_q *p, *n;
   1210
   1211	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
   1212		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
   1213		if (!skb)
   1214			break;
   1215		iucv_process_message(sk, skb, p->path, &p->msg);
   1216		list_del(&p->list);
   1217		kfree(p);
   1218		if (!skb_queue_empty(&iucv->backlog_skb_q))
   1219			break;
   1220	}
   1221}
   1222
   1223static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
   1224			     size_t len, int flags)
   1225{
   1226	struct sock *sk = sock->sk;
   1227	struct iucv_sock *iucv = iucv_sk(sk);
   1228	unsigned int copied, rlen;
   1229	struct sk_buff *skb, *rskb, *cskb;
   1230	int err = 0;
   1231	u32 offset;
   1232
   1233	if ((sk->sk_state == IUCV_DISCONN) &&
   1234	    skb_queue_empty(&iucv->backlog_skb_q) &&
   1235	    skb_queue_empty(&sk->sk_receive_queue) &&
   1236	    list_empty(&iucv->message_q.list))
   1237		return 0;
   1238
   1239	if (flags & (MSG_OOB))
   1240		return -EOPNOTSUPP;
   1241
   1242	/* receive/dequeue next skb:
   1243	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
   1244	skb = skb_recv_datagram(sk, flags, &err);
   1245	if (!skb) {
   1246		if (sk->sk_shutdown & RCV_SHUTDOWN)
   1247			return 0;
   1248		return err;
   1249	}
   1250
   1251	offset = IUCV_SKB_CB(skb)->offset;
   1252	rlen   = skb->len - offset;		/* real length of skb */
   1253	copied = min_t(unsigned int, rlen, len);
   1254	if (!rlen)
   1255		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
   1256
   1257	cskb = skb;
   1258	if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
   1259		if (!(flags & MSG_PEEK))
   1260			skb_queue_head(&sk->sk_receive_queue, skb);
   1261		return -EFAULT;
   1262	}
   1263
   1264	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
   1265	if (sk->sk_type == SOCK_SEQPACKET) {
   1266		if (copied < rlen)
   1267			msg->msg_flags |= MSG_TRUNC;
   1268		/* each iucv message contains a complete record */
   1269		msg->msg_flags |= MSG_EOR;
   1270	}
   1271
   1272	/* create control message to store iucv msg target class:
   1273	 * get the trgcls from the control buffer of the skb due to
   1274	 * fragmentation of original iucv message. */
   1275	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
   1276		       sizeof(IUCV_SKB_CB(skb)->class),
   1277		       (void *)&IUCV_SKB_CB(skb)->class);
   1278	if (err) {
   1279		if (!(flags & MSG_PEEK))
   1280			skb_queue_head(&sk->sk_receive_queue, skb);
   1281		return err;
   1282	}
   1283
   1284	/* Mark read part of skb as used */
   1285	if (!(flags & MSG_PEEK)) {
   1286
   1287		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
   1288		if (sk->sk_type == SOCK_STREAM) {
   1289			if (copied < rlen) {
   1290				IUCV_SKB_CB(skb)->offset = offset + copied;
   1291				skb_queue_head(&sk->sk_receive_queue, skb);
   1292				goto done;
   1293			}
   1294		}
   1295
   1296		consume_skb(skb);
   1297		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
   1298			atomic_inc(&iucv->msg_recv);
   1299			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
   1300				WARN_ON(1);
   1301				iucv_sock_close(sk);
   1302				return -EFAULT;
   1303			}
   1304		}
   1305
   1306		/* Queue backlog skbs */
   1307		spin_lock_bh(&iucv->message_q.lock);
   1308		rskb = skb_dequeue(&iucv->backlog_skb_q);
   1309		while (rskb) {
   1310			IUCV_SKB_CB(rskb)->offset = 0;
   1311			if (__sock_queue_rcv_skb(sk, rskb)) {
   1312				/* handle rcv queue full */
   1313				skb_queue_head(&iucv->backlog_skb_q,
   1314						rskb);
   1315				break;
   1316			}
   1317			rskb = skb_dequeue(&iucv->backlog_skb_q);
   1318		}
   1319		if (skb_queue_empty(&iucv->backlog_skb_q)) {
   1320			if (!list_empty(&iucv->message_q.list))
   1321				iucv_process_message_q(sk);
   1322			if (atomic_read(&iucv->msg_recv) >=
   1323							iucv->msglimit / 2) {
   1324				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
   1325				if (err) {
   1326					sk->sk_state = IUCV_DISCONN;
   1327					sk->sk_state_change(sk);
   1328				}
   1329			}
   1330		}
   1331		spin_unlock_bh(&iucv->message_q.lock);
   1332	}
   1333
   1334done:
   1335	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
   1336	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
   1337		copied = rlen;
   1338
   1339	return copied;
   1340}
   1341
   1342static inline __poll_t iucv_accept_poll(struct sock *parent)
   1343{
   1344	struct iucv_sock *isk, *n;
   1345	struct sock *sk;
   1346
   1347	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
   1348		sk = (struct sock *) isk;
   1349
   1350		if (sk->sk_state == IUCV_CONNECTED)
   1351			return EPOLLIN | EPOLLRDNORM;
   1352	}
   1353
   1354	return 0;
   1355}
   1356
   1357static __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
   1358			       poll_table *wait)
   1359{
   1360	struct sock *sk = sock->sk;
   1361	__poll_t mask = 0;
   1362
   1363	sock_poll_wait(file, sock, wait);
   1364
   1365	if (sk->sk_state == IUCV_LISTEN)
   1366		return iucv_accept_poll(sk);
   1367
   1368	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
   1369		mask |= EPOLLERR |
   1370			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
   1371
   1372	if (sk->sk_shutdown & RCV_SHUTDOWN)
   1373		mask |= EPOLLRDHUP;
   1374
   1375	if (sk->sk_shutdown == SHUTDOWN_MASK)
   1376		mask |= EPOLLHUP;
   1377
   1378	if (!skb_queue_empty(&sk->sk_receive_queue) ||
   1379	    (sk->sk_shutdown & RCV_SHUTDOWN))
   1380		mask |= EPOLLIN | EPOLLRDNORM;
   1381
   1382	if (sk->sk_state == IUCV_CLOSED)
   1383		mask |= EPOLLHUP;
   1384
   1385	if (sk->sk_state == IUCV_DISCONN)
   1386		mask |= EPOLLIN;
   1387
   1388	if (sock_writeable(sk) && iucv_below_msglim(sk))
   1389		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
   1390	else
   1391		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
   1392
   1393	return mask;
   1394}
   1395
   1396static int iucv_sock_shutdown(struct socket *sock, int how)
   1397{
   1398	struct sock *sk = sock->sk;
   1399	struct iucv_sock *iucv = iucv_sk(sk);
   1400	struct iucv_message txmsg;
   1401	int err = 0;
   1402
   1403	how++;
   1404
   1405	if ((how & ~SHUTDOWN_MASK) || !how)
   1406		return -EINVAL;
   1407
   1408	lock_sock(sk);
   1409	switch (sk->sk_state) {
   1410	case IUCV_LISTEN:
   1411	case IUCV_DISCONN:
   1412	case IUCV_CLOSING:
   1413	case IUCV_CLOSED:
   1414		err = -ENOTCONN;
   1415		goto fail;
   1416	default:
   1417		break;
   1418	}
   1419
   1420	if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
   1421	    sk->sk_state == IUCV_CONNECTED) {
   1422		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
   1423			txmsg.class = 0;
   1424			txmsg.tag = 0;
   1425			err = pr_iucv->message_send(iucv->path, &txmsg,
   1426				IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
   1427			if (err) {
   1428				switch (err) {
   1429				case 1:
   1430					err = -ENOTCONN;
   1431					break;
   1432				case 2:
   1433					err = -ECONNRESET;
   1434					break;
   1435				default:
   1436					err = -ENOTCONN;
   1437					break;
   1438				}
   1439			}
   1440		} else
   1441			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
   1442	}
   1443
   1444	sk->sk_shutdown |= how;
   1445	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
   1446		if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
   1447		    iucv->path) {
   1448			err = pr_iucv->path_quiesce(iucv->path, NULL);
   1449			if (err)
   1450				err = -ENOTCONN;
   1451/*			skb_queue_purge(&sk->sk_receive_queue); */
   1452		}
   1453		skb_queue_purge(&sk->sk_receive_queue);
   1454	}
   1455
   1456	/* Wake up anyone sleeping in poll */
   1457	sk->sk_state_change(sk);
   1458
   1459fail:
   1460	release_sock(sk);
   1461	return err;
   1462}
   1463
   1464static int iucv_sock_release(struct socket *sock)
   1465{
   1466	struct sock *sk = sock->sk;
   1467	int err = 0;
   1468
   1469	if (!sk)
   1470		return 0;
   1471
   1472	iucv_sock_close(sk);
   1473
   1474	sock_orphan(sk);
   1475	iucv_sock_kill(sk);
   1476	return err;
   1477}
   1478
   1479/* getsockopt and setsockopt */
   1480static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
   1481				sockptr_t optval, unsigned int optlen)
   1482{
   1483	struct sock *sk = sock->sk;
   1484	struct iucv_sock *iucv = iucv_sk(sk);
   1485	int val;
   1486	int rc;
   1487
   1488	if (level != SOL_IUCV)
   1489		return -ENOPROTOOPT;
   1490
   1491	if (optlen < sizeof(int))
   1492		return -EINVAL;
   1493
   1494	if (copy_from_sockptr(&val, optval, sizeof(int)))
   1495		return -EFAULT;
   1496
   1497	rc = 0;
   1498
   1499	lock_sock(sk);
   1500	switch (optname) {
   1501	case SO_IPRMDATA_MSG:
   1502		if (val)
   1503			iucv->flags |= IUCV_IPRMDATA;
   1504		else
   1505			iucv->flags &= ~IUCV_IPRMDATA;
   1506		break;
   1507	case SO_MSGLIMIT:
   1508		switch (sk->sk_state) {
   1509		case IUCV_OPEN:
   1510		case IUCV_BOUND:
   1511			if (val < 1 || val > U16_MAX)
   1512				rc = -EINVAL;
   1513			else
   1514				iucv->msglimit = val;
   1515			break;
   1516		default:
   1517			rc = -EINVAL;
   1518			break;
   1519		}
   1520		break;
   1521	default:
   1522		rc = -ENOPROTOOPT;
   1523		break;
   1524	}
   1525	release_sock(sk);
   1526
   1527	return rc;
   1528}
   1529
   1530static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
   1531				char __user *optval, int __user *optlen)
   1532{
   1533	struct sock *sk = sock->sk;
   1534	struct iucv_sock *iucv = iucv_sk(sk);
   1535	unsigned int val;
   1536	int len;
   1537
   1538	if (level != SOL_IUCV)
   1539		return -ENOPROTOOPT;
   1540
   1541	if (get_user(len, optlen))
   1542		return -EFAULT;
   1543
   1544	if (len < 0)
   1545		return -EINVAL;
   1546
   1547	len = min_t(unsigned int, len, sizeof(int));
   1548
   1549	switch (optname) {
   1550	case SO_IPRMDATA_MSG:
   1551		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
   1552		break;
   1553	case SO_MSGLIMIT:
   1554		lock_sock(sk);
   1555		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
   1556					   : iucv->msglimit;	/* default */
   1557		release_sock(sk);
   1558		break;
   1559	case SO_MSGSIZE:
   1560		if (sk->sk_state == IUCV_OPEN)
   1561			return -EBADFD;
   1562		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
   1563				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
   1564				0x7fffffff;
   1565		break;
   1566	default:
   1567		return -ENOPROTOOPT;
   1568	}
   1569
   1570	if (put_user(len, optlen))
   1571		return -EFAULT;
   1572	if (copy_to_user(optval, &val, len))
   1573		return -EFAULT;
   1574
   1575	return 0;
   1576}
   1577
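/*
 * Userspace sketch (hypothetical, for illustration) of the two AF_IUCV
 * socket options handled above; SO_MSGLIMIT is only accepted while the
 * socket is still in IUCV_OPEN or IUCV_BOUND state, and 256 is just a
 * placeholder within the allowed 1..65535 range:
 *
 *	int on = 1, limit = 256;
 *
 *	setsockopt(fd, SOL_IUCV, SO_IPRMDATA_MSG, &on, sizeof(on));
 *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
 */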
   1578
   1579/* Callback wrappers - called from iucv base support */
   1580static int iucv_callback_connreq(struct iucv_path *path,
   1581				 u8 ipvmid[8], u8 ipuser[16])
   1582{
   1583	unsigned char user_data[16];
   1584	unsigned char nuser_data[16];
   1585	unsigned char src_name[8];
   1586	struct sock *sk, *nsk;
   1587	struct iucv_sock *iucv, *niucv;
   1588	int err;
   1589
   1590	memcpy(src_name, ipuser, 8);
   1591	EBCASC(src_name, 8);
   1592	/* Find out if this path belongs to af_iucv. */
   1593	read_lock(&iucv_sk_list.lock);
   1594	iucv = NULL;
   1595	sk = NULL;
   1596	sk_for_each(sk, &iucv_sk_list.head)
   1597		if (sk->sk_state == IUCV_LISTEN &&
   1598		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
   1599			/*
   1600			 * Found a listening socket with
   1601			 * src_name == ipuser[0-7].
   1602			 */
   1603			iucv = iucv_sk(sk);
   1604			break;
   1605		}
   1606	read_unlock(&iucv_sk_list.lock);
   1607	if (!iucv)
   1608		/* No socket found, not one of our paths. */
   1609		return -EINVAL;
   1610
   1611	bh_lock_sock(sk);
   1612
   1613	/* Check if parent socket is listening */
   1614	low_nmcpy(user_data, iucv->src_name);
   1615	high_nmcpy(user_data, iucv->dst_name);
   1616	ASCEBC(user_data, sizeof(user_data));
   1617	if (sk->sk_state != IUCV_LISTEN) {
   1618		err = pr_iucv->path_sever(path, user_data);
   1619		iucv_path_free(path);
   1620		goto fail;
   1621	}
   1622
   1623	/* Check for backlog size */
   1624	if (sk_acceptq_is_full(sk)) {
   1625		err = pr_iucv->path_sever(path, user_data);
   1626		iucv_path_free(path);
   1627		goto fail;
   1628	}
   1629
   1630	/* Create the new socket */
   1631	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
   1632	if (!nsk) {
   1633		err = pr_iucv->path_sever(path, user_data);
   1634		iucv_path_free(path);
   1635		goto fail;
   1636	}
   1637
   1638	niucv = iucv_sk(nsk);
   1639	iucv_sock_init(nsk, sk);
   1640	niucv->transport = AF_IUCV_TRANS_IUCV;
   1641	nsk->sk_allocation |= GFP_DMA;
   1642
   1643	/* Set the new iucv_sock */
   1644	memcpy(niucv->dst_name, ipuser + 8, 8);
   1645	EBCASC(niucv->dst_name, 8);
   1646	memcpy(niucv->dst_user_id, ipvmid, 8);
   1647	memcpy(niucv->src_name, iucv->src_name, 8);
   1648	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
   1649	niucv->path = path;
   1650
   1651	/* Call iucv_accept */
   1652	high_nmcpy(nuser_data, ipuser + 8);
   1653	memcpy(nuser_data + 8, niucv->src_name, 8);
   1654	ASCEBC(nuser_data + 8, 8);
   1655
   1656	/* set message limit for path based on msglimit of accepting socket */
   1657	niucv->msglimit = iucv->msglimit;
   1658	path->msglim = iucv->msglimit;
   1659	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
   1660	if (err) {
   1661		iucv_sever_path(nsk, 1);
   1662		iucv_sock_kill(nsk);
   1663		goto fail;
   1664	}
   1665
   1666	iucv_accept_enqueue(sk, nsk);
   1667
   1668	/* Wake up accept */
   1669	nsk->sk_state = IUCV_CONNECTED;
   1670	sk->sk_data_ready(sk);
   1671	err = 0;
   1672fail:
   1673	bh_unlock_sock(sk);
   1674	return 0;
   1675}
   1676
   1677static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
   1678{
   1679	struct sock *sk = path->private;
   1680
   1681	sk->sk_state = IUCV_CONNECTED;
   1682	sk->sk_state_change(sk);
   1683}
   1684
   1685static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
   1686{
   1687	struct sock *sk = path->private;
   1688	struct iucv_sock *iucv = iucv_sk(sk);
   1689	struct sk_buff *skb;
   1690	struct sock_msg_q *save_msg;
   1691	int len;
   1692
   1693	if (sk->sk_shutdown & RCV_SHUTDOWN) {
   1694		pr_iucv->message_reject(path, msg);
   1695		return;
   1696	}
   1697
   1698	spin_lock(&iucv->message_q.lock);
   1699
   1700	if (!list_empty(&iucv->message_q.list) ||
   1701	    !skb_queue_empty(&iucv->backlog_skb_q))
   1702		goto save_message;
   1703
   1704	len = atomic_read(&sk->sk_rmem_alloc);
   1705	len += SKB_TRUESIZE(iucv_msg_length(msg));
   1706	if (len > sk->sk_rcvbuf)
   1707		goto save_message;
   1708
   1709	skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
   1710	if (!skb)
   1711		goto save_message;
   1712
   1713	iucv_process_message(sk, skb, path, msg);
   1714	goto out_unlock;
   1715
   1716save_message:
   1717	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
   1718	if (!save_msg)
   1719		goto out_unlock;
   1720	save_msg->path = path;
   1721	save_msg->msg = *msg;
   1722
   1723	list_add_tail(&save_msg->list, &iucv->message_q.list);
   1724
   1725out_unlock:
   1726	spin_unlock(&iucv->message_q.lock);
   1727}
   1728
   1729static void iucv_callback_txdone(struct iucv_path *path,
   1730				 struct iucv_message *msg)
   1731{
   1732	struct sock *sk = path->private;
   1733	struct sk_buff *this = NULL;
   1734	struct sk_buff_head *list;
   1735	struct sk_buff *list_skb;
   1736	struct iucv_sock *iucv;
   1737	unsigned long flags;
   1738
   1739	iucv = iucv_sk(sk);
   1740	list = &iucv->send_skb_q;
   1741
   1742	bh_lock_sock(sk);
   1743
   1744	spin_lock_irqsave(&list->lock, flags);
   1745	skb_queue_walk(list, list_skb) {
   1746		if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
   1747			this = list_skb;
   1748			break;
   1749		}
   1750	}
   1751	if (this) {
   1752		atomic_dec(&iucv->skbs_in_xmit);
   1753		__skb_unlink(this, list);
   1754	}
   1755
   1756	spin_unlock_irqrestore(&list->lock, flags);
   1757
   1758	if (this) {
   1759		consume_skb(this);
   1760		/* wake up any process waiting for sending */
   1761		iucv_sock_wake_msglim(sk);
   1762	}
   1763
   1764	if (sk->sk_state == IUCV_CLOSING) {
   1765		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
   1766			sk->sk_state = IUCV_CLOSED;
   1767			sk->sk_state_change(sk);
   1768		}
   1769	}
   1770	bh_unlock_sock(sk);
   1771
   1772}
   1773
   1774static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
   1775{
   1776	struct sock *sk = path->private;
   1777
   1778	if (sk->sk_state == IUCV_CLOSED)
   1779		return;
   1780
   1781	bh_lock_sock(sk);
   1782	iucv_sever_path(sk, 1);
   1783	sk->sk_state = IUCV_DISCONN;
   1784
   1785	sk->sk_state_change(sk);
   1786	bh_unlock_sock(sk);
   1787}
   1788
   1789/* called if the other communication side shuts down its RECV direction;
   1790 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
   1791 */
   1792static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
   1793{
   1794	struct sock *sk = path->private;
   1795
   1796	bh_lock_sock(sk);
   1797	if (sk->sk_state != IUCV_CLOSED) {
   1798		sk->sk_shutdown |= SEND_SHUTDOWN;
   1799		sk->sk_state_change(sk);
   1800	}
   1801	bh_unlock_sock(sk);
   1802}
   1803
   1804static struct iucv_handler af_iucv_handler = {
   1805	.path_pending		= iucv_callback_connreq,
   1806	.path_complete		= iucv_callback_connack,
   1807	.path_severed		= iucv_callback_connrej,
   1808	.message_pending	= iucv_callback_rx,
   1809	.message_complete	= iucv_callback_txdone,
   1810	.path_quiesced		= iucv_callback_shutdown,
   1811};
   1812
   1813/***************** HiperSockets transport callbacks ********************/
   1814static void afiucv_swap_src_dest(struct sk_buff *skb)
   1815{
   1816	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
   1817	char tmpID[8];
   1818	char tmpName[8];
   1819
   1820	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
   1821	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
   1822	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
   1823	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
   1824	memcpy(tmpID, trans_hdr->srcUserID, 8);
   1825	memcpy(tmpName, trans_hdr->srcAppName, 8);
   1826	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
   1827	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
   1828	memcpy(trans_hdr->destUserID, tmpID, 8);
   1829	memcpy(trans_hdr->destAppName, tmpName, 8);
   1830	skb_push(skb, ETH_HLEN);
   1831	memset(skb->data, 0, ETH_HLEN);
   1832}
   1833
   1834/*
   1835 * afiucv_hs_callback_syn() - react on received SYN
   1836 */
   1837static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
   1838{
   1839	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
   1840	struct sock *nsk;
   1841	struct iucv_sock *iucv, *niucv;
   1842	int err;
   1843
   1844	iucv = iucv_sk(sk);
   1845	if (!iucv) {
   1846		/* no sock - connection refused */
   1847		afiucv_swap_src_dest(skb);
   1848		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
   1849		err = dev_queue_xmit(skb);
   1850		goto out;
   1851	}
   1852
   1853	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
   1854	bh_lock_sock(sk);
   1855	if ((sk->sk_state != IUCV_LISTEN) ||
   1856	    sk_acceptq_is_full(sk) ||
   1857	    !nsk) {
   1858		/* error on server socket - connection refused */
   1859		afiucv_swap_src_dest(skb);
   1860		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
   1861		err = dev_queue_xmit(skb);
   1862		iucv_sock_kill(nsk);
   1863		bh_unlock_sock(sk);
   1864		goto out;
   1865	}
   1866
   1867	niucv = iucv_sk(nsk);
   1868	iucv_sock_init(nsk, sk);
   1869	niucv->transport = AF_IUCV_TRANS_HIPER;
   1870	niucv->msglimit = iucv->msglimit;
   1871	if (!trans_hdr->window)
   1872		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
   1873	else
   1874		niucv->msglimit_peer = trans_hdr->window;
   1875	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
   1876	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
   1877	memcpy(niucv->src_name, iucv->src_name, 8);
   1878	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
   1879	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
   1880	niucv->hs_dev = iucv->hs_dev;
   1881	dev_hold(niucv->hs_dev);
   1882	afiucv_swap_src_dest(skb);
   1883	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
   1884	trans_hdr->window = niucv->msglimit;
   1885	/* if the receiver acks the xmit, the connection is established */
   1886	err = dev_queue_xmit(skb);
   1887	if (!err) {
   1888		iucv_accept_enqueue(sk, nsk);
   1889		nsk->sk_state = IUCV_CONNECTED;
   1890		sk->sk_data_ready(sk);
   1891	} else
   1892		iucv_sock_kill(nsk);
   1893	bh_unlock_sock(sk);
   1894
   1895out:
   1896	return NET_RX_SUCCESS;
   1897}
   1898
   1899/*
   1900 * afiucv_hs_callback_synack() - react on received SYN-ACK
   1901 */
   1902static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
   1903{
   1904	struct iucv_sock *iucv = iucv_sk(sk);
   1905
   1906	if (!iucv || sk->sk_state != IUCV_BOUND) {
   1907		kfree_skb(skb);
   1908		return NET_RX_SUCCESS;
   1909	}
   1910
   1911	bh_lock_sock(sk);
   1912	iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
   1913	sk->sk_state = IUCV_CONNECTED;
   1914	sk->sk_state_change(sk);
   1915	bh_unlock_sock(sk);
   1916	consume_skb(skb);
   1917	return NET_RX_SUCCESS;
   1918}
   1919
   1920/*
   1921 * afiucv_hs_callback_synfin() - react on received SYN_FIN
   1922 */
   1923static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
   1924{
   1925	struct iucv_sock *iucv = iucv_sk(sk);
   1926
   1927	if (!iucv || sk->sk_state != IUCV_BOUND) {
   1928		kfree_skb(skb);
   1929		return NET_RX_SUCCESS;
   1930	}
   1931
   1932	bh_lock_sock(sk);
   1933	sk->sk_state = IUCV_DISCONN;
   1934	sk->sk_state_change(sk);
   1935	bh_unlock_sock(sk);
   1936	consume_skb(skb);
   1937	return NET_RX_SUCCESS;
   1938}
   1939
   1940/*
   1941 * afiucv_hs_callback_fin() - react on received FIN
   1942 */
   1943static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
   1944{
   1945	struct iucv_sock *iucv = iucv_sk(sk);
   1946
   1947	/* other end of connection closed */
   1948	if (!iucv) {
   1949		kfree_skb(skb);
   1950		return NET_RX_SUCCESS;
   1951	}
   1952
   1953	bh_lock_sock(sk);
   1954	if (sk->sk_state == IUCV_CONNECTED) {
   1955		sk->sk_state = IUCV_DISCONN;
   1956		sk->sk_state_change(sk);
   1957	}
   1958	bh_unlock_sock(sk);
   1959	consume_skb(skb);
   1960	return NET_RX_SUCCESS;
   1961}
   1962
   1963/*
   1964 * afiucv_hs_callback_win() - react on received WIN
   1965 */
   1966static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
   1967{
   1968	struct iucv_sock *iucv = iucv_sk(sk);
   1969
   1970	if (!iucv)
   1971		return NET_RX_SUCCESS;
   1972
   1973	if (sk->sk_state != IUCV_CONNECTED)
   1974		return NET_RX_SUCCESS;
   1975
   1976	atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
   1977	iucv_sock_wake_msglim(sk);
   1978	return NET_RX_SUCCESS;
   1979}
   1980
   1981/*
   1982 * afiucv_hs_callback_rx() - react on received data
   1983 */
   1984static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
   1985{
   1986	struct iucv_sock *iucv = iucv_sk(sk);
   1987
   1988	if (!iucv) {
   1989		kfree_skb(skb);
   1990		return NET_RX_SUCCESS;
   1991	}
   1992
   1993	if (sk->sk_state != IUCV_CONNECTED) {
   1994		kfree_skb(skb);
   1995		return NET_RX_SUCCESS;
   1996	}
   1997
   1998	if (sk->sk_shutdown & RCV_SHUTDOWN) {
   1999		kfree_skb(skb);
   2000		return NET_RX_SUCCESS;
   2001	}
   2002
   2003	/* strip the transport header and set up the skb control block */
   2004	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
   2005	skb_reset_transport_header(skb);
   2006	skb_reset_network_header(skb);
   2007	IUCV_SKB_CB(skb)->offset = 0;
   2008	if (sk_filter(sk, skb)) {
   2009		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
   2010		kfree_skb(skb);
   2011		return NET_RX_SUCCESS;
   2012	}
   2013
   2014	spin_lock(&iucv->message_q.lock);
   2015	if (skb_queue_empty(&iucv->backlog_skb_q)) {
   2016		if (__sock_queue_rcv_skb(sk, skb))
   2017			/* handle rcv queue full */
   2018			skb_queue_tail(&iucv->backlog_skb_q, skb);
   2019	} else
   2020		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
   2021	spin_unlock(&iucv->message_q.lock);
   2022	return NET_RX_SUCCESS;
   2023}
   2024
   2025/*
   2026 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
   2027 *                   transport
   2028 *                   called from netif RX softirq
   2029 */
   2030static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
   2031	struct packet_type *pt, struct net_device *orig_dev)
   2032{
   2033	struct sock *sk;
   2034	struct iucv_sock *iucv;
   2035	struct af_iucv_trans_hdr *trans_hdr;
   2036	int err = NET_RX_SUCCESS;
   2037	char nullstring[8];
   2038
   2039	if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
   2040		kfree_skb(skb);
   2041		return NET_RX_SUCCESS;
   2042	}
   2043
   2044	trans_hdr = iucv_trans_hdr(skb);
   2045	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
   2046	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
   2047	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
   2048	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
   2049	memset(nullstring, 0, sizeof(nullstring));
   2050	iucv = NULL;
   2051	sk = NULL;
   2052	read_lock(&iucv_sk_list.lock);
   2053	sk_for_each(sk, &iucv_sk_list.head) {
   2054		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
   2055			if ((!memcmp(&iucv_sk(sk)->src_name,
   2056				     trans_hdr->destAppName, 8)) &&
   2057			    (!memcmp(&iucv_sk(sk)->src_user_id,
   2058				     trans_hdr->destUserID, 8)) &&
   2059			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
   2060			    (!memcmp(&iucv_sk(sk)->dst_user_id,
   2061				     nullstring, 8))) {
   2062				iucv = iucv_sk(sk);
   2063				break;
   2064			}
   2065		} else {
   2066			if ((!memcmp(&iucv_sk(sk)->src_name,
   2067				     trans_hdr->destAppName, 8)) &&
   2068			    (!memcmp(&iucv_sk(sk)->src_user_id,
   2069				     trans_hdr->destUserID, 8)) &&
   2070			    (!memcmp(&iucv_sk(sk)->dst_name,
   2071				     trans_hdr->srcAppName, 8)) &&
   2072			    (!memcmp(&iucv_sk(sk)->dst_user_id,
   2073				     trans_hdr->srcUserID, 8))) {
   2074				iucv = iucv_sk(sk);
   2075				break;
   2076			}
   2077		}
   2078	}
   2079	read_unlock(&iucv_sk_list.lock);
   2080	if (!iucv)
   2081		sk = NULL;
   2082
   2083	/* No matching sock was found.
   2084	 * How should we reply when there is no sock?
   2085	 * 1) send without a sock and skip checking the send rc?
   2086	 * 2) introduce a default sock to handle such cases?
   2087	 *
   2088	 * SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
   2089	 * data -> send FIN
   2090	 * SYN|ACK, SYN|FIN, FIN -> no action? */
   2091
   2092	switch (trans_hdr->flags) {
   2093	case AF_IUCV_FLAG_SYN:
   2094		/* connect request */
   2095		err = afiucv_hs_callback_syn(sk, skb);
   2096		break;
   2097	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
   2098		/* connect request confirmed */
   2099		err = afiucv_hs_callback_synack(sk, skb);
   2100		break;
   2101	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
   2102		/* connect request refused */
   2103		err = afiucv_hs_callback_synfin(sk, skb);
   2104		break;
   2105	case (AF_IUCV_FLAG_FIN):
   2106		/* close request */
   2107		err = afiucv_hs_callback_fin(sk, skb);
   2108		break;
   2109	case (AF_IUCV_FLAG_WIN):
   2110		err = afiucv_hs_callback_win(sk, skb);
   2111		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
   2112			consume_skb(skb);
   2113			break;
   2114		}
   2115		fallthrough;	/* and receive non-zero length data */
   2116	case (AF_IUCV_FLAG_SHT):
   2117		/* shutdown request */
   2118		fallthrough;	/* and receive zero length data */
   2119	case 0:
   2120		/* plain data frame */
   2121		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
   2122		err = afiucv_hs_callback_rx(sk, skb);
   2123		break;
   2124	default:
   2125		kfree_skb(skb);
   2126	}
   2127
   2128	return err;
   2129}
   2130
   2131/*
   2132 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
   2133 *                                 transport
   2134 */
   2135static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n)
   2136{
   2137	struct iucv_sock *iucv = iucv_sk(sk);
   2138
   2139	if (sock_flag(sk, SOCK_ZAPPED))
   2140		return;
   2141
   2142	switch (n) {
   2143	case TX_NOTIFY_OK:
   2144		atomic_dec(&iucv->skbs_in_xmit);
   2145		iucv_sock_wake_msglim(sk);
   2146		break;
   2147	case TX_NOTIFY_PENDING:
   2148		atomic_inc(&iucv->pendings);
   2149		break;
   2150	case TX_NOTIFY_DELAYED_OK:
   2151		atomic_dec(&iucv->skbs_in_xmit);
   2152		if (atomic_dec_return(&iucv->pendings) <= 0)
   2153			iucv_sock_wake_msglim(sk);
   2154		break;
   2155	default:
   2156		atomic_dec(&iucv->skbs_in_xmit);
   2157		if (sk->sk_state == IUCV_CONNECTED) {
   2158			sk->sk_state = IUCV_DISCONN;
   2159			sk->sk_state_change(sk);
   2160		}
   2161	}
   2162
   2163	if (sk->sk_state == IUCV_CLOSING) {
   2164		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
   2165			sk->sk_state = IUCV_CLOSED;
   2166			sk->sk_state_change(sk);
   2167		}
   2168	}
   2169}
   2170
   2171/*
   2172 * afiucv_netdev_event: handle netdev notifier chain events
   2173 */
   2174static int afiucv_netdev_event(struct notifier_block *this,
   2175			       unsigned long event, void *ptr)
   2176{
   2177	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
   2178	struct sock *sk;
   2179	struct iucv_sock *iucv;
   2180
   2181	switch (event) {
   2182	case NETDEV_REBOOT:
   2183	case NETDEV_GOING_DOWN:
   2184		sk_for_each(sk, &iucv_sk_list.head) {
   2185			iucv = iucv_sk(sk);
   2186			if ((iucv->hs_dev == event_dev) &&
   2187			    (sk->sk_state == IUCV_CONNECTED)) {
   2188				if (event == NETDEV_GOING_DOWN)
   2189					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
   2190				sk->sk_state = IUCV_DISCONN;
   2191				sk->sk_state_change(sk);
   2192			}
   2193		}
   2194		break;
   2195	case NETDEV_DOWN:
   2196	case NETDEV_UNREGISTER:
   2197	default:
   2198		break;
   2199	}
   2200	return NOTIFY_DONE;
   2201}
   2202
   2203static struct notifier_block afiucv_netdev_notifier = {
   2204	.notifier_call = afiucv_netdev_event,
   2205};
   2206
   2207static const struct proto_ops iucv_sock_ops = {
   2208	.family		= PF_IUCV,
   2209	.owner		= THIS_MODULE,
   2210	.release	= iucv_sock_release,
   2211	.bind		= iucv_sock_bind,
   2212	.connect	= iucv_sock_connect,
   2213	.listen		= iucv_sock_listen,
   2214	.accept		= iucv_sock_accept,
   2215	.getname	= iucv_sock_getname,
   2216	.sendmsg	= iucv_sock_sendmsg,
   2217	.recvmsg	= iucv_sock_recvmsg,
   2218	.poll		= iucv_sock_poll,
   2219	.ioctl		= sock_no_ioctl,
   2220	.mmap		= sock_no_mmap,
   2221	.socketpair	= sock_no_socketpair,
   2222	.shutdown	= iucv_sock_shutdown,
   2223	.setsockopt	= iucv_sock_setsockopt,
   2224	.getsockopt	= iucv_sock_getsockopt,
   2225};
   2226
   2227static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
   2228			    int kern)
   2229{
   2230	struct sock *sk;
   2231
   2232	if (protocol && protocol != PF_IUCV)
   2233		return -EPROTONOSUPPORT;
   2234
   2235	sock->state = SS_UNCONNECTED;
   2236
   2237	switch (sock->type) {
   2238	case SOCK_STREAM:
   2239	case SOCK_SEQPACKET:
   2240		/* currently, proto ops can handle both sk types */
   2241		sock->ops = &iucv_sock_ops;
   2242		break;
   2243	default:
   2244		return -ESOCKTNOSUPPORT;
   2245	}
   2246
   2247	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
   2248	if (!sk)
   2249		return -ENOMEM;
   2250
   2251	iucv_sock_init(sk, NULL);
   2252
   2253	return 0;
   2254}
   2255
   2256static const struct net_proto_family iucv_sock_family_ops = {
   2257	.family	= AF_IUCV,
   2258	.owner	= THIS_MODULE,
   2259	.create	= iucv_sock_create,
   2260};
   2261
   2262static struct packet_type iucv_packet_type = {
   2263	.type = cpu_to_be16(ETH_P_AF_IUCV),
   2264	.func = afiucv_hs_rcv,
   2265};
   2266
   2267static int __init afiucv_init(void)
   2268{
   2269	int err;
   2270
   2271	if (MACHINE_IS_VM && IS_ENABLED(CONFIG_IUCV)) {
   2272		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
   2273		if (unlikely(err)) {
   2274			WARN_ON(err);
   2275			err = -EPROTONOSUPPORT;
   2276			goto out;
   2277		}
   2278
   2279		pr_iucv = &iucv_if;
   2280	} else {
   2281		memset(&iucv_userid, 0, sizeof(iucv_userid));
   2282		pr_iucv = NULL;
   2283	}
   2284
   2285	err = proto_register(&iucv_proto, 0);
   2286	if (err)
   2287		goto out;
   2288	err = sock_register(&iucv_sock_family_ops);
   2289	if (err)
   2290		goto out_proto;
   2291
   2292	if (pr_iucv) {
   2293		err = pr_iucv->iucv_register(&af_iucv_handler, 0);
   2294		if (err)
   2295			goto out_sock;
   2296	}
   2297
   2298	err = register_netdevice_notifier(&afiucv_netdev_notifier);
   2299	if (err)
   2300		goto out_notifier;
   2301
   2302	dev_add_pack(&iucv_packet_type);
   2303	return 0;
   2304
   2305out_notifier:
   2306	if (pr_iucv)
   2307		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
   2308out_sock:
   2309	sock_unregister(PF_IUCV);
   2310out_proto:
   2311	proto_unregister(&iucv_proto);
   2312out:
   2313	return err;
   2314}
   2315
   2316static void __exit afiucv_exit(void)
   2317{
   2318	if (pr_iucv)
   2319		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
   2320
   2321	unregister_netdevice_notifier(&afiucv_netdev_notifier);
   2322	dev_remove_pack(&iucv_packet_type);
   2323	sock_unregister(PF_IUCV);
   2324	proto_unregister(&iucv_proto);
   2325}
   2326
   2327module_init(afiucv_init);
   2328module_exit(afiucv_exit);
   2329
   2330MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
   2331MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
   2332MODULE_VERSION(VERSION);
   2333MODULE_LICENSE("GPL");
   2334MODULE_ALIAS_NETPROTO(PF_IUCV);