cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

kcmsock.c (46106B)
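
For orientation: kcmsock.c implements the PF_KCM socket family (Kernel Connection
Multiplexor). Userspace creates a KCM socket with socket(2) and attaches connected
TCP sockets to its multiplexor with the SIOCKCMATTACH ioctl, passing a
BPF_PROG_TYPE_SOCKET_FILTER program that returns the length of the next message so
the stream parser can delimit records. The snippet below is a minimal, untested
sketch of that flow and is not part of the kernel source; kcm_attach_tcp(), tcp_fd
and bpf_fd are illustrative names, and the BPF program is assumed to have been
loaded elsewhere.

#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>   /* AF_KCM (available in recent libc headers) */
#include <linux/kcm.h>    /* struct kcm_attach, SIOCKCMATTACH, KCMPROTO_CONNECTED */

/* Sketch: attach an already-connected TCP socket to a fresh KCM socket. */
static int kcm_attach_tcp(int tcp_fd, int bpf_fd)
{
	struct kcm_attach attach = {
		.fd = tcp_fd,      /* connected TCP socket to multiplex */
		.bpf_fd = bpf_fd,  /* message-delimiting socket filter program */
	};
	int kcm_fd;

	/* kcm_create() below accepts SOCK_DGRAM or SOCK_SEQPACKET and
	 * requires protocol KCMPROTO_CONNECTED.
	 */
	kcm_fd = socket(AF_KCM, SOCK_SEQPACKET, KCMPROTO_CONNECTED);
	if (kcm_fd < 0)
		return -1;

	/* Dispatched by kcm_ioctl() -> kcm_attach_ioctl() -> kcm_attach() */
	if (ioctl(kcm_fd, SIOCKCMATTACH, &attach) < 0) {
		close(kcm_fd);
		return -1;
	}

	return kcm_fd;
}

After attaching, sendmsg()/recvmsg() on the KCM socket operate on whole messages as
framed by the BPF program, and SIOCKCMCLONE creates additional KCM sockets on the
same mux.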


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Kernel Connection Multiplexor
      4 *
      5 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
      6 */
      7
      8#include <linux/bpf.h>
      9#include <linux/errno.h>
     10#include <linux/errqueue.h>
     11#include <linux/file.h>
     12#include <linux/filter.h>
     13#include <linux/in.h>
     14#include <linux/kernel.h>
     15#include <linux/module.h>
     16#include <linux/net.h>
     17#include <linux/netdevice.h>
     18#include <linux/poll.h>
     19#include <linux/rculist.h>
     20#include <linux/skbuff.h>
     21#include <linux/socket.h>
     22#include <linux/uaccess.h>
     23#include <linux/workqueue.h>
     24#include <linux/syscalls.h>
     25#include <linux/sched/signal.h>
     26
     27#include <net/kcm.h>
     28#include <net/netns/generic.h>
     29#include <net/sock.h>
     30#include <uapi/linux/kcm.h>
     31
     32unsigned int kcm_net_id;
     33
     34static struct kmem_cache *kcm_psockp __read_mostly;
     35static struct kmem_cache *kcm_muxp __read_mostly;
     36static struct workqueue_struct *kcm_wq;
     37
     38static inline struct kcm_sock *kcm_sk(const struct sock *sk)
     39{
     40	return (struct kcm_sock *)sk;
     41}
     42
     43static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
     44{
     45	return (struct kcm_tx_msg *)skb->cb;
     46}
     47
     48static void report_csk_error(struct sock *csk, int err)
     49{
     50	csk->sk_err = EPIPE;
     51	sk_error_report(csk);
     52}
     53
     54static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
     55			       bool wakeup_kcm)
     56{
     57	struct sock *csk = psock->sk;
     58	struct kcm_mux *mux = psock->mux;
     59
     60	/* Unrecoverable error in transmit */
     61
     62	spin_lock_bh(&mux->lock);
     63
     64	if (psock->tx_stopped) {
     65		spin_unlock_bh(&mux->lock);
     66		return;
     67	}
     68
     69	psock->tx_stopped = 1;
     70	KCM_STATS_INCR(psock->stats.tx_aborts);
     71
     72	if (!psock->tx_kcm) {
     73		/* Take off psocks_avail list */
     74		list_del(&psock->psock_avail_list);
     75	} else if (wakeup_kcm) {
     76		/* In this case psock is being aborted while outside of
     77		 * write_msgs and psock is reserved. Schedule tx_work
     78		 * to handle the failure there. Need to commit tx_stopped
     79		 * before queuing work.
     80		 */
     81		smp_mb();
     82
     83		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
     84	}
     85
     86	spin_unlock_bh(&mux->lock);
     87
     88	/* Report error on lower socket */
     89	report_csk_error(csk, err);
     90}
     91
     92/* RX mux lock held. */
     93static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
     94				    struct kcm_psock *psock)
     95{
     96	STRP_STATS_ADD(mux->stats.rx_bytes,
     97		       psock->strp.stats.bytes -
     98		       psock->saved_rx_bytes);
     99	mux->stats.rx_msgs +=
    100		psock->strp.stats.msgs - psock->saved_rx_msgs;
    101	psock->saved_rx_msgs = psock->strp.stats.msgs;
    102	psock->saved_rx_bytes = psock->strp.stats.bytes;
    103}
    104
    105static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
    106				    struct kcm_psock *psock)
    107{
    108	KCM_STATS_ADD(mux->stats.tx_bytes,
    109		      psock->stats.tx_bytes - psock->saved_tx_bytes);
    110	mux->stats.tx_msgs +=
    111		psock->stats.tx_msgs - psock->saved_tx_msgs;
    112	psock->saved_tx_msgs = psock->stats.tx_msgs;
    113	psock->saved_tx_bytes = psock->stats.tx_bytes;
    114}
    115
    116static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
    117
    118/* KCM is ready to receive messages on its queue-- either the KCM is new or
    119 * has become unblocked after being blocked on a full socket buffer. Queue any
    120 * pending ready messages on a psock. RX mux lock held.
    121 */
    122static void kcm_rcv_ready(struct kcm_sock *kcm)
    123{
    124	struct kcm_mux *mux = kcm->mux;
    125	struct kcm_psock *psock;
    126	struct sk_buff *skb;
    127
    128	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
    129		return;
    130
    131	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
    132		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
    133			/* Assuming buffer limit has been reached */
    134			skb_queue_head(&mux->rx_hold_queue, skb);
    135			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
    136			return;
    137		}
    138	}
    139
    140	while (!list_empty(&mux->psocks_ready)) {
    141		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
    142					 psock_ready_list);
    143
    144		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
    145			/* Assuming buffer limit has been reached */
    146			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
    147			return;
    148		}
    149
    150		/* Consumed the ready message on the psock. Schedule rx_work to
    151		 * get more messages.
    152		 */
    153		list_del(&psock->psock_ready_list);
    154		psock->ready_rx_msg = NULL;
    155		/* Commit clearing of ready_rx_msg for queuing work */
    156		smp_mb();
    157
    158		strp_unpause(&psock->strp);
    159		strp_check_rcv(&psock->strp);
    160	}
    161
    162	/* Buffer limit is okay now, add to ready list */
    163	list_add_tail(&kcm->wait_rx_list,
    164		      &kcm->mux->kcm_rx_waiters);
    165	kcm->rx_wait = true;
    166}
    167
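       /* skb destructor for messages on a KCM receive queue: uncharge receive
        * memory and, once the socket has drained below sk_rcvlowat, re-add the
        * KCM to the mux's receive-ready bookkeeping via kcm_rcv_ready().
        */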
    168static void kcm_rfree(struct sk_buff *skb)
    169{
    170	struct sock *sk = skb->sk;
    171	struct kcm_sock *kcm = kcm_sk(sk);
    172	struct kcm_mux *mux = kcm->mux;
    173	unsigned int len = skb->truesize;
    174
    175	sk_mem_uncharge(sk, len);
    176	atomic_sub(len, &sk->sk_rmem_alloc);
    177
    178	/* For reading rx_wait and rx_psock without holding lock */
    179	smp_mb__after_atomic();
    180
    181	if (!kcm->rx_wait && !kcm->rx_psock &&
    182	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
    183		spin_lock_bh(&mux->rx_lock);
    184		kcm_rcv_ready(kcm);
    185		spin_unlock_bh(&mux->rx_lock);
    186	}
    187}
    188
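       /* Charge an skb to a KCM socket's receive queue: fails with -ENOMEM when
        * the receive buffer is full and -ENOBUFS when memory cannot be scheduled.
        */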
    189static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
    190{
    191	struct sk_buff_head *list = &sk->sk_receive_queue;
    192
    193	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
    194		return -ENOMEM;
    195
    196	if (!sk_rmem_schedule(sk, skb, skb->truesize))
    197		return -ENOBUFS;
    198
    199	skb->dev = NULL;
    200
    201	skb_orphan(skb);
    202	skb->sk = sk;
    203	skb->destructor = kcm_rfree;
    204	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
    205	sk_mem_charge(sk, skb->truesize);
    206
    207	skb_queue_tail(list, skb);
    208
    209	if (!sock_flag(sk, SOCK_DEAD))
    210		sk->sk_data_ready(sk);
    211
    212	return 0;
    213}
    214
    215/* Requeue received messages for a kcm socket to other kcm sockets. This is
    216 * called when a kcm socket is receive disabled.
    217 * RX mux lock held.
    218 */
    219static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
    220{
    221	struct sk_buff *skb;
    222	struct kcm_sock *kcm;
    223
    224	while ((skb = __skb_dequeue(head))) {
    225		/* Reset destructor to avoid calling kcm_rcv_ready */
    226		skb->destructor = sock_rfree;
    227		skb_orphan(skb);
    228try_again:
    229		if (list_empty(&mux->kcm_rx_waiters)) {
    230			skb_queue_tail(&mux->rx_hold_queue, skb);
    231			continue;
    232		}
    233
    234		kcm = list_first_entry(&mux->kcm_rx_waiters,
    235				       struct kcm_sock, wait_rx_list);
    236
    237		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
    238			/* Should mean socket buffer full */
    239			list_del(&kcm->wait_rx_list);
    240			kcm->rx_wait = false;
    241
    242			/* Commit rx_wait to read in kcm_free */
    243			smp_wmb();
    244
    245			goto try_again;
    246		}
    247	}
    248}
    249
    250/* Lower sock lock held */
    251static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
    252				       struct sk_buff *head)
    253{
    254	struct kcm_mux *mux = psock->mux;
    255	struct kcm_sock *kcm;
    256
    257	WARN_ON(psock->ready_rx_msg);
    258
    259	if (psock->rx_kcm)
    260		return psock->rx_kcm;
    261
    262	spin_lock_bh(&mux->rx_lock);
    263
    264	if (psock->rx_kcm) {
    265		spin_unlock_bh(&mux->rx_lock);
    266		return psock->rx_kcm;
    267	}
    268
    269	kcm_update_rx_mux_stats(mux, psock);
    270
    271	if (list_empty(&mux->kcm_rx_waiters)) {
    272		psock->ready_rx_msg = head;
    273		strp_pause(&psock->strp);
    274		list_add_tail(&psock->psock_ready_list,
    275			      &mux->psocks_ready);
    276		spin_unlock_bh(&mux->rx_lock);
    277		return NULL;
    278	}
    279
    280	kcm = list_first_entry(&mux->kcm_rx_waiters,
    281			       struct kcm_sock, wait_rx_list);
    282	list_del(&kcm->wait_rx_list);
    283	kcm->rx_wait = false;
    284
    285	psock->rx_kcm = kcm;
    286	kcm->rx_psock = psock;
    287
    288	spin_unlock_bh(&mux->rx_lock);
    289
    290	return kcm;
    291}
    292
    293static void kcm_done(struct kcm_sock *kcm);
    294
    295static void kcm_done_work(struct work_struct *w)
    296{
    297	kcm_done(container_of(w, struct kcm_sock, done_work));
    298}
    299
    300/* Lower sock held */
    301static void unreserve_rx_kcm(struct kcm_psock *psock,
    302			     bool rcv_ready)
    303{
    304	struct kcm_sock *kcm = psock->rx_kcm;
    305	struct kcm_mux *mux = psock->mux;
    306
    307	if (!kcm)
    308		return;
    309
    310	spin_lock_bh(&mux->rx_lock);
    311
    312	psock->rx_kcm = NULL;
    313	kcm->rx_psock = NULL;
    314
    315	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
    316	 * kcm_rfree
    317	 */
    318	smp_mb();
    319
    320	if (unlikely(kcm->done)) {
    321		spin_unlock_bh(&mux->rx_lock);
    322
    323		/* Need to run kcm_done in a task since we need to acquire
    324		 * callback locks which may already be held here.
    325		 */
    326		INIT_WORK(&kcm->done_work, kcm_done_work);
    327		schedule_work(&kcm->done_work);
    328		return;
    329	}
    330
    331	if (unlikely(kcm->rx_disabled)) {
    332		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
    333	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
    334		/* Check for a race with rx_wait where all the data was
    335		 * dequeued (accounted for in kcm_rfree).
    336		 */
    337		kcm_rcv_ready(kcm);
    338	}
    339	spin_unlock_bh(&mux->rx_lock);
    340}
    341
    342/* Lower sock lock held */
    343static void psock_data_ready(struct sock *sk)
    344{
    345	struct kcm_psock *psock;
    346
    347	read_lock_bh(&sk->sk_callback_lock);
    348
    349	psock = (struct kcm_psock *)sk->sk_user_data;
    350	if (likely(psock))
    351		strp_data_ready(&psock->strp);
    352
    353	read_unlock_bh(&sk->sk_callback_lock);
    354}
    355
    356/* Called with lower sock held */
    357static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
    358{
    359	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
    360	struct kcm_sock *kcm;
    361
    362try_queue:
    363	kcm = reserve_rx_kcm(psock, skb);
    364	if (!kcm) {
    365		 /* Unable to reserve a KCM, message is held in psock and strp
    366		  * is paused.
    367		  */
    368		return;
    369	}
    370
    371	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
    372		/* Should mean socket buffer full */
    373		unreserve_rx_kcm(psock, false);
    374		goto try_queue;
    375	}
    376}
    377
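       /* strparser parse callback: run the attached BPF program over the skb to
        * determine the length of the next message in the byte stream.
        */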
    378static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
    379{
    380	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
    381	struct bpf_prog *prog = psock->bpf_prog;
    382	int res;
    383
    384	res = bpf_prog_run_pin_on_cpu(prog, skb);
    385	return res;
    386}
    387
    388static int kcm_read_sock_done(struct strparser *strp, int err)
    389{
    390	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
    391
    392	unreserve_rx_kcm(psock, true);
    393
    394	return err;
    395}
    396
    397static void psock_state_change(struct sock *sk)
    398{
    399	/* TCP only does an EPOLLIN for a half close. Do an EPOLLHUP here
    400	 * since application will normally not poll with EPOLLIN
    401	 * on the TCP sockets.
    402	 */
    403
    404	report_csk_error(sk, EPIPE);
    405}
    406
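       /* Write space became available on a lower TCP socket: if a KCM socket has
        * this psock reserved for transmit, schedule its tx work to resume sending.
        */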
    407static void psock_write_space(struct sock *sk)
    408{
    409	struct kcm_psock *psock;
    410	struct kcm_mux *mux;
    411	struct kcm_sock *kcm;
    412
    413	read_lock_bh(&sk->sk_callback_lock);
    414
    415	psock = (struct kcm_psock *)sk->sk_user_data;
    416	if (unlikely(!psock))
    417		goto out;
    418	mux = psock->mux;
    419
    420	spin_lock_bh(&mux->lock);
    421
    422	/* If this psock is reserved, the owning kcm socket may be waiting to send */
    423	kcm = psock->tx_kcm;
    424	if (kcm && !unlikely(kcm->tx_stopped))
    425		queue_work(kcm_wq, &kcm->tx_work);
    426
    427	spin_unlock_bh(&mux->lock);
    428out:
    429	read_unlock_bh(&sk->sk_callback_lock);
    430}
    431
    432static void unreserve_psock(struct kcm_sock *kcm);
    433
    434/* kcm sock is locked. */
    435static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
    436{
    437	struct kcm_mux *mux = kcm->mux;
    438	struct kcm_psock *psock;
    439
    440	psock = kcm->tx_psock;
    441
    442	smp_rmb(); /* Must read tx_psock before tx_wait */
    443
    444	if (psock) {
    445		WARN_ON(kcm->tx_wait);
    446		if (unlikely(psock->tx_stopped))
    447			unreserve_psock(kcm);
    448		else
    449			return kcm->tx_psock;
    450	}
    451
    452	spin_lock_bh(&mux->lock);
    453
    454	/* Check again under lock to see if a psock was reserved for this
    455	 * kcm socket via psock_now_avail.
    456	 */
    457	psock = kcm->tx_psock;
    458	if (unlikely(psock)) {
    459		WARN_ON(kcm->tx_wait);
    460		spin_unlock_bh(&mux->lock);
    461		return kcm->tx_psock;
    462	}
    463
    464	if (!list_empty(&mux->psocks_avail)) {
    465		psock = list_first_entry(&mux->psocks_avail,
    466					 struct kcm_psock,
    467					 psock_avail_list);
    468		list_del(&psock->psock_avail_list);
    469		if (kcm->tx_wait) {
    470			list_del(&kcm->wait_psock_list);
    471			kcm->tx_wait = false;
    472		}
    473		kcm->tx_psock = psock;
    474		psock->tx_kcm = kcm;
    475		KCM_STATS_INCR(psock->stats.reserved);
    476	} else if (!kcm->tx_wait) {
    477		list_add_tail(&kcm->wait_psock_list,
    478			      &mux->kcm_tx_waiters);
    479		kcm->tx_wait = true;
    480	}
    481
    482	spin_unlock_bh(&mux->lock);
    483
    484	return psock;
    485}
    486
    487/* mux lock held */
    488static void psock_now_avail(struct kcm_psock *psock)
    489{
    490	struct kcm_mux *mux = psock->mux;
    491	struct kcm_sock *kcm;
    492
    493	if (list_empty(&mux->kcm_tx_waiters)) {
    494		list_add_tail(&psock->psock_avail_list,
    495			      &mux->psocks_avail);
    496	} else {
    497		kcm = list_first_entry(&mux->kcm_tx_waiters,
    498				       struct kcm_sock,
    499				       wait_psock_list);
    500		list_del(&kcm->wait_psock_list);
    501		kcm->tx_wait = false;
    502		psock->tx_kcm = kcm;
    503
    504		/* Commit before changing tx_psock since that is read in
    505		 * reserve_psock before queuing work.
    506		 */
    507		smp_mb();
    508
    509		kcm->tx_psock = psock;
    510		KCM_STATS_INCR(psock->stats.reserved);
    511		queue_work(kcm_wq, &kcm->tx_work);
    512	}
    513}
    514
    515/* kcm sock is locked. */
    516static void unreserve_psock(struct kcm_sock *kcm)
    517{
    518	struct kcm_psock *psock;
    519	struct kcm_mux *mux = kcm->mux;
    520
    521	spin_lock_bh(&mux->lock);
    522
    523	psock = kcm->tx_psock;
    524
    525	if (WARN_ON(!psock)) {
    526		spin_unlock_bh(&mux->lock);
    527		return;
    528	}
    529
    530	smp_rmb(); /* Read tx_psock before tx_wait */
    531
    532	kcm_update_tx_mux_stats(mux, psock);
    533
    534	WARN_ON(kcm->tx_wait);
    535
    536	kcm->tx_psock = NULL;
    537	psock->tx_kcm = NULL;
    538	KCM_STATS_INCR(psock->stats.unreserved);
    539
    540	if (unlikely(psock->tx_stopped)) {
    541		if (psock->done) {
    542			/* Deferred free */
    543			list_del(&psock->psock_list);
    544			mux->psocks_cnt--;
    545			sock_put(psock->sk);
    546			fput(psock->sk->sk_socket->file);
    547			kmem_cache_free(kcm_psockp, psock);
    548		}
    549
    550		/* Don't put back on available list */
    551
    552		spin_unlock_bh(&mux->lock);
    553
    554		return;
    555	}
    556
    557	psock_now_avail(psock);
    558
    559	spin_unlock_bh(&mux->lock);
    560}
    561
    562static void kcm_report_tx_retry(struct kcm_sock *kcm)
    563{
    564	struct kcm_mux *mux = kcm->mux;
    565
    566	spin_lock_bh(&mux->lock);
    567	KCM_STATS_INCR(mux->stats.tx_retries);
    568	spin_unlock_bh(&mux->lock);
    569}
    570
    571/* Write any messages ready on the kcm socket.  Called with kcm sock lock
    572 * held.  Return bytes actually sent or error.
    573 */
    574static int kcm_write_msgs(struct kcm_sock *kcm)
    575{
    576	struct sock *sk = &kcm->sk;
    577	struct kcm_psock *psock;
    578	struct sk_buff *skb, *head;
    579	struct kcm_tx_msg *txm;
    580	unsigned short fragidx, frag_offset;
    581	unsigned int sent, total_sent = 0;
    582	int ret = 0;
    583
    584	kcm->tx_wait_more = false;
    585	psock = kcm->tx_psock;
    586	if (unlikely(psock && psock->tx_stopped)) {
    587		/* A reserved psock was aborted asynchronously. Unreserve
    588		 * it and we'll retry the message.
    589		 */
    590		unreserve_psock(kcm);
    591		kcm_report_tx_retry(kcm);
    592		if (skb_queue_empty(&sk->sk_write_queue))
    593			return 0;
    594
    595		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;
    596
    597	} else if (skb_queue_empty(&sk->sk_write_queue)) {
    598		return 0;
    599	}
    600
    601	head = skb_peek(&sk->sk_write_queue);
    602	txm = kcm_tx_msg(head);
    603
    604	if (txm->sent) {
    605		/* Send of the first skbuff in the queue is already in progress */
    606		if (WARN_ON(!psock)) {
    607			ret = -EINVAL;
    608			goto out;
    609		}
    610		sent = txm->sent;
    611		frag_offset = txm->frag_offset;
    612		fragidx = txm->fragidx;
    613		skb = txm->frag_skb;
    614
    615		goto do_frag;
    616	}
    617
    618try_again:
    619	psock = reserve_psock(kcm);
    620	if (!psock)
    621		goto out;
    622
    623	do {
    624		skb = head;
    625		txm = kcm_tx_msg(head);
    626		sent = 0;
    627
    628do_frag_list:
    629		if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
    630			ret = -EINVAL;
    631			goto out;
    632		}
    633
    634		for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
    635		     fragidx++) {
    636			skb_frag_t *frag;
    637
    638			frag_offset = 0;
    639do_frag:
    640			frag = &skb_shinfo(skb)->frags[fragidx];
    641			if (WARN_ON(!skb_frag_size(frag))) {
    642				ret = -EINVAL;
    643				goto out;
    644			}
    645
    646			ret = kernel_sendpage(psock->sk->sk_socket,
    647					      skb_frag_page(frag),
    648					      skb_frag_off(frag) + frag_offset,
    649					      skb_frag_size(frag) - frag_offset,
    650					      MSG_DONTWAIT);
    651			if (ret <= 0) {
    652				if (ret == -EAGAIN) {
    653					/* Save state to try again when there's
    654					 * write space on the socket
    655					 */
    656					txm->sent = sent;
    657					txm->frag_offset = frag_offset;
    658					txm->fragidx = fragidx;
    659					txm->frag_skb = skb;
    660
    661					ret = 0;
    662					goto out;
    663				}
    664
    665				/* Hard failure in sending message, abort this
    666				 * psock since it has lost framing
    667				 * synchronization and retry sending the
    668				 * message from the beginning.
    669				 */
    670				kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
    671						   true);
    672				unreserve_psock(kcm);
    673
    674				txm->sent = 0;
    675				kcm_report_tx_retry(kcm);
    676				ret = 0;
    677
    678				goto try_again;
    679			}
    680
    681			sent += ret;
    682			frag_offset += ret;
    683			KCM_STATS_ADD(psock->stats.tx_bytes, ret);
    684			if (frag_offset < skb_frag_size(frag)) {
    685				/* Not finished with this frag */
    686				goto do_frag;
    687			}
    688		}
    689
    690		if (skb == head) {
    691			if (skb_has_frag_list(skb)) {
    692				skb = skb_shinfo(skb)->frag_list;
    693				goto do_frag_list;
    694			}
    695		} else if (skb->next) {
    696			skb = skb->next;
    697			goto do_frag_list;
    698		}
    699
    700		/* Successfully sent the whole packet, account for it. */
    701		skb_dequeue(&sk->sk_write_queue);
    702		kfree_skb(head);
    703		sk->sk_wmem_queued -= sent;
    704		total_sent += sent;
    705		KCM_STATS_INCR(psock->stats.tx_msgs);
    706	} while ((head = skb_peek(&sk->sk_write_queue)));
    707out:
    708	if (!head) {
    709		/* Done with all queued messages. */
    710		WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
    711		unreserve_psock(kcm);
    712	}
    713
    714	/* Check if write space is available */
    715	sk->sk_write_space(sk);
    716
    717	return total_sent ? : ret;
    718}
    719
    720static void kcm_tx_work(struct work_struct *w)
    721{
    722	struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
    723	struct sock *sk = &kcm->sk;
    724	int err;
    725
    726	lock_sock(sk);
    727
    728	/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
    729	 * aborts
    730	 */
    731	err = kcm_write_msgs(kcm);
    732	if (err < 0) {
    733		/* Hard failure in write, report error on KCM socket */
    734		pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
    735		report_csk_error(&kcm->sk, -err);
    736		goto out;
    737	}
    738
    739	/* Primarily for SOCK_SEQPACKET sockets */
    740	if (likely(sk->sk_socket) &&
    741	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
    742		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
    743		sk->sk_write_space(sk);
    744	}
    745
    746out:
    747	release_sock(sk);
    748}
    749
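       /* Flush any messages held back by MSG_BATCH (tx_wait_more) */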
    750static void kcm_push(struct kcm_sock *kcm)
    751{
    752	if (kcm->tx_wait_more)
    753		kcm_write_msgs(kcm);
    754}
    755
    756static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
    757			    int offset, size_t size, int flags)
    758
    759{
    760	struct sock *sk = sock->sk;
    761	struct kcm_sock *kcm = kcm_sk(sk);
    762	struct sk_buff *skb = NULL, *head = NULL;
    763	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
    764	bool eor;
    765	int err = 0;
    766	int i;
    767
    768	if (flags & MSG_SENDPAGE_NOTLAST)
    769		flags |= MSG_MORE;
    770
    771	/* No MSG_EOR from splice, only look at MSG_MORE */
    772	eor = !(flags & MSG_MORE);
    773
    774	lock_sock(sk);
    775
    776	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
    777
    778	err = -EPIPE;
    779	if (sk->sk_err)
    780		goto out_error;
    781
    782	if (kcm->seq_skb) {
    783		/* Previously opened message */
    784		head = kcm->seq_skb;
    785		skb = kcm_tx_msg(head)->last_skb;
    786		i = skb_shinfo(skb)->nr_frags;
    787
    788		if (skb_can_coalesce(skb, i, page, offset)) {
    789			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
    790			skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
    791			goto coalesced;
    792		}
    793
    794		if (i >= MAX_SKB_FRAGS) {
    795			struct sk_buff *tskb;
    796
    797			tskb = alloc_skb(0, sk->sk_allocation);
    798			while (!tskb) {
    799				kcm_push(kcm);
    800				err = sk_stream_wait_memory(sk, &timeo);
    801				if (err)
    802					goto out_error;
    803			}
    804
    805			if (head == skb)
    806				skb_shinfo(head)->frag_list = tskb;
    807			else
    808				skb->next = tskb;
    809
    810			skb = tskb;
    811			skb->ip_summed = CHECKSUM_UNNECESSARY;
    812			i = 0;
    813		}
    814	} else {
    815		/* Call the sk_stream functions to manage the sndbuf mem. */
    816		if (!sk_stream_memory_free(sk)) {
    817			kcm_push(kcm);
    818			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
    819			err = sk_stream_wait_memory(sk, &timeo);
    820			if (err)
    821				goto out_error;
    822		}
    823
    824		head = alloc_skb(0, sk->sk_allocation);
    825		while (!head) {
    826			kcm_push(kcm);
    827			err = sk_stream_wait_memory(sk, &timeo);
    828			if (err)
    829				goto out_error;
    830		}
    831
    832		skb = head;
    833		i = 0;
    834	}
    835
    836	get_page(page);
    837	skb_fill_page_desc(skb, i, page, offset, size);
    838	skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
    839
    840coalesced:
    841	skb->len += size;
    842	skb->data_len += size;
    843	skb->truesize += size;
    844	sk->sk_wmem_queued += size;
    845	sk_mem_charge(sk, size);
    846
    847	if (head != skb) {
    848		head->len += size;
    849		head->data_len += size;
    850		head->truesize += size;
    851	}
    852
    853	if (eor) {
    854		bool not_busy = skb_queue_empty(&sk->sk_write_queue);
    855
    856		/* Message complete, queue it on send buffer */
    857		__skb_queue_tail(&sk->sk_write_queue, head);
    858		kcm->seq_skb = NULL;
    859		KCM_STATS_INCR(kcm->stats.tx_msgs);
    860
    861		if (flags & MSG_BATCH) {
    862			kcm->tx_wait_more = true;
    863		} else if (kcm->tx_wait_more || not_busy) {
    864			err = kcm_write_msgs(kcm);
    865			if (err < 0) {
    866				/* We got a hard error in write_msgs but have
    867				 * already queued this message. Report an error
    868				 * in the socket, but don't affect return value
    869				 * from sendmsg
    870				 */
    871				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
    872				report_csk_error(&kcm->sk, -err);
    873			}
    874		}
    875	} else {
    876		/* Message not complete, save state */
    877		kcm->seq_skb = head;
    878		kcm_tx_msg(head)->last_skb = skb;
    879	}
    880
    881	KCM_STATS_ADD(kcm->stats.tx_bytes, size);
    882
    883	release_sock(sk);
    884	return size;
    885
    886out_error:
    887	kcm_push(kcm);
    888
    889	err = sk_stream_error(sk, flags, err);
    890
    891	/* make sure we wake any epoll edge trigger waiter */
    892	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
    893		sk->sk_write_space(sk);
    894
    895	release_sock(sk);
    896	return err;
    897}
    898
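       /* sendmsg() handler: copy user data into page frags of the current message
        * skb, queue the completed message on EOR, and flush it unless MSG_BATCH
        * requests batching.
        */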
    899static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
    900{
    901	struct sock *sk = sock->sk;
    902	struct kcm_sock *kcm = kcm_sk(sk);
    903	struct sk_buff *skb = NULL, *head = NULL;
    904	size_t copy, copied = 0;
    905	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
    906	int eor = (sock->type == SOCK_DGRAM) ?
    907		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
    908	int err = -EPIPE;
    909
    910	lock_sock(sk);
    911
    912	/* Per tcp_sendmsg this should be in poll */
    913	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
    914
    915	if (sk->sk_err)
    916		goto out_error;
    917
    918	if (kcm->seq_skb) {
    919		/* Previously opened message */
    920		head = kcm->seq_skb;
    921		skb = kcm_tx_msg(head)->last_skb;
    922		goto start;
    923	}
    924
    925	/* Call the sk_stream functions to manage the sndbuf mem. */
    926	if (!sk_stream_memory_free(sk)) {
    927		kcm_push(kcm);
    928		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
    929		err = sk_stream_wait_memory(sk, &timeo);
    930		if (err)
    931			goto out_error;
    932	}
    933
    934	if (msg_data_left(msg)) {
    935		/* New message, alloc head skb */
    936		head = alloc_skb(0, sk->sk_allocation);
    937		while (!head) {
    938			kcm_push(kcm);
    939			err = sk_stream_wait_memory(sk, &timeo);
    940			if (err)
    941				goto out_error;
    942
    943			head = alloc_skb(0, sk->sk_allocation);
    944		}
    945
    946		skb = head;
    947
    948		/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
    949		 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
    950		 */
    951		skb->ip_summed = CHECKSUM_UNNECESSARY;
    952	}
    953
    954start:
    955	while (msg_data_left(msg)) {
    956		bool merge = true;
    957		int i = skb_shinfo(skb)->nr_frags;
    958		struct page_frag *pfrag = sk_page_frag(sk);
    959
    960		if (!sk_page_frag_refill(sk, pfrag))
    961			goto wait_for_memory;
    962
    963		if (!skb_can_coalesce(skb, i, pfrag->page,
    964				      pfrag->offset)) {
    965			if (i == MAX_SKB_FRAGS) {
    966				struct sk_buff *tskb;
    967
    968				tskb = alloc_skb(0, sk->sk_allocation);
    969				if (!tskb)
    970					goto wait_for_memory;
    971
    972				if (head == skb)
    973					skb_shinfo(head)->frag_list = tskb;
    974				else
    975					skb->next = tskb;
    976
    977				skb = tskb;
    978				skb->ip_summed = CHECKSUM_UNNECESSARY;
    979				continue;
    980			}
    981			merge = false;
    982		}
    983
    984		copy = min_t(int, msg_data_left(msg),
    985			     pfrag->size - pfrag->offset);
    986
    987		if (!sk_wmem_schedule(sk, copy))
    988			goto wait_for_memory;
    989
    990		err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
    991					       pfrag->page,
    992					       pfrag->offset,
    993					       copy);
    994		if (err)
    995			goto out_error;
    996
    997		/* Update the skb. */
    998		if (merge) {
    999			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
   1000		} else {
   1001			skb_fill_page_desc(skb, i, pfrag->page,
   1002					   pfrag->offset, copy);
   1003			get_page(pfrag->page);
   1004		}
   1005
   1006		pfrag->offset += copy;
   1007		copied += copy;
   1008		if (head != skb) {
   1009			head->len += copy;
   1010			head->data_len += copy;
   1011		}
   1012
   1013		continue;
   1014
   1015wait_for_memory:
   1016		kcm_push(kcm);
   1017		err = sk_stream_wait_memory(sk, &timeo);
   1018		if (err)
   1019			goto out_error;
   1020	}
   1021
   1022	if (eor) {
   1023		bool not_busy = skb_queue_empty(&sk->sk_write_queue);
   1024
   1025		if (head) {
   1026			/* Message complete, queue it on send buffer */
   1027			__skb_queue_tail(&sk->sk_write_queue, head);
   1028			kcm->seq_skb = NULL;
   1029			KCM_STATS_INCR(kcm->stats.tx_msgs);
   1030		}
   1031
   1032		if (msg->msg_flags & MSG_BATCH) {
   1033			kcm->tx_wait_more = true;
   1034		} else if (kcm->tx_wait_more || not_busy) {
   1035			err = kcm_write_msgs(kcm);
   1036			if (err < 0) {
   1037				/* We got a hard error in write_msgs but have
   1038				 * already queued this message. Report an error
   1039				 * in the socket, but don't affect return value
   1040				 * from sendmsg
   1041				 */
   1042				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
   1043				report_csk_error(&kcm->sk, -err);
   1044			}
   1045		}
   1046	} else {
   1047		/* Message not complete, save state */
   1048partial_message:
   1049		if (head) {
   1050			kcm->seq_skb = head;
   1051			kcm_tx_msg(head)->last_skb = skb;
   1052		}
   1053	}
   1054
   1055	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
   1056
   1057	release_sock(sk);
   1058	return copied;
   1059
   1060out_error:
   1061	kcm_push(kcm);
   1062
   1063	if (copied && sock->type == SOCK_SEQPACKET) {
   1064		/* Wrote some bytes before encountering an
   1065		 * error, return partial success.
   1066		 */
   1067		goto partial_message;
   1068	}
   1069
   1070	if (head != kcm->seq_skb)
   1071		kfree_skb(head);
   1072
   1073	err = sk_stream_error(sk, msg->msg_flags, err);
   1074
   1075	/* make sure we wake any epoll edge trigger waiter */
   1076	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
   1077		sk->sk_write_space(sk);
   1078
   1079	release_sock(sk);
   1080	return err;
   1081}
   1082
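       /* Wait for a message on the receive queue, honoring MSG_DONTWAIT, the
        * receive timeout, socket errors and pending signals.
        */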
   1083static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
   1084				     long timeo, int *err)
   1085{
   1086	struct sk_buff *skb;
   1087
   1088	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
   1089		if (sk->sk_err) {
   1090			*err = sock_error(sk);
   1091			return NULL;
   1092		}
   1093
   1094		if (sock_flag(sk, SOCK_DONE))
   1095			return NULL;
   1096
   1097		if ((flags & MSG_DONTWAIT) || !timeo) {
   1098			*err = -EAGAIN;
   1099			return NULL;
   1100		}
   1101
   1102		sk_wait_data(sk, &timeo, NULL);
   1103
   1104		/* Handle signals */
   1105		if (signal_pending(current)) {
   1106			*err = sock_intr_errno(timeo);
   1107			return NULL;
   1108		}
   1109	}
   1110
   1111	return skb;
   1112}
   1113
   1114static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
   1115		       size_t len, int flags)
   1116{
   1117	struct sock *sk = sock->sk;
   1118	struct kcm_sock *kcm = kcm_sk(sk);
   1119	int err = 0;
   1120	long timeo;
   1121	struct strp_msg *stm;
   1122	int copied = 0;
   1123	struct sk_buff *skb;
   1124
   1125	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
   1126
   1127	lock_sock(sk);
   1128
   1129	skb = kcm_wait_data(sk, flags, timeo, &err);
   1130	if (!skb)
   1131		goto out;
   1132
   1133	/* Okay, have a message on the receive queue */
   1134
   1135	stm = strp_msg(skb);
   1136
   1137	if (len > stm->full_len)
   1138		len = stm->full_len;
   1139
   1140	err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
   1141	if (err < 0)
   1142		goto out;
   1143
   1144	copied = len;
   1145	if (likely(!(flags & MSG_PEEK))) {
   1146		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
   1147		if (copied < stm->full_len) {
   1148			if (sock->type == SOCK_DGRAM) {
   1149				/* Truncated message */
   1150				msg->msg_flags |= MSG_TRUNC;
   1151				goto msg_finished;
   1152			}
   1153			stm->offset += copied;
   1154			stm->full_len -= copied;
   1155		} else {
   1156msg_finished:
   1157			/* Finished with message */
   1158			msg->msg_flags |= MSG_EOR;
   1159			KCM_STATS_INCR(kcm->stats.rx_msgs);
   1160			skb_unlink(skb, &sk->sk_receive_queue);
   1161			kfree_skb(skb);
   1162		}
   1163	}
   1164
   1165out:
   1166	release_sock(sk);
   1167
   1168	return copied ? : err;
   1169}
   1170
   1171static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
   1172			       struct pipe_inode_info *pipe, size_t len,
   1173			       unsigned int flags)
   1174{
   1175	struct sock *sk = sock->sk;
   1176	struct kcm_sock *kcm = kcm_sk(sk);
   1177	long timeo;
   1178	struct strp_msg *stm;
   1179	int err = 0;
   1180	ssize_t copied;
   1181	struct sk_buff *skb;
   1182
   1183	/* Only support splice for SOCK_SEQPACKET */
   1184
   1185	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
   1186
   1187	lock_sock(sk);
   1188
   1189	skb = kcm_wait_data(sk, flags, timeo, &err);
   1190	if (!skb)
   1191		goto err_out;
   1192
   1193	/* Okay, have a message on the receive queue */
   1194
   1195	stm = strp_msg(skb);
   1196
   1197	if (len > stm->full_len)
   1198		len = stm->full_len;
   1199
   1200	copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
   1201	if (copied < 0) {
   1202		err = copied;
   1203		goto err_out;
   1204	}
   1205
   1206	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
   1207
   1208	stm->offset += copied;
   1209	stm->full_len -= copied;
   1210
   1211	/* We have no way to return MSG_EOR. If all the bytes have been
   1212	 * read we still leave the message in the receive socket buffer.
   1213	 * A subsequent recvmsg needs to be done to return MSG_EOR and
   1214	 * finish reading the message.
   1215	 */
   1216
   1217	release_sock(sk);
   1218
   1219	return copied;
   1220
   1221err_out:
   1222	release_sock(sk);
   1223
   1224	return err;
   1225}
   1226
   1227/* kcm sock lock held */
   1228static void kcm_recv_disable(struct kcm_sock *kcm)
   1229{
   1230	struct kcm_mux *mux = kcm->mux;
   1231
   1232	if (kcm->rx_disabled)
   1233		return;
   1234
   1235	spin_lock_bh(&mux->rx_lock);
   1236
   1237	kcm->rx_disabled = 1;
   1238
   1239	/* If a psock is reserved we'll do cleanup in unreserve */
   1240	if (!kcm->rx_psock) {
   1241		if (kcm->rx_wait) {
   1242			list_del(&kcm->wait_rx_list);
   1243			kcm->rx_wait = false;
   1244		}
   1245
   1246		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
   1247	}
   1248
   1249	spin_unlock_bh(&mux->rx_lock);
   1250}
   1251
   1252/* kcm sock lock held */
   1253static void kcm_recv_enable(struct kcm_sock *kcm)
   1254{
   1255	struct kcm_mux *mux = kcm->mux;
   1256
   1257	if (!kcm->rx_disabled)
   1258		return;
   1259
   1260	spin_lock_bh(&mux->rx_lock);
   1261
   1262	kcm->rx_disabled = 0;
   1263	kcm_rcv_ready(kcm);
   1264
   1265	spin_unlock_bh(&mux->rx_lock);
   1266}
   1267
   1268static int kcm_setsockopt(struct socket *sock, int level, int optname,
   1269			  sockptr_t optval, unsigned int optlen)
   1270{
   1271	struct kcm_sock *kcm = kcm_sk(sock->sk);
   1272	int val, valbool;
   1273	int err = 0;
   1274
   1275	if (level != SOL_KCM)
   1276		return -ENOPROTOOPT;
   1277
   1278	if (optlen < sizeof(int))
   1279		return -EINVAL;
   1280
   1281	if (copy_from_sockptr(&val, optval, sizeof(int)))
   1282		return -EFAULT;
   1283
   1284	valbool = val ? 1 : 0;
   1285
   1286	switch (optname) {
   1287	case KCM_RECV_DISABLE:
   1288		lock_sock(&kcm->sk);
   1289		if (valbool)
   1290			kcm_recv_disable(kcm);
   1291		else
   1292			kcm_recv_enable(kcm);
   1293		release_sock(&kcm->sk);
   1294		break;
   1295	default:
   1296		err = -ENOPROTOOPT;
   1297	}
   1298
   1299	return err;
   1300}
   1301
   1302static int kcm_getsockopt(struct socket *sock, int level, int optname,
   1303			  char __user *optval, int __user *optlen)
   1304{
   1305	struct kcm_sock *kcm = kcm_sk(sock->sk);
   1306	int val, len;
   1307
   1308	if (level != SOL_KCM)
   1309		return -ENOPROTOOPT;
   1310
   1311	if (get_user(len, optlen))
   1312		return -EFAULT;
   1313
   1314	len = min_t(unsigned int, len, sizeof(int));
   1315	if (len < 0)
   1316		return -EINVAL;
   1317
   1318	switch (optname) {
   1319	case KCM_RECV_DISABLE:
   1320		val = kcm->rx_disabled;
   1321		break;
   1322	default:
   1323		return -ENOPROTOOPT;
   1324	}
   1325
   1326	if (put_user(len, optlen))
   1327		return -EFAULT;
   1328	if (copy_to_user(optval, &val, len))
   1329		return -EFAULT;
   1330	return 0;
   1331}
   1332
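       /* Initialize a new KCM socket: mark it established for datagram_poll,
        * assign the lowest free index on the mux, and register it as ready to
        * receive.
        */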
   1333static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
   1334{
   1335	struct kcm_sock *tkcm;
   1336	struct list_head *head;
   1337	int index = 0;
   1338
   1339	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
   1340	 * we set sk_state, otherwise epoll_wait always returns right away with
   1341	 * EPOLLHUP
   1342	 */
   1343	kcm->sk.sk_state = TCP_ESTABLISHED;
   1344
   1345	/* Add to mux's kcm sockets list */
   1346	kcm->mux = mux;
   1347	spin_lock_bh(&mux->lock);
   1348
   1349	head = &mux->kcm_socks;
   1350	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
   1351		if (tkcm->index != index)
   1352			break;
   1353		head = &tkcm->kcm_sock_list;
   1354		index++;
   1355	}
   1356
   1357	list_add(&kcm->kcm_sock_list, head);
   1358	kcm->index = index;
   1359
   1360	mux->kcm_socks_cnt++;
   1361	spin_unlock_bh(&mux->lock);
   1362
   1363	INIT_WORK(&kcm->tx_work, kcm_tx_work);
   1364
   1365	spin_lock_bh(&mux->rx_lock);
   1366	kcm_rcv_ready(kcm);
   1367	spin_unlock_bh(&mux->rx_lock);
   1368}
   1369
   1370static int kcm_attach(struct socket *sock, struct socket *csock,
   1371		      struct bpf_prog *prog)
   1372{
   1373	struct kcm_sock *kcm = kcm_sk(sock->sk);
   1374	struct kcm_mux *mux = kcm->mux;
   1375	struct sock *csk;
   1376	struct kcm_psock *psock = NULL, *tpsock;
   1377	struct list_head *head;
   1378	int index = 0;
   1379	static const struct strp_callbacks cb = {
   1380		.rcv_msg = kcm_rcv_strparser,
   1381		.parse_msg = kcm_parse_func_strparser,
   1382		.read_sock_done = kcm_read_sock_done,
   1383	};
   1384	int err = 0;
   1385
   1386	csk = csock->sk;
   1387	if (!csk)
   1388		return -EINVAL;
   1389
   1390	lock_sock(csk);
   1391
   1392	/* Only allow TCP sockets to be attached for now */
   1393	if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
   1394	    csk->sk_protocol != IPPROTO_TCP) {
   1395		err = -EOPNOTSUPP;
   1396		goto out;
   1397	}
   1398
   1399	/* Don't allow listeners or closed sockets */
   1400	if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
   1401		err = -EOPNOTSUPP;
   1402		goto out;
   1403	}
   1404
   1405	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
   1406	if (!psock) {
   1407		err = -ENOMEM;
   1408		goto out;
   1409	}
   1410
   1411	psock->mux = mux;
   1412	psock->sk = csk;
   1413	psock->bpf_prog = prog;
   1414
   1415	err = strp_init(&psock->strp, csk, &cb);
   1416	if (err) {
   1417		kmem_cache_free(kcm_psockp, psock);
   1418		goto out;
   1419	}
   1420
   1421	write_lock_bh(&csk->sk_callback_lock);
   1422
   1423	/* Check if sk_user_data is already used by KCM or someone else.
   1424	 * Must be done under lock to prevent race conditions.
   1425	 */
   1426	if (csk->sk_user_data) {
   1427		write_unlock_bh(&csk->sk_callback_lock);
   1428		strp_stop(&psock->strp);
   1429		strp_done(&psock->strp);
   1430		kmem_cache_free(kcm_psockp, psock);
   1431		err = -EALREADY;
   1432		goto out;
   1433	}
   1434
   1435	psock->save_data_ready = csk->sk_data_ready;
   1436	psock->save_write_space = csk->sk_write_space;
   1437	psock->save_state_change = csk->sk_state_change;
   1438	csk->sk_user_data = psock;
   1439	csk->sk_data_ready = psock_data_ready;
   1440	csk->sk_write_space = psock_write_space;
   1441	csk->sk_state_change = psock_state_change;
   1442
   1443	write_unlock_bh(&csk->sk_callback_lock);
   1444
   1445	sock_hold(csk);
   1446
   1447	/* Finished initialization, now add the psock to the MUX. */
   1448	spin_lock_bh(&mux->lock);
   1449	head = &mux->psocks;
   1450	list_for_each_entry(tpsock, &mux->psocks, psock_list) {
   1451		if (tpsock->index != index)
   1452			break;
   1453		head = &tpsock->psock_list;
   1454		index++;
   1455	}
   1456
   1457	list_add(&psock->psock_list, head);
   1458	psock->index = index;
   1459
   1460	KCM_STATS_INCR(mux->stats.psock_attach);
   1461	mux->psocks_cnt++;
   1462	psock_now_avail(psock);
   1463	spin_unlock_bh(&mux->lock);
   1464
   1465	/* Schedule RX work in case there are already bytes queued */
   1466	strp_check_rcv(&psock->strp);
   1467
   1468out:
   1469	release_sock(csk);
   1470
   1471	return err;
   1472}
   1473
   1474static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
   1475{
   1476	struct socket *csock;
   1477	struct bpf_prog *prog;
   1478	int err;
   1479
   1480	csock = sockfd_lookup(info->fd, &err);
   1481	if (!csock)
   1482		return -ENOENT;
   1483
   1484	prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
   1485	if (IS_ERR(prog)) {
   1486		err = PTR_ERR(prog);
   1487		goto out;
   1488	}
   1489
   1490	err = kcm_attach(sock, csock, prog);
   1491	if (err) {
   1492		bpf_prog_put(prog);
   1493		goto out;
   1494	}
   1495
   1496	/* Keep reference on file also */
   1497
   1498	return 0;
   1499out:
   1500	sockfd_put(csock);
   1501	return err;
   1502}
   1503
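       /* Detach a psock from its lower TCP socket: restore the saved callbacks,
        * stop the strparser, and free the psock now or defer cleanup to tx work
        * if it is still reserved for transmit.
        */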
   1504static void kcm_unattach(struct kcm_psock *psock)
   1505{
   1506	struct sock *csk = psock->sk;
   1507	struct kcm_mux *mux = psock->mux;
   1508
   1509	lock_sock(csk);
   1510
   1511	/* Stop getting callbacks from TCP socket. After this there should
   1512	 * be no way to reserve a kcm for this psock.
   1513	 */
   1514	write_lock_bh(&csk->sk_callback_lock);
   1515	csk->sk_user_data = NULL;
   1516	csk->sk_data_ready = psock->save_data_ready;
   1517	csk->sk_write_space = psock->save_write_space;
   1518	csk->sk_state_change = psock->save_state_change;
   1519	strp_stop(&psock->strp);
   1520
   1521	if (WARN_ON(psock->rx_kcm)) {
   1522		write_unlock_bh(&csk->sk_callback_lock);
   1523		release_sock(csk);
   1524		return;
   1525	}
   1526
   1527	spin_lock_bh(&mux->rx_lock);
   1528
   1529	/* Stop receiver activities. After this point psock should not be
   1530	 * able to get onto ready list either through callbacks or work.
   1531	 */
   1532	if (psock->ready_rx_msg) {
   1533		list_del(&psock->psock_ready_list);
   1534		kfree_skb(psock->ready_rx_msg);
   1535		psock->ready_rx_msg = NULL;
   1536		KCM_STATS_INCR(mux->stats.rx_ready_drops);
   1537	}
   1538
   1539	spin_unlock_bh(&mux->rx_lock);
   1540
   1541	write_unlock_bh(&csk->sk_callback_lock);
   1542
   1543	/* Call strp_done without sock lock */
   1544	release_sock(csk);
   1545	strp_done(&psock->strp);
   1546	lock_sock(csk);
   1547
   1548	bpf_prog_put(psock->bpf_prog);
   1549
   1550	spin_lock_bh(&mux->lock);
   1551
   1552	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
   1553	save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);
   1554
   1555	KCM_STATS_INCR(mux->stats.psock_unattach);
   1556
   1557	if (psock->tx_kcm) {
   1558		/* psock was reserved.  Just mark it finished and we will clean
   1559		 * up in the kcm paths; we need the kcm lock, which cannot be
   1560		 * acquired here.
   1561		 */
   1562		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
   1563		spin_unlock_bh(&mux->lock);
   1564
   1565		/* We are unattaching a socket that is reserved. Abort the
   1566		 * socket since we may be out of sync in sending on it. We need
   1567		 * to do this without the mux lock.
   1568		 */
   1569		kcm_abort_tx_psock(psock, EPIPE, false);
   1570
   1571		spin_lock_bh(&mux->lock);
   1572		if (!psock->tx_kcm) {
   1573			/* psock was unreserved in the window where the mux was unlocked */
   1574			goto no_reserved;
   1575		}
   1576		psock->done = 1;
   1577
   1578		/* Commit done before queuing work to process it */
   1579		smp_mb();
   1580
   1581		/* Queue tx work to make sure psock->done is handled */
   1582		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
   1583		spin_unlock_bh(&mux->lock);
   1584	} else {
   1585no_reserved:
   1586		if (!psock->tx_stopped)
   1587			list_del(&psock->psock_avail_list);
   1588		list_del(&psock->psock_list);
   1589		mux->psocks_cnt--;
   1590		spin_unlock_bh(&mux->lock);
   1591
   1592		sock_put(csk);
   1593		fput(csk->sk_socket->file);
   1594		kmem_cache_free(kcm_psockp, psock);
   1595	}
   1596
   1597	release_sock(csk);
   1598}
   1599
   1600static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
   1601{
   1602	struct kcm_sock *kcm = kcm_sk(sock->sk);
   1603	struct kcm_mux *mux = kcm->mux;
   1604	struct kcm_psock *psock;
   1605	struct socket *csock;
   1606	struct sock *csk;
   1607	int err;
   1608
   1609	csock = sockfd_lookup(info->fd, &err);
   1610	if (!csock)
   1611		return -ENOENT;
   1612
   1613	csk = csock->sk;
   1614	if (!csk) {
   1615		err = -EINVAL;
   1616		goto out;
   1617	}
   1618
   1619	err = -ENOENT;
   1620
   1621	spin_lock_bh(&mux->lock);
   1622
   1623	list_for_each_entry(psock, &mux->psocks, psock_list) {
   1624		if (psock->sk != csk)
   1625			continue;
   1626
   1627		/* Found the matching psock */
   1628
   1629		if (psock->unattaching || WARN_ON(psock->done)) {
   1630			err = -EALREADY;
   1631			break;
   1632		}
   1633
   1634		psock->unattaching = 1;
   1635
   1636		spin_unlock_bh(&mux->lock);
   1637
   1638		/* Lower socket lock should already be held */
   1639		kcm_unattach(psock);
   1640
   1641		err = 0;
   1642		goto out;
   1643	}
   1644
   1645	spin_unlock_bh(&mux->lock);
   1646
   1647out:
   1648	sockfd_put(csock);
   1649	return err;
   1650}
   1651
   1652static struct proto kcm_proto = {
   1653	.name	= "KCM",
   1654	.owner	= THIS_MODULE,
   1655	.obj_size = sizeof(struct kcm_sock),
   1656};
   1657
   1658/* Clone a kcm socket. */
   1659static struct file *kcm_clone(struct socket *osock)
   1660{
   1661	struct socket *newsock;
   1662	struct sock *newsk;
   1663
   1664	newsock = sock_alloc();
   1665	if (!newsock)
   1666		return ERR_PTR(-ENFILE);
   1667
   1668	newsock->type = osock->type;
   1669	newsock->ops = osock->ops;
   1670
   1671	__module_get(newsock->ops->owner);
   1672
   1673	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
   1674			 &kcm_proto, false);
   1675	if (!newsk) {
   1676		sock_release(newsock);
   1677		return ERR_PTR(-ENOMEM);
   1678	}
   1679	sock_init_data(newsock, newsk);
   1680	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
   1681
   1682	return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
   1683}
   1684
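       /* ioctl() handler for SIOCKCMATTACH, SIOCKCMUNATTACH and SIOCKCMCLONE */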
   1685static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
   1686{
   1687	int err;
   1688
   1689	switch (cmd) {
   1690	case SIOCKCMATTACH: {
   1691		struct kcm_attach info;
   1692
   1693		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
   1694			return -EFAULT;
   1695
   1696		err = kcm_attach_ioctl(sock, &info);
   1697
   1698		break;
   1699	}
   1700	case SIOCKCMUNATTACH: {
   1701		struct kcm_unattach info;
   1702
   1703		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
   1704			return -EFAULT;
   1705
   1706		err = kcm_unattach_ioctl(sock, &info);
   1707
   1708		break;
   1709	}
   1710	case SIOCKCMCLONE: {
   1711		struct kcm_clone info;
   1712		struct file *file;
   1713
   1714		info.fd = get_unused_fd_flags(0);
   1715		if (unlikely(info.fd < 0))
   1716			return info.fd;
   1717
   1718		file = kcm_clone(sock);
   1719		if (IS_ERR(file)) {
   1720			put_unused_fd(info.fd);
   1721			return PTR_ERR(file);
   1722		}
   1723		if (copy_to_user((void __user *)arg, &info,
   1724				 sizeof(info))) {
   1725			put_unused_fd(info.fd);
   1726			fput(file);
   1727			return -EFAULT;
   1728		}
   1729		fd_install(info.fd, file);
   1730		err = 0;
   1731		break;
   1732	}
   1733	default:
   1734		err = -ENOIOCTLCMD;
   1735		break;
   1736	}
   1737
   1738	return err;
   1739}
   1740
   1741static void free_mux(struct rcu_head *rcu)
   1742{
   1743	struct kcm_mux *mux = container_of(rcu,
   1744	    struct kcm_mux, rcu);
   1745
   1746	kmem_cache_free(kcm_muxp, mux);
   1747}
   1748
   1749static void release_mux(struct kcm_mux *mux)
   1750{
   1751	struct kcm_net *knet = mux->knet;
   1752	struct kcm_psock *psock, *tmp_psock;
   1753
   1754	/* Release psocks */
   1755	list_for_each_entry_safe(psock, tmp_psock,
   1756				 &mux->psocks, psock_list) {
   1757		if (!WARN_ON(psock->unattaching))
   1758			kcm_unattach(psock);
   1759	}
   1760
   1761	if (WARN_ON(mux->psocks_cnt))
   1762		return;
   1763
   1764	__skb_queue_purge(&mux->rx_hold_queue);
   1765
   1766	mutex_lock(&knet->mutex);
   1767	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
   1768	aggregate_psock_stats(&mux->aggregate_psock_stats,
   1769			      &knet->aggregate_psock_stats);
   1770	aggregate_strp_stats(&mux->aggregate_strp_stats,
   1771			     &knet->aggregate_strp_stats);
   1772	list_del_rcu(&mux->kcm_mux_list);
   1773	knet->count--;
   1774	mutex_unlock(&knet->mutex);
   1775
   1776	call_rcu(&mux->rcu, free_mux);
   1777}
   1778
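       /* Final teardown of a KCM socket: requeue pending receive messages to
        * other KCM sockets on the mux, detach from the mux, and release the mux
        * if this was its last socket.
        */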
   1779static void kcm_done(struct kcm_sock *kcm)
   1780{
   1781	struct kcm_mux *mux = kcm->mux;
   1782	struct sock *sk = &kcm->sk;
   1783	int socks_cnt;
   1784
   1785	spin_lock_bh(&mux->rx_lock);
   1786	if (kcm->rx_psock) {
   1787		/* Cleanup in unreserve_rx_kcm */
   1788		WARN_ON(kcm->done);
   1789		kcm->rx_disabled = 1;
   1790		kcm->done = 1;
   1791		spin_unlock_bh(&mux->rx_lock);
   1792		return;
   1793	}
   1794
   1795	if (kcm->rx_wait) {
   1796		list_del(&kcm->wait_rx_list);
   1797		kcm->rx_wait = false;
   1798	}
   1799	/* Move any pending receive messages to other kcm sockets */
   1800	requeue_rx_msgs(mux, &sk->sk_receive_queue);
   1801
   1802	spin_unlock_bh(&mux->rx_lock);
   1803
   1804	if (WARN_ON(sk_rmem_alloc_get(sk)))
   1805		return;
   1806
   1807	/* Detach from MUX */
   1808	spin_lock_bh(&mux->lock);
   1809
   1810	list_del(&kcm->kcm_sock_list);
   1811	mux->kcm_socks_cnt--;
   1812	socks_cnt = mux->kcm_socks_cnt;
   1813
   1814	spin_unlock_bh(&mux->lock);
   1815
   1816	if (!socks_cnt) {
   1817		/* We are done with the mux now. */
   1818		release_mux(mux);
   1819	}
   1820
   1821	WARN_ON(kcm->rx_wait);
   1822
   1823	sock_put(&kcm->sk);
   1824}
   1825
   1826/* Called by kcm_release to close a KCM socket.
   1827 * If this is the last KCM socket on the MUX, destroy the MUX.
   1828 */
   1829static int kcm_release(struct socket *sock)
   1830{
   1831	struct sock *sk = sock->sk;
   1832	struct kcm_sock *kcm;
   1833	struct kcm_mux *mux;
   1834	struct kcm_psock *psock;
   1835
   1836	if (!sk)
   1837		return 0;
   1838
   1839	kcm = kcm_sk(sk);
   1840	mux = kcm->mux;
   1841
   1842	sock_orphan(sk);
   1843	kfree_skb(kcm->seq_skb);
   1844
   1845	lock_sock(sk);
   1846	/* Purge queue under lock to avoid race condition with tx_work trying
   1847	 * to act when queue is nonempty. If tx_work runs after this point
   1848	 * it will just return.
   1849	 */
   1850	__skb_queue_purge(&sk->sk_write_queue);
   1851
   1852	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
   1853	 * get a writespace callback. This prevents further work being queued
   1854	 * from the callback (unbinding the psock occurs after canceling work).
   1855	 */
   1856	kcm->tx_stopped = 1;
   1857
   1858	release_sock(sk);
   1859
   1860	spin_lock_bh(&mux->lock);
   1861	if (kcm->tx_wait) {
   1862		/* Take off the tx_wait list; after this point there should be no way
   1863		 * that a psock will be assigned to this kcm.
   1864		 */
   1865		list_del(&kcm->wait_psock_list);
   1866		kcm->tx_wait = false;
   1867	}
   1868	spin_unlock_bh(&mux->lock);
   1869
   1870	/* Cancel work. After this point there should be no outside references
   1871	 * to the kcm socket.
   1872	 */
   1873	cancel_work_sync(&kcm->tx_work);
   1874
   1875	lock_sock(sk);
   1876	psock = kcm->tx_psock;
   1877	if (psock) {
   1878		/* A psock was reserved, so we need to kill it since it
   1879		 * may already have some bytes queued from a message. We
   1880		 * need to do this after removing kcm from tx_wait list.
   1881		 */
   1882		kcm_abort_tx_psock(psock, EPIPE, false);
   1883		unreserve_psock(kcm);
   1884	}
   1885	release_sock(sk);
   1886
   1887	WARN_ON(kcm->tx_wait);
   1888	WARN_ON(kcm->tx_psock);
   1889
   1890	sock->sk = NULL;
   1891
   1892	kcm_done(kcm);
   1893
   1894	return 0;
   1895}
   1896
   1897static const struct proto_ops kcm_dgram_ops = {
   1898	.family =	PF_KCM,
   1899	.owner =	THIS_MODULE,
   1900	.release =	kcm_release,
   1901	.bind =		sock_no_bind,
   1902	.connect =	sock_no_connect,
   1903	.socketpair =	sock_no_socketpair,
   1904	.accept =	sock_no_accept,
   1905	.getname =	sock_no_getname,
   1906	.poll =		datagram_poll,
   1907	.ioctl =	kcm_ioctl,
   1908	.listen =	sock_no_listen,
   1909	.shutdown =	sock_no_shutdown,
   1910	.setsockopt =	kcm_setsockopt,
   1911	.getsockopt =	kcm_getsockopt,
   1912	.sendmsg =	kcm_sendmsg,
   1913	.recvmsg =	kcm_recvmsg,
   1914	.mmap =		sock_no_mmap,
   1915	.sendpage =	kcm_sendpage,
   1916};
   1917
   1918static const struct proto_ops kcm_seqpacket_ops = {
   1919	.family =	PF_KCM,
   1920	.owner =	THIS_MODULE,
   1921	.release =	kcm_release,
   1922	.bind =		sock_no_bind,
   1923	.connect =	sock_no_connect,
   1924	.socketpair =	sock_no_socketpair,
   1925	.accept =	sock_no_accept,
   1926	.getname =	sock_no_getname,
   1927	.poll =		datagram_poll,
   1928	.ioctl =	kcm_ioctl,
   1929	.listen =	sock_no_listen,
   1930	.shutdown =	sock_no_shutdown,
   1931	.setsockopt =	kcm_setsockopt,
   1932	.getsockopt =	kcm_getsockopt,
   1933	.sendmsg =	kcm_sendmsg,
   1934	.recvmsg =	kcm_recvmsg,
   1935	.mmap =		sock_no_mmap,
   1936	.sendpage =	kcm_sendpage,
   1937	.splice_read =	kcm_splice_read,
   1938};
   1939
   1940/* Create a kcm socket: pick proto_ops by socket type and allocate a new mux */
   1941static int kcm_create(struct net *net, struct socket *sock,
   1942		      int protocol, int kern)
   1943{
   1944	struct kcm_net *knet = net_generic(net, kcm_net_id);
   1945	struct sock *sk;
   1946	struct kcm_mux *mux;
   1947
   1948	switch (sock->type) {
   1949	case SOCK_DGRAM:
   1950		sock->ops = &kcm_dgram_ops;
   1951		break;
   1952	case SOCK_SEQPACKET:
   1953		sock->ops = &kcm_seqpacket_ops;
   1954		break;
   1955	default:
   1956		return -ESOCKTNOSUPPORT;
   1957	}
   1958
   1959	if (protocol != KCMPROTO_CONNECTED)
   1960		return -EPROTONOSUPPORT;
   1961
   1962	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
   1963	if (!sk)
   1964		return -ENOMEM;
   1965
   1966	/* Allocate a kcm mux, shared between KCM sockets */
   1967	mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
   1968	if (!mux) {
   1969		sk_free(sk);
   1970		return -ENOMEM;
   1971	}
   1972
   1973	spin_lock_init(&mux->lock);
   1974	spin_lock_init(&mux->rx_lock);
   1975	INIT_LIST_HEAD(&mux->kcm_socks);
   1976	INIT_LIST_HEAD(&mux->kcm_rx_waiters);
   1977	INIT_LIST_HEAD(&mux->kcm_tx_waiters);
   1978
   1979	INIT_LIST_HEAD(&mux->psocks);
   1980	INIT_LIST_HEAD(&mux->psocks_ready);
   1981	INIT_LIST_HEAD(&mux->psocks_avail);
   1982
   1983	mux->knet = knet;
   1984
   1985	/* Add new MUX to list */
   1986	mutex_lock(&knet->mutex);
   1987	list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
   1988	knet->count++;
   1989	mutex_unlock(&knet->mutex);
   1990
   1991	skb_queue_head_init(&mux->rx_hold_queue);
   1992
   1993	/* Init KCM socket */
   1994	sock_init_data(sock, sk);
   1995	init_kcm_sock(kcm_sk(sk), mux);
   1996
   1997	return 0;
   1998}
   1999
   2000static const struct net_proto_family kcm_family_ops = {
   2001	.family = PF_KCM,
   2002	.create = kcm_create,
   2003	.owner  = THIS_MODULE,
   2004};
   2005
   2006static __net_init int kcm_init_net(struct net *net)
   2007{
   2008	struct kcm_net *knet = net_generic(net, kcm_net_id);
   2009
   2010	INIT_LIST_HEAD_RCU(&knet->mux_list);
   2011	mutex_init(&knet->mutex);
   2012
   2013	return 0;
   2014}
   2015
   2016static __net_exit void kcm_exit_net(struct net *net)
   2017{
   2018	struct kcm_net *knet = net_generic(net, kcm_net_id);
   2019
   2020	/* All KCM sockets should be closed at this point, which should mean
   2021	 * that all multiplexors and psocks have been destroyed.
   2022	 */
   2023	WARN_ON(!list_empty(&knet->mux_list));
   2024}
   2025
   2026static struct pernet_operations kcm_net_ops = {
   2027	.init = kcm_init_net,
   2028	.exit = kcm_exit_net,
   2029	.id   = &kcm_net_id,
   2030	.size = sizeof(struct kcm_net),
   2031};
   2032
   2033static int __init kcm_init(void)
   2034{
   2035	int err = -ENOMEM;
   2036
   2037	kcm_muxp = kmem_cache_create("kcm_mux_cache",
   2038				     sizeof(struct kcm_mux), 0,
   2039				     SLAB_HWCACHE_ALIGN, NULL);
   2040	if (!kcm_muxp)
   2041		goto fail;
   2042
   2043	kcm_psockp = kmem_cache_create("kcm_psock_cache",
   2044				       sizeof(struct kcm_psock), 0,
   2045					SLAB_HWCACHE_ALIGN, NULL);
   2046	if (!kcm_psockp)
   2047		goto fail;
   2048
   2049	kcm_wq = create_singlethread_workqueue("kkcmd");
   2050	if (!kcm_wq)
   2051		goto fail;
   2052
   2053	err = proto_register(&kcm_proto, 1);
   2054	if (err)
   2055		goto fail;
   2056
   2057	err = register_pernet_device(&kcm_net_ops);
   2058	if (err)
   2059		goto net_ops_fail;
   2060
   2061	err = sock_register(&kcm_family_ops);
   2062	if (err)
   2063		goto sock_register_fail;
   2064
   2065	err = kcm_proc_init();
   2066	if (err)
   2067		goto proc_init_fail;
   2068
   2069	return 0;
   2070
   2071proc_init_fail:
   2072	sock_unregister(PF_KCM);
   2073
   2074sock_register_fail:
   2075	unregister_pernet_device(&kcm_net_ops);
   2076
   2077net_ops_fail:
   2078	proto_unregister(&kcm_proto);
   2079
   2080fail:
   2081	kmem_cache_destroy(kcm_muxp);
   2082	kmem_cache_destroy(kcm_psockp);
   2083
   2084	if (kcm_wq)
   2085		destroy_workqueue(kcm_wq);
   2086
   2087	return err;
   2088}
   2089
   2090static void __exit kcm_exit(void)
   2091{
   2092	kcm_proc_exit();
   2093	sock_unregister(PF_KCM);
   2094	unregister_pernet_device(&kcm_net_ops);
   2095	proto_unregister(&kcm_proto);
   2096	destroy_workqueue(kcm_wq);
   2097
   2098	kmem_cache_destroy(kcm_muxp);
   2099	kmem_cache_destroy(kcm_psockp);
   2100}
   2101
   2102module_init(kcm_init);
   2103module_exit(kcm_exit);
   2104
   2105MODULE_LICENSE("GPL");
   2106MODULE_ALIAS_NETPROTO(PF_KCM);