cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xfrm_user.c (86325B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/* xfrm_user.c: User interface to configure xfrm engine.
      3 *
      4 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
      5 *
      6 * Changes:
      7 *	Mitsuru KANDA @USAGI
      8 * 	Kazunori MIYAZAWA @USAGI
      9 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
     10 * 		IPv6 support
     11 *
     12 */
     13
     14#include <linux/compat.h>
     15#include <linux/crypto.h>
     16#include <linux/module.h>
     17#include <linux/kernel.h>
     18#include <linux/types.h>
     19#include <linux/slab.h>
     20#include <linux/socket.h>
     21#include <linux/string.h>
     22#include <linux/net.h>
     23#include <linux/skbuff.h>
     24#include <linux/pfkeyv2.h>
     25#include <linux/ipsec.h>
     26#include <linux/init.h>
     27#include <linux/security.h>
     28#include <net/sock.h>
     29#include <net/xfrm.h>
     30#include <net/netlink.h>
     31#include <net/ah.h>
     32#include <linux/uaccess.h>
     33#if IS_ENABLED(CONFIG_IPV6)
     34#include <linux/in6.h>
     35#endif
     36#include <asm/unaligned.h>
     37
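/* Validate a single XFRMA_ALG_{AUTH,CRYPT,COMP} attribute: the netlink
 * payload must be large enough for the declared key length, the type must
 * be one of the three algorithm attributes, and the algorithm name is
 * forcibly NUL-terminated.
 */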
     38static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
     39{
     40	struct nlattr *rt = attrs[type];
     41	struct xfrm_algo *algp;
     42
     43	if (!rt)
     44		return 0;
     45
     46	algp = nla_data(rt);
     47	if (nla_len(rt) < (int)xfrm_alg_len(algp))
     48		return -EINVAL;
     49
     50	switch (type) {
     51	case XFRMA_ALG_AUTH:
     52	case XFRMA_ALG_CRYPT:
     53	case XFRMA_ALG_COMP:
     54		break;
     55
     56	default:
     57		return -EINVAL;
     58	}
     59
     60	algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
     61	return 0;
     62}
     63
     64static int verify_auth_trunc(struct nlattr **attrs)
     65{
     66	struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
     67	struct xfrm_algo_auth *algp;
     68
     69	if (!rt)
     70		return 0;
     71
     72	algp = nla_data(rt);
     73	if (nla_len(rt) < (int)xfrm_alg_auth_len(algp))
     74		return -EINVAL;
     75
     76	algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
     77	return 0;
     78}
     79
     80static int verify_aead(struct nlattr **attrs)
     81{
     82	struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
     83	struct xfrm_algo_aead *algp;
     84
     85	if (!rt)
     86		return 0;
     87
     88	algp = nla_data(rt);
     89	if (nla_len(rt) < (int)aead_len(algp))
     90		return -EINVAL;
     91
     92	algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
     93	return 0;
     94}
     95
     96static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
     97			   xfrm_address_t **addrp)
     98{
     99	struct nlattr *rt = attrs[type];
    100
    101	if (rt && addrp)
    102		*addrp = nla_data(rt);
    103}
    104
    105static inline int verify_sec_ctx_len(struct nlattr **attrs)
    106{
    107	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
    108	struct xfrm_user_sec_ctx *uctx;
    109
    110	if (!rt)
    111		return 0;
    112
    113	uctx = nla_data(rt);
    114	if (uctx->len > nla_len(rt) ||
    115	    uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
    116		return -EINVAL;
    117
    118	return 0;
    119}
    120
    121static inline int verify_replay(struct xfrm_usersa_info *p,
    122				struct nlattr **attrs)
    123{
    124	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
    125	struct xfrm_replay_state_esn *rs;
    126
    127	if (!rt)
    128		return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;
    129
    130	rs = nla_data(rt);
    131
    132	if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
    133		return -EINVAL;
    134
    135	if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
    136	    nla_len(rt) != sizeof(*rs))
    137		return -EINVAL;
    138
    139	/* Only ESP and AH support the ESN feature. */
    140	if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
    141		return -EINVAL;
    142
    143	if (p->replay_window != 0)
    144		return -EINVAL;
    145
    146	return 0;
    147}
    148
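/* Validate an XFRM_MSG_NEWSA/XFRM_MSG_UPDSA request: address family,
 * selector prefix lengths, the attribute combination permitted for the
 * given IPsec protocol, the individual algorithm/security-context/replay
 * attributes, and the encapsulation mode.
 */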
    149static int verify_newsa_info(struct xfrm_usersa_info *p,
    150			     struct nlattr **attrs)
    151{
    152	int err;
    153
    154	err = -EINVAL;
    155	switch (p->family) {
    156	case AF_INET:
    157		break;
    158
    159	case AF_INET6:
    160#if IS_ENABLED(CONFIG_IPV6)
    161		break;
    162#else
    163		err = -EAFNOSUPPORT;
    164		goto out;
    165#endif
    166
    167	default:
    168		goto out;
    169	}
    170
    171	switch (p->sel.family) {
    172	case AF_UNSPEC:
    173		break;
    174
    175	case AF_INET:
    176		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
    177			goto out;
    178
    179		break;
    180
    181	case AF_INET6:
    182#if IS_ENABLED(CONFIG_IPV6)
    183		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
    184			goto out;
    185
    186		break;
    187#else
    188		err = -EAFNOSUPPORT;
    189		goto out;
    190#endif
    191
    192	default:
    193		goto out;
    194	}
    195
    196	err = -EINVAL;
    197	switch (p->id.proto) {
    198	case IPPROTO_AH:
    199		if ((!attrs[XFRMA_ALG_AUTH]	&&
    200		     !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
    201		    attrs[XFRMA_ALG_AEAD]	||
    202		    attrs[XFRMA_ALG_CRYPT]	||
    203		    attrs[XFRMA_ALG_COMP]	||
    204		    attrs[XFRMA_TFCPAD])
    205			goto out;
    206		break;
    207
    208	case IPPROTO_ESP:
    209		if (attrs[XFRMA_ALG_COMP])
    210			goto out;
    211		if (!attrs[XFRMA_ALG_AUTH] &&
    212		    !attrs[XFRMA_ALG_AUTH_TRUNC] &&
    213		    !attrs[XFRMA_ALG_CRYPT] &&
    214		    !attrs[XFRMA_ALG_AEAD])
    215			goto out;
    216		if ((attrs[XFRMA_ALG_AUTH] ||
    217		     attrs[XFRMA_ALG_AUTH_TRUNC] ||
    218		     attrs[XFRMA_ALG_CRYPT]) &&
    219		    attrs[XFRMA_ALG_AEAD])
    220			goto out;
    221		if (attrs[XFRMA_TFCPAD] &&
    222		    p->mode != XFRM_MODE_TUNNEL)
    223			goto out;
    224		break;
    225
    226	case IPPROTO_COMP:
    227		if (!attrs[XFRMA_ALG_COMP]	||
    228		    attrs[XFRMA_ALG_AEAD]	||
    229		    attrs[XFRMA_ALG_AUTH]	||
    230		    attrs[XFRMA_ALG_AUTH_TRUNC]	||
    231		    attrs[XFRMA_ALG_CRYPT]	||
    232		    attrs[XFRMA_TFCPAD]		||
    233		    (ntohl(p->id.spi) >= 0x10000))
    234			goto out;
    235		break;
    236
    237#if IS_ENABLED(CONFIG_IPV6)
    238	case IPPROTO_DSTOPTS:
    239	case IPPROTO_ROUTING:
    240		if (attrs[XFRMA_ALG_COMP]	||
    241		    attrs[XFRMA_ALG_AUTH]	||
    242		    attrs[XFRMA_ALG_AUTH_TRUNC]	||
    243		    attrs[XFRMA_ALG_AEAD]	||
    244		    attrs[XFRMA_ALG_CRYPT]	||
    245		    attrs[XFRMA_ENCAP]		||
    246		    attrs[XFRMA_SEC_CTX]	||
    247		    attrs[XFRMA_TFCPAD]		||
    248		    !attrs[XFRMA_COADDR])
    249			goto out;
    250		break;
    251#endif
    252
    253	default:
    254		goto out;
    255	}
    256
    257	if ((err = verify_aead(attrs)))
    258		goto out;
    259	if ((err = verify_auth_trunc(attrs)))
    260		goto out;
    261	if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
    262		goto out;
    263	if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
    264		goto out;
    265	if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
    266		goto out;
    267	if ((err = verify_sec_ctx_len(attrs)))
    268		goto out;
    269	if ((err = verify_replay(p, attrs)))
    270		goto out;
    271
    272	err = -EINVAL;
    273	switch (p->mode) {
    274	case XFRM_MODE_TRANSPORT:
    275	case XFRM_MODE_TUNNEL:
    276	case XFRM_MODE_ROUTEOPTIMIZATION:
    277	case XFRM_MODE_BEET:
    278		break;
    279
    280	default:
    281		goto out;
    282	}
    283
    284	err = 0;
    285
    286	if (attrs[XFRMA_MTIMER_THRESH])
    287		if (!attrs[XFRMA_ENCAP])
    288			err = -EINVAL;
    289
    290out:
    291	return err;
    292}
    293
    294static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
    295			   struct xfrm_algo_desc *(*get_byname)(const char *, int),
    296			   struct nlattr *rta)
    297{
    298	struct xfrm_algo *p, *ualg;
    299	struct xfrm_algo_desc *algo;
    300
    301	if (!rta)
    302		return 0;
    303
    304	ualg = nla_data(rta);
    305
    306	algo = get_byname(ualg->alg_name, 1);
    307	if (!algo)
    308		return -ENOSYS;
    309	*props = algo->desc.sadb_alg_id;
    310
    311	p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
    312	if (!p)
    313		return -ENOMEM;
    314
    315	strcpy(p->alg_name, algo->name);
    316	*algpp = p;
    317	return 0;
    318}
    319
    320static int attach_crypt(struct xfrm_state *x, struct nlattr *rta)
    321{
    322	struct xfrm_algo *p, *ualg;
    323	struct xfrm_algo_desc *algo;
    324
    325	if (!rta)
    326		return 0;
    327
    328	ualg = nla_data(rta);
    329
    330	algo = xfrm_ealg_get_byname(ualg->alg_name, 1);
    331	if (!algo)
    332		return -ENOSYS;
    333	x->props.ealgo = algo->desc.sadb_alg_id;
    334
    335	p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
    336	if (!p)
    337		return -ENOMEM;
    338
    339	strcpy(p->alg_name, algo->name);
    340	x->ealg = p;
    341	x->geniv = algo->uinfo.encr.geniv;
    342	return 0;
    343}
    344
    345static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
    346		       struct nlattr *rta)
    347{
    348	struct xfrm_algo *ualg;
    349	struct xfrm_algo_auth *p;
    350	struct xfrm_algo_desc *algo;
    351
    352	if (!rta)
    353		return 0;
    354
    355	ualg = nla_data(rta);
    356
    357	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
    358	if (!algo)
    359		return -ENOSYS;
    360	*props = algo->desc.sadb_alg_id;
    361
    362	p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
    363	if (!p)
    364		return -ENOMEM;
    365
    366	strcpy(p->alg_name, algo->name);
    367	p->alg_key_len = ualg->alg_key_len;
    368	p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
    369	memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
    370
    371	*algpp = p;
    372	return 0;
    373}
    374
    375static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
    376			     struct nlattr *rta)
    377{
    378	struct xfrm_algo_auth *p, *ualg;
    379	struct xfrm_algo_desc *algo;
    380
    381	if (!rta)
    382		return 0;
    383
    384	ualg = nla_data(rta);
    385
    386	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
    387	if (!algo)
    388		return -ENOSYS;
    389	if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
    390		return -EINVAL;
    391	*props = algo->desc.sadb_alg_id;
    392
    393	p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
    394	if (!p)
    395		return -ENOMEM;
    396
    397	strcpy(p->alg_name, algo->name);
    398	if (!p->alg_trunc_len)
    399		p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
    400
    401	*algpp = p;
    402	return 0;
    403}
    404
    405static int attach_aead(struct xfrm_state *x, struct nlattr *rta)
    406{
    407	struct xfrm_algo_aead *p, *ualg;
    408	struct xfrm_algo_desc *algo;
    409
    410	if (!rta)
    411		return 0;
    412
    413	ualg = nla_data(rta);
    414
    415	algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
    416	if (!algo)
    417		return -ENOSYS;
    418	x->props.ealgo = algo->desc.sadb_alg_id;
    419
    420	p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
    421	if (!p)
    422		return -ENOMEM;
    423
    424	strcpy(p->alg_name, algo->name);
    425	x->aead = p;
    426	x->geniv = algo->uinfo.aead.geniv;
    427	return 0;
    428}
    429
    430static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
    431					 struct nlattr *rp)
    432{
    433	struct xfrm_replay_state_esn *up;
    434	unsigned int ulen;
    435
    436	if (!replay_esn || !rp)
    437		return 0;
    438
    439	up = nla_data(rp);
    440	ulen = xfrm_replay_state_esn_len(up);
    441
    442	/* Check the overall length and the internal bitmap length to avoid
    443	 * potential overflow. */
    444	if (nla_len(rp) < (int)ulen ||
    445	    xfrm_replay_state_esn_len(replay_esn) != ulen ||
    446	    replay_esn->bmp_len != up->bmp_len)
    447		return -EINVAL;
    448
    449	if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
    450		return -EINVAL;
    451
    452	return 0;
    453}
    454
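/* Allocate the kernel's replay_esn and preplay_esn buffers from the
 * user-supplied XFRMA_REPLAY_ESN_VAL attribute, sized by the requested
 * bitmap length; the full user structure is copied only when the
 * attribute actually carries it, otherwise just the header.
 */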
    455static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
    456				       struct xfrm_replay_state_esn **preplay_esn,
    457				       struct nlattr *rta)
    458{
    459	struct xfrm_replay_state_esn *p, *pp, *up;
    460	unsigned int klen, ulen;
    461
    462	if (!rta)
    463		return 0;
    464
    465	up = nla_data(rta);
    466	klen = xfrm_replay_state_esn_len(up);
    467	ulen = nla_len(rta) >= (int)klen ? klen : sizeof(*up);
    468
    469	p = kzalloc(klen, GFP_KERNEL);
    470	if (!p)
    471		return -ENOMEM;
    472
    473	pp = kzalloc(klen, GFP_KERNEL);
    474	if (!pp) {
    475		kfree(p);
    476		return -ENOMEM;
    477	}
    478
    479	memcpy(p, up, ulen);
    480	memcpy(pp, up, ulen);
    481
    482	*replay_esn = p;
    483	*preplay_esn = pp;
    484
    485	return 0;
    486}
    487
    488static inline unsigned int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
    489{
    490	unsigned int len = 0;
    491
    492	if (xfrm_ctx) {
    493		len += sizeof(struct xfrm_user_sec_ctx);
    494		len += xfrm_ctx->ctx_len;
    495	}
    496	return len;
    497}
    498
    499static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
    500{
    501	memcpy(&x->id, &p->id, sizeof(x->id));
    502	memcpy(&x->sel, &p->sel, sizeof(x->sel));
    503	memcpy(&x->lft, &p->lft, sizeof(x->lft));
    504	x->props.mode = p->mode;
    505	x->props.replay_window = min_t(unsigned int, p->replay_window,
    506					sizeof(x->replay.bitmap) * 8);
    507	x->props.reqid = p->reqid;
    508	x->props.family = p->family;
    509	memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
    510	x->props.flags = p->flags;
    511
    512	if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
    513		x->sel.family = p->family;
    514}
    515
    516/*
    517 * Someday, when PF_KEY also has support, this code could be made
    518 * shareable and moved to xfrm_state.c. - JHS
    519 *
    520 */
    521static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
    522				  int update_esn)
    523{
    524	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
    525	struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
    526	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
    527	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
    528	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
    529	struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH];
    530
    531	if (re) {
    532		struct xfrm_replay_state_esn *replay_esn;
    533		replay_esn = nla_data(re);
    534		memcpy(x->replay_esn, replay_esn,
    535		       xfrm_replay_state_esn_len(replay_esn));
    536		memcpy(x->preplay_esn, replay_esn,
    537		       xfrm_replay_state_esn_len(replay_esn));
    538	}
    539
    540	if (rp) {
    541		struct xfrm_replay_state *replay;
    542		replay = nla_data(rp);
    543		memcpy(&x->replay, replay, sizeof(*replay));
    544		memcpy(&x->preplay, replay, sizeof(*replay));
    545	}
    546
    547	if (lt) {
    548		struct xfrm_lifetime_cur *ltime;
    549		ltime = nla_data(lt);
    550		x->curlft.bytes = ltime->bytes;
    551		x->curlft.packets = ltime->packets;
    552		x->curlft.add_time = ltime->add_time;
    553		x->curlft.use_time = ltime->use_time;
    554	}
    555
    556	if (et)
    557		x->replay_maxage = nla_get_u32(et);
    558
    559	if (rt)
    560		x->replay_maxdiff = nla_get_u32(rt);
    561
    562	if (mt)
    563		x->mapping_maxage = nla_get_u32(mt);
    564}
    565
    566static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m)
    567{
    568	if (attrs[XFRMA_SET_MARK]) {
    569		m->v = nla_get_u32(attrs[XFRMA_SET_MARK]);
    570		if (attrs[XFRMA_SET_MARK_MASK])
    571			m->m = nla_get_u32(attrs[XFRMA_SET_MARK_MASK]);
    572		else
    573			m->m = 0xffffffff;
    574	} else {
    575		m->v = m->m = 0;
    576	}
    577}
    578
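/* Allocate an xfrm_state and populate it from the userspace SA info and
 * attributes: encapsulation, care-of address, algorithms, marks, replay
 * state, security context and optional hardware offload.  On failure the
 * half-constructed state is released and *errp carries the error code.
 */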
    579static struct xfrm_state *xfrm_state_construct(struct net *net,
    580					       struct xfrm_usersa_info *p,
    581					       struct nlattr **attrs,
    582					       int *errp)
    583{
    584	struct xfrm_state *x = xfrm_state_alloc(net);
    585	int err = -ENOMEM;
    586
    587	if (!x)
    588		goto error_no_put;
    589
    590	copy_from_user_state(x, p);
    591
    592	if (attrs[XFRMA_ENCAP]) {
    593		x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
    594				   sizeof(*x->encap), GFP_KERNEL);
    595		if (x->encap == NULL)
    596			goto error;
    597	}
    598
    599	if (attrs[XFRMA_COADDR]) {
    600		x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
    601				    sizeof(*x->coaddr), GFP_KERNEL);
    602		if (x->coaddr == NULL)
    603			goto error;
    604	}
    605
    606	if (attrs[XFRMA_SA_EXTRA_FLAGS])
    607		x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);
    608
    609	if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD])))
    610		goto error;
    611	if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
    612				     attrs[XFRMA_ALG_AUTH_TRUNC])))
    613		goto error;
    614	if (!x->props.aalgo) {
    615		if ((err = attach_auth(&x->aalg, &x->props.aalgo,
    616				       attrs[XFRMA_ALG_AUTH])))
    617			goto error;
    618	}
    619	if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT])))
    620		goto error;
    621	if ((err = attach_one_algo(&x->calg, &x->props.calgo,
    622				   xfrm_calg_get_byname,
    623				   attrs[XFRMA_ALG_COMP])))
    624		goto error;
    625
    626	if (attrs[XFRMA_TFCPAD])
    627		x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
    628
    629	xfrm_mark_get(attrs, &x->mark);
    630
    631	xfrm_smark_init(attrs, &x->props.smark);
    632
    633	if (attrs[XFRMA_IF_ID])
    634		x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
    635
    636	err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]);
    637	if (err)
    638		goto error;
    639
    640	if (attrs[XFRMA_SEC_CTX]) {
    641		err = security_xfrm_state_alloc(x,
    642						nla_data(attrs[XFRMA_SEC_CTX]));
    643		if (err)
    644			goto error;
    645	}
    646
    647	if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
    648					       attrs[XFRMA_REPLAY_ESN_VAL])))
    649		goto error;
    650
    651	x->km.seq = p->seq;
    652	x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
    653	/* sysctl_xfrm_aevent_etime is in 100ms units */
    654	x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;
    655
    656	if ((err = xfrm_init_replay(x)))
    657		goto error;
    658
    659	/* override default values from above */
    660	xfrm_update_ae_params(x, attrs, 0);
    661
    662	/* configure the hardware if offload is requested */
    663	if (attrs[XFRMA_OFFLOAD_DEV]) {
    664		err = xfrm_dev_state_add(net, x,
    665					 nla_data(attrs[XFRMA_OFFLOAD_DEV]));
    666		if (err)
    667			goto error;
    668	}
    669
    670	return x;
    671
    672error:
    673	x->km.state = XFRM_STATE_DEAD;
    674	xfrm_state_put(x);
    675error_no_put:
    676	*errp = err;
    677	return NULL;
    678}
    679
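/* XFRM_MSG_NEWSA/XFRM_MSG_UPDSA handler: verify and construct the state,
 * add it to or update it in the SAD, audit the result and notify km
 * listeners.
 */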
    680static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
    681		struct nlattr **attrs)
    682{
    683	struct net *net = sock_net(skb->sk);
    684	struct xfrm_usersa_info *p = nlmsg_data(nlh);
    685	struct xfrm_state *x;
    686	int err;
    687	struct km_event c;
    688
    689	err = verify_newsa_info(p, attrs);
    690	if (err)
    691		return err;
    692
    693	x = xfrm_state_construct(net, p, attrs, &err);
    694	if (!x)
    695		return err;
    696
    697	xfrm_state_hold(x);
    698	if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
    699		err = xfrm_state_add(x);
    700	else
    701		err = xfrm_state_update(x);
    702
    703	xfrm_audit_state_add(x, err ? 0 : 1, true);
    704
    705	if (err < 0) {
    706		x->km.state = XFRM_STATE_DEAD;
    707		xfrm_dev_state_delete(x);
    708		__xfrm_state_put(x);
    709		goto out;
    710	}
    711
    712	if (x->km.state == XFRM_STATE_VOID)
    713		x->km.state = XFRM_STATE_VALID;
    714
    715	c.seq = nlh->nlmsg_seq;
    716	c.portid = nlh->nlmsg_pid;
    717	c.event = nlh->nlmsg_type;
    718
    719	km_state_notify(x, &c);
    720out:
    721	xfrm_state_put(x);
    722	return err;
    723}
    724
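/* Resolve the state referenced by an xfrm_usersa_id: by (mark, daddr,
 * SPI, proto) for ESP, AH and IPcomp, otherwise by source and destination
 * address (XFRMA_SRCADDR is then required).
 */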
    725static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
    726						 struct xfrm_usersa_id *p,
    727						 struct nlattr **attrs,
    728						 int *errp)
    729{
    730	struct xfrm_state *x = NULL;
    731	struct xfrm_mark m;
    732	int err;
    733	u32 mark = xfrm_mark_get(attrs, &m);
    734
    735	if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
    736		err = -ESRCH;
    737		x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
    738	} else {
    739		xfrm_address_t *saddr = NULL;
    740
    741		verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
    742		if (!saddr) {
    743			err = -EINVAL;
    744			goto out;
    745		}
    746
    747		err = -ESRCH;
    748		x = xfrm_state_lookup_byaddr(net, mark,
    749					     &p->daddr, saddr,
    750					     p->proto, p->family);
    751	}
    752
    753 out:
    754	if (!x && errp)
    755		*errp = err;
    756	return x;
    757}
    758
    759static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
    760		struct nlattr **attrs)
    761{
    762	struct net *net = sock_net(skb->sk);
    763	struct xfrm_state *x;
    764	int err = -ESRCH;
    765	struct km_event c;
    766	struct xfrm_usersa_id *p = nlmsg_data(nlh);
    767
    768	x = xfrm_user_state_lookup(net, p, attrs, &err);
    769	if (x == NULL)
    770		return err;
    771
    772	if ((err = security_xfrm_state_delete(x)) != 0)
    773		goto out;
    774
    775	if (xfrm_state_kern(x)) {
    776		err = -EPERM;
    777		goto out;
    778	}
    779
    780	err = xfrm_state_delete(x);
    781
    782	if (err < 0)
    783		goto out;
    784
    785	c.seq = nlh->nlmsg_seq;
    786	c.portid = nlh->nlmsg_pid;
    787	c.event = nlh->nlmsg_type;
    788	km_state_notify(x, &c);
    789
    790out:
    791	xfrm_audit_state_delete(x, err ? 0 : 1, true);
    792	xfrm_state_put(x);
    793	return err;
    794}
    795
    796static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
    797{
    798	memset(p, 0, sizeof(*p));
    799	memcpy(&p->id, &x->id, sizeof(p->id));
    800	memcpy(&p->sel, &x->sel, sizeof(p->sel));
    801	memcpy(&p->lft, &x->lft, sizeof(p->lft));
    802	memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
    803	put_unaligned(x->stats.replay_window, &p->stats.replay_window);
    804	put_unaligned(x->stats.replay, &p->stats.replay);
    805	put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed);
    806	memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
    807	p->mode = x->props.mode;
    808	p->replay_window = x->props.replay_window;
    809	p->reqid = x->props.reqid;
    810	p->family = x->props.family;
    811	p->flags = x->props.flags;
    812	p->seq = x->km.seq;
    813}
    814
    815struct xfrm_dump_info {
    816	struct sk_buff *in_skb;
    817	struct sk_buff *out_skb;
    818	u32 nlmsg_seq;
    819	u16 nlmsg_flags;
    820};
    821
    822static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
    823{
    824	struct xfrm_user_sec_ctx *uctx;
    825	struct nlattr *attr;
    826	int ctx_size = sizeof(*uctx) + s->ctx_len;
    827
    828	attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
    829	if (attr == NULL)
    830		return -EMSGSIZE;
    831
    832	uctx = nla_data(attr);
    833	uctx->exttype = XFRMA_SEC_CTX;
    834	uctx->len = ctx_size;
    835	uctx->ctx_doi = s->ctx_doi;
    836	uctx->ctx_alg = s->ctx_alg;
    837	uctx->ctx_len = s->ctx_len;
    838	memcpy(uctx + 1, s->ctx_str, s->ctx_len);
    839
    840	return 0;
    841}
    842
    843static int copy_user_offload(struct xfrm_dev_offload *xso, struct sk_buff *skb)
    844{
    845	struct xfrm_user_offload *xuo;
    846	struct nlattr *attr;
    847
    848	attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo));
    849	if (attr == NULL)
    850		return -EMSGSIZE;
    851
    852	xuo = nla_data(attr);
    853	memset(xuo, 0, sizeof(*xuo));
    854	xuo->ifindex = xso->dev->ifindex;
    855	if (xso->dir == XFRM_DEV_OFFLOAD_IN)
    856		xuo->flags = XFRM_OFFLOAD_INBOUND;
    857
    858	return 0;
    859}
    860
    861static bool xfrm_redact(void)
    862{
    863	return IS_ENABLED(CONFIG_SECURITY) &&
    864		security_locked_down(LOCKDOWN_XFRM_SECRET);
    865}
    866
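/* Emit both the legacy XFRMA_ALG_AUTH and the XFRMA_ALG_AUTH_TRUNC
 * representation of an authentication algorithm, zeroing the key material
 * when XFRM secrets are covered by kernel lockdown.
 */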
    867static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
    868{
    869	struct xfrm_algo *algo;
    870	struct xfrm_algo_auth *ap;
    871	struct nlattr *nla;
    872	bool redact_secret = xfrm_redact();
    873
    874	nla = nla_reserve(skb, XFRMA_ALG_AUTH,
    875			  sizeof(*algo) + (auth->alg_key_len + 7) / 8);
    876	if (!nla)
    877		return -EMSGSIZE;
    878	algo = nla_data(nla);
    879	strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
    880
    881	if (redact_secret && auth->alg_key_len)
    882		memset(algo->alg_key, 0, (auth->alg_key_len + 7) / 8);
    883	else
    884		memcpy(algo->alg_key, auth->alg_key,
    885		       (auth->alg_key_len + 7) / 8);
    886	algo->alg_key_len = auth->alg_key_len;
    887
    888	nla = nla_reserve(skb, XFRMA_ALG_AUTH_TRUNC, xfrm_alg_auth_len(auth));
    889	if (!nla)
    890		return -EMSGSIZE;
    891	ap = nla_data(nla);
    892	memcpy(ap, auth, sizeof(struct xfrm_algo_auth));
    893	if (redact_secret && auth->alg_key_len)
    894		memset(ap->alg_key, 0, (auth->alg_key_len + 7) / 8);
    895	else
    896		memcpy(ap->alg_key, auth->alg_key,
    897		       (auth->alg_key_len + 7) / 8);
    898	return 0;
    899}
    900
    901static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb)
    902{
    903	struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_AEAD, aead_len(aead));
    904	struct xfrm_algo_aead *ap;
    905	bool redact_secret = xfrm_redact();
    906
    907	if (!nla)
    908		return -EMSGSIZE;
    909
    910	ap = nla_data(nla);
    911	memcpy(ap, aead, sizeof(*aead));
    912
    913	if (redact_secret && aead->alg_key_len)
    914		memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8);
    915	else
    916		memcpy(ap->alg_key, aead->alg_key,
    917		       (aead->alg_key_len + 7) / 8);
    918	return 0;
    919}
    920
    921static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb)
    922{
    923	struct xfrm_algo *ap;
    924	bool redact_secret = xfrm_redact();
    925	struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_CRYPT,
    926					 xfrm_alg_len(ealg));
    927	if (!nla)
    928		return -EMSGSIZE;
    929
    930	ap = nla_data(nla);
    931	memcpy(ap, ealg, sizeof(*ealg));
    932
    933	if (redact_secret && ealg->alg_key_len)
    934		memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8);
    935	else
    936		memcpy(ap->alg_key, ealg->alg_key,
    937		       (ealg->alg_key_len + 7) / 8);
    938
    939	return 0;
    940}
    941
    942static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
    943{
    944	int ret = 0;
    945
    946	if (m->v | m->m) {
    947		ret = nla_put_u32(skb, XFRMA_SET_MARK, m->v);
    948		if (!ret)
    949			ret = nla_put_u32(skb, XFRMA_SET_MARK_MASK, m->m);
    950	}
    951	return ret;
    952}
    953
    954/* Don't change this without updating xfrm_sa_len! */
    955static int copy_to_user_state_extra(struct xfrm_state *x,
    956				    struct xfrm_usersa_info *p,
    957				    struct sk_buff *skb)
    958{
    959	int ret = 0;
    960
    961	copy_to_user_state(x, p);
    962
    963	if (x->props.extra_flags) {
    964		ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS,
    965				  x->props.extra_flags);
    966		if (ret)
    967			goto out;
    968	}
    969
    970	if (x->coaddr) {
    971		ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
    972		if (ret)
    973			goto out;
    974	}
    975	if (x->lastused) {
    976		ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused,
    977					XFRMA_PAD);
    978		if (ret)
    979			goto out;
    980	}
    981	if (x->aead) {
    982		ret = copy_to_user_aead(x->aead, skb);
    983		if (ret)
    984			goto out;
    985	}
    986	if (x->aalg) {
    987		ret = copy_to_user_auth(x->aalg, skb);
    988		if (ret)
    989			goto out;
    990	}
    991	if (x->ealg) {
    992		ret = copy_to_user_ealg(x->ealg, skb);
    993		if (ret)
    994			goto out;
    995	}
    996	if (x->calg) {
    997		ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
    998		if (ret)
    999			goto out;
   1000	}
   1001	if (x->encap) {
   1002		ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
   1003		if (ret)
   1004			goto out;
   1005	}
   1006	if (x->tfcpad) {
   1007		ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
   1008		if (ret)
   1009			goto out;
   1010	}
   1011	ret = xfrm_mark_put(skb, &x->mark);
   1012	if (ret)
   1013		goto out;
   1014
   1015	ret = xfrm_smark_put(skb, &x->props.smark);
   1016	if (ret)
   1017		goto out;
   1018
   1019	if (x->replay_esn)
   1020		ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
   1021			      xfrm_replay_state_esn_len(x->replay_esn),
   1022			      x->replay_esn);
   1023	else
   1024		ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
   1025			      &x->replay);
   1026	if (ret)
   1027		goto out;
   1028	if (x->xso.dev)
   1029		ret = copy_user_offload(&x->xso, skb);
   1030	if (ret)
   1031		goto out;
   1032	if (x->if_id) {
   1033		ret = nla_put_u32(skb, XFRMA_IF_ID, x->if_id);
   1034		if (ret)
   1035			goto out;
   1036	}
   1037	if (x->security) {
   1038		ret = copy_sec_ctx(x->security, skb);
   1039		if (ret)
   1040			goto out;
   1041	}
   1042	if (x->mapping_maxage)
   1043		ret = nla_put_u32(skb, XFRMA_MTIMER_THRESH, x->mapping_maxage);
   1044out:
   1045	return ret;
   1046}
   1047
   1048static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
   1049{
   1050	struct xfrm_dump_info *sp = ptr;
   1051	struct sk_buff *in_skb = sp->in_skb;
   1052	struct sk_buff *skb = sp->out_skb;
   1053	struct xfrm_translator *xtr;
   1054	struct xfrm_usersa_info *p;
   1055	struct nlmsghdr *nlh;
   1056	int err;
   1057
   1058	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
   1059			XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
   1060	if (nlh == NULL)
   1061		return -EMSGSIZE;
   1062
   1063	p = nlmsg_data(nlh);
   1064
   1065	err = copy_to_user_state_extra(x, p, skb);
   1066	if (err) {
   1067		nlmsg_cancel(skb, nlh);
   1068		return err;
   1069	}
   1070	nlmsg_end(skb, nlh);
   1071
   1072	xtr = xfrm_get_translator();
   1073	if (xtr) {
   1074		err = xtr->alloc_compat(skb, nlh);
   1075
   1076		xfrm_put_translator(xtr);
   1077		if (err) {
   1078			nlmsg_cancel(skb, nlh);
   1079			return err;
   1080		}
   1081	}
   1082
   1083	return 0;
   1084}
   1085
   1086static int xfrm_dump_sa_done(struct netlink_callback *cb)
   1087{
   1088	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
   1089	struct sock *sk = cb->skb->sk;
   1090	struct net *net = sock_net(sk);
   1091
   1092	if (cb->args[0])
   1093		xfrm_state_walk_done(walk, net);
   1094	return 0;
   1095}
   1096
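/* Netlink dump callback for SA dumps: on the first invocation parse the
 * optional XFRMA_ADDRESS_FILTER and XFRMA_PROTO attributes and start an
 * SA walk, then emit one XFRM_MSG_NEWSA message per matching state.
 */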
   1097static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
   1098{
   1099	struct net *net = sock_net(skb->sk);
   1100	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
   1101	struct xfrm_dump_info info;
   1102
   1103	BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
   1104		     sizeof(cb->args) - sizeof(cb->args[0]));
   1105
   1106	info.in_skb = cb->skb;
   1107	info.out_skb = skb;
   1108	info.nlmsg_seq = cb->nlh->nlmsg_seq;
   1109	info.nlmsg_flags = NLM_F_MULTI;
   1110
   1111	if (!cb->args[0]) {
   1112		struct nlattr *attrs[XFRMA_MAX+1];
   1113		struct xfrm_address_filter *filter = NULL;
   1114		u8 proto = 0;
   1115		int err;
   1116
   1117		err = nlmsg_parse_deprecated(cb->nlh, 0, attrs, XFRMA_MAX,
   1118					     xfrma_policy, cb->extack);
   1119		if (err < 0)
   1120			return err;
   1121
   1122		if (attrs[XFRMA_ADDRESS_FILTER]) {
   1123			filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
   1124					 sizeof(*filter), GFP_KERNEL);
   1125			if (filter == NULL)
   1126				return -ENOMEM;
   1127		}
   1128
   1129		if (attrs[XFRMA_PROTO])
   1130			proto = nla_get_u8(attrs[XFRMA_PROTO]);
   1131
   1132		xfrm_state_walk_init(walk, proto, filter);
   1133		cb->args[0] = 1;
   1134	}
   1135
   1136	(void) xfrm_state_walk(net, walk, dump_one_state, &info);
   1137
   1138	return skb->len;
   1139}
   1140
   1141static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
   1142					  struct xfrm_state *x, u32 seq)
   1143{
   1144	struct xfrm_dump_info info;
   1145	struct sk_buff *skb;
   1146	int err;
   1147
   1148	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
   1149	if (!skb)
   1150		return ERR_PTR(-ENOMEM);
   1151
   1152	info.in_skb = in_skb;
   1153	info.out_skb = skb;
   1154	info.nlmsg_seq = seq;
   1155	info.nlmsg_flags = 0;
   1156
   1157	err = dump_one_state(x, 0, &info);
   1158	if (err) {
   1159		kfree_skb(skb);
   1160		return ERR_PTR(err);
   1161	}
   1162
   1163	return skb;
   1164}
   1165
   1166/* A wrapper for nlmsg_multicast() checking that nlsk is still available.
   1167 * Must be called with RCU read lock.
   1168 */
   1169static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
   1170				       u32 pid, unsigned int group)
   1171{
   1172	struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
   1173	struct xfrm_translator *xtr;
   1174
   1175	if (!nlsk) {
   1176		kfree_skb(skb);
   1177		return -EPIPE;
   1178	}
   1179
   1180	xtr = xfrm_get_translator();
   1181	if (xtr) {
   1182		int err = xtr->alloc_compat(skb, nlmsg_hdr(skb));
   1183
   1184		xfrm_put_translator(xtr);
   1185		if (err) {
   1186			kfree_skb(skb);
   1187			return err;
   1188		}
   1189	}
   1190
   1191	return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
   1192}
   1193
   1194static inline unsigned int xfrm_spdinfo_msgsize(void)
   1195{
   1196	return NLMSG_ALIGN(4)
   1197	       + nla_total_size(sizeof(struct xfrmu_spdinfo))
   1198	       + nla_total_size(sizeof(struct xfrmu_spdhinfo))
   1199	       + nla_total_size(sizeof(struct xfrmu_spdhthresh))
   1200	       + nla_total_size(sizeof(struct xfrmu_spdhthresh));
   1201}
   1202
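/* Fill an XFRM_MSG_NEWSPDINFO reply with the SPD counters and the
 * IPv4/IPv6 policy-hash thresholds, reading the thresholds under their
 * seqlock.
 */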
   1203static int build_spdinfo(struct sk_buff *skb, struct net *net,
   1204			 u32 portid, u32 seq, u32 flags)
   1205{
   1206	struct xfrmk_spdinfo si;
   1207	struct xfrmu_spdinfo spc;
   1208	struct xfrmu_spdhinfo sph;
   1209	struct xfrmu_spdhthresh spt4, spt6;
   1210	struct nlmsghdr *nlh;
   1211	int err;
   1212	u32 *f;
   1213	unsigned lseq;
   1214
   1215	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
   1216	if (nlh == NULL) /* shouldn't really happen ... */
   1217		return -EMSGSIZE;
   1218
   1219	f = nlmsg_data(nlh);
   1220	*f = flags;
   1221	xfrm_spd_getinfo(net, &si);
   1222	spc.incnt = si.incnt;
   1223	spc.outcnt = si.outcnt;
   1224	spc.fwdcnt = si.fwdcnt;
   1225	spc.inscnt = si.inscnt;
   1226	spc.outscnt = si.outscnt;
   1227	spc.fwdscnt = si.fwdscnt;
   1228	sph.spdhcnt = si.spdhcnt;
   1229	sph.spdhmcnt = si.spdhmcnt;
   1230
   1231	do {
   1232		lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
   1233
   1234		spt4.lbits = net->xfrm.policy_hthresh.lbits4;
   1235		spt4.rbits = net->xfrm.policy_hthresh.rbits4;
   1236		spt6.lbits = net->xfrm.policy_hthresh.lbits6;
   1237		spt6.rbits = net->xfrm.policy_hthresh.rbits6;
   1238	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq));
   1239
   1240	err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
   1241	if (!err)
   1242		err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
   1243	if (!err)
   1244		err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4);
   1245	if (!err)
   1246		err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6);
   1247	if (err) {
   1248		nlmsg_cancel(skb, nlh);
   1249		return err;
   1250	}
   1251
   1252	nlmsg_end(skb, nlh);
   1253	return 0;
   1254}
   1255
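/* Validate and apply new IPv4/IPv6 selector prefix-length thresholds for
 * policy hashing, then trigger a rebuild of the policy hash tables.
 */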
   1256static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
   1257			    struct nlattr **attrs)
   1258{
   1259	struct net *net = sock_net(skb->sk);
   1260	struct xfrmu_spdhthresh *thresh4 = NULL;
   1261	struct xfrmu_spdhthresh *thresh6 = NULL;
   1262
   1263	/* selector prefixlen thresholds to hash policies */
   1264	if (attrs[XFRMA_SPD_IPV4_HTHRESH]) {
   1265		struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH];
   1266
   1267		if (nla_len(rta) < sizeof(*thresh4))
   1268			return -EINVAL;
   1269		thresh4 = nla_data(rta);
   1270		if (thresh4->lbits > 32 || thresh4->rbits > 32)
   1271			return -EINVAL;
   1272	}
   1273	if (attrs[XFRMA_SPD_IPV6_HTHRESH]) {
   1274		struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH];
   1275
   1276		if (nla_len(rta) < sizeof(*thresh6))
   1277			return -EINVAL;
   1278		thresh6 = nla_data(rta);
   1279		if (thresh6->lbits > 128 || thresh6->rbits > 128)
   1280			return -EINVAL;
   1281	}
   1282
   1283	if (thresh4 || thresh6) {
   1284		write_seqlock(&net->xfrm.policy_hthresh.lock);
   1285		if (thresh4) {
   1286			net->xfrm.policy_hthresh.lbits4 = thresh4->lbits;
   1287			net->xfrm.policy_hthresh.rbits4 = thresh4->rbits;
   1288		}
   1289		if (thresh6) {
   1290			net->xfrm.policy_hthresh.lbits6 = thresh6->lbits;
   1291			net->xfrm.policy_hthresh.rbits6 = thresh6->rbits;
   1292		}
   1293		write_sequnlock(&net->xfrm.policy_hthresh.lock);
   1294
   1295		xfrm_policy_hash_rebuild(net);
   1296	}
   1297
   1298	return 0;
   1299}
   1300
   1301static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
   1302		struct nlattr **attrs)
   1303{
   1304	struct net *net = sock_net(skb->sk);
   1305	struct sk_buff *r_skb;
   1306	u32 *flags = nlmsg_data(nlh);
   1307	u32 sportid = NETLINK_CB(skb).portid;
   1308	u32 seq = nlh->nlmsg_seq;
   1309	int err;
   1310
   1311	r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
   1312	if (r_skb == NULL)
   1313		return -ENOMEM;
   1314
   1315	err = build_spdinfo(r_skb, net, sportid, seq, *flags);
   1316	BUG_ON(err < 0);
   1317
   1318	return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
   1319}
   1320
   1321static inline unsigned int xfrm_sadinfo_msgsize(void)
   1322{
   1323	return NLMSG_ALIGN(4)
   1324	       + nla_total_size(sizeof(struct xfrmu_sadhinfo))
   1325	       + nla_total_size(4); /* XFRMA_SAD_CNT */
   1326}
   1327
   1328static int build_sadinfo(struct sk_buff *skb, struct net *net,
   1329			 u32 portid, u32 seq, u32 flags)
   1330{
   1331	struct xfrmk_sadinfo si;
   1332	struct xfrmu_sadhinfo sh;
   1333	struct nlmsghdr *nlh;
   1334	int err;
   1335	u32 *f;
   1336
   1337	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
   1338	if (nlh == NULL) /* shouldn't really happen ... */
   1339		return -EMSGSIZE;
   1340
   1341	f = nlmsg_data(nlh);
   1342	*f = flags;
   1343	xfrm_sad_getinfo(net, &si);
   1344
   1345	sh.sadhmcnt = si.sadhmcnt;
   1346	sh.sadhcnt = si.sadhcnt;
   1347
   1348	err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
   1349	if (!err)
   1350		err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
   1351	if (err) {
   1352		nlmsg_cancel(skb, nlh);
   1353		return err;
   1354	}
   1355
   1356	nlmsg_end(skb, nlh);
   1357	return 0;
   1358}
   1359
   1360static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
   1361		struct nlattr **attrs)
   1362{
   1363	struct net *net = sock_net(skb->sk);
   1364	struct sk_buff *r_skb;
   1365	u32 *flags = nlmsg_data(nlh);
   1366	u32 sportid = NETLINK_CB(skb).portid;
   1367	u32 seq = nlh->nlmsg_seq;
   1368	int err;
   1369
   1370	r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
   1371	if (r_skb == NULL)
   1372		return -ENOMEM;
   1373
   1374	err = build_sadinfo(r_skb, net, sportid, seq, *flags);
   1375	BUG_ON(err < 0);
   1376
   1377	return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
   1378}
   1379
   1380static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
   1381		struct nlattr **attrs)
   1382{
   1383	struct net *net = sock_net(skb->sk);
   1384	struct xfrm_usersa_id *p = nlmsg_data(nlh);
   1385	struct xfrm_state *x;
   1386	struct sk_buff *resp_skb;
   1387	int err = -ESRCH;
   1388
   1389	x = xfrm_user_state_lookup(net, p, attrs, &err);
   1390	if (x == NULL)
   1391		goto out_noput;
   1392
   1393	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
   1394	if (IS_ERR(resp_skb)) {
   1395		err = PTR_ERR(resp_skb);
   1396	} else {
   1397		err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
   1398	}
   1399	xfrm_state_put(x);
   1400out_noput:
   1401	return err;
   1402}
   1403
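/* SPI allocation handler: locate (or create) the matching acquire state,
 * assign it an SPI from the requested [min, max] range and unicast the
 * resulting state back to the requester.
 */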
   1404static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
   1405		struct nlattr **attrs)
   1406{
   1407	struct net *net = sock_net(skb->sk);
   1408	struct xfrm_state *x;
   1409	struct xfrm_userspi_info *p;
   1410	struct xfrm_translator *xtr;
   1411	struct sk_buff *resp_skb;
   1412	xfrm_address_t *daddr;
   1413	int family;
   1414	int err;
   1415	u32 mark;
   1416	struct xfrm_mark m;
   1417	u32 if_id = 0;
   1418
   1419	p = nlmsg_data(nlh);
   1420	err = verify_spi_info(p->info.id.proto, p->min, p->max);
   1421	if (err)
   1422		goto out_noput;
   1423
   1424	family = p->info.family;
   1425	daddr = &p->info.id.daddr;
   1426
   1427	x = NULL;
   1428
   1429	mark = xfrm_mark_get(attrs, &m);
   1430
   1431	if (attrs[XFRMA_IF_ID])
   1432		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
   1433
   1434	if (p->info.seq) {
   1435		x = xfrm_find_acq_byseq(net, mark, p->info.seq);
   1436		if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
   1437			xfrm_state_put(x);
   1438			x = NULL;
   1439		}
   1440	}
   1441
   1442	if (!x)
   1443		x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
   1444				  if_id, p->info.id.proto, daddr,
   1445				  &p->info.saddr, 1,
   1446				  family);
   1447	err = -ENOENT;
   1448	if (x == NULL)
   1449		goto out_noput;
   1450
   1451	err = xfrm_alloc_spi(x, p->min, p->max);
   1452	if (err)
   1453		goto out;
   1454
   1455	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
   1456	if (IS_ERR(resp_skb)) {
   1457		err = PTR_ERR(resp_skb);
   1458		goto out;
   1459	}
   1460
   1461	xtr = xfrm_get_translator();
   1462	if (xtr) {
   1463		err = xtr->alloc_compat(skb, nlmsg_hdr(skb));
   1464
   1465		xfrm_put_translator(xtr);
   1466		if (err) {
   1467			kfree_skb(resp_skb);
   1468			goto out;
   1469		}
   1470	}
   1471
   1472	err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
   1473
   1474out:
   1475	xfrm_state_put(x);
   1476out_noput:
   1477	return err;
   1478}
   1479
   1480static int verify_policy_dir(u8 dir)
   1481{
   1482	switch (dir) {
   1483	case XFRM_POLICY_IN:
   1484	case XFRM_POLICY_OUT:
   1485	case XFRM_POLICY_FWD:
   1486		break;
   1487
   1488	default:
   1489		return -EINVAL;
   1490	}
   1491
   1492	return 0;
   1493}
   1494
   1495static int verify_policy_type(u8 type)
   1496{
   1497	switch (type) {
   1498	case XFRM_POLICY_TYPE_MAIN:
   1499#ifdef CONFIG_XFRM_SUB_POLICY
   1500	case XFRM_POLICY_TYPE_SUB:
   1501#endif
   1502		break;
   1503
   1504	default:
   1505		return -EINVAL;
   1506	}
   1507
   1508	return 0;
   1509}
   1510
   1511static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
   1512{
   1513	int ret;
   1514
   1515	switch (p->share) {
   1516	case XFRM_SHARE_ANY:
   1517	case XFRM_SHARE_SESSION:
   1518	case XFRM_SHARE_USER:
   1519	case XFRM_SHARE_UNIQUE:
   1520		break;
   1521
   1522	default:
   1523		return -EINVAL;
   1524	}
   1525
   1526	switch (p->action) {
   1527	case XFRM_POLICY_ALLOW:
   1528	case XFRM_POLICY_BLOCK:
   1529		break;
   1530
   1531	default:
   1532		return -EINVAL;
   1533	}
   1534
   1535	switch (p->sel.family) {
   1536	case AF_INET:
   1537		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
   1538			return -EINVAL;
   1539
   1540		break;
   1541
   1542	case AF_INET6:
   1543#if IS_ENABLED(CONFIG_IPV6)
   1544		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
   1545			return -EINVAL;
   1546
   1547		break;
   1548#else
   1549		return -EAFNOSUPPORT;
   1550#endif
   1551
   1552	default:
   1553		return -EINVAL;
   1554	}
   1555
   1556	ret = verify_policy_dir(p->dir);
   1557	if (ret)
   1558		return ret;
   1559	if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
   1560		return -EINVAL;
   1561
   1562	return 0;
   1563}
   1564
   1565static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
   1566{
   1567	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
   1568	struct xfrm_user_sec_ctx *uctx;
   1569
   1570	if (!rt)
   1571		return 0;
   1572
   1573	uctx = nla_data(rt);
   1574	return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
   1575}
   1576
   1577static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
   1578			   int nr)
   1579{
   1580	int i;
   1581
   1582	xp->xfrm_nr = nr;
   1583	for (i = 0; i < nr; i++, ut++) {
   1584		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
   1585
   1586		memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
   1587		memcpy(&t->saddr, &ut->saddr,
   1588		       sizeof(xfrm_address_t));
   1589		t->reqid = ut->reqid;
   1590		t->mode = ut->mode;
   1591		t->share = ut->share;
   1592		t->optional = ut->optional;
   1593		t->aalgos = ut->aalgos;
   1594		t->ealgos = ut->ealgos;
   1595		t->calgos = ut->calgos;
   1596		/* If all masks are ~0, then we allow all algorithms. */
   1597		t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
   1598		t->encap_family = ut->family;
   1599	}
   1600}
   1601
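/* Sanity-check an array of user templates: bounded depth, supported
 * address families and modes, valid IPsec protocols, and a consistent
 * family chain across non-tunnel templates.
 */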
   1602static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
   1603{
   1604	u16 prev_family;
   1605	int i;
   1606
   1607	if (nr > XFRM_MAX_DEPTH)
   1608		return -EINVAL;
   1609
   1610	prev_family = family;
   1611
   1612	for (i = 0; i < nr; i++) {
   1613		/* We never validated the ut->family value, so many
   1614		 * applications simply leave it at zero.  The check was
   1615		 * never made and ut->family was ignored because all
   1616		 * templates could be assumed to have the same family as
   1617		 * the policy itself.  Now that we will have ipv4-in-ipv6
   1618		 * and ipv6-in-ipv4 tunnels, this is no longer true.
   1619		 */
   1620		if (!ut[i].family)
   1621			ut[i].family = family;
   1622
   1623		switch (ut[i].mode) {
   1624		case XFRM_MODE_TUNNEL:
   1625		case XFRM_MODE_BEET:
   1626			break;
   1627		default:
   1628			if (ut[i].family != prev_family)
   1629				return -EINVAL;
   1630			break;
   1631		}
   1632		if (ut[i].mode >= XFRM_MODE_MAX)
   1633			return -EINVAL;
   1634
   1635		prev_family = ut[i].family;
   1636
   1637		switch (ut[i].family) {
   1638		case AF_INET:
   1639			break;
   1640#if IS_ENABLED(CONFIG_IPV6)
   1641		case AF_INET6:
   1642			break;
   1643#endif
   1644		default:
   1645			return -EINVAL;
   1646		}
   1647
   1648		if (!xfrm_id_proto_valid(ut[i].id.proto))
   1649			return -EINVAL;
   1650	}
   1651
   1652	return 0;
   1653}
   1654
   1655static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
   1656{
   1657	struct nlattr *rt = attrs[XFRMA_TMPL];
   1658
   1659	if (!rt) {
   1660		pol->xfrm_nr = 0;
   1661	} else {
   1662		struct xfrm_user_tmpl *utmpl = nla_data(rt);
   1663		int nr = nla_len(rt) / sizeof(*utmpl);
   1664		int err;
   1665
   1666		err = validate_tmpl(nr, utmpl, pol->family);
   1667		if (err)
   1668			return err;
   1669
   1670		copy_templates(pol, utmpl, nr);
   1671	}
   1672	return 0;
   1673}
   1674
   1675static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
   1676{
   1677	struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
   1678	struct xfrm_userpolicy_type *upt;
   1679	u8 type = XFRM_POLICY_TYPE_MAIN;
   1680	int err;
   1681
   1682	if (rt) {
   1683		upt = nla_data(rt);
   1684		type = upt->type;
   1685	}
   1686
   1687	err = verify_policy_type(type);
   1688	if (err)
   1689		return err;
   1690
   1691	*tp = type;
   1692	return 0;
   1693}
   1694
   1695static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
   1696{
   1697	xp->priority = p->priority;
   1698	xp->index = p->index;
   1699	memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
   1700	memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
   1701	xp->action = p->action;
   1702	xp->flags = p->flags;
   1703	xp->family = p->sel.family;
   1704	/* XXX xp->share = p->share; */
   1705}
   1706
   1707static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
   1708{
   1709	memset(p, 0, sizeof(*p));
   1710	memcpy(&p->sel, &xp->selector, sizeof(p->sel));
   1711	memcpy(&p->lft, &xp->lft, sizeof(p->lft));
   1712	memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
   1713	p->priority = xp->priority;
   1714	p->index = xp->index;
   1715	p->sel.family = xp->family;
   1716	p->dir = dir;
   1717	p->action = xp->action;
   1718	p->flags = xp->flags;
   1719	p->share = XFRM_SHARE_ANY; /* XXX xp->share */
   1720}
   1721
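/* Allocate an xfrm_policy and populate it from the userspace policy info
 * and attributes: policy type, templates, security context, mark and
 * interface id.
 */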
   1722static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
   1723{
   1724	struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
   1725	int err;
   1726
   1727	if (!xp) {
   1728		*errp = -ENOMEM;
   1729		return NULL;
   1730	}
   1731
   1732	copy_from_user_policy(xp, p);
   1733
   1734	err = copy_from_user_policy_type(&xp->type, attrs);
   1735	if (err)
   1736		goto error;
   1737
   1738	if (!(err = copy_from_user_tmpl(xp, attrs)))
   1739		err = copy_from_user_sec_ctx(xp, attrs);
   1740	if (err)
   1741		goto error;
   1742
   1743	xfrm_mark_get(attrs, &xp->mark);
   1744
   1745	if (attrs[XFRMA_IF_ID])
   1746		xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
   1747
   1748	return xp;
   1749 error:
   1750	*errp = err;
   1751	xp->walk.dead = 1;
   1752	xfrm_policy_destroy(xp);
   1753	return NULL;
   1754}
   1755
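/* XFRM_MSG_NEWPOLICY/XFRM_MSG_UPDPOLICY handler: verify and construct the
 * policy, insert it into the SPD (exclusively for NEWPOLICY), audit the
 * result and notify km listeners.
 */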
   1756static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
   1757		struct nlattr **attrs)
   1758{
   1759	struct net *net = sock_net(skb->sk);
   1760	struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
   1761	struct xfrm_policy *xp;
   1762	struct km_event c;
   1763	int err;
   1764	int excl;
   1765
   1766	err = verify_newpolicy_info(p);
   1767	if (err)
   1768		return err;
   1769	err = verify_sec_ctx_len(attrs);
   1770	if (err)
   1771		return err;
   1772
   1773	xp = xfrm_policy_construct(net, p, attrs, &err);
   1774	if (!xp)
   1775		return err;
   1776
   1777	/* Shouldn't excl be based on nlh flags?
   1778	 * This is really anti-netlink, i.e. more pfkey-derived;
   1779	 * in netlink excl is a flag and you wouldn't need
   1780	 * a type XFRM_MSG_UPDPOLICY - JHS */
   1781	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
   1782	err = xfrm_policy_insert(p->dir, xp, excl);
   1783	xfrm_audit_policy_add(xp, err ? 0 : 1, true);
   1784
   1785	if (err) {
   1786		security_xfrm_policy_free(xp->security);
   1787		kfree(xp);
   1788		return err;
   1789	}
   1790
   1791	c.event = nlh->nlmsg_type;
   1792	c.seq = nlh->nlmsg_seq;
   1793	c.portid = nlh->nlmsg_pid;
   1794	km_policy_notify(xp, p->dir, &c);
   1795
   1796	xfrm_pol_put(xp);
   1797
   1798	return 0;
   1799}
   1800
   1801static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
   1802{
   1803	struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
   1804	int i;
   1805
   1806	if (xp->xfrm_nr == 0)
   1807		return 0;
   1808
   1809	for (i = 0; i < xp->xfrm_nr; i++) {
   1810		struct xfrm_user_tmpl *up = &vec[i];
   1811		struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
   1812
   1813		memset(up, 0, sizeof(*up));
   1814		memcpy(&up->id, &kp->id, sizeof(up->id));
   1815		up->family = kp->encap_family;
   1816		memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
   1817		up->reqid = kp->reqid;
   1818		up->mode = kp->mode;
   1819		up->share = kp->share;
   1820		up->optional = kp->optional;
   1821		up->aalgos = kp->aalgos;
   1822		up->ealgos = kp->ealgos;
   1823		up->calgos = kp->calgos;
   1824	}
   1825
   1826	return nla_put(skb, XFRMA_TMPL,
   1827		       sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
   1828}
   1829
   1830static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
   1831{
   1832	if (x->security) {
   1833		return copy_sec_ctx(x->security, skb);
   1834	}
   1835	return 0;
   1836}
   1837
   1838static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
   1839{
   1840	if (xp->security)
   1841		return copy_sec_ctx(xp->security, skb);
   1842	return 0;
   1843}
   1844static inline unsigned int userpolicy_type_attrsize(void)
   1845{
   1846#ifdef CONFIG_XFRM_SUB_POLICY
   1847	return nla_total_size(sizeof(struct xfrm_userpolicy_type));
   1848#else
   1849	return 0;
   1850#endif
   1851}
   1852
   1853#ifdef CONFIG_XFRM_SUB_POLICY
   1854static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
   1855{
   1856	struct xfrm_userpolicy_type upt;
   1857
   1858	/* Sadly there are two holes in struct xfrm_userpolicy_type */
   1859	memset(&upt, 0, sizeof(upt));
   1860	upt.type = type;
   1861
   1862	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
   1863}
   1864
   1865#else
   1866static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
   1867{
   1868	return 0;
   1869}
   1870#endif
   1871
   1872static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
   1873{
   1874	struct xfrm_dump_info *sp = ptr;
   1875	struct xfrm_userpolicy_info *p;
   1876	struct sk_buff *in_skb = sp->in_skb;
   1877	struct sk_buff *skb = sp->out_skb;
   1878	struct xfrm_translator *xtr;
   1879	struct nlmsghdr *nlh;
   1880	int err;
   1881
   1882	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
   1883			XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
   1884	if (nlh == NULL)
   1885		return -EMSGSIZE;
   1886
   1887	p = nlmsg_data(nlh);
   1888	copy_to_user_policy(xp, p, dir);
   1889	err = copy_to_user_tmpl(xp, skb);
   1890	if (!err)
   1891		err = copy_to_user_sec_ctx(xp, skb);
   1892	if (!err)
   1893		err = copy_to_user_policy_type(xp->type, skb);
   1894	if (!err)
   1895		err = xfrm_mark_put(skb, &xp->mark);
   1896	if (!err)
   1897		err = xfrm_if_id_put(skb, xp->if_id);
   1898	if (err) {
   1899		nlmsg_cancel(skb, nlh);
   1900		return err;
   1901	}
   1902	nlmsg_end(skb, nlh);
   1903
   1904	xtr = xfrm_get_translator();
   1905	if (xtr) {
   1906		err = xtr->alloc_compat(skb, nlh);
   1907
   1908		xfrm_put_translator(xtr);
   1909		if (err) {
   1910			nlmsg_cancel(skb, nlh);
   1911			return err;
   1912		}
   1913	}
   1914
   1915	return 0;
   1916}
   1917
   1918static int xfrm_dump_policy_done(struct netlink_callback *cb)
   1919{
   1920	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
   1921	struct net *net = sock_net(cb->skb->sk);
   1922
   1923	xfrm_policy_walk_done(walk, net);
   1924	return 0;
   1925}
   1926
   1927static int xfrm_dump_policy_start(struct netlink_callback *cb)
   1928{
   1929	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
   1930
   1931	BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
   1932
   1933	xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
   1934	return 0;
   1935}
   1936
   1937static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
   1938{
   1939	struct net *net = sock_net(skb->sk);
   1940	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
   1941	struct xfrm_dump_info info;
   1942
   1943	info.in_skb = cb->skb;
   1944	info.out_skb = skb;
   1945	info.nlmsg_seq = cb->nlh->nlmsg_seq;
   1946	info.nlmsg_flags = NLM_F_MULTI;
   1947
   1948	(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
   1949
   1950	return skb->len;
   1951}
   1952
   1953static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
   1954					  struct xfrm_policy *xp,
   1955					  int dir, u32 seq)
   1956{
   1957	struct xfrm_dump_info info;
   1958	struct sk_buff *skb;
   1959	int err;
   1960
   1961	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
   1962	if (!skb)
   1963		return ERR_PTR(-ENOMEM);
   1964
   1965	info.in_skb = in_skb;
   1966	info.out_skb = skb;
   1967	info.nlmsg_seq = seq;
   1968	info.nlmsg_flags = 0;
   1969
   1970	err = dump_one_policy(xp, dir, 0, &info);
   1971	if (err) {
   1972		kfree_skb(skb);
   1973		return ERR_PTR(err);
   1974	}
   1975
   1976	return skb;
   1977}
   1978
   1979static int xfrm_notify_userpolicy(struct net *net)
   1980{
   1981	struct xfrm_userpolicy_default *up;
   1982	int len = NLMSG_ALIGN(sizeof(*up));
   1983	struct nlmsghdr *nlh;
   1984	struct sk_buff *skb;
   1985	int err;
   1986
   1987	skb = nlmsg_new(len, GFP_ATOMIC);
   1988	if (skb == NULL)
   1989		return -ENOMEM;
   1990
   1991	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0);
   1992	if (nlh == NULL) {
   1993		kfree_skb(skb);
   1994		return -EMSGSIZE;
   1995	}
   1996
   1997	up = nlmsg_data(nlh);
   1998	up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
   1999	up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
   2000	up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
   2001
   2002	nlmsg_end(skb, nlh);
   2003
   2004	rcu_read_lock();
   2005	err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
   2006	rcu_read_unlock();
   2007
   2008	return err;
   2009}
   2010
   2011static bool xfrm_userpolicy_is_valid(__u8 policy)
   2012{
   2013	return policy == XFRM_USERPOLICY_BLOCK ||
   2014	       policy == XFRM_USERPOLICY_ACCEPT;
   2015}
   2016
   2017static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
   2018			    struct nlattr **attrs)
   2019{
   2020	struct net *net = sock_net(skb->sk);
   2021	struct xfrm_userpolicy_default *up = nlmsg_data(nlh);
   2022
   2023	if (xfrm_userpolicy_is_valid(up->in))
   2024		net->xfrm.policy_default[XFRM_POLICY_IN] = up->in;
   2025
   2026	if (xfrm_userpolicy_is_valid(up->fwd))
   2027		net->xfrm.policy_default[XFRM_POLICY_FWD] = up->fwd;
   2028
   2029	if (xfrm_userpolicy_is_valid(up->out))
   2030		net->xfrm.policy_default[XFRM_POLICY_OUT] = up->out;
   2031
   2032	rt_genid_bump_all(net);
   2033
   2034	xfrm_notify_userpolicy(net);
   2035	return 0;
   2036}
   2037
   2038static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
   2039			    struct nlattr **attrs)
   2040{
   2041	struct sk_buff *r_skb;
   2042	struct nlmsghdr *r_nlh;
   2043	struct net *net = sock_net(skb->sk);
   2044	struct xfrm_userpolicy_default *r_up;
   2045	int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default));
   2046	u32 portid = NETLINK_CB(skb).portid;
   2047	u32 seq = nlh->nlmsg_seq;
   2048
   2049	r_skb = nlmsg_new(len, GFP_ATOMIC);
   2050	if (!r_skb)
   2051		return -ENOMEM;
   2052
   2053	r_nlh = nlmsg_put(r_skb, portid, seq, XFRM_MSG_GETDEFAULT, sizeof(*r_up), 0);
   2054	if (!r_nlh) {
   2055		kfree_skb(r_skb);
   2056		return -EMSGSIZE;
   2057	}
   2058
   2059	r_up = nlmsg_data(r_nlh);
   2060	r_up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
   2061	r_up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
   2062	r_up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
   2063	nlmsg_end(r_skb, r_nlh);
   2064
   2065	return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
   2066}
   2067
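/*
 * Userspace sketch of the request/reply pair served by xfrm_get_default()
 * above (assuming a uapi <linux/xfrm.h> that provides XFRM_MSG_GETDEFAULT;
 * needs CAP_NET_ADMIN). The request payload is not examined; the kernel
 * unicasts the three per-direction defaults back.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/xfrm.h>

static int get_default_policies(void)
{
	struct {
		struct nlmsghdr nlh;
		struct xfrm_userpolicy_default up;
	} req;
	char buf[1024];
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.up));
	req.nlh.nlmsg_type = XFRM_MSG_GETDEFAULT;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;

	if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0 ||
	    (len = recv(fd, buf, sizeof(buf), 0)) <= 0) {
		close(fd);
		return -1;
	}

	if (NLMSG_OK(nlh, len) && nlh->nlmsg_type == XFRM_MSG_GETDEFAULT) {
		struct xfrm_userpolicy_default *up = NLMSG_DATA(nlh);

		printf("in=%u fwd=%u out=%u\n", up->in, up->fwd, up->out);
	}

	close(fd);
	return 0;
}
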
   2068static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
   2069		struct nlattr **attrs)
   2070{
   2071	struct net *net = sock_net(skb->sk);
   2072	struct xfrm_policy *xp;
   2073	struct xfrm_userpolicy_id *p;
   2074	u8 type = XFRM_POLICY_TYPE_MAIN;
   2075	int err;
   2076	struct km_event c;
   2077	int delete;
   2078	struct xfrm_mark m;
   2079	u32 if_id = 0;
   2080
   2081	p = nlmsg_data(nlh);
   2082	delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
   2083
   2084	err = copy_from_user_policy_type(&type, attrs);
   2085	if (err)
   2086		return err;
   2087
   2088	err = verify_policy_dir(p->dir);
   2089	if (err)
   2090		return err;
   2091
   2092	if (attrs[XFRMA_IF_ID])
   2093		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
   2094
   2095	xfrm_mark_get(attrs, &m);
   2096
   2097	if (p->index)
   2098		xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
   2099				      p->index, delete, &err);
   2100	else {
   2101		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
   2102		struct xfrm_sec_ctx *ctx;
   2103
   2104		err = verify_sec_ctx_len(attrs);
   2105		if (err)
   2106			return err;
   2107
   2108		ctx = NULL;
   2109		if (rt) {
   2110			struct xfrm_user_sec_ctx *uctx = nla_data(rt);
   2111
   2112			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
   2113			if (err)
   2114				return err;
   2115		}
   2116		xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
   2117					   &p->sel, ctx, delete, &err);
   2118		security_xfrm_policy_free(ctx);
   2119	}
   2120	if (xp == NULL)
   2121		return -ENOENT;
   2122
   2123	if (!delete) {
   2124		struct sk_buff *resp_skb;
   2125
   2126		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
   2127		if (IS_ERR(resp_skb)) {
   2128			err = PTR_ERR(resp_skb);
   2129		} else {
   2130			err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
   2131					    NETLINK_CB(skb).portid);
   2132		}
   2133	} else {
   2134		xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
   2135
   2136		if (err != 0)
   2137			goto out;
   2138
   2139		c.data.byid = p->index;
   2140		c.event = nlh->nlmsg_type;
   2141		c.seq = nlh->nlmsg_seq;
   2142		c.portid = nlh->nlmsg_pid;
   2143		km_policy_notify(xp, p->dir, &c);
   2144	}
   2145
   2146out:
   2147	xfrm_pol_put(xp);
   2148	return err;
   2149}
   2150
   2151static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
   2152		struct nlattr **attrs)
   2153{
   2154	struct net *net = sock_net(skb->sk);
   2155	struct km_event c;
   2156	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
   2157	int err;
   2158
   2159	err = xfrm_state_flush(net, p->proto, true, false);
   2160	if (err) {
   2161		if (err == -ESRCH) /* empty table */
   2162			return 0;
   2163		return err;
   2164	}
   2165	c.data.proto = p->proto;
   2166	c.event = nlh->nlmsg_type;
   2167	c.seq = nlh->nlmsg_seq;
   2168	c.portid = nlh->nlmsg_pid;
   2169	c.net = net;
   2170	km_state_notify(NULL, &c);
   2171
   2172	return 0;
   2173}
   2174
   2175static inline unsigned int xfrm_aevent_msgsize(struct xfrm_state *x)
   2176{
   2177	unsigned int replay_size = x->replay_esn ?
   2178			      xfrm_replay_state_esn_len(x->replay_esn) :
   2179			      sizeof(struct xfrm_replay_state);
   2180
   2181	return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
   2182	       + nla_total_size(replay_size)
   2183	       + nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur))
   2184	       + nla_total_size(sizeof(struct xfrm_mark))
   2185	       + nla_total_size(4) /* XFRM_AE_RTHR */
   2186	       + nla_total_size(4); /* XFRM_AE_ETHR */
   2187}
   2188
   2189static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
   2190{
   2191	struct xfrm_aevent_id *id;
   2192	struct nlmsghdr *nlh;
   2193	int err;
   2194
   2195	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
   2196	if (nlh == NULL)
   2197		return -EMSGSIZE;
   2198
   2199	id = nlmsg_data(nlh);
   2200	memset(&id->sa_id, 0, sizeof(id->sa_id));
   2201	memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
   2202	id->sa_id.spi = x->id.spi;
   2203	id->sa_id.family = x->props.family;
   2204	id->sa_id.proto = x->id.proto;
   2205	memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
   2206	id->reqid = x->props.reqid;
   2207	id->flags = c->data.aevent;
   2208
   2209	if (x->replay_esn) {
   2210		err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
   2211			      xfrm_replay_state_esn_len(x->replay_esn),
   2212			      x->replay_esn);
   2213	} else {
   2214		err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
   2215			      &x->replay);
   2216	}
   2217	if (err)
   2218		goto out_cancel;
   2219	err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft,
   2220			    XFRMA_PAD);
   2221	if (err)
   2222		goto out_cancel;
   2223
   2224	if (id->flags & XFRM_AE_RTHR) {
   2225		err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
   2226		if (err)
   2227			goto out_cancel;
   2228	}
   2229	if (id->flags & XFRM_AE_ETHR) {
   2230		err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
   2231				  x->replay_maxage * 10 / HZ);
   2232		if (err)
   2233			goto out_cancel;
   2234	}
   2235	err = xfrm_mark_put(skb, &x->mark);
   2236	if (err)
   2237		goto out_cancel;
   2238
   2239	err = xfrm_if_id_put(skb, x->if_id);
   2240	if (err)
   2241		goto out_cancel;
   2242
   2243	nlmsg_end(skb, nlh);
   2244	return 0;
   2245
   2246out_cancel:
   2247	nlmsg_cancel(skb, nlh);
   2248	return err;
   2249}
   2250
   2251static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
   2252		struct nlattr **attrs)
   2253{
   2254	struct net *net = sock_net(skb->sk);
   2255	struct xfrm_state *x;
   2256	struct sk_buff *r_skb;
   2257	int err;
   2258	struct km_event c;
   2259	u32 mark;
   2260	struct xfrm_mark m;
   2261	struct xfrm_aevent_id *p = nlmsg_data(nlh);
   2262	struct xfrm_usersa_id *id = &p->sa_id;
   2263
   2264	mark = xfrm_mark_get(attrs, &m);
   2265
   2266	x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
   2267	if (x == NULL)
   2268		return -ESRCH;
   2269
   2270	r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
   2271	if (r_skb == NULL) {
   2272		xfrm_state_put(x);
   2273		return -ENOMEM;
   2274	}
   2275
   2276	/*
   2277	 * XXX: is this lock really needed - none of the other
   2278	 * readers take it (the concern is things getting updated
   2279	 * while we are still reading) - jhs
   2280	 */
   2281	spin_lock_bh(&x->lock);
   2282	c.data.aevent = p->flags;
   2283	c.seq = nlh->nlmsg_seq;
   2284	c.portid = nlh->nlmsg_pid;
   2285
   2286	err = build_aevent(r_skb, x, &c);
   2287	BUG_ON(err < 0);
   2288
   2289	err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
   2290	spin_unlock_bh(&x->lock);
   2291	xfrm_state_put(x);
   2292	return err;
   2293}
   2294
   2295static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
   2296		struct nlattr **attrs)
   2297{
   2298	struct net *net = sock_net(skb->sk);
   2299	struct xfrm_state *x;
   2300	struct km_event c;
   2301	int err = -EINVAL;
   2302	u32 mark = 0;
   2303	struct xfrm_mark m;
   2304	struct xfrm_aevent_id *p = nlmsg_data(nlh);
   2305	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
   2306	struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
   2307	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
   2308	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
   2309	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
   2310
   2311	if (!lt && !rp && !re && !et && !rt)
   2312		return err;
   2313
   2314	/* pedantic mode - userspace must explicitly ask to replace (NLM_F_REPLACE) */
   2315	if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
   2316		return err;
   2317
   2318	mark = xfrm_mark_get(attrs, &m);
   2319
   2320	x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
   2321	if (x == NULL)
   2322		return -ESRCH;
   2323
   2324	if (x->km.state != XFRM_STATE_VALID)
   2325		goto out;
   2326
   2327	err = xfrm_replay_verify_len(x->replay_esn, re);
   2328	if (err)
   2329		goto out;
   2330
   2331	spin_lock_bh(&x->lock);
   2332	xfrm_update_ae_params(x, attrs, 1);
   2333	spin_unlock_bh(&x->lock);
   2334
   2335	c.event = nlh->nlmsg_type;
   2336	c.seq = nlh->nlmsg_seq;
   2337	c.portid = nlh->nlmsg_pid;
   2338	c.data.aevent = XFRM_AE_CU;
   2339	km_state_notify(x, &c);
   2340	err = 0;
   2341out:
   2342	xfrm_state_put(x);
   2343	return err;
   2344}
   2345
   2346static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
   2347		struct nlattr **attrs)
   2348{
   2349	struct net *net = sock_net(skb->sk);
   2350	struct km_event c;
   2351	u8 type = XFRM_POLICY_TYPE_MAIN;
   2352	int err;
   2353
   2354	err = copy_from_user_policy_type(&type, attrs);
   2355	if (err)
   2356		return err;
   2357
   2358	err = xfrm_policy_flush(net, type, true);
   2359	if (err) {
   2360		if (err == -ESRCH) /* empty table */
   2361			return 0;
   2362		return err;
   2363	}
   2364
   2365	c.data.type = type;
   2366	c.event = nlh->nlmsg_type;
   2367	c.seq = nlh->nlmsg_seq;
   2368	c.portid = nlh->nlmsg_pid;
   2369	c.net = net;
   2370	km_policy_notify(NULL, 0, &c);
   2371	return 0;
   2372}
   2373
   2374static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
   2375		struct nlattr **attrs)
   2376{
   2377	struct net *net = sock_net(skb->sk);
   2378	struct xfrm_policy *xp;
   2379	struct xfrm_user_polexpire *up = nlmsg_data(nlh);
   2380	struct xfrm_userpolicy_info *p = &up->pol;
   2381	u8 type = XFRM_POLICY_TYPE_MAIN;
   2382	int err = -ENOENT;
   2383	struct xfrm_mark m;
   2384	u32 if_id = 0;
   2385
   2386	err = copy_from_user_policy_type(&type, attrs);
   2387	if (err)
   2388		return err;
   2389
   2390	err = verify_policy_dir(p->dir);
   2391	if (err)
   2392		return err;
   2393
   2394	if (attrs[XFRMA_IF_ID])
   2395		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
   2396
   2397	xfrm_mark_get(attrs, &m);
   2398
   2399	if (p->index)
   2400		xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index,
   2401				      0, &err);
   2402	else {
   2403		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
   2404		struct xfrm_sec_ctx *ctx;
   2405
   2406		err = verify_sec_ctx_len(attrs);
   2407		if (err)
   2408			return err;
   2409
   2410		ctx = NULL;
   2411		if (rt) {
   2412			struct xfrm_user_sec_ctx *uctx = nla_data(rt);
   2413
   2414			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
   2415			if (err)
   2416				return err;
   2417		}
   2418		xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
   2419					   &p->sel, ctx, 0, &err);
   2420		security_xfrm_policy_free(ctx);
   2421	}
   2422	if (xp == NULL)
   2423		return -ENOENT;
   2424
   2425	if (unlikely(xp->walk.dead))
   2426		goto out;
   2427
   2428	err = 0;
   2429	if (up->hard) {
   2430		xfrm_policy_delete(xp, p->dir);
   2431		xfrm_audit_policy_delete(xp, 1, true);
   2432	}
   2433	km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
   2434
   2435out:
   2436	xfrm_pol_put(xp);
   2437	return err;
   2438}
   2439
   2440static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
   2441		struct nlattr **attrs)
   2442{
   2443	struct net *net = sock_net(skb->sk);
   2444	struct xfrm_state *x;
   2445	int err;
   2446	struct xfrm_user_expire *ue = nlmsg_data(nlh);
   2447	struct xfrm_usersa_info *p = &ue->state;
   2448	struct xfrm_mark m;
   2449	u32 mark = xfrm_mark_get(attrs, &m);
   2450
   2451	x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
   2452
   2453	err = -ENOENT;
   2454	if (x == NULL)
   2455		return err;
   2456
   2457	spin_lock_bh(&x->lock);
   2458	err = -EINVAL;
   2459	if (x->km.state != XFRM_STATE_VALID)
   2460		goto out;
   2461	km_state_expired(x, ue->hard, nlh->nlmsg_pid);
   2462
   2463	if (ue->hard) {
   2464		__xfrm_state_delete(x);
   2465		xfrm_audit_state_delete(x, 1, true);
   2466	}
   2467	err = 0;
   2468out:
   2469	spin_unlock_bh(&x->lock);
   2470	xfrm_state_put(x);
   2471	return err;
   2472}
   2473
   2474static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
   2475		struct nlattr **attrs)
   2476{
   2477	struct net *net = sock_net(skb->sk);
   2478	struct xfrm_policy *xp;
   2479	struct xfrm_user_tmpl *ut;
   2480	int i;
   2481	struct nlattr *rt = attrs[XFRMA_TMPL];
   2482	struct xfrm_mark mark;
   2483
   2484	struct xfrm_user_acquire *ua = nlmsg_data(nlh);
   2485	struct xfrm_state *x = xfrm_state_alloc(net);
   2486	int err = -ENOMEM;
   2487
   2488	if (!x)
   2489		goto nomem;
   2490
   2491	xfrm_mark_get(attrs, &mark);
   2492
   2493	err = verify_newpolicy_info(&ua->policy);
   2494	if (err)
   2495		goto free_state;
   2496	err = verify_sec_ctx_len(attrs);
   2497	if (err)
   2498		goto free_state;
   2499
   2500	/* build an XP (xfrm_policy) from the acquire message */
   2501	xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
   2502	if (!xp)
   2503		goto free_state;
   2504
   2505	memcpy(&x->id, &ua->id, sizeof(ua->id));
   2506	memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
   2507	memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
   2508	xp->mark.m = x->mark.m = mark.m;
   2509	xp->mark.v = x->mark.v = mark.v;
   2510	ut = nla_data(rt);
   2511	/* extract the templates and for each call km_query */
   2512	for (i = 0; i < xp->xfrm_nr; i++, ut++) {
   2513		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
   2514		memcpy(&x->id, &t->id, sizeof(x->id));
   2515		x->props.mode = t->mode;
   2516		x->props.reqid = t->reqid;
   2517		x->props.family = ut->family;
   2518		t->aalgos = ua->aalgos;
   2519		t->ealgos = ua->ealgos;
   2520		t->calgos = ua->calgos;
   2521		err = km_query(x, t, xp);
   2522
   2523	}
   2524
   2525	xfrm_state_free(x);
   2526	kfree(xp);
   2527
   2528	return 0;
   2529
   2530free_state:
   2531	xfrm_state_free(x);
   2532nomem:
   2533	return err;
   2534}
   2535
   2536#ifdef CONFIG_XFRM_MIGRATE
   2537static int copy_from_user_migrate(struct xfrm_migrate *ma,
   2538				  struct xfrm_kmaddress *k,
   2539				  struct nlattr **attrs, int *num)
   2540{
   2541	struct nlattr *rt = attrs[XFRMA_MIGRATE];
   2542	struct xfrm_user_migrate *um;
   2543	int i, num_migrate;
   2544
   2545	if (k != NULL) {
   2546		struct xfrm_user_kmaddress *uk;
   2547
   2548		uk = nla_data(attrs[XFRMA_KMADDRESS]);
   2549		memcpy(&k->local, &uk->local, sizeof(k->local));
   2550		memcpy(&k->remote, &uk->remote, sizeof(k->remote));
   2551		k->family = uk->family;
   2552		k->reserved = uk->reserved;
   2553	}
   2554
   2555	um = nla_data(rt);
   2556	num_migrate = nla_len(rt) / sizeof(*um);
   2557
   2558	if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
   2559		return -EINVAL;
   2560
   2561	for (i = 0; i < num_migrate; i++, um++, ma++) {
   2562		memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
   2563		memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
   2564		memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
   2565		memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
   2566
   2567		ma->proto = um->proto;
   2568		ma->mode = um->mode;
   2569		ma->reqid = um->reqid;
   2570
   2571		ma->old_family = um->old_family;
   2572		ma->new_family = um->new_family;
   2573	}
   2574
   2575	*num = i;
   2576	return 0;
   2577}
   2578
   2579static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
   2580			   struct nlattr **attrs)
   2581{
   2582	struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
   2583	struct xfrm_migrate m[XFRM_MAX_DEPTH];
   2584	struct xfrm_kmaddress km, *kmp;
   2585	u8 type;
   2586	int err;
   2587	int n = 0;
   2588	struct net *net = sock_net(skb->sk);
   2589	struct xfrm_encap_tmpl  *encap = NULL;
   2590	u32 if_id = 0;
   2591
   2592	if (attrs[XFRMA_MIGRATE] == NULL)
   2593		return -EINVAL;
   2594
   2595	kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
   2596
   2597	err = copy_from_user_policy_type(&type, attrs);
   2598	if (err)
   2599		return err;
   2600
   2601	err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
   2602	if (err)
   2603		return err;
   2604
   2605	if (!n)
   2606		return 0;
   2607
   2608	if (attrs[XFRMA_ENCAP]) {
   2609		encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
   2610				sizeof(*encap), GFP_KERNEL);
   2611		if (!encap)
   2612			return -ENOMEM;
   2613	}
   2614
   2615	if (attrs[XFRMA_IF_ID])
   2616		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
   2617
   2618	err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap, if_id);
   2619
   2620	kfree(encap);
   2621
   2622	return err;
   2623}
   2624#else
   2625static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
   2626			   struct nlattr **attrs)
   2627{
   2628	return -ENOPROTOOPT;
   2629}
   2630#endif
   2631
   2632#ifdef CONFIG_XFRM_MIGRATE
   2633static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
   2634{
   2635	struct xfrm_user_migrate um;
   2636
   2637	memset(&um, 0, sizeof(um));
   2638	um.proto = m->proto;
   2639	um.mode = m->mode;
   2640	um.reqid = m->reqid;
   2641	um.old_family = m->old_family;
   2642	memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
   2643	memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
   2644	um.new_family = m->new_family;
   2645	memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
   2646	memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
   2647
   2648	return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
   2649}
   2650
   2651static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
   2652{
   2653	struct xfrm_user_kmaddress uk;
   2654
   2655	memset(&uk, 0, sizeof(uk));
   2656	uk.family = k->family;
   2657	uk.reserved = k->reserved;
   2658	memcpy(&uk.local, &k->local, sizeof(uk.local));
   2659	memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
   2660
   2661	return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
   2662}
   2663
   2664static inline unsigned int xfrm_migrate_msgsize(int num_migrate, int with_kma,
   2665						int with_encp)
   2666{
   2667	return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
   2668	      + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
   2669	      + (with_encp ? nla_total_size(sizeof(struct xfrm_encap_tmpl)) : 0)
   2670	      + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
   2671	      + userpolicy_type_attrsize();
   2672}
   2673
   2674static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
   2675			 int num_migrate, const struct xfrm_kmaddress *k,
   2676			 const struct xfrm_selector *sel,
   2677			 const struct xfrm_encap_tmpl *encap, u8 dir, u8 type)
   2678{
   2679	const struct xfrm_migrate *mp;
   2680	struct xfrm_userpolicy_id *pol_id;
   2681	struct nlmsghdr *nlh;
   2682	int i, err;
   2683
   2684	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
   2685	if (nlh == NULL)
   2686		return -EMSGSIZE;
   2687
   2688	pol_id = nlmsg_data(nlh);
   2689	/* copy data from selector, dir, and type to the pol_id */
   2690	memset(pol_id, 0, sizeof(*pol_id));
   2691	memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
   2692	pol_id->dir = dir;
   2693
   2694	if (k != NULL) {
   2695		err = copy_to_user_kmaddress(k, skb);
   2696		if (err)
   2697			goto out_cancel;
   2698	}
   2699	if (encap) {
   2700		err = nla_put(skb, XFRMA_ENCAP, sizeof(*encap), encap);
   2701		if (err)
   2702			goto out_cancel;
   2703	}
   2704	err = copy_to_user_policy_type(type, skb);
   2705	if (err)
   2706		goto out_cancel;
   2707	for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
   2708		err = copy_to_user_migrate(mp, skb);
   2709		if (err)
   2710			goto out_cancel;
   2711	}
   2712
   2713	nlmsg_end(skb, nlh);
   2714	return 0;
   2715
   2716out_cancel:
   2717	nlmsg_cancel(skb, nlh);
   2718	return err;
   2719}
   2720
   2721static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
   2722			     const struct xfrm_migrate *m, int num_migrate,
   2723			     const struct xfrm_kmaddress *k,
   2724			     const struct xfrm_encap_tmpl *encap)
   2725{
   2726	struct net *net = &init_net;
   2727	struct sk_buff *skb;
   2728	int err;
   2729
   2730	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap),
   2731			GFP_ATOMIC);
   2732	if (skb == NULL)
   2733		return -ENOMEM;
   2734
   2735	/* build migrate */
   2736	err = build_migrate(skb, m, num_migrate, k, sel, encap, dir, type);
   2737	BUG_ON(err < 0);
   2738
   2739	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE);
   2740}
   2741#else
   2742static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
   2743			     const struct xfrm_migrate *m, int num_migrate,
   2744			     const struct xfrm_kmaddress *k,
   2745			     const struct xfrm_encap_tmpl *encap)
   2746{
   2747	return -ENOPROTOOPT;
   2748}
   2749#endif
   2750
   2751#define XMSGSIZE(type) sizeof(struct type)
   2752
   2753const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
   2754	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
   2755	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
   2756	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
   2757	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
   2758	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
   2759	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
   2760	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
   2761	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
   2762	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
   2763	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
   2764	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
   2765	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
   2766	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
   2767	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
   2768	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
   2769	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
   2770	[XFRM_MSG_REPORT      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
   2771	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
   2772	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = sizeof(u32),
   2773	[XFRM_MSG_NEWSPDINFO  - XFRM_MSG_BASE] = sizeof(u32),
   2774	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = sizeof(u32),
   2775	[XFRM_MSG_SETDEFAULT  - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
   2776	[XFRM_MSG_GETDEFAULT  - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
   2777};
   2778EXPORT_SYMBOL_GPL(xfrm_msg_min);
   2779
   2780#undef XMSGSIZE
   2781
   2782const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
   2783	[XFRMA_SA]		= { .len = sizeof(struct xfrm_usersa_info)},
   2784	[XFRMA_POLICY]		= { .len = sizeof(struct xfrm_userpolicy_info)},
   2785	[XFRMA_LASTUSED]	= { .type = NLA_U64},
   2786	[XFRMA_ALG_AUTH_TRUNC]	= { .len = sizeof(struct xfrm_algo_auth)},
   2787	[XFRMA_ALG_AEAD]	= { .len = sizeof(struct xfrm_algo_aead) },
   2788	[XFRMA_ALG_AUTH]	= { .len = sizeof(struct xfrm_algo) },
   2789	[XFRMA_ALG_CRYPT]	= { .len = sizeof(struct xfrm_algo) },
   2790	[XFRMA_ALG_COMP]	= { .len = sizeof(struct xfrm_algo) },
   2791	[XFRMA_ENCAP]		= { .len = sizeof(struct xfrm_encap_tmpl) },
   2792	[XFRMA_TMPL]		= { .len = sizeof(struct xfrm_user_tmpl) },
   2793	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_sec_ctx) },
   2794	[XFRMA_LTIME_VAL]	= { .len = sizeof(struct xfrm_lifetime_cur) },
   2795	[XFRMA_REPLAY_VAL]	= { .len = sizeof(struct xfrm_replay_state) },
   2796	[XFRMA_REPLAY_THRESH]	= { .type = NLA_U32 },
   2797	[XFRMA_ETIMER_THRESH]	= { .type = NLA_U32 },
   2798	[XFRMA_SRCADDR]		= { .len = sizeof(xfrm_address_t) },
   2799	[XFRMA_COADDR]		= { .len = sizeof(xfrm_address_t) },
   2800	[XFRMA_POLICY_TYPE]	= { .len = sizeof(struct xfrm_userpolicy_type)},
   2801	[XFRMA_MIGRATE]		= { .len = sizeof(struct xfrm_user_migrate) },
   2802	[XFRMA_KMADDRESS]	= { .len = sizeof(struct xfrm_user_kmaddress) },
   2803	[XFRMA_MARK]		= { .len = sizeof(struct xfrm_mark) },
   2804	[XFRMA_TFCPAD]		= { .type = NLA_U32 },
   2805	[XFRMA_REPLAY_ESN_VAL]	= { .len = sizeof(struct xfrm_replay_state_esn) },
   2806	[XFRMA_SA_EXTRA_FLAGS]	= { .type = NLA_U32 },
   2807	[XFRMA_PROTO]		= { .type = NLA_U8 },
   2808	[XFRMA_ADDRESS_FILTER]	= { .len = sizeof(struct xfrm_address_filter) },
   2809	[XFRMA_OFFLOAD_DEV]	= { .len = sizeof(struct xfrm_user_offload) },
   2810	[XFRMA_SET_MARK]	= { .type = NLA_U32 },
   2811	[XFRMA_SET_MARK_MASK]	= { .type = NLA_U32 },
   2812	[XFRMA_IF_ID]		= { .type = NLA_U32 },
   2813};
   2814EXPORT_SYMBOL_GPL(xfrma_policy);
   2815
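/*
 * Userspace sketch (hypothetical helper; standard <linux/netlink.h>
 * attribute layout): attributes appended to an xfrm request are ordinary
 * netlink TLVs, and nlmsg_parse_deprecated() below rejects any whose
 * payload is shorter than the minimum length given in xfrma_policy above.
 */
#include <string.h>
#include <linux/netlink.h>
#include <linux/xfrm.h>

/* Append one attribute to a request being built at nlh (message buffer of
 * bufsize bytes). Returns 0, or -1 if the attribute does not fit. */
static int put_attr(struct nlmsghdr *nlh, size_t bufsize, unsigned short type,
		    const void *data, unsigned short len)
{
	struct nlattr *nla = (struct nlattr *)((char *)nlh +
					       NLMSG_ALIGN(nlh->nlmsg_len));

	if (NLMSG_ALIGN(nlh->nlmsg_len) + NLA_HDRLEN + NLA_ALIGN(len) > bufsize)
		return -1;

	nla->nla_type = type;
	nla->nla_len = NLA_HDRLEN + len;
	memcpy((char *)nla + NLA_HDRLEN, data, len);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + NLA_HDRLEN + NLA_ALIGN(len);
	return 0;
}

/* e.g. appending a mark that satisfies the sizeof(struct xfrm_mark) minimum,
 * for a message built in char buf[...] headed by nlh:
 *
 *	struct xfrm_mark mark = { .v = 0x1, .m = 0xffffffff };
 *
 *	put_attr(nlh, sizeof(buf), XFRMA_MARK, &mark, sizeof(mark));
 */
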
   2816static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
   2817	[XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
   2818	[XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
   2819};
   2820
   2821static const struct xfrm_link {
   2822	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
   2823	int (*start)(struct netlink_callback *);
   2824	int (*dump)(struct sk_buff *, struct netlink_callback *);
   2825	int (*done)(struct netlink_callback *);
   2826	const struct nla_policy *nla_pol;
   2827	int nla_max;
   2828} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
   2829	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
   2830	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = { .doit = xfrm_del_sa        },
   2831	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
   2832						   .dump = xfrm_dump_sa,
   2833						   .done = xfrm_dump_sa_done  },
   2834	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
   2835	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy    },
   2836	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
   2837						   .start = xfrm_dump_policy_start,
   2838						   .dump = xfrm_dump_policy,
   2839						   .done = xfrm_dump_policy_done },
   2840	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
   2841	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire   },
   2842	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
   2843	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
   2844	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
   2845	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
   2846	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa      },
   2847	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy  },
   2848	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = { .doit = xfrm_new_ae  },
   2849	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = { .doit = xfrm_get_ae  },
   2850	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate    },
   2851	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo   },
   2852	[XFRM_MSG_NEWSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo,
   2853						   .nla_pol = xfrma_spd_policy,
   2854						   .nla_max = XFRMA_SPD_MAX },
   2855	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo   },
   2856	[XFRM_MSG_SETDEFAULT  - XFRM_MSG_BASE] = { .doit = xfrm_set_default   },
   2857	[XFRM_MSG_GETDEFAULT  - XFRM_MSG_BASE] = { .doit = xfrm_get_default   },
   2858};
   2859
   2860static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
   2861			     struct netlink_ext_ack *extack)
   2862{
   2863	struct net *net = sock_net(skb->sk);
   2864	struct nlattr *attrs[XFRMA_MAX+1];
   2865	const struct xfrm_link *link;
   2866	struct nlmsghdr *nlh64 = NULL;
   2867	int type, err;
   2868
   2869	type = nlh->nlmsg_type;
   2870	if (type > XFRM_MSG_MAX)
   2871		return -EINVAL;
   2872
   2873	type -= XFRM_MSG_BASE;
   2874	link = &xfrm_dispatch[type];
   2875
   2876	/* All operations require privileges, even GET */
   2877	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
   2878		return -EPERM;
   2879
   2880	if (in_compat_syscall()) {
   2881		struct xfrm_translator *xtr = xfrm_get_translator();
   2882
   2883		if (!xtr)
   2884			return -EOPNOTSUPP;
   2885
   2886		nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max,
   2887					    link->nla_pol, extack);
   2888		xfrm_put_translator(xtr);
   2889		if (IS_ERR(nlh64))
   2890			return PTR_ERR(nlh64);
   2891		if (nlh64)
   2892			nlh = nlh64;
   2893	}
   2894
   2895	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
   2896	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
   2897	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
   2898		struct netlink_dump_control c = {
   2899			.start = link->start,
   2900			.dump = link->dump,
   2901			.done = link->done,
   2902		};
   2903
   2904		if (link->dump == NULL) {
   2905			err = -EINVAL;
   2906			goto err;
   2907		}
   2908
   2909		err = netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
   2910		goto err;
   2911	}
   2912
   2913	err = nlmsg_parse_deprecated(nlh, xfrm_msg_min[type], attrs,
   2914				     link->nla_max ? : XFRMA_MAX,
   2915				     link->nla_pol ? : xfrma_policy, extack);
   2916	if (err < 0)
   2917		goto err;
   2918
   2919	if (link->doit == NULL) {
   2920		err = -EINVAL;
   2921		goto err;
   2922	}
   2923
   2924	err = link->doit(skb, nlh, attrs);
   2925
   2926	/* We need to free the skb allocated in xfrm_alloc_compat() before
   2927	 * returning from this function, because consume_skb() won't take
   2928	 * care of the frag_list since the netlink destructor sets
   2929	 * skb->head to NULL. (see netlink_skb_destructor())
   2930	 */
   2931	if (skb_has_frag_list(skb)) {
   2932		kfree_skb(skb_shinfo(skb)->frag_list);
   2933		skb_shinfo(skb)->frag_list = NULL;
   2934	}
   2935
   2936err:
   2937	kvfree(nlh64);
   2938	return err;
   2939}
   2940
   2941static void xfrm_netlink_rcv(struct sk_buff *skb)
   2942{
   2943	struct net *net = sock_net(skb->sk);
   2944
   2945	mutex_lock(&net->xfrm.xfrm_cfg_mutex);
   2946	netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
   2947	mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
   2948}
   2949
   2950static inline unsigned int xfrm_expire_msgsize(void)
   2951{
   2952	return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
   2953	       + nla_total_size(sizeof(struct xfrm_mark));
   2954}
   2955
   2956static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
   2957{
   2958	struct xfrm_user_expire *ue;
   2959	struct nlmsghdr *nlh;
   2960	int err;
   2961
   2962	nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
   2963	if (nlh == NULL)
   2964		return -EMSGSIZE;
   2965
   2966	ue = nlmsg_data(nlh);
   2967	copy_to_user_state(x, &ue->state);
   2968	ue->hard = (c->data.hard != 0) ? 1 : 0;
   2969	/* clear the padding bytes */
   2970	memset_after(ue, 0, hard);
   2971
   2972	err = xfrm_mark_put(skb, &x->mark);
   2973	if (err)
   2974		return err;
   2975
   2976	err = xfrm_if_id_put(skb, x->if_id);
   2977	if (err)
   2978		return err;
   2979
   2980	nlmsg_end(skb, nlh);
   2981	return 0;
   2982}
   2983
   2984static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
   2985{
   2986	struct net *net = xs_net(x);
   2987	struct sk_buff *skb;
   2988
   2989	skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
   2990	if (skb == NULL)
   2991		return -ENOMEM;
   2992
   2993	if (build_expire(skb, x, c) < 0) {
   2994		kfree_skb(skb);
   2995		return -EMSGSIZE;
   2996	}
   2997
   2998	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
   2999}
   3000
   3001static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
   3002{
   3003	struct net *net = xs_net(x);
   3004	struct sk_buff *skb;
   3005	int err;
   3006
   3007	skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
   3008	if (skb == NULL)
   3009		return -ENOMEM;
   3010
   3011	err = build_aevent(skb, x, c);
   3012	BUG_ON(err < 0);
   3013
   3014	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS);
   3015}
   3016
   3017static int xfrm_notify_sa_flush(const struct km_event *c)
   3018{
   3019	struct net *net = c->net;
   3020	struct xfrm_usersa_flush *p;
   3021	struct nlmsghdr *nlh;
   3022	struct sk_buff *skb;
   3023	int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
   3024
   3025	skb = nlmsg_new(len, GFP_ATOMIC);
   3026	if (skb == NULL)
   3027		return -ENOMEM;
   3028
   3029	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
   3030	if (nlh == NULL) {
   3031		kfree_skb(skb);
   3032		return -EMSGSIZE;
   3033	}
   3034
   3035	p = nlmsg_data(nlh);
   3036	p->proto = c->data.proto;
   3037
   3038	nlmsg_end(skb, nlh);
   3039
   3040	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
   3041}
   3042
   3043static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
   3044{
   3045	unsigned int l = 0;
   3046	if (x->aead)
   3047		l += nla_total_size(aead_len(x->aead));
   3048	if (x->aalg) {
   3049		l += nla_total_size(sizeof(struct xfrm_algo) +
   3050				    (x->aalg->alg_key_len + 7) / 8);
   3051		l += nla_total_size(xfrm_alg_auth_len(x->aalg));
   3052	}
   3053	if (x->ealg)
   3054		l += nla_total_size(xfrm_alg_len(x->ealg));
   3055	if (x->calg)
   3056		l += nla_total_size(sizeof(*x->calg));
   3057	if (x->encap)
   3058		l += nla_total_size(sizeof(*x->encap));
   3059	if (x->tfcpad)
   3060		l += nla_total_size(sizeof(x->tfcpad));
   3061	if (x->replay_esn)
   3062		l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
   3063	else
   3064		l += nla_total_size(sizeof(struct xfrm_replay_state));
   3065	if (x->security)
   3066		l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
   3067				    x->security->ctx_len);
   3068	if (x->coaddr)
   3069		l += nla_total_size(sizeof(*x->coaddr));
   3070	if (x->props.extra_flags)
   3071		l += nla_total_size(sizeof(x->props.extra_flags));
   3072	if (x->xso.dev)
   3073		l += nla_total_size(sizeof(struct xfrm_user_offload));
   3074	if (x->props.smark.v | x->props.smark.m) {
   3075		l += nla_total_size(sizeof(x->props.smark.v));
   3076		l += nla_total_size(sizeof(x->props.smark.m));
   3077	}
   3078	if (x->if_id)
   3079		l += nla_total_size(sizeof(x->if_id));
   3080
   3081	/* Must count x->lastused as it may become non-zero behind our back. */
   3082	l += nla_total_size_64bit(sizeof(u64));
   3083
   3084	if (x->mapping_maxage)
   3085		l += nla_total_size(sizeof(x->mapping_maxage));
   3086
   3087	return l;
   3088}
   3089
   3090static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
   3091{
   3092	struct net *net = xs_net(x);
   3093	struct xfrm_usersa_info *p;
   3094	struct xfrm_usersa_id *id;
   3095	struct nlmsghdr *nlh;
   3096	struct sk_buff *skb;
   3097	unsigned int len = xfrm_sa_len(x);
   3098	unsigned int headlen;
   3099	int err;
   3100
   3101	headlen = sizeof(*p);
   3102	if (c->event == XFRM_MSG_DELSA) {
   3103		len += nla_total_size(headlen);
   3104		headlen = sizeof(*id);
   3105		len += nla_total_size(sizeof(struct xfrm_mark));
   3106	}
   3107	len += NLMSG_ALIGN(headlen);
   3108
   3109	skb = nlmsg_new(len, GFP_ATOMIC);
   3110	if (skb == NULL)
   3111		return -ENOMEM;
   3112
   3113	nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
   3114	err = -EMSGSIZE;
   3115	if (nlh == NULL)
   3116		goto out_free_skb;
   3117
   3118	p = nlmsg_data(nlh);
   3119	if (c->event == XFRM_MSG_DELSA) {
   3120		struct nlattr *attr;
   3121
   3122		id = nlmsg_data(nlh);
   3123		memset(id, 0, sizeof(*id));
   3124		memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
   3125		id->spi = x->id.spi;
   3126		id->family = x->props.family;
   3127		id->proto = x->id.proto;
   3128
   3129		attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
   3130		err = -EMSGSIZE;
   3131		if (attr == NULL)
   3132			goto out_free_skb;
   3133
   3134		p = nla_data(attr);
   3135	}
   3136	err = copy_to_user_state_extra(x, p, skb);
   3137	if (err)
   3138		goto out_free_skb;
   3139
   3140	nlmsg_end(skb, nlh);
   3141
   3142	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
   3143
   3144out_free_skb:
   3145	kfree_skb(skb);
   3146	return err;
   3147}
   3148
   3149static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
   3150{
   3151
   3152	switch (c->event) {
   3153	case XFRM_MSG_EXPIRE:
   3154		return xfrm_exp_state_notify(x, c);
   3155	case XFRM_MSG_NEWAE:
   3156		return xfrm_aevent_state_notify(x, c);
   3157	case XFRM_MSG_DELSA:
   3158	case XFRM_MSG_UPDSA:
   3159	case XFRM_MSG_NEWSA:
   3160		return xfrm_notify_sa(x, c);
   3161	case XFRM_MSG_FLUSHSA:
   3162		return xfrm_notify_sa_flush(c);
   3163	default:
   3164		printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
   3165		       c->event);
   3166		break;
   3167	}
   3168
   3169	return 0;
   3170
   3171}
   3172
   3173static inline unsigned int xfrm_acquire_msgsize(struct xfrm_state *x,
   3174						struct xfrm_policy *xp)
   3175{
   3176	return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
   3177	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
   3178	       + nla_total_size(sizeof(struct xfrm_mark))
   3179	       + nla_total_size(xfrm_user_sec_ctx_size(x->security))
   3180	       + userpolicy_type_attrsize();
   3181}
   3182
   3183static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
   3184			 struct xfrm_tmpl *xt, struct xfrm_policy *xp)
   3185{
   3186	__u32 seq = xfrm_get_acqseq();
   3187	struct xfrm_user_acquire *ua;
   3188	struct nlmsghdr *nlh;
   3189	int err;
   3190
   3191	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
   3192	if (nlh == NULL)
   3193		return -EMSGSIZE;
   3194
   3195	ua = nlmsg_data(nlh);
   3196	memcpy(&ua->id, &x->id, sizeof(ua->id));
   3197	memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
   3198	memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
   3199	copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
   3200	ua->aalgos = xt->aalgos;
   3201	ua->ealgos = xt->ealgos;
   3202	ua->calgos = xt->calgos;
   3203	ua->seq = x->km.seq = seq;
   3204
   3205	err = copy_to_user_tmpl(xp, skb);
   3206	if (!err)
   3207		err = copy_to_user_state_sec_ctx(x, skb);
   3208	if (!err)
   3209		err = copy_to_user_policy_type(xp->type, skb);
   3210	if (!err)
   3211		err = xfrm_mark_put(skb, &xp->mark);
   3212	if (!err)
   3213		err = xfrm_if_id_put(skb, xp->if_id);
   3214	if (err) {
   3215		nlmsg_cancel(skb, nlh);
   3216		return err;
   3217	}
   3218
   3219	nlmsg_end(skb, nlh);
   3220	return 0;
   3221}
   3222
   3223static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
   3224			     struct xfrm_policy *xp)
   3225{
   3226	struct net *net = xs_net(x);
   3227	struct sk_buff *skb;
   3228	int err;
   3229
   3230	skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
   3231	if (skb == NULL)
   3232		return -ENOMEM;
   3233
   3234	err = build_acquire(skb, x, xt, xp);
   3235	BUG_ON(err < 0);
   3236
   3237	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE);
   3238}
   3239
   3240/* User gives us xfrm_userpolicy_info followed by an array of 0
   3241 * or more xfrm_user_tmpl templates.
   3242 */
   3243static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
   3244					       u8 *data, int len, int *dir)
   3245{
   3246	struct net *net = sock_net(sk);
   3247	struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
   3248	struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
   3249	struct xfrm_policy *xp;
   3250	int nr;
   3251
   3252	switch (sk->sk_family) {
   3253	case AF_INET:
   3254		if (opt != IP_XFRM_POLICY) {
   3255			*dir = -EOPNOTSUPP;
   3256			return NULL;
   3257		}
   3258		break;
   3259#if IS_ENABLED(CONFIG_IPV6)
   3260	case AF_INET6:
   3261		if (opt != IPV6_XFRM_POLICY) {
   3262			*dir = -EOPNOTSUPP;
   3263			return NULL;
   3264		}
   3265		break;
   3266#endif
   3267	default:
   3268		*dir = -EINVAL;
   3269		return NULL;
   3270	}
   3271
   3272	*dir = -EINVAL;
   3273
   3274	if (len < sizeof(*p) ||
   3275	    verify_newpolicy_info(p))
   3276		return NULL;
   3277
   3278	nr = ((len - sizeof(*p)) / sizeof(*ut));
   3279	if (validate_tmpl(nr, ut, p->sel.family))
   3280		return NULL;
   3281
   3282	if (p->dir > XFRM_POLICY_OUT)
   3283		return NULL;
   3284
   3285	xp = xfrm_policy_alloc(net, GFP_ATOMIC);
   3286	if (xp == NULL) {
   3287		*dir = -ENOBUFS;
   3288		return NULL;
   3289	}
   3290
   3291	copy_from_user_policy(xp, p);
   3292	xp->type = XFRM_POLICY_TYPE_MAIN;
   3293	copy_templates(xp, ut, nr);
   3294
   3295	*dir = p->dir;
   3296
   3297	return xp;
   3298}
   3299
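/*
 * Userspace sketch of the layout parsed by xfrm_compile_policy() above
 * (assuming the usual <linux/xfrm.h> uapi; needs CAP_NET_ADMIN): a
 * struct xfrm_userpolicy_info immediately followed by zero or more
 * struct xfrm_user_tmpl entries, handed to setsockopt(). For an IPv6
 * socket the equivalent option is IPV6_XFRM_POLICY at the IPPROTO_IPV6
 * level.
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/xfrm.h>

#ifndef IP_XFRM_POLICY
#define IP_XFRM_POLICY 17	/* value from the uapi <linux/in.h> */
#endif

static int set_socket_policy(int sock)
{
	struct {
		struct xfrm_userpolicy_info info;
		struct xfrm_user_tmpl tmpl;
	} pol;

	memset(&pol, 0, sizeof(pol));
	pol.info.sel.family = AF_INET;
	pol.info.dir = XFRM_POLICY_OUT;
	pol.info.action = XFRM_POLICY_ALLOW;
	pol.info.lft.soft_byte_limit = XFRM_INF;
	pol.info.lft.hard_byte_limit = XFRM_INF;
	pol.info.lft.soft_packet_limit = XFRM_INF;
	pol.info.lft.hard_packet_limit = XFRM_INF;

	/* one transport-mode ESP template */
	pol.tmpl.family = AF_INET;
	pol.tmpl.id.proto = IPPROTO_ESP;
	pol.tmpl.mode = XFRM_MODE_TRANSPORT;

	return setsockopt(sock, IPPROTO_IP, IP_XFRM_POLICY, &pol, sizeof(pol));
}
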
   3300static inline unsigned int xfrm_polexpire_msgsize(struct xfrm_policy *xp)
   3301{
   3302	return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
   3303	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
   3304	       + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
   3305	       + nla_total_size(sizeof(struct xfrm_mark))
   3306	       + userpolicy_type_attrsize();
   3307}
   3308
   3309static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
   3310			   int dir, const struct km_event *c)
   3311{
   3312	struct xfrm_user_polexpire *upe;
   3313	int hard = c->data.hard;
   3314	struct nlmsghdr *nlh;
   3315	int err;
   3316
   3317	nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
   3318	if (nlh == NULL)
   3319		return -EMSGSIZE;
   3320
   3321	upe = nlmsg_data(nlh);
   3322	copy_to_user_policy(xp, &upe->pol, dir);
   3323	err = copy_to_user_tmpl(xp, skb);
   3324	if (!err)
   3325		err = copy_to_user_sec_ctx(xp, skb);
   3326	if (!err)
   3327		err = copy_to_user_policy_type(xp->type, skb);
   3328	if (!err)
   3329		err = xfrm_mark_put(skb, &xp->mark);
   3330	if (!err)
   3331		err = xfrm_if_id_put(skb, xp->if_id);
   3332	if (err) {
   3333		nlmsg_cancel(skb, nlh);
   3334		return err;
   3335	}
   3336	upe->hard = !!hard;
   3337
   3338	nlmsg_end(skb, nlh);
   3339	return 0;
   3340}
   3341
   3342static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
   3343{
   3344	struct net *net = xp_net(xp);
   3345	struct sk_buff *skb;
   3346	int err;
   3347
   3348	skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
   3349	if (skb == NULL)
   3350		return -ENOMEM;
   3351
   3352	err = build_polexpire(skb, xp, dir, c);
   3353	BUG_ON(err < 0);
   3354
   3355	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
   3356}
   3357
   3358static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
   3359{
   3360	unsigned int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
   3361	struct net *net = xp_net(xp);
   3362	struct xfrm_userpolicy_info *p;
   3363	struct xfrm_userpolicy_id *id;
   3364	struct nlmsghdr *nlh;
   3365	struct sk_buff *skb;
   3366	unsigned int headlen;
   3367	int err;
   3368
   3369	headlen = sizeof(*p);
   3370	if (c->event == XFRM_MSG_DELPOLICY) {
   3371		len += nla_total_size(headlen);
   3372		headlen = sizeof(*id);
   3373	}
   3374	len += userpolicy_type_attrsize();
   3375	len += nla_total_size(sizeof(struct xfrm_mark));
   3376	len += NLMSG_ALIGN(headlen);
   3377
   3378	skb = nlmsg_new(len, GFP_ATOMIC);
   3379	if (skb == NULL)
   3380		return -ENOMEM;
   3381
   3382	nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
   3383	err = -EMSGSIZE;
   3384	if (nlh == NULL)
   3385		goto out_free_skb;
   3386
   3387	p = nlmsg_data(nlh);
   3388	if (c->event == XFRM_MSG_DELPOLICY) {
   3389		struct nlattr *attr;
   3390
   3391		id = nlmsg_data(nlh);
   3392		memset(id, 0, sizeof(*id));
   3393		id->dir = dir;
   3394		if (c->data.byid)
   3395			id->index = xp->index;
   3396		else
   3397			memcpy(&id->sel, &xp->selector, sizeof(id->sel));
   3398
   3399		attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
   3400		err = -EMSGSIZE;
   3401		if (attr == NULL)
   3402			goto out_free_skb;
   3403
   3404		p = nla_data(attr);
   3405	}
   3406
   3407	copy_to_user_policy(xp, p, dir);
   3408	err = copy_to_user_tmpl(xp, skb);
   3409	if (!err)
   3410		err = copy_to_user_policy_type(xp->type, skb);
   3411	if (!err)
   3412		err = xfrm_mark_put(skb, &xp->mark);
   3413	if (!err)
   3414		err = xfrm_if_id_put(skb, xp->if_id);
   3415	if (err)
   3416		goto out_free_skb;
   3417
   3418	nlmsg_end(skb, nlh);
   3419
   3420	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
   3421
   3422out_free_skb:
   3423	kfree_skb(skb);
   3424	return err;
   3425}
   3426
   3427static int xfrm_notify_policy_flush(const struct km_event *c)
   3428{
   3429	struct net *net = c->net;
   3430	struct nlmsghdr *nlh;
   3431	struct sk_buff *skb;
   3432	int err;
   3433
   3434	skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
   3435	if (skb == NULL)
   3436		return -ENOMEM;
   3437
   3438	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
   3439	err = -EMSGSIZE;
   3440	if (nlh == NULL)
   3441		goto out_free_skb;
   3442	err = copy_to_user_policy_type(c->data.type, skb);
   3443	if (err)
   3444		goto out_free_skb;
   3445
   3446	nlmsg_end(skb, nlh);
   3447
   3448	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
   3449
   3450out_free_skb:
   3451	kfree_skb(skb);
   3452	return err;
   3453}
   3454
   3455static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
   3456{
   3457
   3458	switch (c->event) {
   3459	case XFRM_MSG_NEWPOLICY:
   3460	case XFRM_MSG_UPDPOLICY:
   3461	case XFRM_MSG_DELPOLICY:
   3462		return xfrm_notify_policy(xp, dir, c);
   3463	case XFRM_MSG_FLUSHPOLICY:
   3464		return xfrm_notify_policy_flush(c);
   3465	case XFRM_MSG_POLEXPIRE:
   3466		return xfrm_exp_policy_notify(xp, dir, c);
   3467	default:
   3468		printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
   3469		       c->event);
   3470	}
   3471
   3472	return 0;
   3473
   3474}
   3475
   3476static inline unsigned int xfrm_report_msgsize(void)
   3477{
   3478	return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
   3479}
   3480
   3481static int build_report(struct sk_buff *skb, u8 proto,
   3482			struct xfrm_selector *sel, xfrm_address_t *addr)
   3483{
   3484	struct xfrm_user_report *ur;
   3485	struct nlmsghdr *nlh;
   3486
   3487	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
   3488	if (nlh == NULL)
   3489		return -EMSGSIZE;
   3490
   3491	ur = nlmsg_data(nlh);
   3492	ur->proto = proto;
   3493	memcpy(&ur->sel, sel, sizeof(ur->sel));
   3494
   3495	if (addr) {
   3496		int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
   3497		if (err) {
   3498			nlmsg_cancel(skb, nlh);
   3499			return err;
   3500		}
   3501	}
   3502	nlmsg_end(skb, nlh);
   3503	return 0;
   3504}
   3505
   3506static int xfrm_send_report(struct net *net, u8 proto,
   3507			    struct xfrm_selector *sel, xfrm_address_t *addr)
   3508{
   3509	struct sk_buff *skb;
   3510	int err;
   3511
   3512	skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
   3513	if (skb == NULL)
   3514		return -ENOMEM;
   3515
   3516	err = build_report(skb, proto, sel, addr);
   3517	BUG_ON(err < 0);
   3518
   3519	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT);
   3520}
   3521
   3522static inline unsigned int xfrm_mapping_msgsize(void)
   3523{
   3524	return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
   3525}
   3526
   3527static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
   3528			 xfrm_address_t *new_saddr, __be16 new_sport)
   3529{
   3530	struct xfrm_user_mapping *um;
   3531	struct nlmsghdr *nlh;
   3532
   3533	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
   3534	if (nlh == NULL)
   3535		return -EMSGSIZE;
   3536
   3537	um = nlmsg_data(nlh);
   3538
   3539	memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
   3540	um->id.spi = x->id.spi;
   3541	um->id.family = x->props.family;
   3542	um->id.proto = x->id.proto;
   3543	memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
   3544	memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
   3545	um->new_sport = new_sport;
   3546	um->old_sport = x->encap->encap_sport;
   3547	um->reqid = x->props.reqid;
   3548
   3549	nlmsg_end(skb, nlh);
   3550	return 0;
   3551}
   3552
   3553static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
   3554			     __be16 sport)
   3555{
   3556	struct net *net = xs_net(x);
   3557	struct sk_buff *skb;
   3558	int err;
   3559
   3560	if (x->id.proto != IPPROTO_ESP)
   3561		return -EINVAL;
   3562
   3563	if (!x->encap)
   3564		return -EINVAL;
   3565
   3566	skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
   3567	if (skb == NULL)
   3568		return -ENOMEM;
   3569
   3570	err = build_mapping(skb, x, ipaddr, sport);
   3571	BUG_ON(err < 0);
   3572
   3573	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING);
   3574}
   3575
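/*
 * Userspace sketch (assuming <linux/xfrm.h> exposes enum xfrm_nlgroups;
 * joining the group needs CAP_NET_ADMIN): a NAT-T keepalive daemon
 * receives these XFRM_MSG_MAPPING notifications by subscribing to the
 * XFRMNLGRP_MAPPING multicast group on a NETLINK_XFRM socket.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/xfrm.h>

static int open_mapping_listener(void)
{
	struct sockaddr_nl addr;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);

	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	/* legacy bitmask form is fine here since XFRMNLGRP_MAPPING < 32 */
	addr.nl_groups = 1u << (XFRMNLGRP_MAPPING - 1);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}

	/* each recv() now yields an XFRM_MSG_MAPPING nlmsghdr whose payload
	 * is the struct xfrm_user_mapping filled in by build_mapping() */
	return fd;
}
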
   3576static bool xfrm_is_alive(const struct km_event *c)
   3577{
   3578	return (bool)xfrm_acquire_is_on(c->net);
   3579}
   3580
   3581static struct xfrm_mgr netlink_mgr = {
   3582	.notify		= xfrm_send_state_notify,
   3583	.acquire	= xfrm_send_acquire,
   3584	.compile_policy	= xfrm_compile_policy,
   3585	.notify_policy	= xfrm_send_policy_notify,
   3586	.report		= xfrm_send_report,
   3587	.migrate	= xfrm_send_migrate,
   3588	.new_mapping	= xfrm_send_mapping,
   3589	.is_alive	= xfrm_is_alive,
   3590};
   3591
   3592static int __net_init xfrm_user_net_init(struct net *net)
   3593{
   3594	struct sock *nlsk;
   3595	struct netlink_kernel_cfg cfg = {
   3596		.groups	= XFRMNLGRP_MAX,
   3597		.input	= xfrm_netlink_rcv,
   3598	};
   3599
   3600	nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
   3601	if (nlsk == NULL)
   3602		return -ENOMEM;
   3603	net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
   3604	rcu_assign_pointer(net->xfrm.nlsk, nlsk);
   3605	return 0;
   3606}
   3607
   3608static void __net_exit xfrm_user_net_pre_exit(struct net *net)
   3609{
   3610	RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
   3611}
   3612
   3613static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
   3614{
   3615	struct net *net;
   3616
   3617	list_for_each_entry(net, net_exit_list, exit_list)
   3618		netlink_kernel_release(net->xfrm.nlsk_stash);
   3619}
   3620
   3621static struct pernet_operations xfrm_user_net_ops = {
   3622	.init	    = xfrm_user_net_init,
   3623	.pre_exit   = xfrm_user_net_pre_exit,
   3624	.exit_batch = xfrm_user_net_exit,
   3625};
   3626
   3627static int __init xfrm_user_init(void)
   3628{
   3629	int rv;
   3630
   3631	printk(KERN_INFO "Initializing XFRM netlink socket\n");
   3632
   3633	rv = register_pernet_subsys(&xfrm_user_net_ops);
   3634	if (rv < 0)
   3635		return rv;
   3636	rv = xfrm_register_km(&netlink_mgr);
   3637	if (rv < 0)
   3638		unregister_pernet_subsys(&xfrm_user_net_ops);
   3639	return rv;
   3640}
   3641
   3642static void __exit xfrm_user_exit(void)
   3643{
   3644	xfrm_unregister_km(&netlink_mgr);
   3645	unregister_pernet_subsys(&xfrm_user_net_ops);
   3646}
   3647
   3648module_init(xfrm_user_init);
   3649module_exit(xfrm_user_exit);
   3650MODULE_LICENSE("GPL");
   3651MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);