cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

act_csum.c (17705B)
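Implements the tc "csum" packet-editing action: recomputes the IPv4 header checksum and transport-layer (ICMP, IGMP, TCP, UDP, UDPLITE, SCTP) checksums on packets matched by a classifier, for both IPv4 and IPv6.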


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

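/*
 * Parse the netlink TCA_CSUM_* attributes and create a new csum action
 * instance, or update an existing one, binding it into the action IDR.
 */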
static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp,
			 u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CSUM_MAX, nla, csum_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (!err) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_csum_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind) /* don't override defaults */
			return 0;
		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p = to_tcf_csum(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}
	params_new->update_flags = parm->update_flags;

	spin_lock_bh(&p->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(p->params, params_new,
					 lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params_new)
		kfree_rcu(params_new, rcu);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next layer is available in the specified sk_buff.
 * Return a pointer to the next layer on success, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

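/*
 * The per-protocol helpers below each zero the checksum field, recompute
 * the sum over the L4 header and payload in place, and set skb->ip_summed
 * to CHECKSUM_NONE, since the packet now carries a complete checksum and
 * needs no further offload help.
 */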
static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

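/*
 * GSO packets are skipped here (returning 1, i.e. success): their
 * checksums are filled in later, when the skb is segmented into
 * MTU-sized frames.
 */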
static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

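/*
 * For UDP over IPv4 a checksum of zero means "no checksum", so a computed
 * sum that folds to zero is transmitted as all-ones (CSUM_MANGLED_0).
 * UDPLITE reuses udph->len as the checksum coverage, so the real datagram
 * length must come from the IP header (ipl) instead.
 */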
static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both the UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without a protocol check:
	 * UDPLITE uses udph->len for something else (checksum coverage).
	 * Use iph->tot_len, or just ipl, instead.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both the UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without a protocol check:
	 * UDPLITE uses udph->len for something else (checksum coverage).
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl, instead.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);

		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);

		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

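/*
 * SCTP uses a CRC32c over the whole packet rather than the ones'-complement
 * Internet checksum, hence the dedicated sctp_compute_cksum() helper.
 */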
static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	return 1;
}

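/*
 * Dispatch on the IPv4 protocol field and recompute whichever checksums
 * the action's update_flags request. Non-first fragments carry no L4
 * header, so the switch is skipped for them via the frag_off check.
 */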
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

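/*
 * Scan an IPv6 hop-by-hop options header for a jumbogram option; if one
 * is found, replace the payload length (*pl) with the 32-bit jumbo length.
 */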
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

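/*
 * Walk the IPv6 extension header chain until an L4 protocol is reached,
 * then recompute its checksum. Fragments and unknown next headers are
 * passed through untouched.
 */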
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

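/*
 * The action's per-packet entry point: look up the current parameters
 * under RCU, peel any VLAN headers to reach the IP header, recompute the
 * requested checksums, and restore the VLAN headers before returning.
 */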
static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	bool orig_vlan_tag_present = false;
	unsigned int vlan_hdr_count = 0;
	struct tcf_csum_params *params;
	u32 update_flags;
	__be16 protocol;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	tcf_action_update_bstats(&p->common, skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	protocol = skb_protocol(skb, false);
again:
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_8021AD):
		fallthrough;
	case cpu_to_be16(ETH_P_8021Q):
		if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
			protocol = skb->protocol;
			orig_vlan_tag_present = true;
		} else {
			struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;

			protocol = vlan->h_vlan_encapsulated_proto;
			skb_pull(skb, VLAN_HLEN);
			skb_reset_network_header(skb);
			vlan_hdr_count++;
		}
		goto again;
	}

out:
	/* Restore the skb for the pulled VLAN tags */
	while (vlan_hdr_count--) {
		skb_push(skb, VLAN_HLEN);
		skb_reset_network_header(skb);
	}

	return action;

drop:
	tcf_action_inc_drop_qstats(&p->common);
	action = TC_ACT_SHOT;
	goto out;
}

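/* Serialize the action's parameters and timestamps back to netlink. */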
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index   = p->tcf_index,
		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&p->tcf_lock);
	params = rcu_dereference_protected(p->params,
					   lockdep_is_held(&p->tcf_lock));
	opt.action = p->tcf_action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_idr_search(tn, a, index);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data,
				      u32 *index_inc, bool bind,
				      struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_CSUM;
		entry->csum_flags = tcf_csum_update_flags(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CSUM;
	}

	return 0;
}

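/*
 * Module glue: register the csum action's ops with the TC action
 * framework, along with its per-network-namespace state.
 */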
static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.id		= TCA_ID_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum_act,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.walk		= tcf_csum_walker,
	.lookup		= tcf_csum_search,
	.get_fill_size  = tcf_csum_get_fill_size,
	.offload_act_setup = tcf_csum_offload_act_setup,
	.size		= sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(net, tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, csum_net_id);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id   = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);