cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mrp.c (25345B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *	IEEE 802.1Q Multiple Registration Protocol (MRP)
 *
 *	Copyright (c) 2012 Massachusetts Institute of Technology
 *
 *	Adapted from code in net/802/garp.c
 *	Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 */
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/mrp.h>
#include <asm/unaligned.h>

static unsigned int mrp_join_time __read_mostly = 200;
module_param(mrp_join_time, uint, 0644);
MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");

static unsigned int mrp_periodic_time __read_mostly = 1000;
module_param(mrp_periodic_time, uint, 0644);
MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");

MODULE_LICENSE("GPL");

static const u8
mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
	[MRP_APPLICANT_VO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_VO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VO,
	},
	[MRP_APPLICANT_VP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_VO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VP,
	},
	[MRP_APPLICANT_VN] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AN,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VN,
	},
	[MRP_APPLICANT_AN] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_AN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AN,
	},
	[MRP_APPLICANT_AA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_QA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_LA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_LA,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_LA,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_LA,
	},
	[MRP_APPLICANT_AO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_AO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AO,
	},
	[MRP_APPLICANT_QO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_QO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_QO,
	},
	[MRP_APPLICANT_AP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_AO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AP,
	},
	[MRP_APPLICANT_QP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_QO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QP,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AP,
	},
};

static const u8
mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
	[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
	[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
	[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
};
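
/*
 * Editor's note (illustrative, not part of the original file): the applicant
 * state machine is entirely table-driven.  A transition is a plain
 * two-dimensional lookup, and the action taken at a transmit opportunity is
 * a second lookup keyed by the current state, e.g.:
 *
 *	u8 state = MRP_APPLICANT_VO;
 *
 *	state = mrp_applicant_state_table[state][MRP_EVENT_R_JOIN_IN];
 *						// state == MRP_APPLICANT_AO
 *	switch (mrp_tx_action_table[state]) {	// MRP_TX_ACTION_S_IN_OPTIONAL
 *		...
 *	}
 *
 * mrp_attr_event() further below performs exactly this pair of lookups.
 */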

static void mrp_attrvalue_inc(void *value, u8 len)
{
	u8 *v = (u8 *)value;

	/* Add 1 to the last byte. If it becomes zero,
	 * go to the previous byte and repeat.
	 */
	while (len > 0 && !++v[--len])
		;
}
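
/*
 * Editor's note (illustrative, not part of the original file): attribute
 * values are treated as fixed-width big-endian integers, so the carry
 * ripples from the last byte towards the first.  For an assumed two-byte
 * value:
 *
 *	u8 v[2] = { 0x00, 0xff };
 *
 *	mrp_attrvalue_inc(v, sizeof(v));	// v == { 0x01, 0x00 }
 *	mrp_attrvalue_inc(v, sizeof(v));	// v == { 0x01, 0x01 }
 *
 * This is what lets attributes with consecutive values (e.g. adjacent VLAN
 * IDs in MVRP) share a single VectorAttribute when PDUs are built below.
 */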

static int mrp_attr_cmp(const struct mrp_attr *attr,
			 const void *value, u8 len, u8 type)
{
	if (attr->type != type)
		return attr->type - type;
	if (attr->len != len)
		return attr->len - len;
	return memcmp(attr->value, value, len);
}

static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (parent) {
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			parent = parent->rb_left;
		else if (d < 0)
			parent = parent->rb_right;
		else
			return attr;
	}
	return NULL;
}

static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = NULL, **p = &app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (*p) {
		parent = *p;
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			p = &parent->rb_left;
		else if (d < 0)
			p = &parent->rb_right;
		else {
			/* The attribute already exists; re-use it. */
			return attr;
		}
	}
	attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
	if (!attr)
		return attr;
	attr->state = MRP_APPLICANT_VO;
	attr->type  = type;
	attr->len   = len;
	memcpy(attr->value, value, len);

	rb_link_node(&attr->node, parent, p);
	rb_insert_color(&attr->node, &app->mad);
	return attr;
}
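
/*
 * Editor's note (illustrative, not part of the original file): attributes
 * live in a red-black tree ordered by (type, len, value), with the value
 * compared bytewise.  For two attributes of the same type and length
 * holding the big-endian values 0x0001 and 0x0002:
 *
 *	u8 a[2] = { 0x00, 0x01 };
 *	u8 b[2] = { 0x00, 0x02 };
 *
 * mrp_attr_cmp() orders the attribute holding a before the one holding b
 * (memcmp() returns a negative value), so an in-order walk of app->mad
 * visits values in ascending order.  That ordering is what allows runs of
 * consecutive values to be emitted as compact Vectors at TX time.
 */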

static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
{
	rb_erase(&attr->node, &app->mad);
	kfree(attr);
}

static void mrp_attr_destroy_all(struct mrp_applicant *app)
{
	struct rb_node *node, *next;
	struct mrp_attr *attr;

	for (node = rb_first(&app->mad);
	     next = node ? rb_next(node) : NULL, node != NULL;
	     node = next) {
		attr = rb_entry(node, struct mrp_attr, node);
		mrp_attr_destroy(app, attr);
	}
}

static int mrp_pdu_init(struct mrp_applicant *app)
{
	struct sk_buff *skb;
	struct mrp_pdu_hdr *ph;

	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
			GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->dev = app->dev;
	skb->protocol = app->app->pkttype.type;
	skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	ph = __skb_put(skb, sizeof(*ph));
	ph->version = app->app->version;

	app->pdu = skb;
	return 0;
}

static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
{
	__be16 *endmark;

	if (skb_tailroom(app->pdu) < sizeof(*endmark))
		return -1;
	endmark = __skb_put(app->pdu, sizeof(*endmark));
	put_unaligned(MRP_END_MARK, endmark);
	return 0;
}

static void mrp_pdu_queue(struct mrp_applicant *app)
{
	if (!app->pdu)
		return;

	if (mrp_cb(app->pdu)->mh)
		mrp_pdu_append_end_mark(app);
	mrp_pdu_append_end_mark(app);

	dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
			app->app->group_address, app->dev->dev_addr,
			app->pdu->len);

	skb_queue_tail(&app->queue, app->pdu);
	app->pdu = NULL;
}
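
/*
 * Editor's note (illustrative, not part of the original file): the PDU
 * assembled by mrp_pdu_init(), the append helpers above and below, and
 * finally queued by mrp_pdu_queue() has roughly the following on-wire
 * layout (per the IEEE 802.1Q MRPDU encoding):
 *
 *	mrp_pdu_hdr		1 byte		protocol version
 *	  mrp_msg_hdr		2 bytes		attribute type + length
 *	    mrp_vecattr_hdr	2 bytes		LeaveAll flag + event count
 *	    first value		attrlen bytes
 *	    vector		1 byte per 3 events
 *	    ... further VectorAttributes ...
 *	  end mark		2 bytes		(two zero octets)
 *	  ... further Messages ...
 *	end mark		2 bytes
 *
 * which is why mrp_pdu_queue() appends one end mark to close an open
 * Message and a second one to terminate the PDU.
 */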

static void mrp_queue_xmit(struct mrp_applicant *app)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&app->queue)))
		dev_queue_xmit(skb);
}

static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
				  u8 attrtype, u8 attrlen)
{
	struct mrp_msg_hdr *mh;

	if (mrp_cb(app->pdu)->mh) {
		if (mrp_pdu_append_end_mark(app) < 0)
			return -1;
		mrp_cb(app->pdu)->mh = NULL;
		mrp_cb(app->pdu)->vah = NULL;
	}

	if (skb_tailroom(app->pdu) < sizeof(*mh))
		return -1;
	mh = __skb_put(app->pdu, sizeof(*mh));
	mh->attrtype = attrtype;
	mh->attrlen = attrlen;
	mrp_cb(app->pdu)->mh = mh;
	return 0;
}

static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
				      const void *firstattrvalue, u8 attrlen)
{
	struct mrp_vecattr_hdr *vah;

	if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
		return -1;
	vah = __skb_put(app->pdu, sizeof(*vah) + attrlen);
	put_unaligned(0, &vah->lenflags);
	memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
	mrp_cb(app->pdu)->vah = vah;
	memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
	return 0;
}

static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
					const struct mrp_attr *attr,
					enum mrp_vecattr_event vaevent)
{
	u16 len, pos;
	u8 *vaevents;
	int err;
again:
	if (!app->pdu) {
		err = mrp_pdu_init(app);
		if (err < 0)
			return err;
	}

	/* If there is no Message header in the PDU, or the Message header is
	 * for a different attribute type, add an EndMark (if necessary) and a
	 * new Message header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->mh ||
	    mrp_cb(app->pdu)->mh->attrtype != attr->type ||
	    mrp_cb(app->pdu)->mh->attrlen != attr->len) {
		if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
			goto queue;
	}

	/* If there is no VectorAttribute header for this Message in the PDU,
	 * or this attribute's value does not sequentially follow the previous
	 * attribute's value, add a new VectorAttribute header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->vah ||
	    memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
		if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
			goto queue;
	}

	len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
	pos = len % 3;

	/* Events are packed into Vectors in the PDU, three to a byte. Add a
	 * byte to the end of the Vector if necessary.
	 */
	if (!pos) {
		if (skb_tailroom(app->pdu) < sizeof(u8))
			goto queue;
		vaevents = __skb_put(app->pdu, sizeof(u8));
	} else {
		vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
	}

	switch (pos) {
	case 0:
		*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
				       __MRP_VECATTR_EVENT_MAX);
		break;
	case 1:
		*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
		break;
	case 2:
		*vaevents += vaevent;
		break;
	default:
		WARN_ON(1);
	}

	/* Increment the length of the VectorAttribute in the PDU, as well as
	 * the value of the next attribute that would continue its Vector.
	 */
	put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
	mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);

	return 0;

queue:
	mrp_pdu_queue(app);
	goto again;
}
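
/*
 * Editor's note (illustrative, not part of the original file): the switch
 * above packs three AttributeEvents into one Vector byte base-6
 * (__MRP_VECATTR_EVENT_MAX being the number of event values, 6):
 *
 *	byte = e0 * 36 + e1 * 6 + e2;
 *
 * Assuming MRP_VECATTR_EVENT_JOIN_IN encodes as 1, a run of three JoinIn
 * events becomes 1 * 36 + 1 * 6 + 1 == 43, matching the ThreePackedEvents
 * encoding of IEEE 802.1Q.
 */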

static void mrp_attr_event(struct mrp_applicant *app,
			   struct mrp_attr *attr, enum mrp_event event)
{
	enum mrp_applicant_state state;

	state = mrp_applicant_state_table[attr->state][event];
	if (state == MRP_APPLICANT_INVALID) {
		WARN_ON(1);
		return;
	}

	if (event == MRP_EVENT_TX) {
		/* When appending the attribute fails, don't update its state
		 * in order to retry at the next TX event.
		 */

		switch (mrp_tx_action_table[attr->state]) {
		case MRP_TX_ACTION_NONE:
		case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
		case MRP_TX_ACTION_S_IN_OPTIONAL:
			break;
		case MRP_TX_ACTION_S_NEW:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_NEW) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_JOIN_IN:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_LV:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_LV) < 0)
				return;
			/* As a pure applicant, sending a leave message
			 * implies that the attribute was unregistered and
			 * can be destroyed.
			 */
			mrp_attr_destroy(app, attr);
			return;
		default:
			WARN_ON(1);
		}
	}

	attr->state = state;
}

int mrp_request_join(const struct net_device *dev,
		     const struct mrp_application *appl,
		     const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    sizeof_field(struct sk_buff, cb))
		return -ENOMEM;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_create(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return -ENOMEM;
	}
	mrp_attr_event(app, attr, MRP_EVENT_JOIN);
	spin_unlock_bh(&app->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_request_join);

void mrp_request_leave(const struct net_device *dev,
		       const struct mrp_application *appl,
		       const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    sizeof_field(struct sk_buff, cb))
		return;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_lookup(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return;
	}
	mrp_attr_event(app, attr, MRP_EVENT_LV);
	spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(mrp_request_leave);

static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
{
	struct rb_node *node, *next;
	struct mrp_attr *attr;

	for (node = rb_first(&app->mad);
	     next = node ? rb_next(node) : NULL, node != NULL;
	     node = next) {
		attr = rb_entry(node, struct mrp_attr, node);
		mrp_attr_event(app, attr, event);
	}
}

static void mrp_join_timer_arm(struct mrp_applicant *app)
{
	unsigned long delay;

	delay = (u64)msecs_to_jiffies(mrp_join_time) * prandom_u32() >> 32;
	mod_timer(&app->join_timer, jiffies + delay);
}
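
/*
 * Editor's note (illustrative, not part of the original file): the
 * multiply-and-shift above maps a random 32-bit value uniformly onto
 * [0, mrp_join_time) jiffies:
 *
 *	delay = join_time_in_jiffies * prandom_u32() / 2^32
 *
 * e.g. with HZ == 1000 and the default 200 ms join time,
 * msecs_to_jiffies(200) == 200, so prandom_u32() == 0x80000000 yields a
 * delay of 100 jiffies (100 ms).  Randomizing the join timer spreads out
 * the transmissions of different participants on the LAN.
 */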

static void mrp_join_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, join_timer);

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	mrp_queue_xmit(app);
	mrp_join_timer_arm(app);
}

static void mrp_periodic_timer_arm(struct mrp_applicant *app)
{
	mod_timer(&app->periodic_timer,
		  jiffies + msecs_to_jiffies(mrp_periodic_time));
}

static void mrp_periodic_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, periodic_timer);

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_PERIODIC);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	mrp_periodic_timer_arm(app);
}

static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
{
	__be16 endmark;

	if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
		return -1;
	if (endmark == MRP_END_MARK) {
		*offset += sizeof(endmark);
		return -1;
	}
	return 0;
}

static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
					struct sk_buff *skb,
					enum mrp_vecattr_event vaevent)
{
	struct mrp_attr *attr;
	enum mrp_event event;

	attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
			       mrp_cb(skb)->mh->attrlen,
			       mrp_cb(skb)->mh->attrtype);
	if (attr == NULL)
		return;

	switch (vaevent) {
	case MRP_VECATTR_EVENT_NEW:
		event = MRP_EVENT_R_NEW;
		break;
	case MRP_VECATTR_EVENT_JOIN_IN:
		event = MRP_EVENT_R_JOIN_IN;
		break;
	case MRP_VECATTR_EVENT_IN:
		event = MRP_EVENT_R_IN;
		break;
	case MRP_VECATTR_EVENT_JOIN_MT:
		event = MRP_EVENT_R_JOIN_MT;
		break;
	case MRP_VECATTR_EVENT_MT:
		event = MRP_EVENT_R_MT;
		break;
	case MRP_VECATTR_EVENT_LV:
		event = MRP_EVENT_R_LV;
		break;
	default:
		return;
	}

	mrp_attr_event(app, attr, event);
}

static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
				 struct sk_buff *skb, int *offset)
{
	struct mrp_vecattr_hdr _vah;
	u16 valen;
	u8 vaevents, vaevent;

	mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
					      &_vah);
	if (!mrp_cb(skb)->vah)
		return -1;
	*offset += sizeof(_vah);

	if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
	    MRP_VECATTR_HDR_FLAG_LA)
		mrp_mad_event(app, MRP_EVENT_R_LA);
	valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
			    MRP_VECATTR_HDR_LEN_MASK);

	/* The VectorAttribute structure in a PDU carries event information
	 * about one or more attributes having consecutive values. Only the
	 * value for the first attribute is contained in the structure. So
	 * we make a copy of that value, and then increment it each time we
	 * advance to the next event in its Vector.
	 */
	if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
	    sizeof_field(struct sk_buff, cb))
		return -1;
	if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
			  mrp_cb(skb)->mh->attrlen) < 0)
		return -1;
	*offset += mrp_cb(skb)->mh->attrlen;

	/* In a VectorAttribute, the Vector contains events which are packed
	 * three to a byte. We process one byte of the Vector at a time.
	 */
	while (valen > 0) {
		if (skb_copy_bits(skb, *offset, &vaevents,
				  sizeof(vaevents)) < 0)
			return -1;
		*offset += sizeof(vaevents);

		/* Extract and process the first event. */
		vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
				      __MRP_VECATTR_EVENT_MAX);
		if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
			/* The byte is malformed; stop processing. */
			return -1;
		}
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the second event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= (__MRP_VECATTR_EVENT_MAX *
			     __MRP_VECATTR_EVENT_MAX);
		vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the third event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= __MRP_VECATTR_EVENT_MAX;
		vaevent = vaevents;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
	}
	return 0;
}
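
/*
 * Editor's note (illustrative, not part of the original file): the loop
 * above reverses the base-6 packing used on the transmit side.  For a
 * Vector byte of 43:
 *
 *	43 / 36       == 1	first event
 *	(43 % 36) / 6 == 1	second event
 *	43 % 6        == 1	third event
 *
 * with the copied first value incremented between events so that each
 * decoded event is applied to the attribute it belongs to.
 */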

static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
			     int *offset)
{
	struct mrp_msg_hdr _mh;

	mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
	if (!mrp_cb(skb)->mh)
		return -1;
	*offset += sizeof(_mh);

	if (mrp_cb(skb)->mh->attrtype == 0 ||
	    mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
	    mrp_cb(skb)->mh->attrlen == 0)
		return -1;

	while (skb->len > *offset) {
		if (mrp_pdu_parse_end_mark(skb, offset) < 0)
			break;
		if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
			return -1;
	}
	return 0;
}

static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct mrp_application *appl = container_of(pt, struct mrp_application,
						    pkttype);
	struct mrp_port *port;
	struct mrp_applicant *app;
	struct mrp_pdu_hdr _ph;
	const struct mrp_pdu_hdr *ph;
	int offset = skb_network_offset(skb);

	/* If the interface is in promiscuous mode, drop the packet if
	 * it was unicast to another host.
	 */
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
		goto out;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto out;
	port = rcu_dereference(dev->mrp_port);
	if (unlikely(!port))
		goto out;
	app = rcu_dereference(port->applicants[appl->type]);
	if (unlikely(!app))
		goto out;

	ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
	if (!ph)
		goto out;
	offset += sizeof(_ph);

	if (ph->version != app->app->version)
		goto out;

	spin_lock(&app->lock);
	while (skb->len > offset) {
		if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
			break;
		if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
			break;
	}
	spin_unlock(&app->lock);
out:
	kfree_skb(skb);
	return 0;
}

static int mrp_init_port(struct net_device *dev)
{
	struct mrp_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;
	rcu_assign_pointer(dev->mrp_port, port);
	return 0;
}

static void mrp_release_port(struct net_device *dev)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	unsigned int i;

	for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
		if (rtnl_dereference(port->applicants[i]))
			return;
	}
	RCU_INIT_POINTER(dev->mrp_port, NULL);
	kfree_rcu(port, rcu);
}

int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_applicant *app;
	int err;

	ASSERT_RTNL();

	if (!rtnl_dereference(dev->mrp_port)) {
		err = mrp_init_port(dev);
		if (err < 0)
			goto err1;
	}

	err = -ENOMEM;
	app = kzalloc(sizeof(*app), GFP_KERNEL);
	if (!app)
		goto err2;

	err = dev_mc_add(dev, appl->group_address);
	if (err < 0)
		goto err3;

	app->dev = dev;
	app->app = appl;
	app->mad = RB_ROOT;
	spin_lock_init(&app->lock);
	skb_queue_head_init(&app->queue);
	rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
	timer_setup(&app->join_timer, mrp_join_timer, 0);
	mrp_join_timer_arm(app);
	timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
	mrp_periodic_timer_arm(app);
	return 0;

err3:
	kfree(app);
err2:
	mrp_release_port(dev);
err1:
	return err;
}
EXPORT_SYMBOL_GPL(mrp_init_applicant);

void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);

	ASSERT_RTNL();

	RCU_INIT_POINTER(port->applicants[appl->type], NULL);

	/* Delete timer and generate a final TX event to flush out
	 * all pending messages before the applicant is gone.
	 */
	del_timer_sync(&app->join_timer);
	del_timer_sync(&app->periodic_timer);

	spin_lock_bh(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_attr_destroy_all(app);
	mrp_pdu_queue(app);
	spin_unlock_bh(&app->lock);

	mrp_queue_xmit(app);

	dev_mc_del(dev, appl->group_address);
	kfree_rcu(app, rcu);
	mrp_release_port(dev);
}
EXPORT_SYMBOL_GPL(mrp_uninit_applicant);

int mrp_register_application(struct mrp_application *appl)
{
	appl->pkttype.func = mrp_rcv;
	dev_add_pack(&appl->pkttype);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_register_application);

void mrp_unregister_application(struct mrp_application *appl)
{
	dev_remove_pack(&appl->pkttype);
}
EXPORT_SYMBOL_GPL(mrp_unregister_application);
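
/*
 * Editor's note (illustrative sketch, not part of the original file): a
 * minimal user of the exported API would register an application, attach
 * it to a port and then declare attributes, roughly as below.  The struct
 * initializer values and the attribute type are assumptions made for this
 * example; see net/8021q/vlan_mvrp.c for the real in-tree user (MVRP).
 * Error unwinding is omitted for brevity.
 */
#if 0
static struct mrp_application example_app __read_mostly = {
	.type		= MRP_APPLICATION_MVRP,	/* assumed application slot */
	.maxattr	= 1,			/* highest valid attrtype */
	.pkttype.type	= htons(ETH_P_MVRP),	/* ethertype to listen on */
	.group_address	= { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 },
	.version	= 0,
};

static int example_declare(struct net_device *dev, __be16 value)
{
	int err;

	/* Once per module: hook the ethertype via dev_add_pack(). */
	err = mrp_register_application(&example_app);
	if (err)
		return err;

	/* Once per port, with rtnl held: allocate port/applicant state. */
	err = mrp_init_applicant(dev, &example_app);
	if (err)
		return err;

	/* Declare an attribute; it is advertised on the next join timer. */
	return mrp_request_join(dev, &example_app, &value, sizeof(value), 1);
}
#endif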