cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hsr_forward.c (17279B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Frame router for HSR and PRP.
 */

#include "hsr_forward.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_framereg.h"

struct hsr_node;

/* The uses I can see for these HSR supervision frames are:
 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 *    the other node's counter has been reset for some reason.
 *    --
 *    Or not - resetting the counter and bridging the frame would create a
 *    loop, unfortunately.
 *
 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 *    frame is received from a particular node, we know something is wrong.
 *    We just register these (as with normal frames) and throw them away.
 *
 * 3) Allow different MAC addresses for the two slave interfaces, using the
 *    MacAddressA field.
 */
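/* Check whether skb is addressed to the supervision multicast address and
 * carries a well-formed HSR/PRP supervision payload (known TLV type, valid
 * TLV length, and a correctly formed optional RedBox MAC TLV).
 */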
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	struct hsr_sup_tag *hsr_sup_tag;
	struct hsrv1_ethhdr_sp *hsr_V1_hdr;
	struct hsr_sup_tlv *hsr_sup_tlv;
	u16 total_length = 0;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	eth_hdr = (struct ethhdr *)skb_mac_header(skb);

	/* Correct addr? */
	if (!ether_addr_equal(eth_hdr->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Correct ether type? */
	if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
	      eth_hdr->h_proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from correct location. */
	if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
		total_length = sizeof(struct hsrv1_ethhdr_sp);
		if (!pskb_may_pull(skb, total_length))
			return false;

		hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
		if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
			return false;

		hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
	} else {
		total_length = sizeof(struct hsrv0_ethhdr_sp);
		if (!pskb_may_pull(skb, total_length))
			return false;

		hsr_sup_tag =
		     &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
	}

	if (hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_ANNOUNCE &&
	    hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
	    hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
	    hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
		return false;
	if (hsr_sup_tag->tlv.HSR_TLV_length != 12 &&
	    hsr_sup_tag->tlv.HSR_TLV_length != sizeof(struct hsr_sup_payload))
		return false;

	/* Get next tlv */
	total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tag->tlv.HSR_TLV_length;
	if (!pskb_may_pull(skb, total_length))
		return false;
	skb_pull(skb, total_length);
	hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
	skb_push(skb, total_length);

	/* If this is a RedBox supervision frame we need to verify that more
	 * data is available.
	 */
	if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
		/* TLV length must be the length of a MAC address */
		if (hsr_sup_tlv->HSR_TLV_length != sizeof(struct hsr_sup_payload))
			return false;

		/* make sure another tlv follows */
		total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tlv->HSR_TLV_length;
		if (!pskb_may_pull(skb, total_length))
			return false;

		/* get next tlv */
		skb_pull(skb, total_length);
		hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
		skb_push(skb, total_length);
	}

	/* end of tlvs must follow at the end */
	if (hsr_sup_tlv->HSR_TLV_type == HSR_TLV_EOT &&
	    hsr_sup_tlv->HSR_TLV_length != 0)
		return false;

	return true;
}

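/* Create a copy of skb_in with the HSR tag stripped out, preserving the
 * Ethernet addresses (and VLAN header, if present) in front of the
 * encapsulated frame. Returns NULL if the copy fails.
 */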
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
					       struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}

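/* Return a clone of the untagged version of the frame. For frames that
 * arrived HSR-tagged, the stripped copy is created (and cached in
 * frame->skb_std) on first use.
 */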
struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_hsr) {
			frame->skb_std =
				create_stripped_skb_hsr(frame->skb_hsr, frame);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

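/* Return a clone of the frame without the PRP Redundancy Control Trailer.
 * For frames that arrived with an RCT, the trailer is trimmed off and the
 * resulting copy is cached in frame->skb_std on first use.
 */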
struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_prp) {
			/* trim the skb by len - HSR_HLEN to exclude RCT */
			skb_trim(frame->skb_prp,
				 frame->skb_prp->len - HSR_HLEN);
			frame->skb_std =
				__pskb_copy(frame->skb_prp,
					    skb_headroom(frame->skb_prp),
					    GFP_ATOMIC);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

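/* Encode the LAN identifier (0 for slave A, 1 for slave B) together with the
 * device's net_id into the PRP trailer.
 */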
static void prp_set_lan_id(struct prp_rct *trailer,
			   struct hsr_port *port)
{
	int lane_id;

	if (port->type == HSR_PT_SLAVE_A)
		lane_id = 0;
	else
		lane_id = 1;

	/* Add net_id in the upper 3 bits of lane_id */
	lane_id |= port->hsr->net_id;
	set_prp_lan_id(trailer, lane_id);
}

/* Tailroom for PRP rct should have been created before calling this */
static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port)
{
	struct prp_rct *trailer;
	int min_size = ETH_ZLEN;
	int lsdu_size;

	if (!skb)
		return skb;

	if (frame->is_vlan)
		min_size = VLAN_ETH_ZLEN;

	if (skb_put_padto(skb, min_size))
		return NULL;

	trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;
	prp_set_lan_id(trailer, port);
	set_prp_LSDU_size(trailer, lsdu_size);
	trailer->sequence_nr = htons(frame->sequence_nr);
	trailer->PRP_suffix = htons(ETH_P_PRP);
	skb->protocol = eth_hdr(skb)->h_proto;

	return skb;
}

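/* Select the HSR path identifier for the outgoing port: 0 for slave A,
 * 1 for slave B.
 */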
static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
			    struct hsr_port *port)
{
	int path_id;

	if (port->type == HSR_PT_SLAVE_A)
		path_id = 0;
	else
		path_id = 1;

	set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
}

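/* Fill in the HSR tag of an outgoing frame: path id, LSDU size, sequence
 * number and the encapsulated protocol. The caller must already have made
 * room for the tag in the Ethernet header.
 */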
static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port, u8 proto_version)
{
	struct hsr_ethhdr *hsr_ethhdr;
	int lsdu_size;

	/* pad to minimum packet size which is 60 + 6 (HSR tag) */
	if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
		return NULL;

	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;

	hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);

	hsr_set_path_id(hsr_ethhdr, port);
	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
	hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
	hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
			ETH_P_HSR : ETH_P_PRP);
	skb->protocol = hsr_ethhdr->ethhdr.h_proto;

	return skb;
}

/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 */
struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	unsigned char *dst, *src;
	struct sk_buff *skb;
	int movelen;

	if (frame->skb_hsr) {
		struct hsr_ethhdr *hsr_ethhdr =
			(struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);

		/* set the lane id properly */
		hsr_set_path_id(hsr_ethhdr, port);
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);
	} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
		return skb_clone(frame->skb_std, GFP_ATOMIC);
	}

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(frame->skb_std,
			  skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	/* skb_put_padto frees the skb on error, and hsr_fill_tag returns NULL
	 * in that case
	 */
	return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}

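/* PRP counterpart of hsr_create_tagged_frame(): reuse the RCT if the frame
 * already carries one, let the hardware insert the tag if it can, and
 * otherwise append a new RCT in software.
 */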
struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	struct sk_buff *skb;

	if (frame->skb_prp) {
		struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);

		if (trailer) {
			prp_set_lan_id(trailer, port);
		} else {
			WARN_ONCE(!trailer, "errored PRP skb");
			return NULL;
		}
		return skb_clone(frame->skb_prp, GFP_ATOMIC);
	} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
		return skb_clone(frame->skb_std, GFP_ATOMIC);
	}

	skb = skb_copy_expand(frame->skb_std, 0,
			      skb_tailroom(frame->skb_std) + HSR_HLEN,
			      GFP_ATOMIC);
	prp_fill_rct(skb, frame, port);

	return skb;
}

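/* Deliver a frame to the networking stack via the master device and update
 * the master's rx statistics.
 */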
static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
			       struct hsr_node *node_src)
{
	bool was_multicast_frame;
	int res;

	was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
	hsr_addr_subst_source(node_src, skb);
	skb_pull(skb, ETH_HLEN);
	res = netif_rx(skb);
	if (res == NET_RX_DROP) {
		dev->stats.rx_dropped++;
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		if (was_multicast_frame)
			dev->stats.multicast++;
	}
}

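/* Transmit a frame on a slave port. For locally originated frames the source
 * MAC address is replaced with that of the outgoing slave (IEC 62439-3
 * address substitution).
 */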
static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC62439-3 pp 26, 50): replace mac
		 * address of outgoing frame with that of the outgoing slave's.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}
	return dev_queue_xmit(skb);
}

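/* In PRP mode frames are never forwarded between the two slave ports; drop
 * anything that arrived on one slave and is about to go out the other.
 */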
bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
	return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
		 port->type == HSR_PT_SLAVE_B) ||
		(frame->port_rcv->type == HSR_PT_SLAVE_B &&
		 port->type == HSR_PT_SLAVE_A));
}

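/* If the hardware already forwards frames between the slave ports
 * (NETIF_F_HW_HSR_FWD), don't also forward them in software: apply the same
 * slave-to-slave drop rule as PRP. Otherwise nothing is dropped here.
 */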
bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
	if (port->dev->features & NETIF_F_HW_HSR_FWD)
		return prp_drop_frame(frame, port);

	return false;
}

/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's an HSR frame: through a device where it has passed before
 * - If it's a PRP frame: through another PRP slave device (no bridge)
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert an HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;
	bool sent = false;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		struct hsr_priv *hsr = port->hsr;
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* If hardware duplicate generation is enabled, only send out
		 * one port.
		 */
		if ((port->dev->features & NETIF_F_HW_HSR_DUP) && sent)
			continue;

		/* Don't send frame over port where it has been sent before.
		 * Also for SAN, this shouldn't be done.
		 */
		if (!frame->is_from_san &&
		    hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		if (frame->is_supervision && port->type == HSR_PT_MASTER) {
			hsr_handle_sup_frame(frame);
			continue;
		}

		/* Check if frame is to be dropped. E.g. for PRP there is no
		 * forwarding between ports.
		 */
		if (hsr->proto_ops->drop_frame &&
		    hsr->proto_ops->drop_frame(frame, port))
			continue;

		if (port->type != HSR_PT_MASTER)
			skb = hsr->proto_ops->create_tagged_frame(frame, port);
		else
			skb = hsr->proto_ops->get_untagged_frame(frame, port);

		if (!skb) {
			frame->port_rcv->dev->stats.rx_dropped++;
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER) {
			hsr_deliver_master(skb, port->dev, frame->node_src);
		} else {
			if (!hsr_xmit(skb, port, frame))
				sent = true;
		}
	}
}

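/* Decide whether the frame should be delivered locally: exclusively to us
 * (its destination is one of our own addresses) and/or to the master port
 * (host, multicast or broadcast destination).
 */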
static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
		frame->is_local_exclusive = true;
		skb->pkt_type = PACKET_HOST;
	} else {
		frame->is_local_exclusive = false;
	}

	if (skb->pkt_type == PACKET_HOST ||
	    skb->pkt_type == PACKET_MULTICAST ||
	    skb->pkt_type == PACKET_BROADCAST) {
		frame->is_local_dest = true;
	} else {
		frame->is_local_dest = false;
	}
}

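/* Handle a frame that carries neither an HSR tag nor a PRP RCT: frames from
 * the master get the node's next sequence number, frames received on a slave
 * are marked as coming from a SAN (singly attached node).
 */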
static void handle_std_frame(struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;
	unsigned long irqflags;

	frame->skb_hsr = NULL;
	frame->skb_prp = NULL;
	frame->skb_std = skb;

	if (port->type != HSR_PT_MASTER) {
		frame->is_from_san = true;
	} else {
		/* Sequence nr for the master node */
		spin_lock_irqsave(&hsr->seqnr_lock, irqflags);
		frame->sequence_nr = hsr->sequence_nr;
		hsr->sequence_nr++;
		spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags);
	}
}

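/* Classify an incoming frame for HSR: if it carries an HSR tag, remember it
 * as HSR-tagged and pick up its sequence number; otherwise treat it as a
 * standard frame.
 */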
int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
			struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;

	/* HSRv0 supervisory frames double as a tag so treat them as tagged. */
	if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
	    proto == htons(ETH_P_HSR)) {
		/* Check if skb contains hsr_ethhdr */
		if (skb->mac_len < sizeof(struct hsr_ethhdr))
			return -EINVAL;

		/* HSR tagged frame :- Data or Supervision */
		frame->skb_std = NULL;
		frame->skb_prp = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
		return 0;
	}

	/* Standard frame or PRP from master port */
	handle_std_frame(skb, frame);

	return 0;
}

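/* Classify an incoming frame for PRP: if it carries a valid RCT, remember it
 * as a PRP frame and pick up its sequence number; otherwise treat it as a
 * standard frame.
 */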
int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
			struct hsr_frame_info *frame)
{
	/* Supervision frame */
	struct prp_rct *rct = skb_get_PRP_rct(skb);

	if (rct &&
	    prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
		frame->skb_hsr = NULL;
		frame->skb_std = NULL;
		frame->skb_prp = skb;
		frame->sequence_nr = prp_get_skb_sequence_nr(rct);
		return 0;
	}
	handle_std_frame(skb, frame);

	return 0;
}

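/* Fill in the per-frame bookkeeping: source node, VLAN and supervision
 * status, local destination flags, and the protocol specific HSR/PRP
 * classification.
 */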
static int fill_frame_info(struct hsr_frame_info *frame,
			   struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_vlan_ethhdr *vlan_hdr;
	struct ethhdr *ethhdr;
	__be16 proto;
	int ret;
	u32 hash;

	/* Check if skb contains ethhdr */
	if (skb->mac_len < sizeof(struct ethhdr))
		return -EINVAL;

	memset(frame, 0, sizeof(*frame));

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	hash = hsr_mac_hash(port->hsr, ethhdr->h_source);
	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, &hsr->node_db[hash], skb,
				       frame->is_supervision,
				       port->type);
	if (!frame->node_src)
		return -1; /* Unknown node and !is_supervision, or no mem */

	frame->is_vlan = false;
	proto = ethhdr->h_proto;

	if (proto == htons(ETH_P_8021Q))
		frame->is_vlan = true;

	if (frame->is_vlan) {
		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
		/* FIXME: */
		netdev_warn_once(skb->dev, "VLAN not yet supported");
	}

	frame->is_from_san = false;
	frame->port_rcv = port;
	ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
	if (ret)
		return ret;

	check_local_dest(port->hsr, skb, frame);

	return 0;
}

/* Must be called holding rcu read lock (because of the port parameter) */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	if (fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;

	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);
	/* Gets called for ingress frames as well as egress from master port.
	 * So check and increment stats for master port only here.
	 */
	if (port->type == HSR_PT_MASTER) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	kfree_skb(frame.skb_hsr);
	kfree_skb(frame.skb_prp);
	kfree_skb(frame.skb_std);
	return;

out_drop:
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}