cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tx.c (8359B)


// SPDX-License-Identifier: GPL-2.0-only

#include <net/6lowpan.h>
#include <net/ndisc.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>

#include "6lowpan_i.h"

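/* Fragment header layout per RFC 4944, Section 5.3 (for reference):
 *
 *   FRAG1: | 5-bit dispatch 11000 | 11-bit datagram_size | 16-bit datagram_tag |
 *          -> 4 bytes
 *   FRAGN: | 5-bit dispatch 11100 | 11-bit datagram_size | 16-bit datagram_tag |
 *          | 8-bit datagram_offset |
 *          -> 5 bytes
 *
 * datagram_offset is expressed in units of 8 octets, which is why the
 * payload slices below are rounded down to multiples of 8.
 */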
#define LOWPAN_FRAG1_HEAD_SIZE	0x4
#define LOWPAN_FRAGN_HEAD_SIZE	0x5

struct lowpan_addr_info {
	struct ieee802154_addr daddr;
	struct ieee802154_addr saddr;
};

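/* lowpan_header_create() runs before the skb reaches lowpan_xmit(), so the
 * resolved link-layer addresses are stashed in the skb headroom, just below
 * skb->data, and picked up again later by lowpan_header().
 */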
static inline struct
lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
{
	WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
	return (struct lowpan_addr_info *)(skb->data -
			sizeof(struct lowpan_addr_info));
}

/* This callback is called from the AF_PACKET and IPv6 stacks; note that
 * AF_PACKET sockets provide an 8-byte array for addresses only!
 *
 * TODO: AF_PACKET DGRAM (sending/receiving) and RAW (sending) make no
 * sense here. We should disable them; the right use-case is AF_INET6
 * RAW/DGRAM sockets.
 */
int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned int len)
{
	struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
	struct lowpan_addr_info *info = lowpan_skb_priv(skb);
	struct lowpan_802154_neigh *llneigh = NULL;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct neighbour *n;

	if (!daddr)
		return -EINVAL;

	/* TODO:
	 * if this packet isn't an ipv6 one, where should it be routed?
	 */
	if (type != ETH_P_IPV6)
		return 0;

	/* intra-pan communication */
	info->saddr.pan_id = wpan_dev->pan_id;
	info->daddr.pan_id = info->saddr.pan_id;

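	/* Prefer a 16-bit short destination address learned from the IPv6
	 * neighbour cache; fall back to the 64-bit extended address when no
	 * valid short address is known.
	 */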
	if (!memcmp(daddr, ldev->broadcast, EUI64_ADDR_LEN)) {
		info->daddr.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
		info->daddr.mode = IEEE802154_ADDR_SHORT;
	} else {
		__le16 short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);

		n = neigh_lookup(&nd_tbl, &hdr->daddr, ldev);
		if (n) {
			llneigh = lowpan_802154_neigh(neighbour_priv(n));
			read_lock_bh(&n->lock);
			short_addr = llneigh->short_addr;
			read_unlock_bh(&n->lock);
		}

		if (llneigh &&
		    lowpan_802154_is_valid_src_short_addr(short_addr)) {
			info->daddr.short_addr = short_addr;
			info->daddr.mode = IEEE802154_ADDR_SHORT;
		} else {
			info->daddr.mode = IEEE802154_ADDR_LONG;
			ieee802154_be64_to_le64(&info->daddr.extended_addr,
						daddr);
		}

		if (n)
			neigh_release(n);
	}

	if (!saddr) {
		if (lowpan_802154_is_valid_src_short_addr(wpan_dev->short_addr)) {
			info->saddr.mode = IEEE802154_ADDR_SHORT;
			info->saddr.short_addr = wpan_dev->short_addr;
		} else {
			info->saddr.mode = IEEE802154_ADDR_LONG;
			info->saddr.extended_addr = wpan_dev->extended_addr;
		}
	} else {
		info->saddr.mode = IEEE802154_ADDR_LONG;
		ieee802154_be64_to_le64(&info->saddr.extended_addr, saddr);
	}

	return 0;
}

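/* Allocate an skb for one fragment. For the first fragment (FRAG1) the MAC
 * header of the master skb is copied verbatim; for subsequent fragments a
 * fresh MAC header is built via wpan_dev_hard_header().
 */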
static struct sk_buff*
lowpan_alloc_frag(struct sk_buff *skb, int size,
		  const struct ieee802154_hdr *master_hdr, bool frag1)
{
	struct net_device *wdev = lowpan_802154_dev(skb->dev)->wdev;
	struct sk_buff *frag;
	int rc;

	frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size,
			 GFP_ATOMIC);

	if (likely(frag)) {
		frag->dev = wdev;
		frag->priority = skb->priority;
		skb_reserve(frag, wdev->needed_headroom);
		skb_reset_network_header(frag);
		*mac_cb(frag) = *mac_cb(skb);

		if (frag1) {
			skb_put_data(frag, skb_mac_header(skb), skb->mac_len);
		} else {
			rc = wpan_dev_hard_header(frag, wdev,
						  &master_hdr->dest,
						  &master_hdr->source, size);
			if (rc < 0) {
				kfree_skb(frag);
				return ERR_PTR(rc);
			}
		}
	} else {
		frag = ERR_PTR(-ENOMEM);
	}

	return frag;
}

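/* Emit a single fragment: the fragment header followed by a slice of the
 * master skb's (compressed) network header and payload starting at 'offset'.
 */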
static int
lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
		     u8 *frag_hdr, int frag_hdrlen,
		     int offset, int len, bool frag1)
{
	struct sk_buff *frag;

	raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);

	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr, frag1);
	if (IS_ERR(frag))
		return PTR_ERR(frag);

	skb_put_data(frag, frag_hdr, frag_hdrlen);
	skb_put_data(frag, skb_network_header(skb) + offset, len);

	raw_dump_table(__func__, " fragment dump", frag->data, frag->len);

	return dev_queue_xmit(frag);
}

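/* Illustrative sizing (numbers are examples only): with payload_cap = 100
 * bytes and a 12-byte compressed 6LoWPAN header, the FRAG1 frame carries the
 * 12-byte header plus round_down(100 - 4 - 12, 8) = 84 payload bytes, and
 * each FRAGN frame can carry up to round_down(100 - 5, 8) = 88 bytes.
 */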
static int
lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
		       const struct ieee802154_hdr *wpan_hdr, u16 dgram_size,
		       u16 dgram_offset)
{
	__be16 frag_tag;
	u8 frag_hdr[5];
	int frag_cap, frag_len, payload_cap, rc;
	int skb_unprocessed, skb_offset;

	frag_tag = htons(lowpan_802154_dev(ldev)->fragment_tag);
	lowpan_802154_dev(ldev)->fragment_tag++;

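	/* Pack the FRAG1 header: the 0xc0 dispatch in the top five bits of
	 * byte 0, the upper three bits of the 11-bit datagram size in its
	 * low bits, the remaining size bits in byte 1, and the 16-bit tag
	 * (network byte order) in bytes 2-3.
	 */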
	frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
	frag_hdr[1] = dgram_size & 0xff;
	memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));

	payload_cap = ieee802154_max_payload(wpan_hdr);

	frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
			      skb_network_header_len(skb), 8);

	skb_offset = skb_network_header_len(skb);
	skb_unprocessed = skb->len - skb->mac_len - skb_offset;

	rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
				  LOWPAN_FRAG1_HEAD_SIZE, 0,
				  frag_len + skb_network_header_len(skb),
				  true);
	if (rc) {
		pr_debug("%s unable to send FRAG1 packet (tag: %d)\n",
			 __func__, ntohs(frag_tag));
		goto err;
	}

	frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
	frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
	frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);

	do {
		dgram_offset += frag_len;
		skb_offset += frag_len;
		skb_unprocessed -= frag_len;
		frag_len = min(frag_cap, skb_unprocessed);

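		/* The offset field is expressed in units of 8 octets */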
		frag_hdr[4] = dgram_offset >> 3;

		rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
					  LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
					  frag_len, false);
		if (rc) {
			pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
				 __func__, ntohs(frag_tag), skb_offset);
			goto err;
		}
	} while (skb_unprocessed > frag_cap);

	ldev->stats.tx_packets++;
	ldev->stats.tx_bytes += dgram_size;
	consume_skb(skb);
	return NET_XMIT_SUCCESS;

err:
	kfree_skb(skb);
	return rc;
}

static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
			 u16 *dgram_size, u16 *dgram_offset)
{
	struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
	struct lowpan_addr_info info;

	memcpy(&info, lowpan_skb_priv(skb), sizeof(info));

	*dgram_size = skb->len;
	lowpan_header_compress(skb, ldev, &info.daddr, &info.saddr);
	/* dgram_offset = (saved bytes after compression) + lowpan header len */
	*dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);

	cb->type = IEEE802154_FC_TYPE_DATA;

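	/* Broadcast frames are not acknowledged in IEEE 802.15.4, so never
	 * request an ACK for them.
	 */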
	if (info.daddr.mode == IEEE802154_ADDR_SHORT &&
	    ieee802154_is_broadcast_short_addr(info.daddr.short_addr))
		cb->ackreq = false;
	else
		cb->ackreq = wpan_dev->ackreq;

	return wpan_dev_hard_header(skb, lowpan_802154_dev(ldev)->wdev,
				    &info.daddr, &info.saddr, 0);
}

netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
{
	struct ieee802154_hdr wpan_hdr;
	int max_single, ret;
	u16 dgram_size, dgram_offset;

	pr_debug("packet xmit\n");

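	/* 6LoWPAN interfaces advertise an MTU of IPV6_MIN_MTU (1280 bytes),
	 * so anything bigger points at a bug in the caller.
	 */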
	WARN_ON_ONCE(skb->len > IPV6_MIN_MTU);

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere
	 */
	if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
		     skb_tailroom(skb) < ldev->needed_tailroom)) {
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, ldev->needed_headroom,
				       ldev->needed_tailroom, GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			kfree_skb(skb);
			return NET_XMIT_DROP;
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;
	}

	ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
	if (ret < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

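	/* If the compressed datagram fits into a single 802.15.4 frame,
	 * transmit it directly on the wpan device; otherwise fall back to
	 * RFC 4944 fragmentation.
	 */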
	max_single = ieee802154_max_payload(&wpan_hdr);

	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
		skb->dev = lowpan_802154_dev(ldev)->wdev;
		ldev->stats.tx_packets++;
		ldev->stats.tx_bytes += dgram_size;
		return dev_queue_xmit(skb);
	} else {
		netdev_tx_t rc;

		pr_debug("frame is too big, fragmentation is needed\n");
		rc = lowpan_xmit_fragmented(skb, ldev, &wpan_hdr, dgram_size,
					    dgram_offset);

		return rc < 0 ? NET_XMIT_DROP : rc;
	}
}