cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

net.c (10671B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Intel Corporation
 */

#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_arp.h>
#include <uapi/linux/icmp.h>

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/ieee80211.h>

#include <net/cfg80211.h>
#include <net/ip.h>

#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/mm.h>

#include "internal.h"
#include "sap.h"
#include "iwl-mei.h"

/*
 * Returns true if further filtering should be stopped. Only in that case
 * pass_to_csme and rx_handler_res are set. Otherwise, the next level of
 * filters should be checked.
 */
static bool iwl_mei_rx_filter_eth(const struct ethhdr *ethhdr,
				  const struct iwl_sap_oob_filters *filters,
				  bool *pass_to_csme,
				  rx_handler_result_t *rx_handler_res)
{
	const struct iwl_sap_eth_filter *filt;

	/* This filter is not relevant for UCAST packets */
	if (!is_multicast_ether_addr(ethhdr->h_dest) ||
	    is_broadcast_ether_addr(ethhdr->h_dest))
		return false;

	for (filt = &filters->eth_filters[0];
	     filt < &filters->eth_filters[0] + ARRAY_SIZE(filters->eth_filters);
	     filt++) {
		/* Assume there are no enabled filters after a disabled one */
		if (!(filt->flags & SAP_ETH_FILTER_ENABLED))
			break;

		if (compare_ether_header(filt->mac_address, ethhdr->h_dest))
			continue;

		/* Packet needs to reach the host's stack */
		if (filt->flags & SAP_ETH_FILTER_COPY)
			*rx_handler_res = RX_HANDLER_PASS;
		else
			*rx_handler_res = RX_HANDLER_CONSUMED;

		/* We have an authoritative answer, stop filtering */
		if (filt->flags & SAP_ETH_FILTER_STOP) {
			*pass_to_csme = true;
			return true;
		}

		return false;
	}

	/* MCAST frames that don't match layer 2 filters are not sent to ME */
	*pass_to_csme = false;

	return true;
}

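/*
 * Summary of the layer-2 filter above: a matching, enabled filter sets
 * *rx_handler_res to RX_HANDLER_PASS when SAP_ETH_FILTER_COPY is set (the
 * host stack also sees the frame) and to RX_HANDLER_CONSUMED otherwise.
 * SAP_ETH_FILTER_STOP makes that answer authoritative: the frame is passed
 * to CSME and the protocol-level filters below are skipped; without it,
 * filtering continues at the next level.
 */
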
/*
 * Returns true iff the frame should be passed to CSME, in which case
 * rx_handler_res is set.
 */
static bool iwl_mei_rx_filter_arp(struct sk_buff *skb,
				  const struct iwl_sap_oob_filters *filters,
				  rx_handler_result_t *rx_handler_res)
{
	const struct iwl_sap_ipv4_filter *filt = &filters->ipv4_filter;
	const struct arphdr *arp;
	const __be32 *target_ip;
	u32 flags = le32_to_cpu(filt->flags);

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return false;

	arp = arp_hdr(skb);

	/* Handle only IPv4 over ethernet ARP frames */
	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP))
		return false;

	/*
	 * After the ARP header, we have:
	 * src MAC address    - 6 bytes
	 * src IP address     - 4 bytes
	 * target MAC address - 6 bytes
	 */
	target_ip = (const void *)((const u8 *)(arp + 1) +
				   ETH_ALEN + sizeof(__be32) + ETH_ALEN);

	/*
	 * An ARP request is forwarded to ME only if the IP address matches
	 * the ARP request's target IP field.
	 */
	if (arp->ar_op == htons(ARPOP_REQUEST) &&
	    (filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ARP_REQ_PASS)) &&
	    (filt->ipv4_addr == 0 || filt->ipv4_addr == *target_ip)) {
		if (flags & SAP_IPV4_FILTER_ARP_REQ_COPY)
			*rx_handler_res = RX_HANDLER_PASS;
		else
			*rx_handler_res = RX_HANDLER_CONSUMED;

		return true;
	}

	/* ARP replies are always forwarded to ME regardless of the IP */
	if (flags & SAP_IPV4_FILTER_ARP_RESP_PASS &&
	    arp->ar_op == htons(ARPOP_REPLY)) {
		if (flags & SAP_IPV4_FILTER_ARP_RESP_COPY)
			*rx_handler_res = RX_HANDLER_PASS;
		else
			*rx_handler_res = RX_HANDLER_CONSUMED;

		return true;
	}

	return false;
}

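/*
 * For reference, the Ethernet/IPv4 ARP payload that the pointer arithmetic
 * above walks over (byte offsets relative to the end of struct arphdr):
 *
 *   0..5    sender MAC address (ETH_ALEN)
 *   6..9    sender IP address  (sizeof(__be32))
 *   10..15  target MAC address (ETH_ALEN)
 *   16..19  target IP address  <- where target_ip ends up pointing
 */
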
static bool
iwl_mei_rx_filter_tcp_udp(struct sk_buff *skb, bool ip_match,
			  const struct iwl_sap_oob_filters *filters,
			  rx_handler_result_t *rx_handler_res)
{
	const struct iwl_sap_flex_filter *filt;

	for (filt = &filters->flex_filters[0];
	     filt < &filters->flex_filters[0] + ARRAY_SIZE(filters->flex_filters);
	     filt++) {
		if (!(filt->flags & SAP_FLEX_FILTER_ENABLED))
			break;

		/*
		 * We are required to have a match on the IP level and we
		 * didn't have such a match.
		 */
		if ((filt->flags &
		     (SAP_FLEX_FILTER_IPV4 | SAP_FLEX_FILTER_IPV6)) &&
		    !ip_match)
			continue;

		if ((filt->flags & SAP_FLEX_FILTER_UDP) &&
		    ip_hdr(skb)->protocol != IPPROTO_UDP)
			continue;

		if ((filt->flags & SAP_FLEX_FILTER_TCP) &&
		    ip_hdr(skb)->protocol != IPPROTO_TCP)
			continue;

		/*
		 * We must have either a TCP header or a UDP header; both
		 * start with a source port followed by a destination port.
		 * Both are big endian words.
		 * Use a UDP header and that will work for TCP as well.
		 */
		if ((filt->src_port && filt->src_port != udp_hdr(skb)->source) ||
		    (filt->dst_port && filt->dst_port != udp_hdr(skb)->dest))
			continue;

		if (filt->flags & SAP_FLEX_FILTER_COPY)
			*rx_handler_res = RX_HANDLER_PASS;
		else
			*rx_handler_res = RX_HANDLER_CONSUMED;

		return true;
	}

	return false;
}

static bool iwl_mei_rx_filter_ipv4(struct sk_buff *skb,
				   const struct iwl_sap_oob_filters *filters,
				   rx_handler_result_t *rx_handler_res)
{
	const struct iwl_sap_ipv4_filter *filt = &filters->ipv4_filter;
	const struct iphdr *iphdr;
	unsigned int iphdrlen;
	bool match;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) ||
	    !pskb_may_pull(skb, skb_network_offset(skb) + ip_hdrlen(skb)))
		return false;

	iphdrlen = ip_hdrlen(skb);
	iphdr = ip_hdr(skb);
	match = !filters->ipv4_filter.ipv4_addr ||
		filters->ipv4_filter.ipv4_addr == iphdr->daddr;

	skb_set_transport_header(skb, skb_network_offset(skb) + iphdrlen);

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_UDP:
	case IPPROTO_TCP:
		/*
		 * The UDP header is shorter than the TCP header and we look
		 * at the first bytes of the header anyway (see below).
		 * If we have a truncated TCP packet, let CSME handle this.
		 */
		if (!pskb_may_pull(skb, skb_transport_offset(skb) +
				   sizeof(struct udphdr)))
			return false;

		return iwl_mei_rx_filter_tcp_udp(skb, match,
						 filters, rx_handler_res);

	case IPPROTO_ICMP: {
		struct icmphdr *icmp;

		if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(*icmp)))
			return false;

		icmp = icmp_hdr(skb);

		/*
		 * Don't pass echo requests to ME even if it wants them, as we
		 * want the host to answer.
		 */
		if ((filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ICMP_PASS)) &&
		    match && (icmp->type != ICMP_ECHO || icmp->code != 0)) {
			if (filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ICMP_COPY))
				*rx_handler_res = RX_HANDLER_PASS;
			else
				*rx_handler_res = RX_HANDLER_CONSUMED;

			return true;
		}
		break;
	}
	case IPPROTO_ICMPV6:
		/* TODO: Should we have the same ICMP request logic here too? */
		if ((filters->icmpv6_flags & cpu_to_le32(SAP_ICMPV6_FILTER_ENABLED) &&
		     match)) {
			if (filters->icmpv6_flags &
			    cpu_to_le32(SAP_ICMPV6_FILTER_COPY))
				*rx_handler_res = RX_HANDLER_PASS;
			else
				*rx_handler_res = RX_HANDLER_CONSUMED;

			return true;
		}
		break;
	default:
		return false;
	}

	return false;
}

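/*
 * To recap the IPv4 path above: TCP and UDP frames are matched against the
 * flex filters (port based, with the destination-IP match passed down),
 * ICMP is forwarded according to the SAP_IPV4_FILTER_ICMP_* flags except
 * for echo requests which the host always answers, frames carrying
 * IPPROTO_ICMPV6 honor filters->icmpv6_flags, and anything else stays with
 * the host stack.
 */
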
static bool iwl_mei_rx_filter_ipv6(struct sk_buff *skb,
				   const struct iwl_sap_oob_filters *filters,
				   rx_handler_result_t *rx_handler_res)
{
	*rx_handler_res = RX_HANDLER_PASS;

	/* TODO */

	return false;
}

static rx_handler_result_t
iwl_mei_rx_pass_to_csme(struct sk_buff *skb,
			const struct iwl_sap_oob_filters *filters,
			bool *pass_to_csme)
{
	const struct ethhdr *ethhdr = (void *)skb_mac_header(skb);
	rx_handler_result_t rx_handler_res = RX_HANDLER_PASS;
	bool (*filt_handler)(struct sk_buff *skb,
			     const struct iwl_sap_oob_filters *filters,
			     rx_handler_result_t *rx_handler_res);

	/*
	 * skb->data points to the IP header / ARP header and the ETH header
	 * is in the headroom.
	 */
	skb_reset_network_header(skb);

	/*
	 * MCAST IP packets sent by us are received again here without
	 * an ETH header. Drop them here.
	 */
	if (!skb_mac_offset(skb))
		return RX_HANDLER_PASS;

	if (skb_headroom(skb) < sizeof(*ethhdr))
		return RX_HANDLER_PASS;

	if (iwl_mei_rx_filter_eth(ethhdr, filters,
				  pass_to_csme, &rx_handler_res))
		return rx_handler_res;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		filt_handler = iwl_mei_rx_filter_ipv4;
		break;
	case htons(ETH_P_ARP):
		filt_handler = iwl_mei_rx_filter_arp;
		break;
	case htons(ETH_P_IPV6):
		filt_handler = iwl_mei_rx_filter_ipv6;
		break;
	default:
		*pass_to_csme = false;
		return rx_handler_res;
	}

	*pass_to_csme = filt_handler(skb, filters, &rx_handler_res);

	return rx_handler_res;
}

rx_handler_result_t iwl_mei_rx_filter(struct sk_buff *orig_skb,
				      const struct iwl_sap_oob_filters *filters,
				      bool *pass_to_csme)
{
	rx_handler_result_t ret;
	struct sk_buff *skb;

	ret = iwl_mei_rx_pass_to_csme(orig_skb, filters, pass_to_csme);

	if (!*pass_to_csme)
		return RX_HANDLER_PASS;

	if (ret == RX_HANDLER_PASS) {
		/* skb_copy() may fail under memory pressure; in that case
		 * keep the frame on the host path and skip the CSME copy.
		 */
		skb = skb_copy(orig_skb, GFP_ATOMIC);
		if (!skb)
			return RX_HANDLER_PASS;
	} else {
		skb = orig_skb;
	}

	/* CSME wants the MAC header as well, push it back */
	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 * Add the packet that CSME wants to get to the ring. Don't send the
	 * Check Shared Area HECI message since this is not possible from the
	 * Rx context. The caller will schedule a worker to do just that.
	 */
	iwl_mei_add_data_to_ring(skb, false);

	/*
	 * In case we drop the packet, don't free it, the caller will do that
	 * for us.
	 */
	if (ret == RX_HANDLER_PASS)
		dev_kfree_skb(skb);

	return ret;
}

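/*
 * skb ownership in iwl_mei_rx_filter() above: when the verdict is
 * RX_HANDLER_PASS, a copy of the frame is queued for CSME and freed here
 * while the original continues up the host stack; when the verdict is
 * RX_HANDLER_CONSUMED, the original skb itself is queued and the caller is
 * expected to free it.
 */
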
#define DHCP_SERVER_PORT 67
#define DHCP_CLIENT_PORT 68
void iwl_mei_tx_copy_to_csme(struct sk_buff *origskb, unsigned int ivlen)
{
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	struct ethhdr ethhdr;
	struct ethhdr *eth;

	/* Catch DHCP packets */
	if (origskb->protocol != htons(ETH_P_IP) ||
	    ip_hdr(origskb)->protocol != IPPROTO_UDP ||
	    udp_hdr(origskb)->source != htons(DHCP_CLIENT_PORT) ||
	    udp_hdr(origskb)->dest != htons(DHCP_SERVER_PORT))
		return;

	/*
	 * We could be a bit less aggressive here and not copy everything, but
	 * this is very rare anyway, so don't bother much.
	 */
	skb = skb_copy(origskb, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = origskb->protocol;

	hdr = (void *)skb->data;

	memcpy(ethhdr.h_dest, ieee80211_get_DA(hdr), ETH_ALEN);
	memcpy(ethhdr.h_source, ieee80211_get_SA(hdr), ETH_ALEN);

	/*
	 * Remove the ieee80211 header + IV + SNAP but leave the ethertype.
	 * We still have enough headroom for the SAP header.
	 */
	pskb_pull(skb, ieee80211_hdrlen(hdr->frame_control) + ivlen + 6);
	eth = skb_push(skb, sizeof(ethhdr.h_dest) + sizeof(ethhdr.h_source));
	memcpy(eth, &ethhdr, sizeof(ethhdr.h_dest) + sizeof(ethhdr.h_source));

	iwl_mei_add_data_to_ring(skb, true);

	dev_kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(iwl_mei_tx_copy_to_csme);
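
/*
 * Note on the Tx path above: iwl_mei_tx_copy_to_csme() is exported so the
 * wifi driver can mirror outgoing DHCP client->server frames to CSME. The
 * copy has its 802.11 header, IV and SNAP stripped, a minimal Ethernet
 * style destination/source pair is pushed back (the ethertype stays in
 * place), and the result is queued with iwl_mei_add_data_to_ring(skb, true)
 * before being freed here.
 */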