cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

test_sk_assign.c (4293B)


      1// SPDX-License-Identifier: GPL-2.0
      2// Copyright (c) 2019 Cloudflare Ltd.
      3// Copyright (c) 2020 Isovalent, Inc.
      4
      5#include <stddef.h>
      6#include <stdbool.h>
      7#include <string.h>
      8#include <linux/bpf.h>
      9#include <linux/if_ether.h>
     10#include <linux/in.h>
     11#include <linux/ip.h>
     12#include <linux/ipv6.h>
     13#include <linux/pkt_cls.h>
     14#include <linux/tcp.h>
     15#include <sys/socket.h>
     16#include <bpf/bpf_helpers.h>
     17#include <bpf/bpf_endian.h>
     18
/* Pin map under /sys/fs/bpf/tc/globals/<map name> */
#define PIN_GLOBAL_NS 2

/* Must match struct bpf_elf_map layout from iproute2.
 * NOTE: field order and widths are an ABI contract with tc(8)'s ELF map
 * loader — do not reorder or resize members.
 */
struct {
	__u32 type;		/* BPF map type (BPF_MAP_TYPE_*) */
	__u32 size_key;		/* key size in bytes */
	__u32 size_value;	/* value size in bytes */
	__u32 max_elem;		/* maximum number of entries */
	__u32 flags;		/* BPF_F_* map creation flags (none here) */
	__u32 id;		/* iproute2 map id (unused here) */
	__u32 pinning;		/* pinning scope understood by iproute2 */
} server_map SEC("maps") = {
	.type = BPF_MAP_TYPE_SOCKMAP,
	.size_key = sizeof(int),
	/* sockmap values are socket references, stored as 64-bit handles */
	.size_value  = sizeof(__u64),
	.max_elem = 1,
	/* PIN_GLOBAL_NS => pinned at /sys/fs/bpf/tc/globals/server_map */
	.pinning = PIN_GLOBAL_NS,
};
     38
/* GPL license declaration — required for GPL-only BPF helpers. */
char _license[] SEC("license") = "GPL";
     40
     41/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
     42static inline struct bpf_sock_tuple *
     43get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp)
     44{
     45	void *data_end = (void *)(long)skb->data_end;
     46	void *data = (void *)(long)skb->data;
     47	struct bpf_sock_tuple *result;
     48	struct ethhdr *eth;
     49	__u64 tuple_len;
     50	__u8 proto = 0;
     51	__u64 ihl_len;
     52
     53	eth = (struct ethhdr *)(data);
     54	if (eth + 1 > data_end)
     55		return NULL;
     56
     57	if (eth->h_proto == bpf_htons(ETH_P_IP)) {
     58		struct iphdr *iph = (struct iphdr *)(data + sizeof(*eth));
     59
     60		if (iph + 1 > data_end)
     61			return NULL;
     62		if (iph->ihl != 5)
     63			/* Options are not supported */
     64			return NULL;
     65		ihl_len = iph->ihl * 4;
     66		proto = iph->protocol;
     67		*ipv4 = true;
     68		result = (struct bpf_sock_tuple *)&iph->saddr;
     69	} else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
     70		struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + sizeof(*eth));
     71
     72		if (ip6h + 1 > data_end)
     73			return NULL;
     74		ihl_len = sizeof(*ip6h);
     75		proto = ip6h->nexthdr;
     76		*ipv4 = false;
     77		result = (struct bpf_sock_tuple *)&ip6h->saddr;
     78	} else {
     79		return (struct bpf_sock_tuple *)data;
     80	}
     81
     82	if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
     83		return NULL;
     84
     85	*tcp = (proto == IPPROTO_TCP);
     86	return result;
     87}
     88
/* Redirect a UDP packet: if an established/connected UDP socket matches the
 * packet's 4-tuple, assign the skb to it; otherwise, for packets destined to
 * port 4321, steer them to the server socket stored in server_map.
 *
 * Returns the bpf_sk_assign() result (0 on success) on the assign path,
 * TC_ACT_OK for uninteresting traffic, TC_ACT_SHOT on failure.
 */
static inline int
handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
	struct bpf_sock *sk;
	const int zero = 0;	/* single key of the one-element server_map */
	size_t tuple_len;
	__be16 dport;
	int ret;

	/* Re-validate the tuple length against the packet end so the BPF
	 * verifier can prove all subsequent tuple accesses are in bounds.
	 */
	tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
	if ((void *)tuple + tuple_len > (void *)(long)skb->data_end)
		return TC_ACT_SHOT;

	/* Exact 4-tuple match first (e.g. an already-connected socket). */
	sk = bpf_sk_lookup_udp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
	if (sk)
		goto assign;

	/* Only packets to the test port are candidates for redirection. */
	dport = ipv4 ? tuple->ipv4.dport : tuple->ipv6.dport;
	if (dport != bpf_htons(4321))
		return TC_ACT_OK;

	sk = bpf_map_lookup_elem(&server_map, &zero);
	if (!sk)
		return TC_ACT_SHOT;

assign:
	/* Both paths hold a socket reference here; release after assigning. */
	ret = bpf_sk_assign(skb, sk, 0);
	bpf_sk_release(sk);
	return ret;
}
    119
/* Redirect a TCP packet: keep established connections on their own socket,
 * but steer new flows (and packets matching only a listener) destined to
 * port 4321 onto the server socket stored in server_map.
 *
 * Returns the bpf_sk_assign() result (0 on success) on the assign path,
 * TC_ACT_OK for uninteresting traffic, TC_ACT_SHOT on failure.
 */
static inline int
handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
	struct bpf_sock *sk;
	const int zero = 0;	/* single key of the one-element server_map */
	size_t tuple_len;
	__be16 dport;
	int ret;

	/* Re-validate the tuple length against the packet end so the BPF
	 * verifier can prove all subsequent tuple accesses are in bounds.
	 */
	tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
	if ((void *)tuple + tuple_len > (void *)(long)skb->data_end)
		return TC_ACT_SHOT;

	/* skc lookup can also return request/timewait sockets. A non-listener
	 * match means an established flow: keep it on its own socket. A
	 * listener match is released and falls through to the redirect logic.
	 */
	sk = bpf_skc_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
	if (sk) {
		if (sk->state != BPF_TCP_LISTEN)
			goto assign;
		bpf_sk_release(sk);
	}

	dport = ipv4 ? tuple->ipv4.dport : tuple->ipv6.dport;
	if (dport != bpf_htons(4321))
		return TC_ACT_OK;

	sk = bpf_map_lookup_elem(&server_map, &zero);
	if (!sk)
		return TC_ACT_SHOT;

	/* Only a listening socket may receive a redirected new flow. */
	if (sk->state != BPF_TCP_LISTEN) {
		bpf_sk_release(sk);
		return TC_ACT_SHOT;
	}

assign:
	/* Both paths hold a socket reference here; release after assigning. */
	ret = bpf_sk_assign(skb, sk, 0);
	bpf_sk_release(sk);
	return ret;
}
    158
    159SEC("tc")
    160int bpf_sk_assign_test(struct __sk_buff *skb)
    161{
    162	struct bpf_sock_tuple *tuple;
    163	bool ipv4 = false;
    164	bool tcp = false;
    165	int tuple_len;
    166	int ret = 0;
    167
    168	tuple = get_tuple(skb, &ipv4, &tcp);
    169	if (!tuple)
    170		return TC_ACT_SHOT;
    171
    172	/* Note that the verifier socket return type for bpf_skc_lookup_tcp()
    173	 * differs from bpf_sk_lookup_udp(), so even though the C-level type is
    174	 * the same here, if we try to share the implementations they will
    175	 * fail to verify because we're crossing pointer types.
    176	 */
    177	if (tcp)
    178		ret = handle_tcp(skb, tuple, ipv4);
    179	else
    180		ret = handle_udp(skb, tuple, ipv4);
    181
    182	return ret == 0 ? TC_ACT_OK : TC_ACT_SHOT;
    183}