cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

nf_queue.c (8037B)


/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

static const struct nf_queue_handler __rcu *nf_queue_handler;

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */

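/*
 * For reference, the in-kernel nfnetlink_queue backend registers itself
 * roughly like this (sketch; see net/netfilter/nfnetlink_queue.c):
 *
 *	static const struct nf_queue_handler nfqh = {
 *		.outfn		= nfqnl_enqueue_packet,
 *		.nf_hook_drop	= nfqnl_nf_hook_drop,
 *	};
 *	nf_register_queue_handler(&nfqh);
 */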
void nf_register_queue_handler(const struct nf_queue_handler *qh)
{
	/* Should never happen: there is only one queueing backend in the kernel. */
	WARN_ON(rcu_access_pointer(nf_queue_handler));
	rcu_assign_pointer(nf_queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(void)
{
	RCU_INIT_POINTER(nf_queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

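/*
 * With CONFIG_INET, sock_gen_put() also handles the request-sock and
 * timewait-sock variants a queued skb may reference; otherwise a plain
 * sock_put() suffices.
 */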
static void nf_queue_sock_put(struct sock *sk)
{
#ifdef CONFIG_INET
	sock_gen_put(sk);
#else
	sock_put(sk);
#endif
}

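/* Drop the references taken by nf_queue_entry_get_refs(). */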
static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	/* Release those devices we held, or Alexey will kill me. */
	dev_put(state->in);
	dev_put(state->out);
	if (state->sk)
		nf_queue_sock_put(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dev_put(entry->physin);
	dev_put(entry->physout);
#endif
}

void nf_queue_entry_free(struct nf_queue_entry *entry)
{
	nf_queue_entry_release_refs(entry);
	kfree(entry);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_free);

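/*
 * Cache the bridge ports ("physical" in/out devices) in the entry so the
 * verdict path can still reach them after the skb has left the bridge code.
 */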
static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	const struct sk_buff *skb = entry->skb;
	struct nf_bridge_info *nf_bridge;

	nf_bridge = nf_bridge_info_get(skb);
	if (nf_bridge) {
		entry->physin = nf_bridge_get_physindev(skb);
		entry->physout = nf_bridge_get_physoutdev(skb);
	} else {
		entry->physin = NULL;
		entry->physout = NULL;
	}
#endif
}

/* Bump dev refs so they don't vanish while packet is out */
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt))
		return false;

	dev_hold(state->in);
	dev_hold(state->out);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dev_hold(entry->physin);
	dev_hold(entry->physout);
#endif
	return true;
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);

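/*
 * Called when hooks go away (e.g. on netns teardown) so the queueing
 * backend can drop all packets it still holds for that netns.
 */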
void nf_queue_nf_hook_drop(struct net *net)
{
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(nf_queue_handler);
	if (qh)
		qh->nf_hook_drop(net);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);

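/*
 * For locally generated packets, remember the routing key fields so that
 * nf_reroute() can redo the route lookup on reinject, in case the verdict
 * path (e.g. a mangled header or changed mark) invalidated the cached route.
 */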
static void nf_ip_saveroute(const struct sk_buff *skb,
			    struct nf_queue_entry *entry)
{
	struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct iphdr *iph = ip_hdr(skb);

		rt_info->tos = iph->tos;
		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}

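/* IPv6 counterpart of nf_ip_saveroute(). */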
static void nf_ip6_saveroute(const struct sk_buff *skb,
			     struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}

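/*
 * Allocate a queue entry for the skb (plus room for the per-family route
 * key), grab references on everything the hook state points at, and hand
 * the entry to the registered queue handler.
 */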
static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
		      unsigned int index, unsigned int queuenum)
{
	struct nf_queue_entry *entry = NULL;
	const struct nf_queue_handler *qh;
	unsigned int route_key_size;
	int status;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	qh = rcu_dereference(nf_queue_handler);
	if (!qh)
		return -ESRCH;

	switch (state->pf) {
	case AF_INET:
		route_key_size = sizeof(struct ip_rt_info);
		break;
	case AF_INET6:
		route_key_size = sizeof(struct ip6_rt_info);
		break;
	default:
		route_key_size = 0;
		break;
	}

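	/*
	 * A prefetched socket (e.g. attached by a BPF socket-assign program)
	 * may not hold a reference of its own; take one here so the sock
	 * cannot go away while the packet sits in the queue.
	 */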
	if (skb_sk_is_prefetched(skb)) {
		struct sock *sk = skb->sk;

		if (!sk_is_refcounted(sk)) {
			if (!refcount_inc_not_zero(&sk->sk_refcnt))
				return -ENOTCONN;

			/* drop refcount on skb_orphan */
			skb->destructor = sock_edemux;
		}
	}

	entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	if (skb_dst(skb) && !skb_dst_force(skb)) {
		kfree(entry);
		return -ENETDOWN;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.state	= *state,
		.hook_index = index,
		.size	= sizeof(*entry) + route_key_size,
	};

	__nf_queue_entry_init_physdevs(entry);

	if (!nf_queue_entry_get_refs(entry)) {
		kfree(entry);
		return -ENOTCONN;
	}

	switch (entry->state.pf) {
	case AF_INET:
		nf_ip_saveroute(skb, entry);
		break;
	case AF_INET6:
		nf_ip6_saveroute(skb, entry);
		break;
	}

	status = qh->outfn(entry, queuenum);
	if (status < 0) {
		nf_queue_entry_free(entry);
		return status;
	}

	return 0;
}

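/*
 * The upper bits of the verdict carry the target queue number; the low
 * NF_VERDICT_QBITS hold the verdict proper plus flags.  With
 * NF_VERDICT_FLAG_QUEUE_BYPASS set, a packet continues through the
 * remaining hooks instead of being dropped when no handler is listening.
 */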
/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
	     unsigned int index, unsigned int verdict)
{
	int ret;

	ret = __nf_queue(skb, state, index, verdict >> NF_VERDICT_QBITS);
	if (ret < 0) {
		if (ret == -ESRCH &&
		    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
			return 1;
		kfree_skb(skb);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);

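/*
 * Run the remaining hooks of a chain, starting at *index.  NF_ACCEPT
 * advances to the next hook, NF_REPEAT re-runs the current one, and any
 * other verdict stops traversal with *index left at the deciding hook.
 */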
static unsigned int nf_iterate(struct sk_buff *skb,
			       struct nf_hook_state *state,
			       const struct nf_hook_entries *hooks,
			       unsigned int *index)
{
	const struct nf_hook_entry *hook;
	unsigned int verdict, i = *index;

	while (i < hooks->num_hook_entries) {
		hook = &hooks->hooks[i];
repeat:
		verdict = nf_hook_entry_hookfn(hook, skb, state);
		if (verdict != NF_ACCEPT) {
			*index = i;
			if (verdict != NF_REPEAT)
				return verdict;
			goto repeat;
		}
		i++;
	}

	*index = i;
	return NF_ACCEPT;
}

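/* Map (pf, hooknum) to the per-netns head of the matching hook chain. */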
static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
{
	switch (pf) {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		return rcu_dereference(net->nf.hooks_bridge[hooknum]);
#endif
	case NFPROTO_IPV4:
		return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
	case NFPROTO_IPV6:
		return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}
}

/* Caller must hold rcu read-side lock */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	const struct nf_hook_entry *hook_entry;
	const struct nf_hook_entries *hooks;
	struct sk_buff *skb = entry->skb;
	const struct net *net;
	unsigned int i;
	int err;
	u8 pf;

	net = entry->state.net;
	pf = entry->state.pf;

	hooks = nf_hook_entries_head(net, pf, entry->state.hook);

	i = entry->hook_index;
	if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
		kfree_skb(skb);
		nf_queue_entry_free(entry);
		return;
	}

	hook_entry = &hooks->hooks[i];

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT)
		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

	if (verdict == NF_ACCEPT) {
		if (nf_reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
next_hook:
		++i;
		verdict = nf_iterate(skb, &entry->state, hooks, &i);
	}

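	/*
	 * Final verdict: NF_ACCEPT and NF_STOP hand the packet on to the
	 * stack via okfn, NF_QUEUE requeues it at the next hook, NF_STOLEN
	 * means a hook took ownership of the skb; anything else drops it.
	 */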
	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->state.okfn(entry->state.net, entry->state.sk, skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		err = nf_queue(skb, &entry->state, i, verdict);
		if (err == 1)
			goto next_hook;
		break;
	case NF_STOLEN:
		break;
	default:
		kfree_skb(skb);
	}

	nf_queue_entry_free(entry);
}
EXPORT_SYMBOL(nf_reinject);