cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dst.c (8899B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>
#include <net/xfrm.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_out);

const struct dst_metrics dst_default_metrics = {
	/* This initializer is needed to force the linker to place this
	 * variable into the const section. Otherwise it might end up in
	 * the bss section. We really want to avoid false sharing on this
	 * variable, and catch any writes on it.
	 */
	.refcnt = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL(dst_default_metrics);

void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags)
{
	dst->dev = dev;
	dev_hold_track(dev, &dst->dev_tracker, GFP_ATOMIC);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics.metrics, true);
	dst->expires = 0UL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->lwtstate = NULL;
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);

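/* Allocate a new dst_entry from the per-ops kmem cache.  When the ops
 * provide a garbage collector and this entry is counted, the gc runs once
 * the fast entry count exceeds gc_thresh; the allocation fails if gc
 * cannot reclaim anything.  The new entry is then set up via dst_init().
 */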
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc &&
	    !(flags & DST_NOCOUNT) &&
	    dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops)) {
			pr_notice_ratelimited("Route cache is full: consider increasing sysctl net.ipv6.route.max_size.\n");
			return NULL;
		}
	}

	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);

	return dst;
}
EXPORT_SYMBOL(dst_alloc);

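/* Tear down a dst_entry: undo the entry accounting, invoke the protocol
 * destroy hook, drop the tracked device and lwtstate references, and free
 * the entry either through metadata_dst_free() or back into the kmem
 * cache.  For xfrm bundles, the child dst is released as well.
 */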
struct dst_entry *dst_destroy(struct dst_entry * dst)
{
	struct dst_entry *child = NULL;

	smp_rmb();

#ifdef CONFIG_XFRM
	if (dst->xfrm) {
		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;

		child = xdst->child;
	}
#endif
	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	dev_put_track(dst->dev, &dst->dev_tracker);

	lwtstate_put(dst->lwtstate);

	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst)
		dst_release_immediate(dst);
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);

static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst = dst_destroy(dst);
}

/* Operations to mark dst as DEAD and clean up the net device referenced
 * by dst:
 * 1. put the dst under the blackhole interface and discard all tx/rx
 *    packets on this route.
 * 2. release the net_device
 * This function should be called when removing routes from the fib tree
 * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to
 * make the next dst_ops->check() fail.
 */
void dst_dev_put(struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;

	dst->obsolete = DST_OBSOLETE_DEAD;
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, true);
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->dev = blackhole_netdev;
	dev_replace_track(dev, blackhole_netdev, &dst->dev_tracker,
			  GFP_ATOMIC);
}
EXPORT_SYMBOL(dst_dev_put);

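/* dst_release() drops a reference and, on the last put, defers destruction
 * to an RCU callback; dst_release_immediate() below destroys the entry
 * synchronously instead.  Both warn once if the refcount underflows.
 */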
void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (WARN_ONCE(newrefcnt < 0, "dst_release underflow"))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt)
			call_rcu(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);

void dst_release_immediate(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (WARN_ONCE(newrefcnt < 0, "dst_release_immediate underflow"))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt)
			dst_destroy(dst);
	}
}
EXPORT_SYMBOL(dst_release_immediate);

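/* Copy-on-write for the shared metrics array: allocate a private copy,
 * then swap it in with cmpxchg().  If another writer won the race, free
 * the copy and return theirs (or NULL if it is read-only); if the old
 * metrics were refcounted, drop that reference.
 */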
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (p) {
		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
		unsigned long prev, new;

		refcount_set(&p->refcnt, 1);
		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else if (prev & DST_METRICS_REFCOUNTED) {
			if (refcount_dec_and_test(&old_p->refcnt))
				kfree(old_p);
		}
	}
	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
	return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

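/* No-op dst operations backing dst_blackhole_ops below (used for metadata
 * dst entries); the stubs are also exported so protocol blackhole dst_ops
 * can reuse them.  The MTU helper falls back to the device MTU when no
 * RTAX_MTU metric is set.
 */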
struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	return NULL;
}

struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr)
{
	return NULL;
}

void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);

void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_redirect);

unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}
EXPORT_SYMBOL_GPL(dst_blackhole_mtu);

static struct dst_ops dst_blackhole_ops = {
	.family		= AF_UNSPEC,
	.neigh_lookup	= dst_blackhole_neigh_lookup,
	.check		= dst_blackhole_check,
	.cow_metrics	= dst_blackhole_cow_metrics,
	.update_pmtu	= dst_blackhole_update_pmtu,
	.redirect	= dst_blackhole_redirect,
	.mtu		= dst_blackhole_mtu,
};

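/* Metadata dst entries carry per-packet tunnel metadata rather than a real
 * route: they are backed by dst_blackhole_ops, uncounted (DST_NOCOUNT), and
 * their option area directly follows struct metadata_dst.  A typical caller
 * (illustrative sketch, not from this file) might do:
 *
 *	struct metadata_dst *md;
 *
 *	md = metadata_dst_alloc(0, METADATA_IP_TUNNEL, GFP_KERNEL);
 *	if (!md)
 *		return -ENOMEM;
 *	...
 *	metadata_dst_free(md);
 */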
static void __metadata_dst_init(struct metadata_dst *md_dst,
				enum metadata_type type, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCOUNT);
	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
	md_dst->type = type;
}

struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
					gfp_t flags)
{
	struct metadata_dst *md_dst;

	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
	if (!md_dst)
		return NULL;

	__metadata_dst_init(md_dst, type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);

void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	if (md_dst->type == METADATA_IP_TUNNEL)
		dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	kfree(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free);

struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
{
	int cpu;
	struct metadata_dst __percpu *md_dst;

	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
				    __alignof__(struct metadata_dst), flags);
	if (!md_dst)
		return NULL;

	for_each_possible_cpu(cpu)
		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);

void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst)
{
#ifdef CONFIG_DST_CACHE
	int cpu;

	for_each_possible_cpu(cpu) {
		struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);

		if (one_md_dst->type == METADATA_IP_TUNNEL)
			dst_cache_destroy(&one_md_dst->u.tun_info.dst_cache);
	}
#endif
	free_percpu(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free_percpu);