cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

fib_rules.c (31812B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * net/core/fib_rules.c		Generic Routing Rules
      4 *
      5 * Authors:	Thomas Graf <tgraf@suug.ch>
      6 */
      7
      8#include <linux/types.h>
      9#include <linux/kernel.h>
     10#include <linux/slab.h>
     11#include <linux/list.h>
     12#include <linux/module.h>
     13#include <net/net_namespace.h>
     14#include <net/sock.h>
     15#include <net/fib_rules.h>
     16#include <net/ip_tunnels.h>
     17#include <linux/indirect_call_wrapper.h>
     18
     19#if defined(CONFIG_IPV6) && defined(CONFIG_IPV6_MULTIPLE_TABLES)
     20#ifdef CONFIG_IP_MULTIPLE_TABLES
     21#define INDIRECT_CALL_MT(f, f2, f1, ...) \
     22	INDIRECT_CALL_INET(f, f2, f1, __VA_ARGS__)
     23#else
     24#define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
     25#endif
     26#elif defined(CONFIG_IP_MULTIPLE_TABLES)
     27#define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
     28#else
     29#define INDIRECT_CALL_MT(f, f2, f1, ...) f(__VA_ARGS__)
     30#endif
     31
     32static const struct fib_kuid_range fib_kuid_range_unset = {
     33	KUIDT_INIT(0),
     34	KUIDT_INIT(~0),
     35};
     36
     37bool fib_rule_matchall(const struct fib_rule *rule)
     38{
     39	if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
     40	    rule->flags)
     41		return false;
     42	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
     43		return false;
     44	if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
     45	    !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
     46		return false;
     47	if (fib_rule_port_range_set(&rule->sport_range))
     48		return false;
     49	if (fib_rule_port_range_set(&rule->dport_range))
     50		return false;
     51	return true;
     52}
     53EXPORT_SYMBOL_GPL(fib_rule_matchall);
     54
     55int fib_default_rule_add(struct fib_rules_ops *ops,
     56			 u32 pref, u32 table, u32 flags)
     57{
     58	struct fib_rule *r;
     59
     60	r = kzalloc(ops->rule_size, GFP_KERNEL_ACCOUNT);
     61	if (r == NULL)
     62		return -ENOMEM;
     63
     64	refcount_set(&r->refcnt, 1);
     65	r->action = FR_ACT_TO_TBL;
     66	r->pref = pref;
     67	r->table = table;
     68	r->flags = flags;
     69	r->proto = RTPROT_KERNEL;
     70	r->fr_net = ops->fro_net;
     71	r->uid_range = fib_kuid_range_unset;
     72
     73	r->suppress_prefixlen = -1;
     74	r->suppress_ifgroup = -1;
     75
      76	/* The lock is not required here, the list is unreachable
      77	 * at the moment this function is called */
     78	list_add_tail(&r->list, &ops->rules_list);
     79	return 0;
     80}
     81EXPORT_SYMBOL(fib_default_rule_add);
     82
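        /* Default preference for a rule added without FRA_PRIORITY:
         * one below the second rule in the list, or 0. */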
     83static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
     84{
     85	struct list_head *pos;
     86	struct fib_rule *rule;
     87
     88	if (!list_empty(&ops->rules_list)) {
     89		pos = ops->rules_list.next;
     90		if (pos->next != &ops->rules_list) {
     91			rule = list_entry(pos->next, struct fib_rule, list);
     92			if (rule->pref)
     93				return rule->pref - 1;
     94		}
     95	}
     96
     97	return 0;
     98}
     99
    100static void notify_rule_change(int event, struct fib_rule *rule,
    101			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
    102			       u32 pid);
    103
    104static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
    105{
    106	struct fib_rules_ops *ops;
    107
    108	rcu_read_lock();
    109	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
    110		if (ops->family == family) {
    111			if (!try_module_get(ops->owner))
    112				ops = NULL;
    113			rcu_read_unlock();
    114			return ops;
    115		}
    116	}
    117	rcu_read_unlock();
    118
    119	return NULL;
    120}
    121
    122static void rules_ops_put(struct fib_rules_ops *ops)
    123{
    124	if (ops)
    125		module_put(ops->owner);
    126}
    127
    128static void flush_route_cache(struct fib_rules_ops *ops)
    129{
    130	if (ops->flush_cache)
    131		ops->flush_cache(ops);
    132}
    133
    134static int __fib_rules_register(struct fib_rules_ops *ops)
    135{
    136	int err = -EEXIST;
    137	struct fib_rules_ops *o;
    138	struct net *net;
    139
    140	net = ops->fro_net;
    141
    142	if (ops->rule_size < sizeof(struct fib_rule))
    143		return -EINVAL;
    144
    145	if (ops->match == NULL || ops->configure == NULL ||
    146	    ops->compare == NULL || ops->fill == NULL ||
    147	    ops->action == NULL)
    148		return -EINVAL;
    149
    150	spin_lock(&net->rules_mod_lock);
    151	list_for_each_entry(o, &net->rules_ops, list)
    152		if (ops->family == o->family)
    153			goto errout;
    154
    155	list_add_tail_rcu(&ops->list, &net->rules_ops);
    156	err = 0;
    157errout:
    158	spin_unlock(&net->rules_mod_lock);
    159
    160	return err;
    161}
    162
    163struct fib_rules_ops *
    164fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
    165{
    166	struct fib_rules_ops *ops;
    167	int err;
    168
    169	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
    170	if (ops == NULL)
    171		return ERR_PTR(-ENOMEM);
    172
    173	INIT_LIST_HEAD(&ops->rules_list);
    174	ops->fro_net = net;
    175
    176	err = __fib_rules_register(ops);
    177	if (err) {
    178		kfree(ops);
    179		ops = ERR_PTR(err);
    180	}
    181
    182	return ops;
    183}
    184EXPORT_SYMBOL_GPL(fib_rules_register);
    185
    186static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
    187{
    188	struct fib_rule *rule, *tmp;
    189
    190	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
    191		list_del_rcu(&rule->list);
    192		if (ops->delete)
    193			ops->delete(rule);
    194		fib_rule_put(rule);
    195	}
    196}
    197
    198void fib_rules_unregister(struct fib_rules_ops *ops)
    199{
    200	struct net *net = ops->fro_net;
    201
    202	spin_lock(&net->rules_mod_lock);
    203	list_del_rcu(&ops->list);
    204	spin_unlock(&net->rules_mod_lock);
    205
    206	fib_rules_cleanup_ops(ops);
    207	kfree_rcu(ops, rcu);
    208}
    209EXPORT_SYMBOL_GPL(fib_rules_unregister);
    210
    211static int uid_range_set(struct fib_kuid_range *range)
    212{
    213	return uid_valid(range->start) && uid_valid(range->end);
    214}
    215
    216static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
    217{
    218	struct fib_rule_uid_range *in;
    219	struct fib_kuid_range out;
    220
    221	in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);
    222
    223	out.start = make_kuid(current_user_ns(), in->start);
    224	out.end = make_kuid(current_user_ns(), in->end);
    225
    226	return out;
    227}
    228
    229static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
    230{
    231	struct fib_rule_uid_range out = {
    232		from_kuid_munged(current_user_ns(), range->start),
    233		from_kuid_munged(current_user_ns(), range->end)
    234	};
    235
    236	return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
    237}
    238
    239static int nla_get_port_range(struct nlattr *pattr,
    240			      struct fib_rule_port_range *port_range)
    241{
    242	const struct fib_rule_port_range *pr = nla_data(pattr);
    243
    244	if (!fib_rule_port_range_valid(pr))
    245		return -EINVAL;
    246
    247	port_range->start = pr->start;
    248	port_range->end = pr->end;
    249
    250	return 0;
    251}
    252
    253static int nla_put_port_range(struct sk_buff *skb, int attrtype,
    254			      struct fib_rule_port_range *range)
    255{
    256	return nla_put(skb, attrtype, sizeof(*range), range);
    257}
    258
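        /* Evaluate the generic selectors (iif/oif, fwmark, tunnel id,
         * l3mdev, uid range), then the family-specific match; a set
         * FIB_RULE_INVERT flag negates the result. */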
    259static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
    260			  struct flowi *fl, int flags,
    261			  struct fib_lookup_arg *arg)
    262{
    263	int ret = 0;
    264
    265	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
    266		goto out;
    267
    268	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
    269		goto out;
    270
    271	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
    272		goto out;
    273
    274	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
    275		goto out;
    276
    277	if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
    278		goto out;
    279
    280	if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
    281	    uid_gt(fl->flowi_uid, rule->uid_range.end))
    282		goto out;
    283
    284	ret = INDIRECT_CALL_MT(ops->match,
    285			       fib6_rule_match,
    286			       fib4_rule_match,
    287			       rule, fl, flags);
    288out:
    289	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
    290}
    291
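        /* Walk the rule list under RCU, following resolved goto targets,
         * and run each matching rule's action until one returns something
         * other than -EAGAIN. */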
    292int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
    293		     int flags, struct fib_lookup_arg *arg)
    294{
    295	struct fib_rule *rule;
    296	int err;
    297
    298	rcu_read_lock();
    299
    300	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
    301jumped:
    302		if (!fib_rule_match(rule, ops, fl, flags, arg))
    303			continue;
    304
    305		if (rule->action == FR_ACT_GOTO) {
    306			struct fib_rule *target;
    307
    308			target = rcu_dereference(rule->ctarget);
    309			if (target == NULL) {
    310				continue;
    311			} else {
    312				rule = target;
    313				goto jumped;
    314			}
    315		} else if (rule->action == FR_ACT_NOP)
    316			continue;
    317		else
    318			err = INDIRECT_CALL_MT(ops->action,
    319					       fib6_rule_action,
    320					       fib4_rule_action,
    321					       rule, fl, flags, arg);
    322
    323		if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress,
    324							      fib6_rule_suppress,
    325							      fib4_rule_suppress,
    326							      rule, flags, arg))
    327			continue;
    328
    329		if (err != -EAGAIN) {
    330			if ((arg->flags & FIB_LOOKUP_NOREF) ||
    331			    likely(refcount_inc_not_zero(&rule->refcnt))) {
    332				arg->rule = rule;
    333				goto out;
    334			}
    335			break;
    336		}
    337	}
    338
    339	err = -ESRCH;
    340out:
    341	rcu_read_unlock();
    342
    343	return err;
    344}
    345EXPORT_SYMBOL_GPL(fib_rules_lookup);
    346
    347static int call_fib_rule_notifier(struct notifier_block *nb,
    348				  enum fib_event_type event_type,
    349				  struct fib_rule *rule, int family,
    350				  struct netlink_ext_ack *extack)
    351{
    352	struct fib_rule_notifier_info info = {
    353		.info.family = family,
    354		.info.extack = extack,
    355		.rule = rule,
    356	};
    357
    358	return call_fib_notifier(nb, event_type, &info.info);
    359}
    360
    361static int call_fib_rule_notifiers(struct net *net,
    362				   enum fib_event_type event_type,
    363				   struct fib_rule *rule,
    364				   struct fib_rules_ops *ops,
    365				   struct netlink_ext_ack *extack)
    366{
    367	struct fib_rule_notifier_info info = {
    368		.info.family = ops->family,
    369		.info.extack = extack,
    370		.rule = rule,
    371	};
    372
    373	ops->fib_rules_seq++;
    374	return call_fib_notifiers(net, event_type, &info.info);
    375}
    376
    377/* Called with rcu_read_lock() */
    378int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
    379		   struct netlink_ext_ack *extack)
    380{
    381	struct fib_rules_ops *ops;
    382	struct fib_rule *rule;
    383	int err = 0;
    384
    385	ops = lookup_rules_ops(net, family);
    386	if (!ops)
    387		return -EAFNOSUPPORT;
    388	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
    389		err = call_fib_rule_notifier(nb, FIB_EVENT_RULE_ADD,
    390					     rule, family, extack);
    391		if (err)
    392			break;
    393	}
    394	rules_ops_put(ops);
    395
    396	return err;
    397}
    398EXPORT_SYMBOL_GPL(fib_rules_dump);
    399
    400unsigned int fib_rules_seq_read(struct net *net, int family)
    401{
    402	unsigned int fib_rules_seq;
    403	struct fib_rules_ops *ops;
    404
    405	ASSERT_RTNL();
    406
    407	ops = lookup_rules_ops(net, family);
    408	if (!ops)
    409		return 0;
    410	fib_rules_seq = ops->fib_rules_seq;
    411	rules_ops_put(ops);
    412
    413	return fib_rules_seq;
    414}
    415EXPORT_SYMBOL_GPL(fib_rules_seq_read);
    416
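        /* Find an existing rule matching only the attributes actually
         * present in the request; used by the delete path. */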
    417static struct fib_rule *rule_find(struct fib_rules_ops *ops,
    418				  struct fib_rule_hdr *frh,
    419				  struct nlattr **tb,
    420				  struct fib_rule *rule,
    421				  bool user_priority)
    422{
    423	struct fib_rule *r;
    424
    425	list_for_each_entry(r, &ops->rules_list, list) {
    426		if (rule->action && r->action != rule->action)
    427			continue;
    428
    429		if (rule->table && r->table != rule->table)
    430			continue;
    431
    432		if (user_priority && r->pref != rule->pref)
    433			continue;
    434
    435		if (rule->iifname[0] &&
    436		    memcmp(r->iifname, rule->iifname, IFNAMSIZ))
    437			continue;
    438
    439		if (rule->oifname[0] &&
    440		    memcmp(r->oifname, rule->oifname, IFNAMSIZ))
    441			continue;
    442
    443		if (rule->mark && r->mark != rule->mark)
    444			continue;
    445
    446		if (rule->suppress_ifgroup != -1 &&
    447		    r->suppress_ifgroup != rule->suppress_ifgroup)
    448			continue;
    449
    450		if (rule->suppress_prefixlen != -1 &&
    451		    r->suppress_prefixlen != rule->suppress_prefixlen)
    452			continue;
    453
    454		if (rule->mark_mask && r->mark_mask != rule->mark_mask)
    455			continue;
    456
    457		if (rule->tun_id && r->tun_id != rule->tun_id)
    458			continue;
    459
    460		if (r->fr_net != rule->fr_net)
    461			continue;
    462
    463		if (rule->l3mdev && r->l3mdev != rule->l3mdev)
    464			continue;
    465
    466		if (uid_range_set(&rule->uid_range) &&
    467		    (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
    468		    !uid_eq(r->uid_range.end, rule->uid_range.end)))
    469			continue;
    470
    471		if (rule->ip_proto && r->ip_proto != rule->ip_proto)
    472			continue;
    473
    474		if (rule->proto && r->proto != rule->proto)
    475			continue;
    476
    477		if (fib_rule_port_range_set(&rule->sport_range) &&
    478		    !fib_rule_port_range_compare(&r->sport_range,
    479						 &rule->sport_range))
    480			continue;
    481
    482		if (fib_rule_port_range_set(&rule->dport_range) &&
    483		    !fib_rule_port_range_compare(&r->dport_range,
    484						 &rule->dport_range))
    485			continue;
    486
    487		if (!ops->compare(r, frh, tb))
    488			continue;
    489		return r;
    490	}
    491
    492	return NULL;
    493}
    494
    495#ifdef CONFIG_NET_L3_MASTER_DEV
    496static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule,
    497			      struct netlink_ext_ack *extack)
    498{
    499	nlrule->l3mdev = nla_get_u8(nla);
    500	if (nlrule->l3mdev != 1) {
    501		NL_SET_ERR_MSG(extack, "Invalid l3mdev attribute");
    502		return -1;
    503	}
    504
    505	return 0;
    506}
    507#else
    508static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule,
    509			      struct netlink_ext_ack *extack)
    510{
    511	NL_SET_ERR_MSG(extack, "l3mdev support is not enabled in kernel");
    512	return -1;
    513}
    514#endif
    515
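        /* Translate the netlink attributes of an RTM_NEWRULE/RTM_DELRULE
         * request into a freshly allocated struct fib_rule. */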
    516static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh,
    517		       struct netlink_ext_ack *extack,
    518		       struct fib_rules_ops *ops,
    519		       struct nlattr *tb[],
    520		       struct fib_rule **rule,
    521		       bool *user_priority)
    522{
    523	struct net *net = sock_net(skb->sk);
    524	struct fib_rule_hdr *frh = nlmsg_data(nlh);
    525	struct fib_rule *nlrule = NULL;
    526	int err = -EINVAL;
    527
    528	if (frh->src_len)
    529		if (!tb[FRA_SRC] ||
    530		    frh->src_len > (ops->addr_size * 8) ||
    531		    nla_len(tb[FRA_SRC]) != ops->addr_size) {
    532			NL_SET_ERR_MSG(extack, "Invalid source address");
    533			goto errout;
    534	}
    535
    536	if (frh->dst_len)
    537		if (!tb[FRA_DST] ||
    538		    frh->dst_len > (ops->addr_size * 8) ||
    539		    nla_len(tb[FRA_DST]) != ops->addr_size) {
    540			NL_SET_ERR_MSG(extack, "Invalid dst address");
    541			goto errout;
    542	}
    543
    544	nlrule = kzalloc(ops->rule_size, GFP_KERNEL_ACCOUNT);
    545	if (!nlrule) {
    546		err = -ENOMEM;
    547		goto errout;
    548	}
    549	refcount_set(&nlrule->refcnt, 1);
    550	nlrule->fr_net = net;
    551
    552	if (tb[FRA_PRIORITY]) {
    553		nlrule->pref = nla_get_u32(tb[FRA_PRIORITY]);
    554		*user_priority = true;
    555	} else {
    556		nlrule->pref = fib_default_rule_pref(ops);
    557	}
    558
    559	nlrule->proto = tb[FRA_PROTOCOL] ?
    560		nla_get_u8(tb[FRA_PROTOCOL]) : RTPROT_UNSPEC;
    561
    562	if (tb[FRA_IIFNAME]) {
    563		struct net_device *dev;
    564
    565		nlrule->iifindex = -1;
    566		nla_strscpy(nlrule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
    567		dev = __dev_get_by_name(net, nlrule->iifname);
    568		if (dev)
    569			nlrule->iifindex = dev->ifindex;
    570	}
    571
    572	if (tb[FRA_OIFNAME]) {
    573		struct net_device *dev;
    574
    575		nlrule->oifindex = -1;
    576		nla_strscpy(nlrule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
    577		dev = __dev_get_by_name(net, nlrule->oifname);
    578		if (dev)
    579			nlrule->oifindex = dev->ifindex;
    580	}
    581
    582	if (tb[FRA_FWMARK]) {
    583		nlrule->mark = nla_get_u32(tb[FRA_FWMARK]);
    584		if (nlrule->mark)
    585			/* compatibility: if the mark value is non-zero all bits
    586			 * are compared unless a mask is explicitly specified.
    587			 */
    588			nlrule->mark_mask = 0xFFFFFFFF;
    589	}
    590
    591	if (tb[FRA_FWMASK])
    592		nlrule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
    593
    594	if (tb[FRA_TUN_ID])
    595		nlrule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);
    596
    597	err = -EINVAL;
    598	if (tb[FRA_L3MDEV] &&
    599	    fib_nl2rule_l3mdev(tb[FRA_L3MDEV], nlrule, extack) < 0)
    600		goto errout_free;
    601
    602	nlrule->action = frh->action;
    603	nlrule->flags = frh->flags;
    604	nlrule->table = frh_get_table(frh, tb);
    605	if (tb[FRA_SUPPRESS_PREFIXLEN])
    606		nlrule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
    607	else
    608		nlrule->suppress_prefixlen = -1;
    609
    610	if (tb[FRA_SUPPRESS_IFGROUP])
    611		nlrule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
    612	else
    613		nlrule->suppress_ifgroup = -1;
    614
    615	if (tb[FRA_GOTO]) {
    616		if (nlrule->action != FR_ACT_GOTO) {
    617			NL_SET_ERR_MSG(extack, "Unexpected goto");
    618			goto errout_free;
    619		}
    620
    621		nlrule->target = nla_get_u32(tb[FRA_GOTO]);
    622		/* Backward jumps are prohibited to avoid endless loops */
    623		if (nlrule->target <= nlrule->pref) {
    624			NL_SET_ERR_MSG(extack, "Backward goto not supported");
    625			goto errout_free;
    626		}
    627	} else if (nlrule->action == FR_ACT_GOTO) {
    628		NL_SET_ERR_MSG(extack, "Missing goto target for action goto");
    629		goto errout_free;
    630	}
    631
    632	if (nlrule->l3mdev && nlrule->table) {
    633		NL_SET_ERR_MSG(extack, "l3mdev and table are mutually exclusive");
    634		goto errout_free;
    635	}
    636
    637	if (tb[FRA_UID_RANGE]) {
    638		if (current_user_ns() != net->user_ns) {
    639			err = -EPERM;
    640			NL_SET_ERR_MSG(extack, "No permission to set uid");
    641			goto errout_free;
    642		}
    643
    644		nlrule->uid_range = nla_get_kuid_range(tb);
    645
    646		if (!uid_range_set(&nlrule->uid_range) ||
    647		    !uid_lte(nlrule->uid_range.start, nlrule->uid_range.end)) {
    648			NL_SET_ERR_MSG(extack, "Invalid uid range");
    649			goto errout_free;
    650		}
    651	} else {
    652		nlrule->uid_range = fib_kuid_range_unset;
    653	}
    654
    655	if (tb[FRA_IP_PROTO])
    656		nlrule->ip_proto = nla_get_u8(tb[FRA_IP_PROTO]);
    657
    658	if (tb[FRA_SPORT_RANGE]) {
    659		err = nla_get_port_range(tb[FRA_SPORT_RANGE],
    660					 &nlrule->sport_range);
    661		if (err) {
    662			NL_SET_ERR_MSG(extack, "Invalid sport range");
    663			goto errout_free;
    664		}
    665	}
    666
    667	if (tb[FRA_DPORT_RANGE]) {
    668		err = nla_get_port_range(tb[FRA_DPORT_RANGE],
    669					 &nlrule->dport_range);
    670		if (err) {
    671			NL_SET_ERR_MSG(extack, "Invalid dport range");
    672			goto errout_free;
    673		}
    674	}
    675
    676	*rule = nlrule;
    677
    678	return 0;
    679
    680errout_free:
    681	kfree(nlrule);
    682errout:
    683	return err;
    684}
    685
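        /* Strict field-by-field comparison against an existing rule;
         * used to honour NLM_F_EXCL when adding rules. */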
    686static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
    687		       struct nlattr **tb, struct fib_rule *rule)
    688{
    689	struct fib_rule *r;
    690
    691	list_for_each_entry(r, &ops->rules_list, list) {
    692		if (r->action != rule->action)
    693			continue;
    694
    695		if (r->table != rule->table)
    696			continue;
    697
    698		if (r->pref != rule->pref)
    699			continue;
    700
    701		if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
    702			continue;
    703
    704		if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
    705			continue;
    706
    707		if (r->mark != rule->mark)
    708			continue;
    709
    710		if (r->suppress_ifgroup != rule->suppress_ifgroup)
    711			continue;
    712
    713		if (r->suppress_prefixlen != rule->suppress_prefixlen)
    714			continue;
    715
    716		if (r->mark_mask != rule->mark_mask)
    717			continue;
    718
    719		if (r->tun_id != rule->tun_id)
    720			continue;
    721
    722		if (r->fr_net != rule->fr_net)
    723			continue;
    724
    725		if (r->l3mdev != rule->l3mdev)
    726			continue;
    727
    728		if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
    729		    !uid_eq(r->uid_range.end, rule->uid_range.end))
    730			continue;
    731
    732		if (r->ip_proto != rule->ip_proto)
    733			continue;
    734
    735		if (r->proto != rule->proto)
    736			continue;
    737
    738		if (!fib_rule_port_range_compare(&r->sport_range,
    739						 &rule->sport_range))
    740			continue;
    741
    742		if (!fib_rule_port_range_compare(&r->dport_range,
    743						 &rule->dport_range))
    744			continue;
    745
    746		if (!ops->compare(r, frh, tb))
    747			continue;
    748		return 1;
    749	}
    750	return 0;
    751}
    752
    753static const struct nla_policy fib_rule_policy[FRA_MAX + 1] = {
    754	[FRA_UNSPEC]	= { .strict_start_type = FRA_DPORT_RANGE + 1 },
    755	[FRA_IIFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
    756	[FRA_OIFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
    757	[FRA_PRIORITY]	= { .type = NLA_U32 },
    758	[FRA_FWMARK]	= { .type = NLA_U32 },
    759	[FRA_FLOW]	= { .type = NLA_U32 },
    760	[FRA_TUN_ID]	= { .type = NLA_U64 },
    761	[FRA_FWMASK]	= { .type = NLA_U32 },
    762	[FRA_TABLE]     = { .type = NLA_U32 },
    763	[FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 },
    764	[FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 },
    765	[FRA_GOTO]	= { .type = NLA_U32 },
    766	[FRA_L3MDEV]	= { .type = NLA_U8 },
    767	[FRA_UID_RANGE]	= { .len = sizeof(struct fib_rule_uid_range) },
    768	[FRA_PROTOCOL]  = { .type = NLA_U8 },
    769	[FRA_IP_PROTO]  = { .type = NLA_U8 },
    770	[FRA_SPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) },
    771	[FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) }
    772};
    773
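        /* RTM_NEWRULE handler: parse the request, insert the rule in
         * preference order and resolve pending goto targets. */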
    774int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
    775		   struct netlink_ext_ack *extack)
    776{
    777	struct net *net = sock_net(skb->sk);
    778	struct fib_rule_hdr *frh = nlmsg_data(nlh);
    779	struct fib_rules_ops *ops = NULL;
    780	struct fib_rule *rule = NULL, *r, *last = NULL;
    781	struct nlattr *tb[FRA_MAX + 1];
    782	int err = -EINVAL, unresolved = 0;
    783	bool user_priority = false;
    784
    785	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
    786		NL_SET_ERR_MSG(extack, "Invalid msg length");
    787		goto errout;
    788	}
    789
    790	ops = lookup_rules_ops(net, frh->family);
    791	if (!ops) {
    792		err = -EAFNOSUPPORT;
    793		NL_SET_ERR_MSG(extack, "Rule family not supported");
    794		goto errout;
    795	}
    796
    797	err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
    798				     fib_rule_policy, extack);
    799	if (err < 0) {
    800		NL_SET_ERR_MSG(extack, "Error parsing msg");
    801		goto errout;
    802	}
    803
    804	err = fib_nl2rule(skb, nlh, extack, ops, tb, &rule, &user_priority);
    805	if (err)
    806		goto errout;
    807
    808	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
    809	    rule_exists(ops, frh, tb, rule)) {
    810		err = -EEXIST;
    811		goto errout_free;
    812	}
    813
    814	err = ops->configure(rule, skb, frh, tb, extack);
    815	if (err < 0)
    816		goto errout_free;
    817
    818	err = call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule, ops,
    819				      extack);
    820	if (err < 0)
    821		goto errout_free;
    822
    823	list_for_each_entry(r, &ops->rules_list, list) {
    824		if (r->pref == rule->target) {
    825			RCU_INIT_POINTER(rule->ctarget, r);
    826			break;
    827		}
    828	}
    829
    830	if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
    831		unresolved = 1;
    832
    833	list_for_each_entry(r, &ops->rules_list, list) {
    834		if (r->pref > rule->pref)
    835			break;
    836		last = r;
    837	}
    838
    839	if (last)
    840		list_add_rcu(&rule->list, &last->list);
    841	else
    842		list_add_rcu(&rule->list, &ops->rules_list);
    843
    844	if (ops->unresolved_rules) {
    845		/*
    846		 * There are unresolved goto rules in the list, check if
    847		 * any of them are pointing to this new rule.
    848		 */
    849		list_for_each_entry(r, &ops->rules_list, list) {
    850			if (r->action == FR_ACT_GOTO &&
    851			    r->target == rule->pref &&
    852			    rtnl_dereference(r->ctarget) == NULL) {
    853				rcu_assign_pointer(r->ctarget, rule);
    854				if (--ops->unresolved_rules == 0)
    855					break;
    856			}
    857		}
    858	}
    859
    860	if (rule->action == FR_ACT_GOTO)
    861		ops->nr_goto_rules++;
    862
    863	if (unresolved)
    864		ops->unresolved_rules++;
    865
    866	if (rule->tun_id)
    867		ip_tunnel_need_metadata();
    868
    869	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
    870	flush_route_cache(ops);
    871	rules_ops_put(ops);
    872	return 0;
    873
    874errout_free:
    875	kfree(rule);
    876errout:
    877	rules_ops_put(ops);
    878	return err;
    879}
    880EXPORT_SYMBOL_GPL(fib_nl_newrule);
    881
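        /* RTM_DELRULE handler: locate the rule, unlink it and repoint
         * any goto rules that targeted it. */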
    882int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
    883		   struct netlink_ext_ack *extack)
    884{
    885	struct net *net = sock_net(skb->sk);
    886	struct fib_rule_hdr *frh = nlmsg_data(nlh);
    887	struct fib_rules_ops *ops = NULL;
    888	struct fib_rule *rule = NULL, *r, *nlrule = NULL;
    889	struct nlattr *tb[FRA_MAX+1];
    890	int err = -EINVAL;
    891	bool user_priority = false;
    892
    893	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
    894		NL_SET_ERR_MSG(extack, "Invalid msg length");
    895		goto errout;
    896	}
    897
    898	ops = lookup_rules_ops(net, frh->family);
    899	if (ops == NULL) {
    900		err = -EAFNOSUPPORT;
    901		NL_SET_ERR_MSG(extack, "Rule family not supported");
    902		goto errout;
    903	}
    904
    905	err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
    906				     fib_rule_policy, extack);
    907	if (err < 0) {
    908		NL_SET_ERR_MSG(extack, "Error parsing msg");
    909		goto errout;
    910	}
    911
    912	err = fib_nl2rule(skb, nlh, extack, ops, tb, &nlrule, &user_priority);
    913	if (err)
    914		goto errout;
    915
    916	rule = rule_find(ops, frh, tb, nlrule, user_priority);
    917	if (!rule) {
    918		err = -ENOENT;
    919		goto errout;
    920	}
    921
    922	if (rule->flags & FIB_RULE_PERMANENT) {
    923		err = -EPERM;
    924		goto errout;
    925	}
    926
    927	if (ops->delete) {
    928		err = ops->delete(rule);
    929		if (err)
    930			goto errout;
    931	}
    932
    933	if (rule->tun_id)
    934		ip_tunnel_unneed_metadata();
    935
    936	list_del_rcu(&rule->list);
    937
    938	if (rule->action == FR_ACT_GOTO) {
    939		ops->nr_goto_rules--;
    940		if (rtnl_dereference(rule->ctarget) == NULL)
    941			ops->unresolved_rules--;
    942	}
    943
     944	/*
     945	 * Check if any goto rules point at the rule being deleted. If so,
     946	 * repoint them to the next rule with the same preference, or mark
     947	 * them unresolved. As this walk is potentially expensive, it is
     948	 * only done if goto rules other than the one being deleted have
     949	 * actually been added.
     950	 */
    951	if (ops->nr_goto_rules > 0) {
    952		struct fib_rule *n;
    953
    954		n = list_next_entry(rule, list);
    955		if (&n->list == &ops->rules_list || n->pref != rule->pref)
    956			n = NULL;
    957		list_for_each_entry(r, &ops->rules_list, list) {
    958			if (rtnl_dereference(r->ctarget) != rule)
    959				continue;
    960			rcu_assign_pointer(r->ctarget, n);
    961			if (!n)
    962				ops->unresolved_rules++;
    963		}
    964	}
    965
    966	call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops,
    967				NULL);
    968	notify_rule_change(RTM_DELRULE, rule, ops, nlh,
    969			   NETLINK_CB(skb).portid);
    970	fib_rule_put(rule);
    971	flush_route_cache(ops);
    972	rules_ops_put(ops);
    973	kfree(nlrule);
    974	return 0;
    975
    976errout:
    977	kfree(nlrule);
    978	rules_ops_put(ops);
    979	return err;
    980}
    981EXPORT_SYMBOL_GPL(fib_nl_delrule);
    982
    983static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
    984					 struct fib_rule *rule)
    985{
    986	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
    987			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
    988			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
    989			 + nla_total_size(4) /* FRA_PRIORITY */
    990			 + nla_total_size(4) /* FRA_TABLE */
    991			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
    992			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
    993			 + nla_total_size(4) /* FRA_FWMARK */
    994			 + nla_total_size(4) /* FRA_FWMASK */
    995			 + nla_total_size_64bit(8) /* FRA_TUN_ID */
    996			 + nla_total_size(sizeof(struct fib_kuid_range))
    997			 + nla_total_size(1) /* FRA_PROTOCOL */
    998			 + nla_total_size(1) /* FRA_IP_PROTO */
    999			 + nla_total_size(sizeof(struct fib_rule_port_range)) /* FRA_SPORT_RANGE */
   1000			 + nla_total_size(sizeof(struct fib_rule_port_range)); /* FRA_DPORT_RANGE */
   1001
   1002	if (ops->nlmsg_payload)
   1003		payload += ops->nlmsg_payload(rule);
   1004
   1005	return payload;
   1006}
   1007
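        /* Fill one RTM_{NEW,DEL}RULE message describing @rule into @skb. */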
   1008static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
   1009			    u32 pid, u32 seq, int type, int flags,
   1010			    struct fib_rules_ops *ops)
   1011{
   1012	struct nlmsghdr *nlh;
   1013	struct fib_rule_hdr *frh;
   1014
   1015	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
   1016	if (nlh == NULL)
   1017		return -EMSGSIZE;
   1018
   1019	frh = nlmsg_data(nlh);
   1020	frh->family = ops->family;
   1021	frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
   1022	if (nla_put_u32(skb, FRA_TABLE, rule->table))
   1023		goto nla_put_failure;
   1024	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
   1025		goto nla_put_failure;
   1026	frh->res1 = 0;
   1027	frh->res2 = 0;
   1028	frh->action = rule->action;
   1029	frh->flags = rule->flags;
   1030
   1031	if (nla_put_u8(skb, FRA_PROTOCOL, rule->proto))
   1032		goto nla_put_failure;
   1033
   1034	if (rule->action == FR_ACT_GOTO &&
   1035	    rcu_access_pointer(rule->ctarget) == NULL)
   1036		frh->flags |= FIB_RULE_UNRESOLVED;
   1037
   1038	if (rule->iifname[0]) {
   1039		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
   1040			goto nla_put_failure;
   1041		if (rule->iifindex == -1)
   1042			frh->flags |= FIB_RULE_IIF_DETACHED;
   1043	}
   1044
   1045	if (rule->oifname[0]) {
   1046		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
   1047			goto nla_put_failure;
   1048		if (rule->oifindex == -1)
   1049			frh->flags |= FIB_RULE_OIF_DETACHED;
   1050	}
   1051
   1052	if ((rule->pref &&
   1053	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
   1054	    (rule->mark &&
   1055	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
   1056	    ((rule->mark_mask || rule->mark) &&
   1057	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
   1058	    (rule->target &&
   1059	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
   1060	    (rule->tun_id &&
   1061	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
   1062	    (rule->l3mdev &&
   1063	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
   1064	    (uid_range_set(&rule->uid_range) &&
   1065	     nla_put_uid_range(skb, &rule->uid_range)) ||
   1066	    (fib_rule_port_range_set(&rule->sport_range) &&
   1067	     nla_put_port_range(skb, FRA_SPORT_RANGE, &rule->sport_range)) ||
   1068	    (fib_rule_port_range_set(&rule->dport_range) &&
   1069	     nla_put_port_range(skb, FRA_DPORT_RANGE, &rule->dport_range)) ||
   1070	    (rule->ip_proto && nla_put_u8(skb, FRA_IP_PROTO, rule->ip_proto)))
   1071		goto nla_put_failure;
   1072
   1073	if (rule->suppress_ifgroup != -1) {
   1074		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
   1075			goto nla_put_failure;
   1076	}
   1077
   1078	if (ops->fill(rule, skb, frh) < 0)
   1079		goto nla_put_failure;
   1080
   1081	nlmsg_end(skb, nlh);
   1082	return 0;
   1083
   1084nla_put_failure:
   1085	nlmsg_cancel(skb, nlh);
   1086	return -EMSGSIZE;
   1087}
   1088
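        /* Dump the rules of a single family, resuming at cb->args[1]. */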
   1089static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
   1090		      struct fib_rules_ops *ops)
   1091{
   1092	int idx = 0;
   1093	struct fib_rule *rule;
   1094	int err = 0;
   1095
   1096	rcu_read_lock();
   1097	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
   1098		if (idx < cb->args[1])
   1099			goto skip;
   1100
   1101		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
   1102				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
   1103				       NLM_F_MULTI, ops);
   1104		if (err)
   1105			break;
   1106skip:
   1107		idx++;
   1108	}
   1109	rcu_read_unlock();
   1110	cb->args[1] = idx;
   1111	rules_ops_put(ops);
   1112
   1113	return err;
   1114}
   1115
   1116static int fib_valid_dumprule_req(const struct nlmsghdr *nlh,
   1117				   struct netlink_ext_ack *extack)
   1118{
   1119	struct fib_rule_hdr *frh;
   1120
   1121	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
   1122		NL_SET_ERR_MSG(extack, "Invalid header for fib rule dump request");
   1123		return -EINVAL;
   1124	}
   1125
   1126	frh = nlmsg_data(nlh);
   1127	if (frh->dst_len || frh->src_len || frh->tos || frh->table ||
   1128	    frh->res1 || frh->res2 || frh->action || frh->flags) {
   1129		NL_SET_ERR_MSG(extack,
   1130			       "Invalid values in header for fib rule dump request");
   1131		return -EINVAL;
   1132	}
   1133
   1134	if (nlmsg_attrlen(nlh, sizeof(*frh))) {
   1135		NL_SET_ERR_MSG(extack, "Invalid data after header in fib rule dump request");
   1136		return -EINVAL;
   1137	}
   1138
   1139	return 0;
   1140}
   1141
   1142static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
   1143{
   1144	const struct nlmsghdr *nlh = cb->nlh;
   1145	struct net *net = sock_net(skb->sk);
   1146	struct fib_rules_ops *ops;
   1147	int idx = 0, family;
   1148
   1149	if (cb->strict_check) {
   1150		int err = fib_valid_dumprule_req(nlh, cb->extack);
   1151
   1152		if (err < 0)
   1153			return err;
   1154	}
   1155
   1156	family = rtnl_msg_family(nlh);
   1157	if (family != AF_UNSPEC) {
   1158		/* Protocol specific dump request */
   1159		ops = lookup_rules_ops(net, family);
   1160		if (ops == NULL)
   1161			return -EAFNOSUPPORT;
   1162
   1163		dump_rules(skb, cb, ops);
   1164
   1165		return skb->len;
   1166	}
   1167
   1168	rcu_read_lock();
   1169	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
   1170		if (idx < cb->args[0] || !try_module_get(ops->owner))
   1171			goto skip;
   1172
   1173		if (dump_rules(skb, cb, ops) < 0)
   1174			break;
   1175
   1176		cb->args[1] = 0;
   1177skip:
   1178		idx++;
   1179	}
   1180	rcu_read_unlock();
   1181	cb->args[0] = idx;
   1182
   1183	return skb->len;
   1184}
   1185
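        /* Multicast an RTM_NEWRULE/RTM_DELRULE notification to the
         * ops->nlgroup listeners. */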
   1186static void notify_rule_change(int event, struct fib_rule *rule,
   1187			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
   1188			       u32 pid)
   1189{
   1190	struct net *net;
   1191	struct sk_buff *skb;
   1192	int err = -ENOMEM;
   1193
   1194	net = ops->fro_net;
   1195	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
   1196	if (skb == NULL)
   1197		goto errout;
   1198
   1199	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
   1200	if (err < 0) {
   1201		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
   1202		WARN_ON(err == -EMSGSIZE);
   1203		kfree_skb(skb);
   1204		goto errout;
   1205	}
   1206
   1207	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
   1208	return;
   1209errout:
   1210	if (err < 0)
   1211		rtnl_set_sk_err(net, ops->nlgroup, err);
   1212}
   1213
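        /* Bind rules that reference this device by name to its ifindex;
         * detach_rules() below clears the binding again. */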
   1214static void attach_rules(struct list_head *rules, struct net_device *dev)
   1215{
   1216	struct fib_rule *rule;
   1217
   1218	list_for_each_entry(rule, rules, list) {
   1219		if (rule->iifindex == -1 &&
   1220		    strcmp(dev->name, rule->iifname) == 0)
   1221			rule->iifindex = dev->ifindex;
   1222		if (rule->oifindex == -1 &&
   1223		    strcmp(dev->name, rule->oifname) == 0)
   1224			rule->oifindex = dev->ifindex;
   1225	}
   1226}
   1227
   1228static void detach_rules(struct list_head *rules, struct net_device *dev)
   1229{
   1230	struct fib_rule *rule;
   1231
   1232	list_for_each_entry(rule, rules, list) {
   1233		if (rule->iifindex == dev->ifindex)
   1234			rule->iifindex = -1;
   1235		if (rule->oifindex == dev->ifindex)
   1236			rule->oifindex = -1;
   1237	}
   1238}
   1239
   1240
   1241static int fib_rules_event(struct notifier_block *this, unsigned long event,
   1242			   void *ptr)
   1243{
   1244	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
   1245	struct net *net = dev_net(dev);
   1246	struct fib_rules_ops *ops;
   1247
   1248	ASSERT_RTNL();
   1249
   1250	switch (event) {
   1251	case NETDEV_REGISTER:
   1252		list_for_each_entry(ops, &net->rules_ops, list)
   1253			attach_rules(&ops->rules_list, dev);
   1254		break;
   1255
   1256	case NETDEV_CHANGENAME:
   1257		list_for_each_entry(ops, &net->rules_ops, list) {
   1258			detach_rules(&ops->rules_list, dev);
   1259			attach_rules(&ops->rules_list, dev);
   1260		}
   1261		break;
   1262
   1263	case NETDEV_UNREGISTER:
   1264		list_for_each_entry(ops, &net->rules_ops, list)
   1265			detach_rules(&ops->rules_list, dev);
   1266		break;
   1267	}
   1268
   1269	return NOTIFY_DONE;
   1270}
   1271
   1272static struct notifier_block fib_rules_notifier = {
   1273	.notifier_call = fib_rules_event,
   1274};
   1275
   1276static int __net_init fib_rules_net_init(struct net *net)
   1277{
   1278	INIT_LIST_HEAD(&net->rules_ops);
   1279	spin_lock_init(&net->rules_mod_lock);
   1280	return 0;
   1281}
   1282
   1283static void __net_exit fib_rules_net_exit(struct net *net)
   1284{
   1285	WARN_ON_ONCE(!list_empty(&net->rules_ops));
   1286}
   1287
   1288static struct pernet_operations fib_rules_net_ops = {
   1289	.init = fib_rules_net_init,
   1290	.exit = fib_rules_net_exit,
   1291};
   1292
   1293static int __init fib_rules_init(void)
   1294{
   1295	int err;
   1296	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, 0);
   1297	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, 0);
   1298	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, 0);
   1299
   1300	err = register_pernet_subsys(&fib_rules_net_ops);
   1301	if (err < 0)
   1302		goto fail;
   1303
   1304	err = register_netdevice_notifier(&fib_rules_notifier);
   1305	if (err < 0)
   1306		goto fail_unregister;
   1307
   1308	return 0;
   1309
   1310fail_unregister:
   1311	unregister_pernet_subsys(&fib_rules_net_ops);
   1312fail:
   1313	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
   1314	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
   1315	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
   1316	return err;
   1317}
   1318
   1319subsys_initcall(fib_rules_init);