ila_xlat.c (14026B)
// SPDX-License-Identifier: GPL-2.0
#include <linux/jhash.h>
#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/vmalloc.h>
#include <net/genetlink.h>
#include <net/ila.h>
#include <net/netns/generic.h>
#include <uapi/linux/genetlink.h>
#include "ila.h"

struct ila_xlat_params {
	struct ila_params ip;
	int ifindex;
};

struct ila_map {
	struct ila_xlat_params xp;
	struct rhash_head node;
	struct ila_map __rcu *next;
	struct rcu_head rcu;
};

#define MAX_LOCKS 1024
#define LOCKS_PER_CPU 10

static int alloc_ila_locks(struct ila_net *ilan)
{
	return alloc_bucket_spinlocks(&ilan->xlat.locks, &ilan->xlat.locks_mask,
				      MAX_LOCKS, LOCKS_PER_CPU,
				      GFP_KERNEL);
}

static u32 hashrnd __read_mostly;
static __always_inline void __ila_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static inline u32 ila_locator_hash(struct ila_locator loc)
{
	u32 *v = (u32 *)loc.v32;

	__ila_hash_secret_init();
	return jhash_2words(v[0], v[1], hashrnd);
}

static inline spinlock_t *ila_get_lock(struct ila_net *ilan,
				       struct ila_locator loc)
{
	return &ilan->xlat.locks[ila_locator_hash(loc) & ilan->xlat.locks_mask];
}

static inline int ila_cmp_wildcards(struct ila_map *ila,
				    struct ila_addr *iaddr, int ifindex)
{
	return (ila->xp.ifindex && ila->xp.ifindex != ifindex);
}

static inline int ila_cmp_params(struct ila_map *ila,
				 struct ila_xlat_params *xp)
{
	return (ila->xp.ifindex != xp->ifindex);
}

static int ila_cmpfn(struct rhashtable_compare_arg *arg,
		     const void *obj)
{
	const struct ila_map *ila = obj;

	return (ila->xp.ip.locator_match.v64 != *(__be64 *)arg->key);
}

static inline int ila_order(struct ila_map *ila)
{
	int score = 0;

	if (ila->xp.ifindex)
		score += 1 << 1;

	return score;
}

static const struct rhashtable_params rht_params = {
	.nelem_hint = 1024,
	.head_offset = offsetof(struct ila_map, node),
	.key_offset = offsetof(struct ila_map, xp.ip.locator_match),
	.key_len = sizeof(u64), /* identifier */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
	.obj_cmpfn = ila_cmpfn,
};

static int parse_nl_config(struct genl_info *info,
			   struct ila_xlat_params *xp)
{
	memset(xp, 0, sizeof(*xp));

	if (info->attrs[ILA_ATTR_LOCATOR])
		xp->ip.locator.v64 = (__force __be64)nla_get_u64(
			info->attrs[ILA_ATTR_LOCATOR]);

	if (info->attrs[ILA_ATTR_LOCATOR_MATCH])
		xp->ip.locator_match.v64 = (__force __be64)nla_get_u64(
			info->attrs[ILA_ATTR_LOCATOR_MATCH]);

	if (info->attrs[ILA_ATTR_CSUM_MODE])
		xp->ip.csum_mode = nla_get_u8(info->attrs[ILA_ATTR_CSUM_MODE]);
	else
		xp->ip.csum_mode = ILA_CSUM_NO_ACTION;

	if (info->attrs[ILA_ATTR_IDENT_TYPE])
		xp->ip.ident_type = nla_get_u8(
			info->attrs[ILA_ATTR_IDENT_TYPE]);
	else
		xp->ip.ident_type = ILA_ATYPE_USE_FORMAT;

	if (info->attrs[ILA_ATTR_IFINDEX])
		xp->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]);

	return 0;
}

/* Must be called with rcu readlock */
static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr,
						   int ifindex,
						   struct ila_net *ilan)
{
	struct ila_map *ila;

	ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &iaddr->loc,
				     rht_params);
	while (ila) {
		if (!ila_cmp_wildcards(ila, iaddr, ifindex))
			return ila;
		ila = rcu_access_pointer(ila->next);
	}

	return NULL;
}
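/* Note on the table layout: the xlat table is an rhashtable keyed on the
 * 64-bit locator_match value (see rht_params above).  Mappings that share
 * a locator are chained off the head entry through the RCU-protected
 * ->next pointer, kept in decreasing ila_order() so that entries bound to
 * a specific ifindex sort ahead of wildcard ones.  ila_lookup_wildcards()
 * therefore returns the most specific mapping that accepts the packet's
 * ingress device.
 */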
/* Must be called with rcu readlock */
static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp,
						   struct ila_net *ilan)
{
	struct ila_map *ila;

	ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				     &xp->ip.locator_match,
				     rht_params);
	while (ila) {
		if (!ila_cmp_params(ila, xp))
			return ila;
		ila = rcu_access_pointer(ila->next);
	}

	return NULL;
}

static inline void ila_release(struct ila_map *ila)
{
	kfree_rcu(ila, rcu);
}

static void ila_free_node(struct ila_map *ila)
{
	struct ila_map *next;

	/* Assume rcu_readlock held */
	while (ila) {
		next = rcu_access_pointer(ila->next);
		ila_release(ila);
		ila = next;
	}
}

static void ila_free_cb(void *ptr, void *arg)
{
	ila_free_node((struct ila_map *)ptr);
}

static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila);

static unsigned int
ila_nf_input(void *priv,
	     struct sk_buff *skb,
	     const struct nf_hook_state *state)
{
	ila_xlat_addr(skb, false);
	return NF_ACCEPT;
}

static const struct nf_hook_ops ila_nf_hook_ops[] = {
	{
		.hook = ila_nf_input,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = -1,
	},
};

static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_map *ila, *head;
	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
	int err = 0, order;

	if (!ilan->xlat.hooks_registered) {
		/* We defer registering net hooks in the namespace until the
		 * first mapping is added.
		 */
		err = nf_register_net_hooks(net, ila_nf_hook_ops,
					    ARRAY_SIZE(ila_nf_hook_ops));
		if (err)
			return err;

		ilan->xlat.hooks_registered = true;
	}

	ila = kzalloc(sizeof(*ila), GFP_KERNEL);
	if (!ila)
		return -ENOMEM;

	ila_init_saved_csum(&xp->ip);

	ila->xp = *xp;

	order = ila_order(ila);

	spin_lock(lock);

	head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				      &xp->ip.locator_match,
				      rht_params);
	if (!head) {
		/* New entry for the rhash_table */
		err = rhashtable_lookup_insert_fast(&ilan->xlat.rhash_table,
						    &ila->node, rht_params);
	} else {
		struct ila_map *tila = head, *prev = NULL;

		do {
			if (!ila_cmp_params(tila, xp)) {
				err = -EEXIST;
				goto out;
			}

			if (order > ila_order(tila))
				break;

			prev = tila;
			tila = rcu_dereference_protected(tila->next,
							 lockdep_is_held(lock));
		} while (tila);

		if (prev) {
			/* Insert in sub list of head */
			RCU_INIT_POINTER(ila->next, tila);
			rcu_assign_pointer(prev->next, ila);
		} else {
			/* Make this ila new head */
			RCU_INIT_POINTER(ila->next, head);
			err = rhashtable_replace_fast(&ilan->xlat.rhash_table,
						      &head->node,
						      &ila->node, rht_params);
			if (err)
				goto out;
		}
	}

out:
	spin_unlock(lock);

	if (err)
		kfree(ila);

	return err;
}
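/* Insertion above is serialized by the bucket spinlock from ila_get_lock()
 * while readers walk the chain under RCU: a duplicate (same locator_match
 * and ifindex) is rejected with -EEXIST, a more specific entry replaces
 * the current rhashtable head via rhashtable_replace_fast(), and anything
 * else is linked into the sub list with RCU_INIT_POINTER() and
 * rcu_assign_pointer() so that concurrent lookups always see a consistent
 * list.
 */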
static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_map *ila, *head, *prev;
	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
	int err = -ENOENT;

	spin_lock(lock);

	head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				      &xp->ip.locator_match, rht_params);
	ila = head;

	prev = NULL;

	while (ila) {
		if (ila_cmp_params(ila, xp)) {
			prev = ila;
			ila = rcu_dereference_protected(ila->next,
							lockdep_is_held(lock));
			continue;
		}

		err = 0;

		if (prev) {
			/* Not head, just delete from list */
			rcu_assign_pointer(prev->next, ila->next);
		} else {
			/* It is the head. If there is something in the
			 * sublist we need to make a new head.
			 */
			head = rcu_dereference_protected(ila->next,
							 lockdep_is_held(lock));
			if (head) {
				/* Put first entry in the sublist into the
				 * table
				 */
				err = rhashtable_replace_fast(
					&ilan->xlat.rhash_table, &ila->node,
					&head->node, rht_params);
				if (err)
					goto out;
			} else {
				/* Entry no longer used */
				err = rhashtable_remove_fast(
						&ilan->xlat.rhash_table,
						&ila->node, rht_params);
			}
		}

		ila_release(ila);

		break;
	}

out:
	spin_unlock(lock);

	return err;
}

int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_xlat_params p;
	int err;

	err = parse_nl_config(info, &p);
	if (err)
		return err;

	return ila_add_mapping(net, &p);
}

int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_xlat_params xp;
	int err;

	err = parse_nl_config(info, &xp);
	if (err)
		return err;

	ila_del_mapping(net, &xp);

	return 0;
}

static inline spinlock_t *lock_from_ila_map(struct ila_net *ilan,
					    struct ila_map *ila)
{
	return ila_get_lock(ilan, ila->xp.ip.locator_match);
}

int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct rhashtable_iter iter;
	struct ila_map *ila;
	spinlock_t *lock;
	int ret = 0;

	rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter);
	rhashtable_walk_start(&iter);

	for (;;) {
		ila = rhashtable_walk_next(&iter);

		if (IS_ERR(ila)) {
			if (PTR_ERR(ila) == -EAGAIN)
				continue;
			ret = PTR_ERR(ila);
			goto done;
		} else if (!ila) {
			break;
		}

		lock = lock_from_ila_map(ilan, ila);

		spin_lock(lock);

		ret = rhashtable_remove_fast(&ilan->xlat.rhash_table,
					     &ila->node, rht_params);
		if (!ret)
			ila_free_node(ila);

		spin_unlock(lock);

		if (ret)
			break;
	}

done:
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
	return ret;
}
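/* The attributes emitted below by ila_fill_info() mirror what
 * parse_nl_config() accepts from userspace:
 *
 *	ILA_ATTR_LOCATOR	(u64)	locator written into the address
 *	ILA_ATTR_LOCATOR_MATCH	(u64)	locator to match, the rhashtable key
 *	ILA_ATTR_IFINDEX	(s32)	optional input device restriction
 *	ILA_ATTR_CSUM_MODE	(u8)	defaults to ILA_CSUM_NO_ACTION
 *	ILA_ATTR_IDENT_TYPE	(u8)	defaults to ILA_ATYPE_USE_FORMAT
 */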
static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg)
{
	if (nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR,
			      (__force u64)ila->xp.ip.locator.v64,
			      ILA_ATTR_PAD) ||
	    nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH,
			      (__force u64)ila->xp.ip.locator_match.v64,
			      ILA_ATTR_PAD) ||
	    nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->xp.ifindex) ||
	    nla_put_u8(msg, ILA_ATTR_CSUM_MODE, ila->xp.ip.csum_mode) ||
	    nla_put_u8(msg, ILA_ATTR_IDENT_TYPE, ila->xp.ip.ident_type))
		return -1;

	return 0;
}

static int ila_dump_info(struct ila_map *ila,
			 u32 portid, u32 seq, u32 flags,
			 struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &ila_nl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (ila_fill_info(ila, skb) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct sk_buff *msg;
	struct ila_xlat_params xp;
	struct ila_map *ila;
	int ret;

	ret = parse_nl_config(info, &xp);
	if (ret)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rcu_read_lock();

	ila = ila_lookup_by_params(&xp, ilan);
	if (ila) {
		ret = ila_dump_info(ila,
				    info->snd_portid,
				    info->snd_seq, 0, msg,
				    info->genlhdr->cmd);
	}

	rcu_read_unlock();

	if (ret < 0)
		goto out_free;

	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
	return ret;
}

struct ila_dump_iter {
	struct rhashtable_iter rhiter;
	int skip;
};

int ila_xlat_nl_dump_start(struct netlink_callback *cb)
{
	struct net *net = sock_net(cb->skb->sk);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_dump_iter *iter;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter->rhiter);

	iter->skip = 0;
	cb->args[0] = (long)iter;

	return 0;
}

int ila_xlat_nl_dump_done(struct netlink_callback *cb)
{
	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];

	rhashtable_walk_exit(&iter->rhiter);

	kfree(iter);

	return 0;
}
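/* Dump resumption: ila_xlat_nl_dump() below keeps its rhashtable iterator
 * across netlink dump calls in cb->args[0] (set up in
 * ila_xlat_nl_dump_start() above), and iter->skip remembers how many
 * entries of the current chain were already emitted so a resumed dump can
 * step past them.  If the walker reports -EAGAIN, the table changed
 * underneath the walk and the error is passed back to the application even
 * if part of the dump was already written.
 */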
int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
	struct rhashtable_iter *rhiter = &iter->rhiter;
	int skip = iter->skip;
	struct ila_map *ila;
	int ret;

	rhashtable_walk_start(rhiter);

	/* Get first entry */
	ila = rhashtable_walk_peek(rhiter);

	if (ila && !IS_ERR(ila) && skip) {
		/* Skip over visited entries */

		while (ila && skip) {
			/* Skip over any ila entries in this list that we
			 * have already dumped.
			 */
			ila = rcu_access_pointer(ila->next);
			skip--;
		}
	}

	skip = 0;

	for (;;) {
		if (IS_ERR(ila)) {
			ret = PTR_ERR(ila);
			if (ret == -EAGAIN) {
				/* Table has changed and iter has reset. Return
				 * -EAGAIN to the application even if we have
				 * written data to the skb. The application
				 * needs to deal with this.
				 */

				goto out_ret;
			} else {
				break;
			}
		} else if (!ila) {
			ret = 0;
			break;
		}

		while (ila) {
			ret = ila_dump_info(ila, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq, NLM_F_MULTI,
					    skb, ILA_CMD_GET);
			if (ret)
				goto out;

			skip++;
			ila = rcu_access_pointer(ila->next);
		}

		skip = 0;
		ila = rhashtable_walk_next(rhiter);
	}

out:
	iter->skip = skip;
	ret = (skb->len ? : ret);

out_ret:
	rhashtable_walk_stop(rhiter);
	return ret;
}

int ila_xlat_init_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	int err;

	err = alloc_ila_locks(ilan);
	if (err)
		return err;

	err = rhashtable_init(&ilan->xlat.rhash_table, &rht_params);
	if (err) {
		free_bucket_spinlocks(ilan->xlat.locks);
		return err;
	}

	return 0;
}

void ila_xlat_exit_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);

	rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);

	free_bucket_spinlocks(ilan->xlat.locks);

	if (ilan->xlat.hooks_registered)
		nf_unregister_net_hooks(net, ila_nf_hook_ops,
					ARRAY_SIZE(ila_nf_hook_ops));
}

static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
{
	struct ila_map *ila;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);

	/* Assumes skb contains a valid IPv6 header that is pulled */

	/* No check here that ILA type in the mapping matches what is in the
	 * address. We assume that whatever the sender gave us can be
	 * translated. The checksum mode however is relevant.
	 */

	rcu_read_lock();

	ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, ilan);
	if (ila)
		ila_update_ipv6_locator(skb, &ila->xp.ip, sir2ila);

	rcu_read_unlock();

	return 0;
}
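/* Datapath summary: the netfilter hook at NF_INET_PRE_ROUTING (registered
 * lazily when the first mapping is added) feeds IPv6 packets through
 * ila_xlat_addr(), which looks up the destination locator under RCU and,
 * on a hit, rewrites it with ila_update_ipv6_locator().  The table itself
 * is managed from userspace over the ILA generic netlink family, e.g. via
 * iproute2's "ip ila" front end.
 */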