/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * net/sched/cls_rsvp.h	Template file for RSVPv[46] classifiers.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

/*
   Compared to the general packet classification problem,
   RSVP needs only a few relatively simple rules:

   * (dst, protocol) are always specified,
     so we are able to hash on them.
   * src may be exact or wildcard, so we can keep a hash table
     plus one wildcard entry.
   * the source port (or flow label) matters only if src is given.

   IMPLEMENTATION.

   We use a two level hash table: the top level is keyed by
   destination address and protocol ID; every bucket contains a list
   of "rsvp sessions", identified by destination address, protocol and
   DPI (= "Destination Port ID"): the triple (key, mask, offset).

   Every session has a smaller hash table keyed by source address
   (cf. RSVP flowspec) and one wildcard entry for wildcard reservations.
   Every bucket of that table is again a list of "RSVP flows", selected by
   source address and SPI (= "Source Port ID" here, rather than
   "security parameter index"): the triple (key, mask, offset).


   NOTE 1. All packets with IPv6 extension headers (except AH and ESP)
   and all fragmented packets go to the best-effort traffic class.


   NOTE 2. Two "port id"s seem to be redundant; rfc2207 requires
   only one "Generalized Port Identifier". So for classic
   ah, esp (and udp, tcp) both *pi should coincide, or one of them
   should be wildcard.

   At first sight this redundancy is just a waste of CPU
   resources. But DPI and SPI add the possibility to assign different
   priorities to GPIs. See also note 4 about tunnels below.


   NOTE 3. One complication is the case of tunneled packets.
   We implement it as follows: if the first lookup
   matches a special session with a non-zero "tunnelhdr" value,
   flowid doesn't contain the true flow ID, but the tunnel ID (1...255).
   In this case, we pull tunnelhdr bytes and restart the lookup
   with the tunnel ID added to the list of keys. Simple and stupid 8)8)
   It's enough for PIMREG and IPIP.


   NOTE 4. Two GPIs make it possible to parse even GRE packets.
   E.g. DPI can select ETH_P_IP (and the necessary flags to make
   tunnelhdr correct) in the GRE protocol field, while SPI matches the
   GRE key. Is it not nice? 8)8)


   Well, as a result, despite its simplicity, we get a pretty
   powerful classification engine.
 */
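
/* A few practical notes, derived from the code below; the concrete numbers
 * are only illustrative examples, not values mandated anywhere.
 *
 * GPI matching.  A GPI (struct tc_rsvp_gpi) is matched against the raw
 * transport header as
 *
 *	(gpi.mask & (*(u32 *)(xprt + gpi.offset) ^ gpi.key)) == 0
 *
 * i.e. key and mask are compared byte-for-byte against the packet, so both
 * are kept in network byte order.  For example, assuming a UDP or TCP
 * transport header (source port in the upper 16 bits and destination port
 * in the lower 16 bits of the first 32-bit word), a DPI that matches
 * destination port 5000 and ignores the source port would presumably be
 *
 *	offset = 0, key = htonl(5000), mask = htonl(0x0000FFFF)
 *
 * Handle layout.  gen_handle()/rsvp_get() encode the hash position into the
 * filter handle:
 *
 *	bits  0..7	destination bucket h1 (hash_dst())
 *	bits  8..15	source bucket h2 (hash_src()), 16 = wildcard source
 *	bits 16..31	per-head generator, bumped by 0x10000 per new filter
 *
 * so e.g. handle 0x00021003 means "second generated handle, wildcard
 * source slot, destination bucket 3".
 *
 * Tunnels.  When a filter with tunnelhdr != 0 matches, f->res.classid is
 * reused as an 8-bit tunnel ID (allocated from the tmap bitmap by
 * gen_tunnel()), and classification restarts at
 * xprt + tunnelhdr - sizeof(*nhptr); for plain IPIP, tunnelhdr would
 * presumably be sizeof(struct iphdr), so the inner header becomes the new
 * nhptr.
 */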

struct rsvp_head {
	u32			tmap[256/32];
	u32			hgenerator;
	u8			tgenerator;
	struct rsvp_session __rcu *ht[256];
	struct rcu_head		rcu;
};

struct rsvp_session {
	struct rsvp_session __rcu	*next;
	__be32				dst[RSVP_DST_LEN];
	struct tc_rsvp_gpi		dpi;
	u8				protocol;
	u8				tunnelid;
	/* 16 (src,sport) hash slots, and one wildcard source slot */
	struct rsvp_filter __rcu	*ht[16 + 1];
	struct rcu_head			rcu;
};


struct rsvp_filter {
	struct rsvp_filter __rcu	*next;
	__be32				src[RSVP_DST_LEN];
	struct tc_rsvp_gpi		spi;
	u8				tunnelhdr;

	struct tcf_result		res;
	struct tcf_exts			exts;

	u32				handle;
	struct rsvp_session		*sess;
	struct rcu_work			rwork;
};

static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
{
	unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];

	h ^= h>>16;
	h ^= h>>8;
	return (h ^ protocol ^ tunnelid) & 0xFF;
}

static inline unsigned int hash_src(__be32 *src)
{
	unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];

	h ^= h>>16;
	h ^= h>>8;
	h ^= h>>4;
	return h & 0xF;
}

#define RSVP_APPLY_RESULT()				\
{							\
	int r = tcf_exts_exec(skb, &f->exts, res);	\
	if (r < 0)					\
		continue;				\
	else if (r > 0)					\
		return r;				\
}

static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct rsvp_head *head = rcu_dereference_bh(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter *f;
	unsigned int h1, h2;
	__be32 *dst, *src;
	u8 protocol;
	u8 tunnelid = 0;
	u8 *xprt;
#if RSVP_DST_LEN == 4
	struct ipv6hdr *nhptr;

	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
		return -1;
	nhptr = ipv6_hdr(skb);
#else
	struct iphdr *nhptr;

	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
		return -1;
	nhptr = ip_hdr(skb);
#endif
restart:

#if RSVP_DST_LEN == 4
	src = &nhptr->saddr.s6_addr32[0];
	dst = &nhptr->daddr.s6_addr32[0];
	protocol = nhptr->nexthdr;
	xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
#else
	src = &nhptr->saddr;
	dst = &nhptr->daddr;
	protocol = nhptr->protocol;
	xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
	if (ip_is_fragment(nhptr))
		return -1;
#endif

	h1 = hash_dst(dst, protocol, tunnelid);
	h2 = hash_src(src);

	for (s = rcu_dereference_bh(head->ht[h1]); s;
	     s = rcu_dereference_bh(s->next)) {
		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
		    protocol == s->protocol &&
		    !(s->dpi.mask &
		      (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
#if RSVP_DST_LEN == 4
		    dst[0] == s->dst[0] &&
		    dst[1] == s->dst[1] &&
		    dst[2] == s->dst[2] &&
#endif
		    tunnelid == s->tunnelid) {

			for (f = rcu_dereference_bh(s->ht[h2]); f;
			     f = rcu_dereference_bh(f->next)) {
				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
				    !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
#if RSVP_DST_LEN == 4
				    &&
				    src[0] == f->src[0] &&
				    src[1] == f->src[1] &&
				    src[2] == f->src[2]
#endif
				    ) {
					*res = f->res;
					RSVP_APPLY_RESULT();

matched:
					if (f->tunnelhdr == 0)
						return 0;

					tunnelid = f->res.classid;
					nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
					goto restart;
				}
			}
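
			/* No (src, SPI)-specific flow matched above; a hit
			 * in the wildcard slot below still passes through
			 * the 'matched' label, so wildcard filters can carry
			 * a tunnelhdr/tunnel ID just like specific ones.
			 */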
			/* And wildcard bucket... */
			for (f = rcu_dereference_bh(s->ht[16]); f;
			     f = rcu_dereference_bh(f->next)) {
				*res = f->res;
				RSVP_APPLY_RESULT();
				goto matched;
			}
			return -1;
		}
	}
	return -1;
}

static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter __rcu **ins;
	struct rsvp_filter *pins;
	unsigned int h1 = h & 0xFF;
	unsigned int h2 = (h >> 8) & 0xFF;

	for (s = rtnl_dereference(head->ht[h1]); s;
	     s = rtnl_dereference(s->next)) {
		for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ;
		     ins = &pins->next, pins = rtnl_dereference(*ins)) {
			if (pins->handle == h) {
				RCU_INIT_POINTER(n->next, pins->next);
				rcu_assign_pointer(*ins, n);
				return;
			}
		}
	}

	/* Something went wrong if we are trying to replace a non-existent
	 * node.  Might as well halt instead of silently failing.
	 */
	BUG_ON(1);
}

static void *rsvp_get(struct tcf_proto *tp, u32 handle)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter *f;
	unsigned int h1 = handle & 0xFF;
	unsigned int h2 = (handle >> 8) & 0xFF;

	if (h2 > 16)
		return NULL;

	for (s = rtnl_dereference(head->ht[h1]); s;
	     s = rtnl_dereference(s->next)) {
		for (f = rtnl_dereference(s->ht[h2]); f;
		     f = rtnl_dereference(f->next)) {
			if (f->handle == handle)
				return f;
		}
	}
	return NULL;
}

static int rsvp_init(struct tcf_proto *tp)
{
	struct rsvp_head *data;

	data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
	if (data) {
		rcu_assign_pointer(tp->root, data);
		return 0;
	}
	return -ENOBUFS;
}

static void __rsvp_delete_filter(struct rsvp_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void rsvp_delete_filter_work(struct work_struct *work)
{
	struct rsvp_filter *f = container_of(to_rcu_work(work),
					     struct rsvp_filter,
					     rwork);
	rtnl_lock();
	__rsvp_delete_filter(f);
	rtnl_unlock();
}

static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, rsvp_delete_filter_work);
	else
		__rsvp_delete_filter(f);
}

static void rsvp_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	int h1, h2;

	if (data == NULL)
		return;

	for (h1 = 0; h1 < 256; h1++) {
		struct rsvp_session *s;

		while ((s = rtnl_dereference(data->ht[h1])) != NULL) {
			RCU_INIT_POINTER(data->ht[h1], s->next);

			for (h2 = 0; h2 <= 16; h2++) {
				struct rsvp_filter *f;

				while ((f = rtnl_dereference(s->ht[h2])) != NULL) {
					rcu_assign_pointer(s->ht[h2], f->next);
					rsvp_delete_filter(tp, f);
				}
			}
			kfree_rcu(s, rcu);
		}
	}
	kfree_rcu(data, rcu);
}

static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_filter *nfp, *f = arg;
	struct rsvp_filter __rcu **fp;
	unsigned int h = f->handle;
	struct rsvp_session __rcu **sp;
	struct rsvp_session *nsp, *s = f->sess;
	int i, h1;

	fp = &s->ht[(h >> 8) & 0xFF];
	for (nfp = rtnl_dereference(*fp); nfp;
	     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
		if (nfp == f) {
			RCU_INIT_POINTER(*fp, f->next);
			rsvp_delete_filter(tp, f);

			/* Strip tree */

			for (i = 0; i <= 16; i++)
				if (s->ht[i])
					goto out;

			/* OK, session has no flows */
			sp = &head->ht[h & 0xFF];
			for (nsp = rtnl_dereference(*sp); nsp;
			     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
				if (nsp == s) {
					RCU_INIT_POINTER(*sp, s->next);
					kfree_rcu(s, rcu);
					goto out;
				}
			}

			break;
		}
	}

out:
	*last = true;
	for (h1 = 0; h1 < 256; h1++) {
		if (rcu_access_pointer(head->ht[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}

static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	int i = 0xFFFF;

	while (i-- > 0) {
		u32 h;

		if ((data->hgenerator += 0x10000) == 0)
			data->hgenerator = 0x10000;
		h = data->hgenerator|salt;
		if (!rsvp_get(tp, h))
			return h;
	}
	return 0;
}

static int tunnel_bts(struct rsvp_head *data)
{
	int n = data->tgenerator >> 5;
	u32 b = 1 << (data->tgenerator & 0x1F);

	if (data->tmap[n] & b)
		return 0;
	data->tmap[n] |= b;
	return 1;
}

static void tunnel_recycle(struct rsvp_head *data)
{
	struct rsvp_session __rcu **sht = data->ht;
	u32 tmap[256/32];
	int h1, h2;

	memset(tmap, 0, sizeof(tmap));

	for (h1 = 0; h1 < 256; h1++) {
		struct rsvp_session *s;
		for (s = rtnl_dereference(sht[h1]); s;
		     s = rtnl_dereference(s->next)) {
			for (h2 = 0; h2 <= 16; h2++) {
				struct rsvp_filter *f;

				for (f = rtnl_dereference(s->ht[h2]); f;
				     f = rtnl_dereference(f->next)) {
					if (f->tunnelhdr == 0)
						continue;
					data->tgenerator = f->res.classid;
					tunnel_bts(data);
				}
			}
		}
	}

	memcpy(data->tmap, tmap, sizeof(tmap));
}

static u32 gen_tunnel(struct rsvp_head *data)
{
	int i, k;

	for (k = 0; k < 2; k++) {
		for (i = 255; i > 0; i--) {
			if (++data->tgenerator == 0)
				data->tgenerator = 1;
			if (tunnel_bts(data))
				return data->tgenerator;
		}
		tunnel_recycle(data);
	}
	return 0;
}

static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
	[TCA_RSVP_CLASSID]	= { .type = NLA_U32 },
	[TCA_RSVP_DST]		= { .len = RSVP_DST_LEN * sizeof(u32) },
	[TCA_RSVP_SRC]		= { .len = RSVP_DST_LEN * sizeof(u32) },
	[TCA_RSVP_PINFO]	= { .len = sizeof(struct tc_rsvp_pinfo) },
};

static int rsvp_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	struct rsvp_filter *f, *nfp;
	struct rsvp_filter __rcu **fp;
	struct rsvp_session *nsp, *s;
	struct rsvp_session __rcu **sp;
	struct tc_rsvp_pinfo *pinfo = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_RSVP_MAX + 1];
	struct tcf_exts e;
	unsigned int h1, h2;
	__be32 *dst;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested_deprecated(tb, TCA_RSVP_MAX, opt, rsvp_policy,
					  NULL);
	if (err < 0)
		return err;

	err = tcf_exts_init(&e, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, flags,
				extack);
	if (err < 0)
		goto errout2;

	f = *arg;
	if (f) {
		/* Node exists: adjust only classid */
		struct rsvp_filter *n;

		if (f->handle != handle && handle)
			goto errout2;

		n = kmemdup(f, sizeof(*f), GFP_KERNEL);
		if (!n) {
			err = -ENOMEM;
			goto errout2;
		}

		err = tcf_exts_init(&n->exts, net, TCA_RSVP_ACT,
				    TCA_RSVP_POLICE);
		if (err < 0) {
			kfree(n);
			goto errout2;
		}

		if (tb[TCA_RSVP_CLASSID]) {
			n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
			tcf_bind_filter(tp, &n->res, base);
		}

		tcf_exts_change(&n->exts, &e);
		rsvp_replace(tp, n, handle);
		return 0;
	}

	/* Now more serious part... */
	err = -EINVAL;
	if (handle)
		goto errout2;
	if (tb[TCA_RSVP_DST] == NULL)
		goto errout2;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout2;

	err = tcf_exts_init(&f->exts, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
	if (err < 0)
		goto errout;
	h2 = 16;
	if (tb[TCA_RSVP_SRC]) {
		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
		h2 = hash_src(f->src);
	}
	if (tb[TCA_RSVP_PINFO]) {
		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
		f->spi = pinfo->spi;
		f->tunnelhdr = pinfo->tunnelhdr;
	}
	if (tb[TCA_RSVP_CLASSID])
		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);

	dst = nla_data(tb[TCA_RSVP_DST]);
	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);

	err = -ENOMEM;
	if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0)
		goto errout;

	if (f->tunnelhdr) {
		err = -EINVAL;
		if (f->res.classid > 255)
			goto errout;

		err = -ENOMEM;
		if (f->res.classid == 0 &&
		    (f->res.classid = gen_tunnel(data)) == 0)
			goto errout;
	}

	for (sp = &data->ht[h1];
	     (s = rtnl_dereference(*sp)) != NULL;
	     sp = &s->next) {
		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
		    pinfo && pinfo->protocol == s->protocol &&
		    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
#if RSVP_DST_LEN == 4
		    dst[0] == s->dst[0] &&
		    dst[1] == s->dst[1] &&
		    dst[2] == s->dst[2] &&
#endif
		    pinfo->tunnelid == s->tunnelid) {

insert:
			/* OK, we found appropriate session */

			fp = &s->ht[h2];

			f->sess = s;
			if (f->tunnelhdr == 0)
				tcf_bind_filter(tp, &f->res, base);

			tcf_exts_change(&f->exts, &e);

			fp = &s->ht[h2];
			for (nfp = rtnl_dereference(*fp); nfp;
			     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
				__u32 mask = nfp->spi.mask & f->spi.mask;

				if (mask != f->spi.mask)
					break;
			}
			RCU_INIT_POINTER(f->next, nfp);
			rcu_assign_pointer(*fp, f);

			*arg = f;
			return 0;
		}
	}

	/* No session found. Create new one. */

	err = -ENOBUFS;
	s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
	if (s == NULL)
		goto errout;
	memcpy(s->dst, dst, sizeof(s->dst));

	if (pinfo) {
		s->dpi = pinfo->dpi;
		s->protocol = pinfo->protocol;
		s->tunnelid = pinfo->tunnelid;
	}
	sp = &data->ht[h1];
	for (nsp = rtnl_dereference(*sp); nsp;
	     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
		if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask)
			break;
	}
	RCU_INIT_POINTER(s->next, nsp);
	rcu_assign_pointer(*sp, s);

	goto insert;

errout:
	tcf_exts_destroy(&f->exts);
	kfree(f);
errout2:
	tcf_exts_destroy(&e);
	return err;
}

static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (arg->stop)
		return;

	for (h = 0; h < 256; h++) {
		struct rsvp_session *s;

		for (s = rtnl_dereference(head->ht[h]); s;
		     s = rtnl_dereference(s->next)) {
			for (h1 = 0; h1 <= 16; h1++) {
				struct rsvp_filter *f;

				for (f = rtnl_dereference(s->ht[h1]); f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

static int rsvp_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct rsvp_filter *f = fh;
	struct rsvp_session *s;
	struct nlattr *nest;
	struct tc_rsvp_pinfo pinfo;

	if (f == NULL)
		return skb->len;
	s = f->sess;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
		goto nla_put_failure;
	pinfo.dpi = s->dpi;
	pinfo.spi = f->spi;
	pinfo.protocol = s->protocol;
	pinfo.tunnelid = s->tunnelid;
	pinfo.tunnelhdr = f->tunnelhdr;
	pinfo.pad = 0;
	if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
		goto nla_put_failure;
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
		goto nla_put_failure;
	if (((f->handle >> 8) & 0xFF) != 16 &&
	    nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			    unsigned long base)
{
	struct rsvp_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}

static struct tcf_proto_ops RSVP_OPS __read_mostly = {
	.kind		= RSVP_ID,
	.classify	= rsvp_classify,
	.init		= rsvp_init,
	.destroy	= rsvp_destroy,
	.get		= rsvp_get,
	.change		= rsvp_change,
	.delete		= rsvp_delete,
	.walk		= rsvp_walk,
	.dump		= rsvp_dump,
	.bind_class	= rsvp_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_rsvp(void)
{
	return register_tcf_proto_ops(&RSVP_OPS);
}

static void __exit exit_rsvp(void)
{
	unregister_tcf_proto_ops(&RSVP_OPS);
}

module_init(init_rsvp)
module_exit(exit_rsvp)