cls_tcindex.c (17923B)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */


struct tcindex_data;

struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
	struct tcindex_data	*p;
	struct rcu_work		rwork;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_work rwork;
};


struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	refcount_t refcnt;	/* a temporary refcnt for perfect hash */
	struct rcu_work rwork;
};

static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

static void tcindex_data_get(struct tcindex_data *p)
{
	refcount_inc(&p->refcnt);
}

static void tcindex_data_put(struct tcindex_data *p)
{
	if (refcount_dec_and_test(&p->refcnt)) {
		kfree(p->perfect);
		kfree(p->h);
		kfree(p);
	}
}

static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}

static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}


static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;
	refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */

	rcu_assign_pointer(tp->root, p);
	return 0;
}

static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
	tcindex_data_put(r->p);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(to_rcu_work(work),
			 struct tcindex_filter_result,
			 rwork);
	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(to_rcu_work(work),
						struct tcindex_filter,
						rwork);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}
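/* Filter teardown is deferred: when the extensions still hold a network
 * namespace reference (tcf_exts_get_net()), destruction is queued with
 * tcf_queue_work() so it runs after an RCU grace period and under RTNL;
 * otherwise the filter is torn down immediately on the caller's path.
 */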

static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
		else
			__tcindex_destroy_fexts(f);
	} else {
		tcindex_data_get(p);

		if (tcf_exts_get_net(&r->exts))
			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}

static void tcindex_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	tcindex_data_put(p);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}
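/* valid_perfect_hash() checks that every possible key fits in the perfect
 * hash array: the largest key is mask >> shift, so the table needs strictly
 * more entries than that. Illustrative example (values not from this file):
 * mask = 0x3f with shift = 2 gives a maximum key of 0x3f >> 2 = 15, so a
 * perfect hash needs at least 16 entries.
 */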

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r,
				      struct tcindex_data *p,
				      struct net *net)
{
	memset(r, 0, sizeof(*r));
	r->p = p;
	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
			     TCA_TCINDEX_POLICE);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp);

static void tcindex_partial_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	rtnl_lock();
	if (p->perfect)
		tcindex_free_perfect_hash(p);
	kfree(p);
	rtnl_unlock();
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL | __GFP_NOWARN);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts, net,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
		cp->perfect[i].p = cp;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}
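/* tcindex_set_parms() updates the classifier copy-on-write style: a new
 * tcindex_data is allocated, the old parameters are copied in, the netlink
 * attributes are applied to the copy, and only then is the new root
 * published with rcu_assign_pointer(); the old data is freed after an RCU
 * grace period. balloc records which table was allocated for this change
 * (1 = perfect hash, 2 = imperfect hash) so the error path can unwind it.
 */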

static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, u32 flags, struct netlink_ext_ack *extack)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_result cr = {};
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, flags, extack);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;
	refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT]) {
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
		if (cp->shift > 16) {
			err = -EINVAL;
			goto errout;
		}
	}
	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(net, cp) < 0)
			goto errout;
		cp->alloc_hash = cp->hash;
		for (i = 0; i < min(cp->hash, p->hash); i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result, cp, net);
	if (err < 0)
		goto errout_alloc;
	if (old_r)
		cr = r->res;

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;


	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(net, cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr, base);
	}

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr;
	tcf_exts_change(&r->exts, &e);

	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
			; /* nothing */

		rcu_assign_pointer(*fp, f);
	} else {
		tcf_exts_destroy(&new_filter_result.exts);
	}

	if (oldp)
		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, u32 flags,
	       struct netlink_ext_ack *extack)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
		 "p %p,r %p,*arg %p\n",
		 tp, handle, tca, arg, opt, p, r, *arg);

	if (!opt)
		return 0;

	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
					  tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], flags, extack);
}

static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
			 bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	int i;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);

	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			struct tcindex_filter_result *r = p->perfect + i;

			/* tcf_queue_work() does not guarantee the ordering we
			 * want, so we have to take this refcnt temporarily to
			 * ensure 'p' is freed after all tcindex_filter_result
			 * here. Imperfect hash does not need this, because it
			 * uses linked lists rather than an array.
			 */
			tcindex_data_get(p);

			tcf_unbind_filter(tp, &r->res);
			if (tcf_exts_get_net(&r->exts))
				tcf_queue_work(&r->rwork,
					       tcindex_destroy_rexts_work);
			else
				__tcindex_destroy_rexts(r);
		}
	}

	for (i = 0; p->h && i < p->hash; i++) {
		struct tcindex_filter *f, *next;
		bool last;

		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
		}
	}

	tcf_queue_work(&p->rwork, tcindex_destroy_work);
}
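/* Dumping: with fh == NULL the classifier-wide parameters (hash, mask,
 * shift, fall_through) are emitted; for an individual filter the handle is
 * recovered either from its index in the perfect hash array or from the
 * key of its entry in the imperfect hash chains, followed by the classid
 * and the extension/action dump.
 */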

static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &r->res, base);
		else
			__tcf_unbind_filter(q, &r->res);
	}
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		= "tcindex",
	.classify	= tcindex_classify,
	.init		= tcindex_init,
	.destroy	= tcindex_destroy,
	.get		= tcindex_get,
	.change		= tcindex_change,
	.delete		= tcindex_delete,
	.walk		= tcindex_walk,
	.dump		= tcindex_dump,
	.bind_class	= tcindex_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");
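/* Usage sketch (illustrative only, assuming the iproute2 tc(8) tcindex
 * filter syntax; device, priorities and class ids are hypothetical):
 *
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 1 handle 1 \
 *       tcindex mask 0xfc shift 2 classid 1:1
 *
 * This classifies packets whose (tc_index & 0xfc) >> 2 equals 1 into
 * class 1:1; with fall_through enabled (the default set in
 * tcindex_init()), unmatched keys map to the class whose minor number
 * equals the key.
 */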