otx2_tc.c (29608B)
1// SPDX-License-Identifier: GPL-2.0 2/* Marvell RVU Ethernet driver 3 * 4 * Copyright (C) 2021 Marvell. 5 * 6 */ 7 8#include <linux/netdevice.h> 9#include <linux/etherdevice.h> 10#include <linux/inetdevice.h> 11#include <linux/rhashtable.h> 12#include <linux/bitfield.h> 13#include <net/flow_dissector.h> 14#include <net/pkt_cls.h> 15#include <net/tc_act/tc_gact.h> 16#include <net/tc_act/tc_mirred.h> 17#include <net/tc_act/tc_vlan.h> 18#include <net/ipv6.h> 19 20#include "cn10k.h" 21#include "otx2_common.h" 22 23/* Egress rate limiting definitions */ 24#define MAX_BURST_EXPONENT 0x0FULL 25#define MAX_BURST_MANTISSA 0xFFULL 26#define MAX_BURST_SIZE 130816ULL 27#define MAX_RATE_DIVIDER_EXPONENT 12ULL 28#define MAX_RATE_EXPONENT 0x0FULL 29#define MAX_RATE_MANTISSA 0xFFULL 30 31/* Bitfields in NIX_TLX_PIR register */ 32#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1) 33#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9) 34#define TLX_RATE_DIVIDER_EXPONENT GENMASK_ULL(16, 13) 35#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29) 36#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37) 37 38struct otx2_tc_flow_stats { 39 u64 bytes; 40 u64 pkts; 41 u64 used; 42}; 43 44struct otx2_tc_flow { 45 struct rhash_head node; 46 unsigned long cookie; 47 unsigned int bitpos; 48 struct rcu_head rcu; 49 struct otx2_tc_flow_stats stats; 50 spinlock_t lock; /* lock for stats */ 51 u16 rq; 52 u16 entry; 53 u16 leaf_profile; 54 bool is_act_police; 55}; 56 57int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic) 58{ 59 struct otx2_tc_info *tc = &nic->tc_info; 60 61 if (!nic->flow_cfg->max_flows) 62 return 0; 63 64 /* Max flows changed, free the existing bitmap */ 65 kfree(tc->tc_entries_bitmap); 66 67 tc->tc_entries_bitmap = 68 kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows), 69 sizeof(long), GFP_KERNEL); 70 if (!tc->tc_entries_bitmap) { 71 netdev_err(nic->netdev, 72 "Unable to alloc TC flow entries bitmap\n"); 73 return -ENOMEM; 74 } 75 76 return 0; 77} 78EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap); 79 80static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp, 81 u32 *burst_mantissa) 82{ 83 unsigned int tmp; 84 85 /* Burst is calculated as 86 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256 87 * Max supported burst size is 130,816 bytes. 88 */ 89 burst = min_t(u32, burst, MAX_BURST_SIZE); 90 if (burst) { 91 *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0; 92 tmp = burst - rounddown_pow_of_two(burst); 93 if (burst < MAX_BURST_MANTISSA) 94 *burst_mantissa = tmp * 2; 95 else 96 *burst_mantissa = tmp / (1ULL << (*burst_exp - 7)); 97 } else { 98 *burst_exp = MAX_BURST_EXPONENT; 99 *burst_mantissa = MAX_BURST_MANTISSA; 100 } 101} 102 103static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp, 104 u32 *mantissa, u32 *div_exp) 105{ 106 unsigned int tmp; 107 108 /* Rate calculation by hardware 109 * 110 * PIR_ADD = ((256 + mantissa) << exp) / 256 111 * rate = (2 * PIR_ADD) / ( 1 << div_exp) 112 * The resultant rate is in Mbps. 113 */ 114 115 /* 2Mbps to 100Gbps can be expressed with div_exp = 0. 116 * Setting this to '0' will ease the calculation of 117 * exponent and mantissa. 118 */ 119 *div_exp = 0; 120 121 if (maxrate) { 122 *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0; 123 tmp = maxrate - rounddown_pow_of_two(maxrate); 124 if (maxrate < MAX_RATE_MANTISSA) 125 *mantissa = tmp * 2; 126 else 127 *mantissa = tmp / (1ULL << (*exp - 7)); 128 } else { 129 /* Instead of disabling rate limiting, set all values to max */ 130 *exp = MAX_RATE_EXPONENT; 131 *mantissa = MAX_RATE_MANTISSA; 132 } 133} 134 135static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate) 136{ 137 struct otx2_hw *hw = &nic->hw; 138 struct nix_txschq_config *req; 139 u32 burst_exp, burst_mantissa; 140 u32 exp, mantissa, div_exp; 141 int txschq, err; 142 143 /* All SQs share the same TL4, so pick the first scheduler */ 144 txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; 145 146 /* Get exponent and mantissa values from the desired rate */ 147 otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa); 148 otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp); 149 150 mutex_lock(&nic->mbox.lock); 151 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox); 152 if (!req) { 153 mutex_unlock(&nic->mbox.lock); 154 return -ENOMEM; 155 } 156 157 req->lvl = NIX_TXSCH_LVL_TL4; 158 req->num_regs = 1; 159 req->reg[0] = NIX_AF_TL4X_PIR(txschq); 160 req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) | 161 FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) | 162 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | 163 FIELD_PREP(TLX_RATE_EXPONENT, exp) | 164 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); 165 166 err = otx2_sync_mbox_msg(&nic->mbox); 167 mutex_unlock(&nic->mbox.lock); 168 return err; 169} 170 171static int otx2_tc_validate_flow(struct otx2_nic *nic, 172 struct flow_action *actions, 173 struct netlink_ext_ack *extack) 174{ 175 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 176 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 177 return -EINVAL; 178 } 179 180 if (!flow_action_has_entries(actions)) { 181 NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action"); 182 return -EINVAL; 183 } 184 185 if (!flow_offload_has_one_action(actions)) { 186 NL_SET_ERR_MSG_MOD(extack, 187 "Egress MATCHALL offload supports only 1 policing action"); 188 return -EINVAL; 189 } 190 return 0; 191} 192 193static int otx2_policer_validate(const struct flow_action *action, 194 const struct flow_action_entry *act, 195 struct netlink_ext_ack *extack) 196{ 197 if (act->police.exceed.act_id != FLOW_ACTION_DROP) { 198 NL_SET_ERR_MSG_MOD(extack, 199 "Offload not supported when exceed action is not drop"); 200 return -EOPNOTSUPP; 201 } 202 203 if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && 204 act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { 205 NL_SET_ERR_MSG_MOD(extack, 206 "Offload not supported when conform action is not pipe or ok"); 207 return -EOPNOTSUPP; 208 } 209 210 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && 211 !flow_action_is_last_entry(action, act)) { 212 NL_SET_ERR_MSG_MOD(extack, 213 "Offload not supported when conform action is ok, but action is not last"); 214 return -EOPNOTSUPP; 215 } 216 217 if (act->police.peakrate_bytes_ps || 218 act->police.avrate || act->police.overhead) { 219 NL_SET_ERR_MSG_MOD(extack, 220 "Offload not supported when peakrate/avrate/overhead is configured"); 221 return -EOPNOTSUPP; 222 } 223 224 return 0; 225} 226 227static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, 228 struct tc_cls_matchall_offload *cls) 229{ 230 struct netlink_ext_ack *extack = cls->common.extack; 231 struct flow_action *actions = &cls->rule->action; 232 struct flow_action_entry *entry; 233 u32 rate; 234 int err; 235 236 err = otx2_tc_validate_flow(nic, actions, extack); 237 if (err) 238 return err; 239 240 if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) { 241 NL_SET_ERR_MSG_MOD(extack, 242 "Only one Egress MATCHALL ratelimiter can be offloaded"); 243 return -ENOMEM; 244 } 245 246 entry = &cls->rule->action.entries[0]; 247 switch (entry->id) { 248 case FLOW_ACTION_POLICE: 249 err = otx2_policer_validate(&cls->rule->action, entry, extack); 250 if (err) 251 return err; 252 253 if (entry->police.rate_pkt_ps) { 254 NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); 255 return -EOPNOTSUPP; 256 } 257 /* Convert bytes per second to Mbps */ 258 rate = entry->police.rate_bytes_ps * 8; 259 rate = max_t(u32, rate / 1000000, 1); 260 err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate); 261 if (err) 262 return err; 263 nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 264 break; 265 default: 266 NL_SET_ERR_MSG_MOD(extack, 267 "Only police action is supported with Egress MATCHALL offload"); 268 return -EOPNOTSUPP; 269 } 270 271 return 0; 272} 273 274static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic, 275 struct tc_cls_matchall_offload *cls) 276{ 277 struct netlink_ext_ack *extack = cls->common.extack; 278 int err; 279 280 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 281 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 282 return -EINVAL; 283 } 284 285 err = otx2_set_matchall_egress_rate(nic, 0, 0); 286 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 287 return err; 288} 289 290static int otx2_tc_act_set_police(struct otx2_nic *nic, 291 struct otx2_tc_flow *node, 292 struct flow_cls_offload *f, 293 u64 rate, u32 burst, u32 mark, 294 struct npc_install_flow_req *req, bool pps) 295{ 296 struct netlink_ext_ack *extack = f->common.extack; 297 struct otx2_hw *hw = &nic->hw; 298 int rq_idx, rc; 299 300 rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues); 301 if (rq_idx >= hw->rx_queues) { 302 NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded"); 303 return -EINVAL; 304 } 305 306 mutex_lock(&nic->mbox.lock); 307 308 rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile); 309 if (rc) { 310 mutex_unlock(&nic->mbox.lock); 311 return rc; 312 } 313 314 rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps); 315 if (rc) 316 goto free_leaf; 317 318 rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true); 319 if (rc) 320 goto free_leaf; 321 322 mutex_unlock(&nic->mbox.lock); 323 324 req->match_id = mark & 0xFFFFULL; 325 req->index = rq_idx; 326 req->op = NIX_RX_ACTIONOP_UCAST; 327 set_bit(rq_idx, &nic->rq_bmap); 328 node->is_act_police = true; 329 node->rq = rq_idx; 330 331 return 0; 332 333free_leaf: 334 if (cn10k_free_leaf_profile(nic, node->leaf_profile)) 335 netdev_err(nic->netdev, 336 "Unable to free leaf bandwidth profile(%d)\n", 337 node->leaf_profile); 338 mutex_unlock(&nic->mbox.lock); 339 return rc; 340} 341 342static int otx2_tc_parse_actions(struct otx2_nic *nic, 343 struct flow_action *flow_action, 344 struct npc_install_flow_req *req, 345 struct flow_cls_offload *f, 346 struct otx2_tc_flow *node) 347{ 348 struct netlink_ext_ack *extack = f->common.extack; 349 struct flow_action_entry *act; 350 struct net_device *target; 351 struct otx2_nic *priv; 352 u32 burst, mark = 0; 353 u8 nr_police = 0; 354 bool pps = false; 355 u64 rate; 356 int err; 357 int i; 358 359 if (!flow_action_has_entries(flow_action)) { 360 NL_SET_ERR_MSG_MOD(extack, "no tc actions specified"); 361 return -EINVAL; 362 } 363 364 flow_action_for_each(i, act, flow_action) { 365 switch (act->id) { 366 case FLOW_ACTION_DROP: 367 req->op = NIX_RX_ACTIONOP_DROP; 368 return 0; 369 case FLOW_ACTION_ACCEPT: 370 req->op = NIX_RX_ACTION_DEFAULT; 371 return 0; 372 case FLOW_ACTION_REDIRECT_INGRESS: 373 target = act->dev; 374 priv = netdev_priv(target); 375 /* npc_install_flow_req doesn't support passing a target pcifunc */ 376 if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { 377 NL_SET_ERR_MSG_MOD(extack, 378 "can't redirect to other pf/vf"); 379 return -EOPNOTSUPP; 380 } 381 req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; 382 req->op = NIX_RX_ACTION_DEFAULT; 383 return 0; 384 case FLOW_ACTION_VLAN_POP: 385 req->vtag0_valid = true; 386 /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */ 387 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; 388 break; 389 case FLOW_ACTION_POLICE: 390 /* Ingress ratelimiting is not supported on OcteonTx2 */ 391 if (is_dev_otx2(nic->pdev)) { 392 NL_SET_ERR_MSG_MOD(extack, 393 "Ingress policing not supported on this platform"); 394 return -EOPNOTSUPP; 395 } 396 397 err = otx2_policer_validate(flow_action, act, extack); 398 if (err) 399 return err; 400 401 if (act->police.rate_bytes_ps > 0) { 402 rate = act->police.rate_bytes_ps * 8; 403 burst = act->police.burst; 404 } else if (act->police.rate_pkt_ps > 0) { 405 /* The algorithm used to calculate rate 406 * mantissa, exponent values for a given token 407 * rate (token can be byte or packet) requires 408 * token rate to be mutiplied by 8. 409 */ 410 rate = act->police.rate_pkt_ps * 8; 411 burst = act->police.burst_pkt; 412 pps = true; 413 } 414 nr_police++; 415 break; 416 case FLOW_ACTION_MARK: 417 mark = act->mark; 418 break; 419 default: 420 return -EOPNOTSUPP; 421 } 422 } 423 424 if (nr_police > 1) { 425 NL_SET_ERR_MSG_MOD(extack, 426 "rate limit police offload requires a single action"); 427 return -EOPNOTSUPP; 428 } 429 430 if (nr_police) 431 return otx2_tc_act_set_police(nic, node, f, rate, burst, 432 mark, req, pps); 433 434 return 0; 435} 436 437static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, 438 struct flow_cls_offload *f, 439 struct npc_install_flow_req *req) 440{ 441 struct netlink_ext_ack *extack = f->common.extack; 442 struct flow_msg *flow_spec = &req->packet; 443 struct flow_msg *flow_mask = &req->mask; 444 struct flow_dissector *dissector; 445 struct flow_rule *rule; 446 u8 ip_proto = 0; 447 448 rule = flow_cls_offload_flow_rule(f); 449 dissector = rule->match.dissector; 450 451 if ((dissector->used_keys & 452 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 453 BIT(FLOW_DISSECTOR_KEY_BASIC) | 454 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 455 BIT(FLOW_DISSECTOR_KEY_VLAN) | 456 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 457 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 458 BIT(FLOW_DISSECTOR_KEY_PORTS) | 459 BIT(FLOW_DISSECTOR_KEY_IP)))) { 460 netdev_info(nic->netdev, "unsupported flow used key 0x%x", 461 dissector->used_keys); 462 return -EOPNOTSUPP; 463 } 464 465 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 466 struct flow_match_basic match; 467 468 flow_rule_match_basic(rule, &match); 469 470 /* All EtherTypes can be matched, no hw limitation */ 471 flow_spec->etype = match.key->n_proto; 472 flow_mask->etype = match.mask->n_proto; 473 req->features |= BIT_ULL(NPC_ETYPE); 474 475 if (match.mask->ip_proto && 476 (match.key->ip_proto != IPPROTO_TCP && 477 match.key->ip_proto != IPPROTO_UDP && 478 match.key->ip_proto != IPPROTO_SCTP && 479 match.key->ip_proto != IPPROTO_ICMP && 480 match.key->ip_proto != IPPROTO_ICMPV6)) { 481 netdev_info(nic->netdev, 482 "ip_proto=0x%x not supported\n", 483 match.key->ip_proto); 484 return -EOPNOTSUPP; 485 } 486 if (match.mask->ip_proto) 487 ip_proto = match.key->ip_proto; 488 489 if (ip_proto == IPPROTO_UDP) 490 req->features |= BIT_ULL(NPC_IPPROTO_UDP); 491 else if (ip_proto == IPPROTO_TCP) 492 req->features |= BIT_ULL(NPC_IPPROTO_TCP); 493 else if (ip_proto == IPPROTO_SCTP) 494 req->features |= BIT_ULL(NPC_IPPROTO_SCTP); 495 else if (ip_proto == IPPROTO_ICMP) 496 req->features |= BIT_ULL(NPC_IPPROTO_ICMP); 497 else if (ip_proto == IPPROTO_ICMPV6) 498 req->features |= BIT_ULL(NPC_IPPROTO_ICMP6); 499 } 500 501 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 502 struct flow_match_eth_addrs match; 503 504 flow_rule_match_eth_addrs(rule, &match); 505 if (!is_zero_ether_addr(match.mask->src)) { 506 NL_SET_ERR_MSG_MOD(extack, "src mac match not supported"); 507 return -EOPNOTSUPP; 508 } 509 510 if (!is_zero_ether_addr(match.mask->dst)) { 511 ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst); 512 ether_addr_copy(flow_mask->dmac, 513 (u8 *)&match.mask->dst); 514 req->features |= BIT_ULL(NPC_DMAC); 515 } 516 } 517 518 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { 519 struct flow_match_ip match; 520 521 flow_rule_match_ip(rule, &match); 522 if ((ntohs(flow_spec->etype) != ETH_P_IP) && 523 match.mask->tos) { 524 NL_SET_ERR_MSG_MOD(extack, "tos not supported"); 525 return -EOPNOTSUPP; 526 } 527 if (match.mask->ttl) { 528 NL_SET_ERR_MSG_MOD(extack, "ttl not supported"); 529 return -EOPNOTSUPP; 530 } 531 flow_spec->tos = match.key->tos; 532 flow_mask->tos = match.mask->tos; 533 req->features |= BIT_ULL(NPC_TOS); 534 } 535 536 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 537 struct flow_match_vlan match; 538 u16 vlan_tci, vlan_tci_mask; 539 540 flow_rule_match_vlan(rule, &match); 541 542 if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) { 543 netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n", 544 ntohs(match.key->vlan_tpid)); 545 return -EOPNOTSUPP; 546 } 547 548 if (match.mask->vlan_id || 549 match.mask->vlan_dei || 550 match.mask->vlan_priority) { 551 vlan_tci = match.key->vlan_id | 552 match.key->vlan_dei << 12 | 553 match.key->vlan_priority << 13; 554 555 vlan_tci_mask = match.mask->vlan_id | 556 match.mask->vlan_dei << 12 | 557 match.mask->vlan_priority << 13; 558 559 flow_spec->vlan_tci = htons(vlan_tci); 560 flow_mask->vlan_tci = htons(vlan_tci_mask); 561 req->features |= BIT_ULL(NPC_OUTER_VID); 562 } 563 } 564 565 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { 566 struct flow_match_ipv4_addrs match; 567 568 flow_rule_match_ipv4_addrs(rule, &match); 569 570 flow_spec->ip4dst = match.key->dst; 571 flow_mask->ip4dst = match.mask->dst; 572 req->features |= BIT_ULL(NPC_DIP_IPV4); 573 574 flow_spec->ip4src = match.key->src; 575 flow_mask->ip4src = match.mask->src; 576 req->features |= BIT_ULL(NPC_SIP_IPV4); 577 } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { 578 struct flow_match_ipv6_addrs match; 579 580 flow_rule_match_ipv6_addrs(rule, &match); 581 582 if (ipv6_addr_loopback(&match.key->dst) || 583 ipv6_addr_loopback(&match.key->src)) { 584 NL_SET_ERR_MSG_MOD(extack, 585 "Flow matching IPv6 loopback addr not supported"); 586 return -EOPNOTSUPP; 587 } 588 589 if (!ipv6_addr_any(&match.mask->dst)) { 590 memcpy(&flow_spec->ip6dst, 591 (struct in6_addr *)&match.key->dst, 592 sizeof(flow_spec->ip6dst)); 593 memcpy(&flow_mask->ip6dst, 594 (struct in6_addr *)&match.mask->dst, 595 sizeof(flow_spec->ip6dst)); 596 req->features |= BIT_ULL(NPC_DIP_IPV6); 597 } 598 599 if (!ipv6_addr_any(&match.mask->src)) { 600 memcpy(&flow_spec->ip6src, 601 (struct in6_addr *)&match.key->src, 602 sizeof(flow_spec->ip6src)); 603 memcpy(&flow_mask->ip6src, 604 (struct in6_addr *)&match.mask->src, 605 sizeof(flow_spec->ip6src)); 606 req->features |= BIT_ULL(NPC_SIP_IPV6); 607 } 608 } 609 610 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 611 struct flow_match_ports match; 612 613 flow_rule_match_ports(rule, &match); 614 615 flow_spec->dport = match.key->dst; 616 flow_mask->dport = match.mask->dst; 617 if (ip_proto == IPPROTO_UDP) 618 req->features |= BIT_ULL(NPC_DPORT_UDP); 619 else if (ip_proto == IPPROTO_TCP) 620 req->features |= BIT_ULL(NPC_DPORT_TCP); 621 else if (ip_proto == IPPROTO_SCTP) 622 req->features |= BIT_ULL(NPC_DPORT_SCTP); 623 624 flow_spec->sport = match.key->src; 625 flow_mask->sport = match.mask->src; 626 if (ip_proto == IPPROTO_UDP) 627 req->features |= BIT_ULL(NPC_SPORT_UDP); 628 else if (ip_proto == IPPROTO_TCP) 629 req->features |= BIT_ULL(NPC_SPORT_TCP); 630 else if (ip_proto == IPPROTO_SCTP) 631 req->features |= BIT_ULL(NPC_SPORT_SCTP); 632 } 633 634 return otx2_tc_parse_actions(nic, &rule->action, req, f, node); 635} 636 637static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry) 638{ 639 struct npc_delete_flow_req *req; 640 int err; 641 642 mutex_lock(&nic->mbox.lock); 643 req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox); 644 if (!req) { 645 mutex_unlock(&nic->mbox.lock); 646 return -ENOMEM; 647 } 648 649 req->entry = entry; 650 651 /* Send message to AF */ 652 err = otx2_sync_mbox_msg(&nic->mbox); 653 if (err) { 654 netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n", 655 entry); 656 mutex_unlock(&nic->mbox.lock); 657 return -EFAULT; 658 } 659 mutex_unlock(&nic->mbox.lock); 660 661 return 0; 662} 663 664static int otx2_tc_del_flow(struct otx2_nic *nic, 665 struct flow_cls_offload *tc_flow_cmd) 666{ 667 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 668 struct otx2_tc_info *tc_info = &nic->tc_info; 669 struct otx2_tc_flow *flow_node; 670 int err; 671 672 flow_node = rhashtable_lookup_fast(&tc_info->flow_table, 673 &tc_flow_cmd->cookie, 674 tc_info->flow_ht_params); 675 if (!flow_node) { 676 netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n", 677 tc_flow_cmd->cookie); 678 return -EINVAL; 679 } 680 681 if (flow_node->is_act_police) { 682 mutex_lock(&nic->mbox.lock); 683 684 err = cn10k_map_unmap_rq_policer(nic, flow_node->rq, 685 flow_node->leaf_profile, false); 686 if (err) 687 netdev_err(nic->netdev, 688 "Unmapping RQ %d & profile %d failed\n", 689 flow_node->rq, flow_node->leaf_profile); 690 691 err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile); 692 if (err) 693 netdev_err(nic->netdev, 694 "Unable to free leaf bandwidth profile(%d)\n", 695 flow_node->leaf_profile); 696 697 __clear_bit(flow_node->rq, &nic->rq_bmap); 698 699 mutex_unlock(&nic->mbox.lock); 700 } 701 702 otx2_del_mcam_flow_entry(nic, flow_node->entry); 703 704 WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table, 705 &flow_node->node, 706 nic->tc_info.flow_ht_params)); 707 kfree_rcu(flow_node, rcu); 708 709 clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap); 710 flow_cfg->nr_flows--; 711 712 return 0; 713} 714 715static int otx2_tc_add_flow(struct otx2_nic *nic, 716 struct flow_cls_offload *tc_flow_cmd) 717{ 718 struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; 719 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 720 struct otx2_tc_info *tc_info = &nic->tc_info; 721 struct otx2_tc_flow *new_node, *old_node; 722 struct npc_install_flow_req *req, dummy; 723 int rc, err; 724 725 if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) 726 return -ENOMEM; 727 728 if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) { 729 NL_SET_ERR_MSG_MOD(extack, 730 "Free MCAM entry not available to add the flow"); 731 return -ENOMEM; 732 } 733 734 /* allocate memory for the new flow and it's node */ 735 new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); 736 if (!new_node) 737 return -ENOMEM; 738 spin_lock_init(&new_node->lock); 739 new_node->cookie = tc_flow_cmd->cookie; 740 741 memset(&dummy, 0, sizeof(struct npc_install_flow_req)); 742 743 rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy); 744 if (rc) { 745 kfree_rcu(new_node, rcu); 746 return rc; 747 } 748 749 /* If a flow exists with the same cookie, delete it */ 750 old_node = rhashtable_lookup_fast(&tc_info->flow_table, 751 &tc_flow_cmd->cookie, 752 tc_info->flow_ht_params); 753 if (old_node) 754 otx2_tc_del_flow(nic, tc_flow_cmd); 755 756 mutex_lock(&nic->mbox.lock); 757 req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 758 if (!req) { 759 mutex_unlock(&nic->mbox.lock); 760 rc = -ENOMEM; 761 goto free_leaf; 762 } 763 764 memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr)); 765 memcpy(req, &dummy, sizeof(struct npc_install_flow_req)); 766 767 new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap, 768 flow_cfg->max_flows); 769 req->channel = nic->hw.rx_chan_base; 770 req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1]; 771 req->intf = NIX_INTF_RX; 772 req->set_cntr = 1; 773 new_node->entry = req->entry; 774 775 /* Send message to AF */ 776 rc = otx2_sync_mbox_msg(&nic->mbox); 777 if (rc) { 778 NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry"); 779 mutex_unlock(&nic->mbox.lock); 780 kfree_rcu(new_node, rcu); 781 goto free_leaf; 782 } 783 mutex_unlock(&nic->mbox.lock); 784 785 /* add new flow to flow-table */ 786 rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node, 787 nic->tc_info.flow_ht_params); 788 if (rc) { 789 otx2_del_mcam_flow_entry(nic, req->entry); 790 kfree_rcu(new_node, rcu); 791 goto free_leaf; 792 } 793 794 set_bit(new_node->bitpos, tc_info->tc_entries_bitmap); 795 flow_cfg->nr_flows++; 796 797 return 0; 798 799free_leaf: 800 if (new_node->is_act_police) { 801 mutex_lock(&nic->mbox.lock); 802 803 err = cn10k_map_unmap_rq_policer(nic, new_node->rq, 804 new_node->leaf_profile, false); 805 if (err) 806 netdev_err(nic->netdev, 807 "Unmapping RQ %d & profile %d failed\n", 808 new_node->rq, new_node->leaf_profile); 809 err = cn10k_free_leaf_profile(nic, new_node->leaf_profile); 810 if (err) 811 netdev_err(nic->netdev, 812 "Unable to free leaf bandwidth profile(%d)\n", 813 new_node->leaf_profile); 814 815 __clear_bit(new_node->rq, &nic->rq_bmap); 816 817 mutex_unlock(&nic->mbox.lock); 818 } 819 820 return rc; 821} 822 823static int otx2_tc_get_flow_stats(struct otx2_nic *nic, 824 struct flow_cls_offload *tc_flow_cmd) 825{ 826 struct otx2_tc_info *tc_info = &nic->tc_info; 827 struct npc_mcam_get_stats_req *req; 828 struct npc_mcam_get_stats_rsp *rsp; 829 struct otx2_tc_flow_stats *stats; 830 struct otx2_tc_flow *flow_node; 831 int err; 832 833 flow_node = rhashtable_lookup_fast(&tc_info->flow_table, 834 &tc_flow_cmd->cookie, 835 tc_info->flow_ht_params); 836 if (!flow_node) { 837 netdev_info(nic->netdev, "tc flow not found for cookie %lx", 838 tc_flow_cmd->cookie); 839 return -EINVAL; 840 } 841 842 mutex_lock(&nic->mbox.lock); 843 844 req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox); 845 if (!req) { 846 mutex_unlock(&nic->mbox.lock); 847 return -ENOMEM; 848 } 849 850 req->entry = flow_node->entry; 851 852 err = otx2_sync_mbox_msg(&nic->mbox); 853 if (err) { 854 netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n", 855 req->entry); 856 mutex_unlock(&nic->mbox.lock); 857 return -EFAULT; 858 } 859 860 rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp 861 (&nic->mbox.mbox, 0, &req->hdr); 862 if (IS_ERR(rsp)) { 863 mutex_unlock(&nic->mbox.lock); 864 return PTR_ERR(rsp); 865 } 866 867 mutex_unlock(&nic->mbox.lock); 868 869 if (!rsp->stat_ena) 870 return -EINVAL; 871 872 stats = &flow_node->stats; 873 874 spin_lock(&flow_node->lock); 875 flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0, 876 FLOW_ACTION_HW_STATS_IMMEDIATE); 877 stats->pkts = rsp->stat; 878 spin_unlock(&flow_node->lock); 879 880 return 0; 881} 882 883static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, 884 struct flow_cls_offload *cls_flower) 885{ 886 switch (cls_flower->command) { 887 case FLOW_CLS_REPLACE: 888 return otx2_tc_add_flow(nic, cls_flower); 889 case FLOW_CLS_DESTROY: 890 return otx2_tc_del_flow(nic, cls_flower); 891 case FLOW_CLS_STATS: 892 return otx2_tc_get_flow_stats(nic, cls_flower); 893 default: 894 return -EOPNOTSUPP; 895 } 896} 897 898static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic, 899 struct tc_cls_matchall_offload *cls) 900{ 901 struct netlink_ext_ack *extack = cls->common.extack; 902 struct flow_action *actions = &cls->rule->action; 903 struct flow_action_entry *entry; 904 u64 rate; 905 int err; 906 907 err = otx2_tc_validate_flow(nic, actions, extack); 908 if (err) 909 return err; 910 911 if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) { 912 NL_SET_ERR_MSG_MOD(extack, 913 "Only one ingress MATCHALL ratelimitter can be offloaded"); 914 return -ENOMEM; 915 } 916 917 entry = &cls->rule->action.entries[0]; 918 switch (entry->id) { 919 case FLOW_ACTION_POLICE: 920 /* Ingress ratelimiting is not supported on OcteonTx2 */ 921 if (is_dev_otx2(nic->pdev)) { 922 NL_SET_ERR_MSG_MOD(extack, 923 "Ingress policing not supported on this platform"); 924 return -EOPNOTSUPP; 925 } 926 927 err = cn10k_alloc_matchall_ipolicer(nic); 928 if (err) 929 return err; 930 931 /* Convert to bits per second */ 932 rate = entry->police.rate_bytes_ps * 8; 933 err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate); 934 if (err) 935 return err; 936 nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 937 break; 938 default: 939 NL_SET_ERR_MSG_MOD(extack, 940 "Only police action supported with Ingress MATCHALL offload"); 941 return -EOPNOTSUPP; 942 } 943 944 return 0; 945} 946 947static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic, 948 struct tc_cls_matchall_offload *cls) 949{ 950 struct netlink_ext_ack *extack = cls->common.extack; 951 int err; 952 953 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 954 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 955 return -EINVAL; 956 } 957 958 err = cn10k_free_matchall_ipolicer(nic); 959 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 960 return err; 961} 962 963static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic, 964 struct tc_cls_matchall_offload *cls_matchall) 965{ 966 switch (cls_matchall->command) { 967 case TC_CLSMATCHALL_REPLACE: 968 return otx2_tc_ingress_matchall_install(nic, cls_matchall); 969 case TC_CLSMATCHALL_DESTROY: 970 return otx2_tc_ingress_matchall_delete(nic, cls_matchall); 971 case TC_CLSMATCHALL_STATS: 972 default: 973 break; 974 } 975 976 return -EOPNOTSUPP; 977} 978 979static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, 980 void *type_data, void *cb_priv) 981{ 982 struct otx2_nic *nic = cb_priv; 983 984 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 985 return -EOPNOTSUPP; 986 987 switch (type) { 988 case TC_SETUP_CLSFLOWER: 989 return otx2_setup_tc_cls_flower(nic, type_data); 990 case TC_SETUP_CLSMATCHALL: 991 return otx2_setup_tc_ingress_matchall(nic, type_data); 992 default: 993 break; 994 } 995 996 return -EOPNOTSUPP; 997} 998 999static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic, 1000 struct tc_cls_matchall_offload *cls_matchall) 1001{ 1002 switch (cls_matchall->command) { 1003 case TC_CLSMATCHALL_REPLACE: 1004 return otx2_tc_egress_matchall_install(nic, cls_matchall); 1005 case TC_CLSMATCHALL_DESTROY: 1006 return otx2_tc_egress_matchall_delete(nic, cls_matchall); 1007 case TC_CLSMATCHALL_STATS: 1008 default: 1009 break; 1010 } 1011 1012 return -EOPNOTSUPP; 1013} 1014 1015static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type, 1016 void *type_data, void *cb_priv) 1017{ 1018 struct otx2_nic *nic = cb_priv; 1019 1020 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1021 return -EOPNOTSUPP; 1022 1023 switch (type) { 1024 case TC_SETUP_CLSMATCHALL: 1025 return otx2_setup_tc_egress_matchall(nic, type_data); 1026 default: 1027 break; 1028 } 1029 1030 return -EOPNOTSUPP; 1031} 1032 1033static LIST_HEAD(otx2_block_cb_list); 1034 1035static int otx2_setup_tc_block(struct net_device *netdev, 1036 struct flow_block_offload *f) 1037{ 1038 struct otx2_nic *nic = netdev_priv(netdev); 1039 flow_setup_cb_t *cb; 1040 bool ingress; 1041 1042 if (f->block_shared) 1043 return -EOPNOTSUPP; 1044 1045 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1046 cb = otx2_setup_tc_block_ingress_cb; 1047 ingress = true; 1048 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1049 cb = otx2_setup_tc_block_egress_cb; 1050 ingress = false; 1051 } else { 1052 return -EOPNOTSUPP; 1053 } 1054 1055 return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb, 1056 nic, nic, ingress); 1057} 1058 1059int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, 1060 void *type_data) 1061{ 1062 switch (type) { 1063 case TC_SETUP_BLOCK: 1064 return otx2_setup_tc_block(netdev, type_data); 1065 default: 1066 return -EOPNOTSUPP; 1067 } 1068} 1069EXPORT_SYMBOL(otx2_setup_tc); 1070 1071static const struct rhashtable_params tc_flow_ht_params = { 1072 .head_offset = offsetof(struct otx2_tc_flow, node), 1073 .key_offset = offsetof(struct otx2_tc_flow, cookie), 1074 .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie), 1075 .automatic_shrinking = true, 1076}; 1077 1078int otx2_init_tc(struct otx2_nic *nic) 1079{ 1080 struct otx2_tc_info *tc = &nic->tc_info; 1081 int err; 1082 1083 /* Exclude receive queue 0 being used for police action */ 1084 set_bit(0, &nic->rq_bmap); 1085 1086 if (!nic->flow_cfg) { 1087 netdev_err(nic->netdev, 1088 "Can't init TC, nic->flow_cfg is not setup\n"); 1089 return -EINVAL; 1090 } 1091 1092 err = otx2_tc_alloc_ent_bitmap(nic); 1093 if (err) 1094 return err; 1095 1096 tc->flow_ht_params = tc_flow_ht_params; 1097 return rhashtable_init(&tc->flow_table, &tc->flow_ht_params); 1098} 1099EXPORT_SYMBOL(otx2_init_tc); 1100 1101void otx2_shutdown_tc(struct otx2_nic *nic) 1102{ 1103 struct otx2_tc_info *tc = &nic->tc_info; 1104 1105 kfree(tc->tc_entries_bitmap); 1106 rhashtable_destroy(&tc->flow_table); 1107} 1108EXPORT_SYMBOL(otx2_shutdown_tc);