mvpp2_prs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Header Parser helpers for Marvell PPv2 Network Controller
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);

	return 0;
}

/* Initialize tcam entry from hw */
int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
			   int tid)
{
	int i;

	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	memset(pe, 0, sizeof(*pe));
	pe->index = tid;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
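/*
 * Usage sketch: TCAM/SRAM access is indirect via the index registers,
 * so a read-modify-write of an existing entry always goes through a sw
 * copy, as the helpers below do (e.g. mvpp2_prs_mac_drop_all_set()):
 *
 *	struct mvpp2_prs_entry pe;
 *
 *	mvpp2_prs_init_from_hw(priv, &pe, tid);   // latch entry 'tid'
 *	mvpp2_prs_tcam_port_set(&pe, port, true); // edit the sw copy
 *	mvpp2_prs_hw_write(priv, &pe);            // push tcam + sram back
 */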
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	if (add)
		pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
	else
		pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
}

/* Obtain port map from tcam sw entry */
unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
}
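/*
 * Note the inversion in the two helpers above: the hardware stores
 * per-port "lookup disable" bits, so a port takes part in the match
 * when its enable bit is 0. Illustrative example (assuming the port
 * mask covers all ports):
 *
 *	mvpp2_prs_tcam_port_map_set(&pe, BIT(0) | BIT(1));
 *	// enable field now holds ~0x03, so only ports 0 and 1 match
 *	mvpp2_prs_tcam_port_map_get(&pe);	// returns 0x03 again
 */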
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
}

/* Get byte of data and its enable bits from tcam sw entry */
void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
				  unsigned int offs, unsigned char *byte,
				  unsigned char *enable)
{
	int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

	*byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
	*enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
}

/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	u16 tcam_data;

	tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
	return tcam_data == data;
}

/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
		else
			pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
	}

	pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set vid in tcam sw entry */
static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
				unsigned short vid)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
}

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    u32 val)
{
	pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      u32 val)
{
	pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
						1);
		else
			mvpp2_prs_sram_bits_clear(pe,
						  MVPP2_PRS_SRAM_RI_OFFS + i,
						  1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
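/*
 * Common pattern in the helpers above: every data bit travels with a
 * companion control/enable bit. In the TCAM (mvpp2_prs_tcam_ai_update())
 * a clear enable bit means "don't care" for the lookup; in the SRAM
 * (mvpp2_prs_sram_ri_update()) a clear ctrl bit means this entry leaves
 * that result-info bit untouched. Illustrative call requesting only the
 * drop bit:
 *
 *	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
 *				 MVPP2_PRS_RI_DROP_MASK);
 */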
/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
						1);
		else
			mvpp2_prs_sram_bits_clear(pe,
						  MVPP2_PRS_SRAM_AI_OFFS + i,
						  1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	/* ai is stored on bits 90->97; so it spreads across two u32 */
	int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);

	bits = (pe->sram[ai_off] >> ai_shift) |
	       (pe->sram[ai_off + 1] << (32 - ai_shift));

	return bits;
}

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
		shift & MVPP2_PRS_SRAM_SHIFT_MASK;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				offset & MVPP2_PRS_SRAM_UDF_MASK);

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
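/*
 * Both the shift and the UDF offset above are encoded as sign +
 * magnitude rather than two's complement, hence the explicit sign-bit
 * handling. Illustrative example: a shift of -12 (back to the IPv4
 * proto field) is stored as sign bit = 1, value field = 12:
 *
 *	mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 */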
/* Find parser flow entry */
static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		bits = mvpp2_prs_sram_ai_get(&pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return tid;
	}

	return -ENOENT;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}

/* Drop flow control pause frames */
static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
{
	unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
	struct mvpp2_prs_entry pe;
	unsigned int len;

	memset(&pe, 0, sizeof(pe));

	/* For all ports - drop flow control frames */
	pe.index = MVPP2_PE_FC_DROP;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set port to unicast or multicast promiscuous mode */
void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
			       enum mvpp2_prs_l2_cast l2_cast, bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char cast_match;
	unsigned int ri;
	int tid;

	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	/* promiscuous mode - Accept unknown unicast or multicast packets */
	if (priv->prs_shadow[tid].valid) {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);

		/* Match UC or MC addresses */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
					     MVPP2_PRS_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);

			/* Set ai bits for next iteration */
			if (extend)
				mvpp2_prs_sram_ai_update(&pe, 1,
							 MVPP2_PRS_SRAM_AI_MASK);
			else
				mvpp2_prs_sram_ai_update(&pe, 0,
							 MVPP2_PRS_SRAM_AI_MASK);

			/* Set result info bits to 'single vlan' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
			/* If packet is tagged continue check vid filtering */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
		} else {
			/* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
			mvpp2_prs_sram_shift_set(&pe, shift,
						 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
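/*
 * The DSA entries above form a 2x2 matrix (tagged/untagged x DSA/EDSA),
 * one shared TCAM entry each. Illustrative example: enabling tagged
 * EDSA handling for port 1 only touches the port mask of that entry:
 *
 *	mvpp2_prs_dsa_tag_set(priv, 1, true,
 *			      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
 */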
/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
			       MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
			       MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag */
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Search for existing single/triple vlan entry */
static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(&pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(&pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return tid;
	}

	return -ENOENT;
}
/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry pe;
	int tid_aux, tid;
	int ret = 0;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (tid < 0) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		if (tid <= tid_aux)
			return -EINVAL;

		memset(&pe, 0, sizeof(pe));
		pe.index = tid;
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);

		mvpp2_prs_match_etype(&pe, 0, tpid);

		/* VLAN tag detected, proceed with VID filtering */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);

	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
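/*
 * Ordering note: the TCAM reports the lowest matching index, and a
 * double-vlan entry must win over the single/triple entry for the same
 * outer tag. mvpp2_prs_vlan_add() therefore scans for the last
 * double-vlan tid and refuses to create a single/triple entry at or
 * below it (the "tid <= tid_aux" check returning -EINVAL).
 */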
/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}

/* Search for existing double vlan entry */
static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
				      unsigned short tpid2)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
			mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return tid;
	}

	return -ENOENT;
}

/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	int tid_aux, tid, ai, ret = 0;
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (tid < 0) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0)
			return ai;

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux)
			return -ERANGE;

		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		pe.index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(&pe, 0, tpid1);
		mvpp2_prs_match_etype(&pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		/* Shift 4 bytes - skip outer vlan tag */
		mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);
	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
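/*
 * Mirror-image constraint to the single/triple case: a new double-vlan
 * entry is allocated from the low end of the free range and must sit
 * strictly before the first single/triple entry, otherwise -ERANGE
 * ("tid >= tid_aux"). The second TPID is matched at byte offset 4,
 * i.e. right after the outer TPID + TCI.
 */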
/* IPv4 header parsing for fragmentation and L4 offset */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
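/*
 * Each protocol gets two TCAM entries above: the first additionally
 * requires bytes 2-3 (which at this lookup stage line up with the IPv4
 * flags/fragment-offset field) to be zero and reports "not fragmented";
 * the second reuses the same sw entry with those byte matches disabled
 * (enable mask 0x0) and sets MVPP2_PRS_RI_IP_FRAG_TRUE instead.
 * Illustrative call, as done from mvpp2_prs_ip4_init() below:
 *
 *	mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
 *			    MVPP2_PRS_RI_L4_PROTO_MASK);
 */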
/* IPv4 L3 multicast or broadcast */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Go again to ipv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);

	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Shift back to IPv4 proto */
	mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Set entries for protocols over IPv6 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* IPv6 L3 multicast entry */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Go again to ipv6 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
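/*
 * Illustrative call (an assumption based on how ports are typically
 * brought up elsewhere in the driver, not taken from this file): start
 * every lookup chain at the Marvell Header stage with the default loop
 * budget and no initial packet offset:
 *
 *	mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH,
 *			       MVPP2_PRS_PORT_LU_MAX, 0);
 */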
/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set MH entry that skips the parser */
	pe.index = MVPP2_PE_MH_SKIP_PRS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Set default entries (placeholders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Create dummy entries for drop all and promiscuous modes */
	mvpp2_prs_drop_fc(priv);
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}
/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* Non-tagged EDSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* Non-tagged DSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Non-tagged EDSA ethertype entry - placeholder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - placeholder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* Non-tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Initialize parser entries for VID filtering */
static void mvpp2_prs_vid_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 4 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vid entry for extended DSA */
	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
				 MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, ihl;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with header length >= 5 */
	for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
		pe.index = tid;

		mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
		mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
					     MVPP2_PRS_IPV4_HEAD | ihl,
					     MVPP2_PRS_IPV4_HEAD_MASK |
					     MVPP2_PRS_IPV4_IHL_MASK);

		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
					 MVPP2_PRS_RI_L3_PROTO_MASK);
		/* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
		mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
					 sizeof(struct iphdr) - 4,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* Set L4 offset */
		mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
					  MVPP2_ETH_TYPE_LEN + (ihl * 4),
					  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
		priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
		priv->prs_shadow[pe.index].finish = false;
		mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
					MVPP2_PRS_RI_L3_PROTO_MASK);
		mvpp2_prs_hw_write(priv, &pe);
	}

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
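/*
 * Note on the IHL loop in mvpp2_prs_etype_init() above: a TCAM entry
 * can only match exact bit patterns, not a "length >= 5" relation, so
 * one entry is generated per legal header length and each entry
 * derives its own L4 offset as MVPP2_ETH_TYPE_LEN + ihl * 4.
 */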
/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88A8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88A8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
					      MVPP2_PRS_DBL_VLANS_MAX,
					      GFP_KERNEL);
	if (!priv->prs_double_vlans)
		return -ENOMEM;

	/* Double VLAN: 0x8100, 0x88A8 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
				 sizeof(struct iphdr) - 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD |
				     MVPP2_PRS_IPV4_IHL_MIN,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Jump to DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Go again to ipv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);

	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Shift back to IPv4 proto */
	mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
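
/*
 * Editor's illustration (not part of the driver): further L4 protocols over
 * IPv4 could be matched the same way the TCP/UDP entries above are
 * installed, via the mvpp2_prs_ip4_proto() helper. IPPROTO_SCTP below is
 * used purely as an example protocol number; the driver itself only
 * installs TCP, UDP and IGMP:
 */
#if 0	/* illustrative only, not compiled */
static int example_ip4_extra_proto(struct mvpp2 *priv)
{
	/* Reuse the generic per-protocol helper used for TCP and UDP */
	return mvpp2_prs_ip4_proto(priv, IPPROTO_SCTP,
				   MVPP2_PRS_RI_L4_OTHER,
				   MVPP2_PRS_RI_L4_PROTO_MASK);
}
#endif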

/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header; this is the same case as 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relative to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Find tcam entry with matched pair <vid, port> */
static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
{
	unsigned char byte[2], enable[2];
	struct mvpp2_prs_entry pe;
	u16 rvid, rmask;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_VID */
	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
		if (!port->priv->prs_shadow[tid].valid ||
		    port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
			continue;

		mvpp2_prs_init_from_hw(port->priv, &pe, tid);

		mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
		mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);

		rvid = ((byte[0] & 0xf) << 8) + byte[1];
		rmask = ((enable[0] & 0xf) << 8) + enable[1];

		if (rvid != vid || rmask != mask)
			continue;

		return tid;
	}

	return -ENOENT;
}
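
/*
 * Editor's note on mvpp2_prs_vid_range_find() above: the 12-bit VID lives
 * in the low nibble of TCAM data byte 2 and all of byte 3. A minimal
 * sketch of the unpacking this implies (the helper name is hypothetical):
 */
#if 0	/* illustrative only, not compiled */
static u16 example_vid_unpack(u8 hi, u8 lo)
{
	/* Upper nibble of the high byte is not part of the VID,
	 * e.g. hi = 0x?1, lo = 0x23 yields VID 0x123.
	 */
	return ((hi & 0xf) << 8) | lo;
}
#endif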

/* Write parser entry for VID filtering */
int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
{
	unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
				 port->id * MVPP2_PRS_VLAN_FILT_MAX;
	unsigned int mask = 0xfff, reg_val, shift;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if an entry with this <vid, port> already exists */
	tid = mvpp2_prs_vid_range_find(port, vid, mask);

	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	/* No such entry */
	if (tid < 0) {
		/* Go through all entries from first to last in the vlan range */
		tid = mvpp2_prs_tcam_first_free(priv, vid_start,
						vid_start +
						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);

		/* There isn't room for a new VID filter */
		if (tid < 0)
			return tid;

		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Enable the current port */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set match on VID */
	mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Remove parser entry for VID filtering */
void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	/* Scan TCAM and see if an entry with this <vid, port> exists */
	tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);

	/* No such entry */
	if (tid < 0)
		return;

	mvpp2_prs_hw_inv(priv, tid);
	priv->prs_shadow[tid].valid = false;
}

/* Remove all existing VID filters on this port */
void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
		if (priv->prs_shadow[tid].valid) {
			mvpp2_prs_hw_inv(priv, tid);
			priv->prs_shadow[tid].valid = false;
		}
	}
}

/* Remove the VID filtering guard entry for this port */
void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;

	/* Invalidate the guard entry */
	mvpp2_prs_hw_inv(priv, tid);

	priv->prs_shadow[tid].valid = false;
}

/* Add guard entry that drops packets when no VID is matched on this port */
void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;
	unsigned int reg_val, shift;
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[tid].valid)
		return;

	memset(&pe, 0, sizeof(pe));

	pe.index = tid;

	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Drop VLAN packets that don't belong to any VIDs on this port */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
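
/*
 * Editor's illustration (not part of the driver): the VID helpers above are
 * the primitives an .ndo_vlan_rx_add_vid/.ndo_vlan_rx_kill_vid pair would
 * call. A minimal sketch; these callback names and their wiring are
 * hypothetical, not the driver's actual ndo implementation:
 */
#if 0	/* illustrative only, not compiled */
static int example_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Install (or extend) the per-port VID filter entry */
	return mvpp2_prs_vid_entry_add(port, vid);
}

static int example_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Invalidate the matching TCAM entry, if one exists */
	mvpp2_prs_vid_entry_remove(port, vid);
	return 0;
}
#endif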

/* Parser default initialization */
int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	mvpp2_prs_vid_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}

/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}

/* Find tcam entry with matched pair <MAC DA, port> */
static int
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);

		if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
		    entry_pmap == pmap)
			return tid;
	}

	return -ENOENT;
}
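
/*
 * Editor's note on mvpp2_prs_mac_range_equals() above: the comparison is
 * per byte under the caller's mask, so an entry can represent a whole DA
 * range (mask 0xff requires an exact byte match, mask 0x00 ignores the
 * byte), and a match also requires the stored mask to equal the caller's.
 * A minimal sketch of the per-byte test (the helper name is hypothetical):
 */
#if 0	/* illustrative only, not compiled */
static bool example_masked_byte_match(u8 data, u8 mask, u8 want)
{
	/* mask 0xff -> exact match required; mask 0x00 -> byte ignored */
	return (data & mask) == (want & mask);
}
#endif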

/* Update parser's mac da entry */
int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
{
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mvpp2 *priv = port->priv;
	unsigned int pmap, len, ri;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if an entry with this <MAC DA, port> already
	 * exists
	 */
	tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
					  MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (tid < 0) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv,
						MVPP2_PE_MAC_RANGE_START,
						MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		if (add)
			return -EINVAL;

		mvpp2_prs_hw_inv(priv, pe.index);
		priv->prs_shadow[pe.index].valid = false;
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_BCAST;
	} else if (is_multicast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_MCAST;
	} else {
		ri = MVPP2_PRS_RI_L2_UCAST;

		if (ether_addr_equal(da, port->dev->dev_addr))
			ri |= MVPP2_PRS_RI_MAC_ME_MASK;
	}

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	eth_hw_addr_set(dev, da);

	return 0;
}
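
/*
 * Editor's illustration (not part of the driver): mvpp2_prs_update_mac_da()
 * is shaped for use from an .ndo_set_mac_address handler. A minimal,
 * hypothetical sketch of such a caller:
 */
#if 0	/* illustrative only, not compiled */
static int example_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *addr = p;

	if (!is_valid_ether_addr((const u8 *)addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Swap the parser entry, then update the netdev address */
	return mvpp2_prs_update_mac_da(dev, (const u8 *)addr->sa_data);
}
#endif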

void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	unsigned long pmap;
	int index, tid;

	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* We only want entries active on this port */
		if (!test_bit(port->id, &pmap))
			continue;

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		/* Special case: don't remove broadcast or the port's own
		 * address
		 */
		if (is_broadcast_ether_addr(da) ||
		    ether_addr_equal(da, port->dev->dev_addr))
			continue;

		/* Remove entry from TCAM */
		mvpp2_prs_mac_da_accept(port, da, false);
	}
}

int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}

int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
{
	struct mvpp2_prs_entry pe;
	u8 *ri_byte, *ri_byte_mask;
	int tid, i;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_tcam_first_free(priv,
					MVPP2_PE_LAST_FREE_TID,
					MVPP2_PE_FIRST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	ri_byte = (u8 *)&ri;
	ri_byte_mask = (u8 *)&ri_mask;

	mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

	for (i = 0; i < 4; i++) {
		mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
					     ri_byte_mask[i]);
	}

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
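
/*
 * Editor's illustration (not part of the driver): switching a port between
 * tagging modes is a single call to mvpp2_prs_tag_mode_set(). Hypothetical
 * usage, moving a port onto the extended-DSA entries:
 */
#if 0	/* illustrative only, not compiled */
static int example_enable_edsa(struct mvpp2 *priv, int port_id)
{
	/* Adds the port to EDSA entries and removes it from plain DSA */
	return mvpp2_prs_tag_mode_set(priv, port_id, MVPP2_TAG_TYPE_EDSA);
}
#endif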

/* Set prs flow for the port */
int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry exists */
	if (tid < 0) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
	} else {
		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, &pe);

	return 0;
}

int mvpp2_prs_hits(struct mvpp2 *priv, int index)
{
	u32 val;

	/* Match the bound check used elsewhere in this file: valid entry
	 * indices run from 0 to MVPP2_PRS_TCAM_SRAM_SIZE - 1.
	 */
	if (index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);

	val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);

	val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;

	return val;
}
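
/*
 * Editor's illustration (not part of the driver): mvpp2_prs_hits() reads
 * the per-entry hit counter, which lends itself to a debug dump of the
 * active parser entries. A minimal, hypothetical sketch:
 */
#if 0	/* illustrative only, not compiled */
static void example_dump_prs_hits(struct mvpp2 *priv)
{
	int index, hits;

	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		/* Only entries tracked as valid in the shadow table */
		if (!priv->prs_shadow[index].valid)
			continue;

		hits = mvpp2_prs_hits(priv, index);
		if (hits > 0)
			pr_info("prs entry %d: %d hits\n", index, hits);
	}
}
#endif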