tag_dsa.c (11886B)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Regular and Ethertype DSA tagging
 * Copyright (c) 2008-2009 Marvell Semiconductor
 *
 * Regular DSA
 * -----------
 *
 * For untagged (in 802.1Q terms) packets, the switch will splice in
 * the tag between the SA and the ethertype of the original
 * packet. Tagged frames will instead have their outermost .1Q tag
 * converted to a DSA tag. It expects the same layout when receiving
 * packets from the CPU.
 *
 * Example:
 *
 *     .----.----.----.---------
 * Pu: | DA | SA | ET | Payload ...
 *     '----'----'----'---------
 *        6    6    2       N
 *     .----.----.--------.-----.----.---------
 * Pt: | DA | SA | 0x8100 | TCI | ET | Payload ...
 *     '----'----'--------'-----'----'---------
 *        6    6        2     2    2       N
 *     .----.----.-----.----.---------
 * Pd: | DA | SA | DSA | ET | Payload ...
 *     '----'----'-----'----'---------
 *        6    6     4    2       N
 *
 * No matter if a packet is received untagged (Pu) or tagged (Pt),
 * they will both have the same layout (Pd) when they are sent to the
 * CPU. This is done by ignoring 802.3, replacing the ethertype field
 * with more metadata, among which is a bit to signal if the original
 * packet was tagged or not.
 *
 * Ethertype DSA
 * -------------
 * Uses the exact same tag format as regular DSA, but also includes a
 * proper ethertype field (which the mv88e6xxx driver sets to
 * ETH_P_EDSA/0xdada) followed by two zero bytes:
 *
 * .----.----.--------.--------.-----.----.---------
 * | DA | SA | 0xdada | 0x0000 | DSA | ET | Payload ...
 * '----'----'--------'--------'-----'----'---------
 *    6    6        2        2     4    2       N
 */
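
/*
 * Worked example (added for illustration; the byte values follow from
 * the tag construction and parsing code below, not from the original
 * comment): an untagged frame that ingressed port 2 of switch 0 and
 * was FORWARDed to the CPU on VID 1 would carry the DSA tag
 *
 *   .------.------.------.------.
 *   | 0xc0 | 0x10 | 0x00 | 0x01 |
 *   '------'------'------'------'
 *
 * where 0xc0 is FORWARD (cmd 3) << 6 with the 'tagged' bit (0x20)
 * cleared and the source device in the low 5 bits, 0x10 is source
 * port 2 << 3, and the last two bytes carry the PRI/VID fields (VID 1).
 */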

#include <linux/dsa/mv88e6xxx.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>

#include "dsa_priv.h"

#define DSA_HLEN 4

/**
 * enum dsa_cmd - DSA Command
 * @DSA_CMD_TO_CPU: Set on packets that were trapped or mirrored to
 *     the CPU port. This is needed to implement control protocols,
 *     e.g. STP and LLDP, that must not allow those control packets to
 *     be switched according to the normal rules.
 * @DSA_CMD_FROM_CPU: Used by the CPU to send a packet to a specific
 *     port, ignoring all the barriers that the switch normally
 *     enforces (VLANs, STP port states etc.). No source address
 *     learning takes place. "sudo send packet"
 * @DSA_CMD_TO_SNIFFER: Set on the copies of packets that matched some
 *     user configured ingress or egress monitor criteria. These are
 *     forwarded by the switch tree to the user configured ingress or
 *     egress monitor port, which can be set to the CPU port or a
 *     regular port. If the destination is a regular port, the tag
 *     will be removed before egressing the port. If the destination
 *     is the CPU port, the tag will not be removed.
 * @DSA_CMD_FORWARD: This tag is used on all bulk traffic passing
 *     through the switch tree, including the flows that are directed
 *     towards the CPU. Its device/port tuple encodes the original
 *     source port on which the packet ingressed. It can also be used
 *     on transmit by the CPU to defer the forwarding decision to the
 *     hardware, based on the current config of PVT/VTU/ATU
 *     etc. Source address learning takes place if enabled on the
 *     receiving DSA/CPU port.
 */
enum dsa_cmd {
        DSA_CMD_TO_CPU     = 0,
        DSA_CMD_FROM_CPU   = 1,
        DSA_CMD_TO_SNIFFER = 2,
        DSA_CMD_FORWARD    = 3
};

/**
 * enum dsa_code - TO_CPU Code
 *
 * @DSA_CODE_MGMT_TRAP: DA was classified as a management
 *     address. Typical examples include STP BPDUs and LLDP.
 * @DSA_CODE_FRAME2REG: Response to a "remote management" request.
 * @DSA_CODE_IGMP_MLD_TRAP: IGMP/MLD signaling.
 * @DSA_CODE_POLICY_TRAP: Frame matched some policy configuration on
 *     the device. Typical examples are matching on DA/SA/VID and DHCP
 *     snooping.
 * @DSA_CODE_ARP_MIRROR: The name says it all really.
 * @DSA_CODE_POLICY_MIRROR: Same as @DSA_CODE_POLICY_TRAP, but the
 *     particular policy was set to trigger a mirror instead of a
 *     trap.
 * @DSA_CODE_RESERVED_6: Unused on all devices up to at least 6393X.
 * @DSA_CODE_RESERVED_7: Unused on all devices up to at least 6393X.
 *
 * A 3-bit code is used to relay why a particular frame was sent to
 * the CPU. We only use this to determine if the packet was mirrored
 * or trapped, i.e. whether the packet has been forwarded by hardware
 * or not.
 *
 * This is the superset of all possible codes. Any particular device
 * may only implement a subset.
 */
enum dsa_code {
        DSA_CODE_MGMT_TRAP     = 0,
        DSA_CODE_FRAME2REG     = 1,
        DSA_CODE_IGMP_MLD_TRAP = 2,
        DSA_CODE_POLICY_TRAP   = 3,
        DSA_CODE_ARP_MIRROR    = 4,
        DSA_CODE_POLICY_MIRROR = 5,
        DSA_CODE_RESERVED_6    = 6,
        DSA_CODE_RESERVED_7    = 7
};

static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
                                   u8 extra)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct net_device *br_dev;
        u8 tag_dev, tag_port;
        enum dsa_cmd cmd;
        u8 *dsa_header;

        if (skb->offload_fwd_mark) {
                unsigned int bridge_num = dsa_port_bridge_num_get(dp);
                struct dsa_switch_tree *dst = dp->ds->dst;

                cmd = DSA_CMD_FORWARD;

                /* When offloading forwarding for a bridge, inject FORWARD
                 * packets on behalf of a virtual switch device with an index
                 * past the physical switches.
                 */
                tag_dev = dst->last_switch + bridge_num;
                tag_port = 0;
        } else {
                cmd = DSA_CMD_FROM_CPU;
                tag_dev = dp->ds->index;
                tag_port = dp->index;
        }

        br_dev = dsa_port_bridge_dev_get(dp);

        /* If frame is already 802.1Q tagged, we can convert it to a DSA
         * tag (avoiding a memmove), but only if the port is standalone
         * (in which case we always send FROM_CPU) or if the port's
         * bridge has VLAN filtering enabled (in which case the CPU port
         * will be a member of the VLAN).
         */
        if (skb->protocol == htons(ETH_P_8021Q) &&
            (!br_dev || br_vlan_enabled(br_dev))) {
                if (extra) {
                        skb_push(skb, extra);
                        dsa_alloc_etype_header(skb, extra);
                }

                /* Construct tagged DSA tag from 802.1Q tag. */
                dsa_header = dsa_etype_header_pos_tx(skb) + extra;
                dsa_header[0] = (cmd << 6) | 0x20 | tag_dev;
                dsa_header[1] = tag_port << 3;

                /* Move CFI field from byte 2 to byte 1. */
                if (dsa_header[2] & 0x10) {
                        dsa_header[1] |= 0x01;
                        dsa_header[2] &= ~0x10;
                }
        } else {
                u16 vid;

                vid = br_dev ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;

                skb_push(skb, DSA_HLEN + extra);
                dsa_alloc_etype_header(skb, DSA_HLEN + extra);

                /* Construct DSA header from untagged frame. */
                dsa_header = dsa_etype_header_pos_tx(skb) + extra;

                dsa_header[0] = (cmd << 6) | tag_dev;
                dsa_header[1] = tag_port << 3;
                dsa_header[2] = vid >> 8;
                dsa_header[3] = vid & 0xff;
        }

        return skb;
}
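
/*
 * Illustrative walk-through of dsa_xmit_ll() (an added example; the
 * values are derived from the code above, the switch/port numbers are
 * arbitrary): sending an untagged frame from the CPU out port 3 of
 * switch 0 on a standalone port takes the else branch, so
 * cmd = DSA_CMD_FROM_CPU, tag_dev = 0, tag_port = 3 and
 * vid = MV88E6XXX_VID_STANDALONE, yielding
 *
 *   dsa_header[0] = (1 << 6) | 0 = 0x40
 *   dsa_header[1] = 3 << 3       = 0x18
 *   dsa_header[2] = vid >> 8
 *   dsa_header[3] = vid & 0xff
 *
 * In the 802.1Q branch the existing 0x8100/TCI bytes are rewritten in
 * place instead, which is why no memmove is needed there.
 */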

static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
                                  u8 extra)
{
        bool trap = false, trunk = false;
        int source_device, source_port;
        enum dsa_code code;
        enum dsa_cmd cmd;
        u8 *dsa_header;

        /* The ethertype field is part of the DSA header. */
        dsa_header = dsa_etype_header_pos_rx(skb);

        cmd = dsa_header[0] >> 6;
        switch (cmd) {
        case DSA_CMD_FORWARD:
                trunk = !!(dsa_header[1] & 4);
                break;

        case DSA_CMD_TO_CPU:
                code = (dsa_header[1] & 0x6) | ((dsa_header[2] >> 4) & 1);

                switch (code) {
                case DSA_CODE_FRAME2REG:
                        /* Remote management is not implemented yet,
                         * drop.
                         */
                        return NULL;
                case DSA_CODE_ARP_MIRROR:
                case DSA_CODE_POLICY_MIRROR:
                        /* Mark mirrored packets to notify any upper
                         * device (like a bridge) that forwarding has
                         * already been done by hardware.
                         */
                        break;
                case DSA_CODE_MGMT_TRAP:
                case DSA_CODE_IGMP_MLD_TRAP:
                case DSA_CODE_POLICY_TRAP:
                        /* Traps have, by definition, not been
                         * forwarded by hardware, so don't mark them.
                         */
                        trap = true;
                        break;
                default:
                        /* Reserved code, this could be anything. Drop
                         * seems like the safest option.
                         */
                        return NULL;
                }

                break;

        default:
                return NULL;
        }

        source_device = dsa_header[0] & 0x1f;
        source_port = (dsa_header[1] >> 3) & 0x1f;

        if (trunk) {
                struct dsa_port *cpu_dp = dev->dsa_ptr;
                struct dsa_lag *lag;

                /* The exact source port is not available in the tag,
                 * so we inject the frame directly on the upper
                 * team/bond.
                 */
                lag = dsa_lag_by_id(cpu_dp->dst, source_port + 1);
                skb->dev = lag ? lag->dev : NULL;
        } else {
                skb->dev = dsa_master_find_slave(dev, source_device,
                                                 source_port);
        }

        if (!skb->dev)
                return NULL;

        /* When using LAG offload, skb->dev is not a DSA slave interface,
         * so we cannot call dsa_default_offload_fwd_mark and we need to
         * special-case it.
         */
        if (trunk)
                skb->offload_fwd_mark = true;
        else if (!trap)
                dsa_default_offload_fwd_mark(skb);

        /* If the 'tagged' bit is set, convert the DSA tag to an 802.1Q
         * tag, and delete the ethertype (extra) if applicable. If the
         * 'tagged' bit is cleared, delete the DSA tag, and the ethertype
         * if applicable.
         */
        if (dsa_header[0] & 0x20) {
                u8 new_header[4];

                /* Insert 802.1Q ethertype and copy the VLAN-related
                 * fields, but clear the bit that will hold CFI (since
                 * DSA uses that bit location for another purpose).
                 */
                new_header[0] = (ETH_P_8021Q >> 8) & 0xff;
                new_header[1] = ETH_P_8021Q & 0xff;
                new_header[2] = dsa_header[2] & ~0x10;
                new_header[3] = dsa_header[3];

                /* Move CFI bit from its place in the DSA header to
                 * its 802.1Q-designated place.
                 */
                if (dsa_header[1] & 0x01)
                        new_header[2] |= 0x10;

                /* Update packet checksum if skb is CHECKSUM_COMPLETE. */
                if (skb->ip_summed == CHECKSUM_COMPLETE) {
                        __wsum c = skb->csum;
                        c = csum_add(c, csum_partial(new_header + 2, 2, 0));
                        c = csum_sub(c, csum_partial(dsa_header + 2, 2, 0));
                        skb->csum = c;
                }

                memcpy(dsa_header, new_header, DSA_HLEN);

                if (extra)
                        dsa_strip_etype_header(skb, extra);
        } else {
                skb_pull_rcsum(skb, DSA_HLEN);
                dsa_strip_etype_header(skb, DSA_HLEN + extra);
        }

        return skb;
}
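
/*
 * Illustrative note on the TO_CPU code extraction above (an added
 * example; the bit positions are taken from the expression in
 * dsa_rcv_ll()): the 3-bit code is split across the tag, with bits 2:1
 * taken from dsa_header[1] (mask 0x6) and bit 0 from bit 4 of
 * dsa_header[2]. A tag with dsa_header[1] = 0x02 and
 * dsa_header[2] = 0x10 therefore decodes to code = 0x2 | 0x1 = 3, i.e.
 * DSA_CODE_POLICY_TRAP, so the frame is treated as trapped and is not
 * marked as already forwarded by hardware.
 */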

#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)

static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
        return dsa_xmit_ll(skb, dev, 0);
}

static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev)
{
        if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
                return NULL;

        return dsa_rcv_ll(skb, dev, 0);
}

static const struct dsa_device_ops dsa_netdev_ops = {
        .name = "dsa",
        .proto = DSA_TAG_PROTO_DSA,
        .xmit = dsa_xmit,
        .rcv = dsa_rcv,
        .needed_headroom = DSA_HLEN,
};

DSA_TAG_DRIVER(dsa_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_DSA);
#endif /* CONFIG_NET_DSA_TAG_DSA */

#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)

#define EDSA_HLEN 8

static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
        u8 *edsa_header;

        skb = dsa_xmit_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
        if (!skb)
                return NULL;

        edsa_header = dsa_etype_header_pos_tx(skb);
        edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
        edsa_header[1] = ETH_P_EDSA & 0xff;
        edsa_header[2] = 0x00;
        edsa_header[3] = 0x00;
        return skb;
}

static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev)
{
        if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
                return NULL;

        skb_pull_rcsum(skb, EDSA_HLEN - DSA_HLEN);

        return dsa_rcv_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
}

static const struct dsa_device_ops edsa_netdev_ops = {
        .name = "edsa",
        .proto = DSA_TAG_PROTO_EDSA,
        .xmit = edsa_xmit,
        .rcv = edsa_rcv,
        .needed_headroom = EDSA_HLEN,
};

DSA_TAG_DRIVER(edsa_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_EDSA);
#endif /* CONFIG_NET_DSA_TAG_EDSA */

static struct dsa_tag_driver *dsa_tag_drivers[] = {
#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
        &DSA_TAG_DRIVER_NAME(dsa_netdev_ops),
#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
        &DSA_TAG_DRIVER_NAME(edsa_netdev_ops),
#endif
};

module_dsa_tag_drivers(dsa_tag_drivers);

MODULE_LICENSE("GPL");